Merge pull request #39 from Frix-x/develop

v.2.5.0
Félix Boisselier
2024-01-09 15:37:23 +01:00
committed by GitHub
19 changed files with 652 additions and 572 deletions

View File

@@ -52,7 +52,7 @@ gcode:
ACCELEROMETER_MEASURE CHIP={accel_chip} NAME=axemap
RESPOND MSG="Analysis of the movements..."
-RUN_SHELL_COMMAND CMD=shaketune PARAMS="AXESMAP {accel}"
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type axesmap --accel {accel} --chip_name {accel_chip}"
# Restore the previous acceleration values
SET_VELOCITY_LIMIT ACCEL={old_accel} ACCEL_TO_DECEL={old_accel_to_decel} SQUARE_CORNER_VELOCITY={old_sqv}
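
The macro now calls the shaketune shell command with explicit flags (--type, --accel, --chip_name) instead of the old positional keyword. As an illustration only, a flag parser of this shape could look like the sketch below; the actual shaketune entry point is not included in this diff, so every name in it is an assumption.

import argparse

def parse_shaketune_args(argv=None):
    # Hypothetical sketch of a flag-based CLI dispatch matching the macros above
    parser = argparse.ArgumentParser(description="Shake&Tune graph generation")
    parser.add_argument('--type', required=True,
                        choices=['axesmap', 'belts', 'shaper', 'vibrations', 'clean'],
                        help='which graph or maintenance action to run')
    parser.add_argument('--accel', type=int, default=None, help='accel value used during the measurement')
    parser.add_argument('--chip_name', default='adxl345', help='accelerometer chip name from the Klipper config')
    parser.add_argument('--axis_name', default=None, help='axis or direction name (vibrations graph)')
    parser.add_argument('--keep_csv', action='store_true', help='keep the raw CSV files next to the PNG results')
    parser.add_argument('--keep_results', type=int, default=3, help='number of result sets to keep when cleaning')
    return parser.parse_args(argv)

# Example: the macro above would effectively be parsed as
# parse_shaketune_args(['--type', 'axesmap', '--accel', '1500', '--chip_name', 'adxl345'])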

View File

@@ -10,6 +10,8 @@ gcode:
{% set max_freq = params.FREQ_END|default(133.3)|float %}
{% set hz_per_sec = params.HZ_PER_SEC|default(1)|float %}
{% set axis = params.AXIS|default("all")|string|lower %}
+{% set keep_results = params.KEEP_N_RESULTS|default(3)|int %}
+{% set keep_csv = params.KEEP_CSV|default(True) %}
{% set X, Y = False, False %}
@@ -29,7 +31,7 @@ gcode:
RESPOND MSG="X axis frequency profile generation..."
RESPOND MSG="This may take some time (1-3min)"
-RUN_SHELL_COMMAND CMD=shaketune PARAMS=SHAPER
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type shaper {% if keep_csv %}--keep_csv{% endif %}"
{% endif %}
{% if Y %}
@@ -38,5 +40,8 @@ gcode:
RESPOND MSG="Y axis frequency profile generation..."
RESPOND MSG="This may take some time (1-3min)"
-RUN_SHELL_COMMAND CMD=shaketune PARAMS=SHAPER
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type shaper {% if keep_csv %}--keep_csv{% endif %}"
{% endif %}
+M400
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type clean --keep_results {keep_results}"

View File

@@ -9,6 +9,8 @@ gcode:
{% set min_freq = params.FREQ_START|default(5)|float %}
{% set max_freq = params.FREQ_END|default(133.33)|float %}
{% set hz_per_sec = params.HZ_PER_SEC|default(1)|float %}
+{% set keep_results = params.KEEP_N_RESULTS|default(3)|int %}
+{% set keep_csv = params.KEEP_CSV|default(True) %}
TEST_RESONANCES AXIS=1,1 OUTPUT=raw_data NAME=b FREQ_START={min_freq} FREQ_END={max_freq} HZ_PER_SEC={hz_per_sec}
M400
@@ -18,4 +20,6 @@ gcode:
RESPOND MSG="Belts comparative frequency profile generation..."
RESPOND MSG="This may take some time (3-5min)"
-RUN_SHELL_COMMAND CMD=shaketune PARAMS=BELTS
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type belts {% if keep_csv %}--keep_csv{% endif %}"
+M400
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type clean --keep_results {keep_results}"

View File

@@ -16,6 +16,9 @@ gcode:
{% set accel = params.ACCEL|default(3000)|int %} # accel value used to move on the pattern
{% set accel_chip = params.ACCEL_CHIP|default("adxl345") %} # ADXL chip name in the config
+{% set keep_results = params.KEEP_N_RESULTS|default(3)|int %}
+{% set keep_csv = params.KEEP_CSV|default(True) %}
{% set mid_x = printer.toolhead.axis_maximum.x|float / 2 %}
{% set mid_y = printer.toolhead.axis_maximum.y|float / 2 %}
{% set nb_samples = ((max_speed - min_speed) / speed_increment + 1) | int %}
@@ -153,7 +156,9 @@ gcode:
RESPOND MSG="Machine and motors vibration graph generation..."
RESPOND MSG="This may take some time (3-5min)"
-RUN_SHELL_COMMAND CMD=shaketune PARAMS="VIBRATIONS {direction}"
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type vibrations --axis_name {direction} --accel {accel} --chip_name {accel_chip} {% if keep_csv %}--keep_csv{% endif %}"
+M400
+RUN_SHELL_COMMAND CMD=shaketune PARAMS="--type clean --keep_results {keep_results}"
# Restore the previous acceleration values
SET_VELOCITY_LIMIT ACCEL={old_accel} ACCEL_TO_DECEL={old_accel_to_decel} SQUARE_CORNER_VELOCITY={old_sqv}

View File

@@ -13,29 +13,13 @@
import optparse
import numpy as np
-import locale
+from locale_utils import print_with_c_locale
from scipy.signal import butter, filtfilt
NUM_POINTS = 500
-# Set the best locale for time and date formating (generation of the titles)
-try:
-    locale.setlocale(locale.LC_TIME, locale.getdefaultlocale())
-except locale.Error:
-    locale.setlocale(locale.LC_TIME, 'C')
-# Override the built-in print function to avoid problem in Klipper due to locale settings
-original_print = print
-def print_with_c_locale(*args, **kwargs):
-    original_locale = locale.setlocale(locale.LC_ALL, None)
-    locale.setlocale(locale.LC_ALL, 'C')
-    original_print(*args, **kwargs)
-    locale.setlocale(locale.LC_ALL, original_locale)
-print = print_with_c_locale
######################################################################
# Computation
######################################################################
@@ -160,7 +144,7 @@ def main():
opts.error("Invalid acceleration value. It should be a numeric value.")
results = axesmap_calibration(args, accel_value)
-print(results)
+print_with_c_locale(results)
if options.output is not None:
with open(options.output, 'w') as f:
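
The locale boilerplate removed above is being centralized into a shared locale_utils module, imported here and in the other graph scripts. Based on the removed code, that module plausibly looks like the following sketch; the actual locale_utils.py is not shown in this excerpt.

import locale

def set_locale():
    # Use the system locale for date/time formatting in the graph titles,
    # falling back to the portable 'C' locale if it is not available
    try:
        locale.setlocale(locale.LC_TIME, locale.getdefaultlocale())
    except locale.Error:
        locale.setlocale(locale.LC_TIME, 'C')

def print_with_c_locale(*args, **kwargs):
    # Temporarily switch to the 'C' locale while printing so that Klipper's
    # console parsing is not confused by localized number or date formatting
    original_locale = locale.setlocale(locale.LC_ALL, None)
    locale.setlocale(locale.LC_ALL, 'C')
    print(*args, **kwargs)
    locale.setlocale(locale.LC_ALL, original_locale)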

View File

@@ -0,0 +1,121 @@
#!/usr/bin/env python3
# Common functions for the Shake&Tune package
# Written by Frix_x#0161 #
import math
import os, sys
from importlib import import_module
from pathlib import Path
import numpy as np
from scipy.signal import spectrogram
from git import GitCommandError, Repo
def parse_log(logname):
    with open(logname) as f:
        for header in f:
            if not header.startswith('#'):
                break
        if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
            # Raw accelerometer data
            return np.loadtxt(logname, comments='#', delimiter=',')
    # Power spectral density data or shaper calibration data
    raise ValueError("File %s does not contain raw accelerometer data and therefore "
                     "is not supported by Shake&Tune. Please use the official Klipper "
                     "script to process it instead." % (logname,))

def setup_klipper_import(kdir):
    kdir = os.path.expanduser(kdir)
    sys.path.append(os.path.join(kdir, 'klippy'))
    return import_module('.shaper_calibrate', 'extras')

# This is used to print the current S&T version on top of the png graph file
def get_git_version():
    try:
        # Get the absolute path of the script, resolving any symlinks
        # Then get 2 times to parent dir to be at the git root folder
        script_path = Path(__file__).resolve()
        repo_path = script_path.parents[2]
        repo = Repo(repo_path)
        try:
            version = repo.git.describe('--tags')
        except GitCommandError:
            # If no tag is found, use the simplified commit SHA instead
            version = repo.head.commit.hexsha[:7]
        return version
    except Exception as e:
        return None

# This is Klipper's spectrogram generation function adapted to use Scipy
def compute_spectrogram(data):
    N = data.shape[0]
    Fs = N / (data[-1, 0] - data[0, 0])
    # Round up to a power of 2 for faster FFT
    M = 1 << int(.5 * Fs - 1).bit_length()
    window = np.kaiser(M, 6.)
    def _specgram(x):
        return spectrogram(x, fs=Fs, window=window, nperseg=M, noverlap=M//2,
                           detrend='constant', scaling='density', mode='psd')
    d = {'x': data[:, 1], 'y': data[:, 2], 'z': data[:, 3]}
    f, t, pdata = _specgram(d['x'])
    for axis in 'yz':
        pdata += _specgram(d[axis])[2]
    return pdata, t, f

# Compute natural resonant frequency and damping ratio by using the half power bandwidth method with interpolated frequencies
def compute_mechanical_parameters(psd, freqs):
    max_power_index = np.argmax(psd)
    fr = freqs[max_power_index]
    max_power = psd[max_power_index]
    half_power = max_power / math.sqrt(2)
    idx_below = np.where(psd[:max_power_index] <= half_power)[0][-1]
    idx_above = np.where(psd[max_power_index:] <= half_power)[0][0] + max_power_index
    freq_below_half_power = freqs[idx_below] + (half_power - psd[idx_below]) * (freqs[idx_below + 1] - freqs[idx_below]) / (psd[idx_below + 1] - psd[idx_below])
    freq_above_half_power = freqs[idx_above - 1] + (half_power - psd[idx_above - 1]) * (freqs[idx_above] - freqs[idx_above - 1]) / (psd[idx_above] - psd[idx_above - 1])
    bandwidth = freq_above_half_power - freq_below_half_power
    zeta = bandwidth / (2 * fr)
    return fr, zeta, max_power_index

# This find all the peaks in a curve by looking at when the derivative term goes from positive to negative
# Then only the peaks found above a threshold are kept to avoid capturing peaks in the low amplitude noise of a signal
def detect_peaks(data, indices, detection_threshold, relative_height_threshold=None, window_size=5, vicinity=3):
    # Smooth the curve using a moving average to avoid catching peaks everywhere in noisy signals
    kernel = np.ones(window_size) / window_size
    smoothed_data = np.convolve(data, kernel, mode='valid')
    mean_pad = [np.mean(data[:window_size])] * (window_size // 2)
    smoothed_data = np.concatenate((mean_pad, smoothed_data))
    # Find peaks on the smoothed curve
    smoothed_peaks = np.where((smoothed_data[:-2] < smoothed_data[1:-1]) & (smoothed_data[1:-1] > smoothed_data[2:]))[0] + 1
    smoothed_peaks = smoothed_peaks[smoothed_data[smoothed_peaks] > detection_threshold]
    # Additional validation for peaks based on relative height
    valid_peaks = smoothed_peaks
    if relative_height_threshold is not None:
        valid_peaks = []
        for peak in smoothed_peaks:
            peak_height = smoothed_data[peak] - np.min(smoothed_data[max(0, peak-vicinity):min(len(smoothed_data), peak+vicinity+1)])
            if peak_height > relative_height_threshold * smoothed_data[peak]:
                valid_peaks.append(peak)
    # Refine peak positions on the original curve
    refined_peaks = []
    for peak in valid_peaks:
        local_max = peak + np.argmax(data[max(0, peak-vicinity):min(len(data), peak+vicinity+1)]) - vicinity
        refined_peaks.append(local_max)
    num_peaks = len(refined_peaks)
    return num_peaks, np.array(refined_peaks), indices[refined_peaks]
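
A short usage sketch of the new shared helpers, using a synthetic accelerometer trace in place of a real Klipper raw_data CSV (in real use, data would come from parse_log()):

import numpy as np

# Synthetic trace: 10 s sampled at 5 kHz with a fake 60 Hz resonance
t = np.linspace(0, 10, 50000)
accel = np.sin(2 * np.pi * 60 * t)
data = np.column_stack([t, accel, accel * 0.5, accel * 0.1])

pdata, bins, f = compute_spectrogram(data)     # pdata shape: (frequency bins, time bins)
psd = pdata.mean(axis=1)                       # time-averaged, PSD-like curve over frequency

num_peaks, peaks, peaks_freqs = detect_peaks(psd, f, 0.05 * psd.max())
fr, zeta, _ = compute_mechanical_parameters(psd, f)
print("Main resonance %.1f Hz (zeta=%.3f), %d peak(s) at %s Hz" % (fr, zeta, num_peaks, peaks_freqs))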

View File

@@ -12,17 +12,18 @@
#####################################################################
import optparse, matplotlib, sys, importlib, os
+from datetime import datetime
from collections import namedtuple
import numpy as np
-import scipy
-import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
-import matplotlib.ticker, matplotlib.gridspec, matplotlib.colors
-import matplotlib.patches
-import locale
-from datetime import datetime
+import matplotlib.pyplot as plt
+import matplotlib.font_manager, matplotlib.ticker, matplotlib.colors
+from scipy.interpolate import griddata
matplotlib.use('Agg')
+from locale_utils import set_locale, print_with_c_locale
+from common_func import compute_spectrogram, detect_peaks, get_git_version, parse_log, setup_klipper_import
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # For paired peaks names
@@ -44,32 +45,10 @@ KLIPPAIN_COLORS = {
}
# Set the best locale for time and date formating (generation of the titles)
try:
locale.setlocale(locale.LC_TIME, locale.getdefaultlocale())
except locale.Error:
locale.setlocale(locale.LC_TIME, 'C')
# Override the built-in print function to avoid problem in Klipper due to locale settings
original_print = print
def print_with_c_locale(*args, **kwargs):
original_locale = locale.setlocale(locale.LC_ALL, None)
locale.setlocale(locale.LC_ALL, 'C')
original_print(*args, **kwargs)
locale.setlocale(locale.LC_ALL, original_locale)
print = print_with_c_locale
######################################################################
# Computation of the PSD graph
######################################################################
# Calculate estimated "power spectral density" using existing Klipper tools
def calc_freq_response(data):
helper = shaper_calibrate.ShaperCalibrate(printer=None)
return helper.process_accelerometer_data(data)
# Calculate or estimate a "similarity" factor between two PSD curves and scale it to a percentage. This is
# used here to quantify how close the two belts path behavior and responses are close together.
def compute_curve_similarity_factor(signal1, signal2):
@@ -92,29 +71,6 @@ def compute_curve_similarity_factor(signal1, signal2):
return scaled_similarity
# This find all the peaks in a curve by looking at when the derivative term goes from positive to negative
# Then only the peaks found above a threshold are kept to avoid capturing peaks in the low amplitude noise of a signal
def detect_peaks(psd, freqs, window_size=5, vicinity=3):
# Smooth the curve using a moving average to avoid catching peaks everywhere in noisy signals
kernel = np.ones(window_size) / window_size
smoothed_psd = np.convolve(psd, kernel, mode='valid')
mean_pad = [np.mean(psd[:window_size])] * (window_size // 2)
smoothed_psd = np.concatenate((mean_pad, smoothed_psd))
# Find peaks on the smoothed curve
smoothed_peaks = np.where((smoothed_psd[:-2] < smoothed_psd[1:-1]) & (smoothed_psd[1:-1] > smoothed_psd[2:]))[0] + 1
detection_threshold = PEAKS_DETECTION_THRESHOLD * psd.max()
smoothed_peaks = smoothed_peaks[smoothed_psd[smoothed_peaks] > detection_threshold]
# Refine peak positions on the original curve
refined_peaks = []
for peak in smoothed_peaks:
local_max = peak + np.argmax(psd[max(0, peak-vicinity):min(len(psd), peak+vicinity+1)]) - vicinity
refined_peaks.append(local_max)
return np.array(refined_peaks), freqs[refined_peaks]
# This function create pairs of peaks that are close in frequency on two curves (that are known
# to be resonances points and must be similar on both belts on a CoreXY kinematic)
def pair_peaks(peaks1, freqs1, psd1, peaks2, freqs2, psd2):
@@ -159,30 +115,6 @@ def pair_peaks(peaks1, freqs1, psd1, peaks2, freqs2, psd2):
return paired_peaks, unpaired_peaks1, unpaired_peaks2
######################################################################
# Computation of a basic signal spectrogram
######################################################################
def compute_spectrogram(data):
N = data.shape[0]
Fs = N / (data[-1, 0] - data[0, 0])
# Round up to a power of 2 for faster FFT
M = 1 << int(.5 * Fs - 1).bit_length()
window = np.kaiser(M, 6.)
def _specgram(x):
x_detrended = x - np.mean(x) # Detrending by subtracting the mean value
return scipy.signal.spectrogram(
x_detrended, fs=Fs, window=window, nperseg=M, noverlap=M//2,
detrend='constant', scaling='density', mode='psd')
d = {'x': data[:, 1], 'y': data[:, 2], 'z': data[:, 3]}
f, t, pdata = _specgram(d['x'])
for axis in 'yz':
pdata += _specgram(d[axis])[2]
return pdata, t, f
######################################################################
# Computation of the differential spectrogram
######################################################################
@@ -198,7 +130,7 @@ def interpolate_2d(target_x, target_y, source_x, source_y, source_data):
source_values = source_data.flatten()
# Interpolate and reshape the interpolated data to match the target grid shape and replace NaN with zeros
-interpolated_data = scipy.interpolate.griddata(source_points, source_values, target_points, method='nearest')
+interpolated_data = griddata(source_points, source_values, target_points, method='nearest')
interpolated_data = interpolated_data.reshape((len(target_y), len(target_x)))
interpolated_data = np.nan_to_num(interpolated_data)
@@ -208,14 +140,14 @@ def interpolate_2d(target_x, target_y, source_x, source_y, source_data):
# Main logic function to combine two similar spectrogram - ie. from both belts paths - by substracting signals in order to create
# a new composite spectrogram. This result of a divergent but mostly centered new spectrogram (center will be white) with some colored zones
# highlighting differences in the belts paths. The summative spectrogram is used for the MHI calculation.
-def combined_spectrogram(data1, data2):
+def compute_combined_spectrogram(data1, data2):
pdata1, bins1, t1 = compute_spectrogram(data1)
pdata2, bins2, t2 = compute_spectrogram(data2)
# Interpolate the spectrograms
pdata2_interpolated = interpolate_2d(bins1, t1, bins2, t2, pdata2)
-# Cobine them in two form: a summed diff for the MHI computation and a diverging diff for the spectrogram colors
+# Combine them in two form: a summed diff for the MHI computation and a diverging diff for the spectrogram colors
combined_sum = np.abs(pdata1 - pdata2_interpolated)
combined_divergent = pdata1 - pdata2_interpolated
@@ -251,26 +183,27 @@ def compute_mhi(combined_data, similarity_coefficient, num_unpaired_peaks):
# LUT to transform the MHI into a textual value easy to understand for the users of the script
def mhi_lut(mhi):
-    if 0 <= mhi <= 30:
-        return "Excellent mechanical health"
-    elif 30 < mhi <= 45:
-        return "Good mechanical health"
-    elif 45 < mhi <= 55:
-        return "Acceptable mechanical health"
-    elif 55 < mhi <= 70:
-        return "Potential signs of a mechanical issue"
-    elif 70 < mhi <= 85:
-        return "Likely a mechanical issue"
-    elif 85 < mhi <= 100:
-        return "Mechanical issue detected"
-    return "Error computing MHI value"
+    ranges = [
+        (0, 30, "Excellent mechanical health"),
+        (30, 45, "Good mechanical health"),
+        (45, 55, "Acceptable mechanical health"),
+        (55, 70, "Potential signs of a mechanical issue"),
+        (70, 85, "Likely a mechanical issue"),
+        (85, 100, "Mechanical issue detected")
+    ]
+    for lower, upper, message in ranges:
+        if lower < mhi <= upper:
+            return message
######################################################################
# Graphing
######################################################################
-def plot_compare_frequency(ax, lognames, signal1, signal2, max_freq):
+def plot_compare_frequency(ax, lognames, signal1, signal2, similarity_factor, max_freq):
# Get the belt name for the legend to avoid putting the full file name
signal1_belt = (lognames[0].split('/')[-1]).split('_')[-1][0]
signal2_belt = (lognames[1].split('/')[-1]).split('_')[-1][0]
@@ -282,7 +215,7 @@ def plot_compare_frequency(ax, lognames, signal1, signal2, max_freq):
signal1_belt += " (axis 1, 1)"
signal2_belt += " (axis 1,-1)"
else:
-print("Warning: belts doesn't seem to have the correct name A and B (extracted from the filename.csv)")
+print_with_c_locale("Warning: belts doesn't seem to have the correct name A and B (extracted from the filename.csv)")
# Plot the two belts PSD signals
ax.plot(signal1.freqs, signal1.psd, label="Belt " + signal1_belt, color=KLIPPAIN_COLORS['purple'])
@@ -331,13 +264,11 @@ def plot_compare_frequency(ax, lognames, signal1, signal2, max_freq):
ha='left', fontsize=13, color='red', weight='bold')
unpaired_peak_count += 1
-# Compute the similarity (using cross-correlation of the PSD signals)
+# Add estimated similarity to the graph
ax2 = ax.twinx() # To split the legends in two box
ax2.yaxis.set_visible(False)
-similarity_factor = compute_curve_similarity_factor(signal1, signal2)
ax2.plot([], [], ' ', label=f'Estimated similarity: {similarity_factor:.1f}%')
ax2.plot([], [], ' ', label=f'Number of unpaired peaks: {unpaired_peak_count}')
-print(f"Belts estimated similarity: {similarity_factor:.1f}%")
# Setting axis parameters, grid and graph title
ax.set_xlabel('Frequency (Hz)')
@@ -371,25 +302,20 @@ def plot_compare_frequency(ax, lognames, signal1, signal2, max_freq):
ax.legend(loc='upper left', prop=fontP)
ax2.legend(loc='upper right', prop=fontP)
-return similarity_factor, unpaired_peak_count
+return
-def plot_difference_spectrogram(ax, data1, data2, signal1, signal2, similarity_factor, max_freq):
-    combined_sum, combined_divergent, bins, t = combined_spectrogram(data1, data2)
-    # Compute the MHI value from the differential spectrogram sum of gradient, salted with
-    # the similarity factor and the number or unpaired peaks from the belts frequency profile
-    # Be careful, this value is highly opinionated and is pretty experimental!
-    mhi, textual_mhi = compute_mhi(combined_sum, similarity_factor, len(signal1.unpaired_peaks) + len(signal2.unpaired_peaks))
-    print(f"[experimental] Mechanical Health Indicator: {textual_mhi.lower()} ({mhi:.1f}%)")
+def plot_difference_spectrogram(ax, signal1, signal2, t, bins, combined_divergent, textual_mhi, max_freq):
ax.set_title(f"Differential Spectrogram", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold')
ax.plot([], [], ' ', label=f'{textual_mhi} (experimental)')
# Draw the differential spectrogram with a specific custom norm to get orange or purple values where there is signal or white near zeros
+# imgshow is better suited here than pcolormesh since its result is already rasterized and we doesn't need to keep vector graphics
+# when saving to a final .png file. Using it also allow to save ~150-200MB of RAM during the "fig.savefig" operation.
colors = [KLIPPAIN_COLORS['dark_orange'], KLIPPAIN_COLORS['orange'], 'white', KLIPPAIN_COLORS['purple'], KLIPPAIN_COLORS['dark_purple']]
cm = matplotlib.colors.LinearSegmentedColormap.from_list('klippain_divergent', list(zip([0, 0.25, 0.5, 0.75, 1], colors)))
norm = matplotlib.colors.TwoSlopeNorm(vmin=np.min(combined_divergent), vcenter=0, vmax=np.max(combined_divergent))
-ax.pcolormesh(t, bins, combined_divergent.T, cmap=cm, norm=norm, shading='gouraud')
+ax.imshow(combined_divergent.T, cmap=cm, norm=norm, aspect='auto', extent=[t[0], t[-1], bins[0], bins[-1]], interpolation='bilinear', origin='lower')
ax.set_xlabel('Frequency (hz)')
ax.set_xlim([0., max_freq])
@@ -441,60 +367,62 @@ def sigmoid_scale(x, k=1):
# Original Klipper function to get the PSD data of a raw accelerometer signal
def compute_signal_data(data, max_freq):
-calibration_data = calc_freq_response(data)
+helper = shaper_calibrate.ShaperCalibrate(printer=None)
+calibration_data = helper.process_accelerometer_data(data)
freqs = calibration_data.freq_bins[calibration_data.freq_bins <= max_freq]
psd = calibration_data.get_psd('all')[calibration_data.freq_bins <= max_freq]
-peaks, _ = detect_peaks(psd, freqs)
+_, peaks, _ = detect_peaks(psd, freqs, PEAKS_DETECTION_THRESHOLD * psd.max())
return SignalData(freqs=freqs, psd=psd, peaks=peaks, paired_peaks=None, unpaired_peaks=None)
######################################################################
# Startup and main routines
######################################################################
def parse_log(logname):
with open(logname) as f:
for header in f:
if not header.startswith('#'):
break
if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
# Raw accelerometer data
return np.loadtxt(logname, comments='#', delimiter=',')
# Power spectral density data or shaper calibration data
raise ValueError("File %s does not contain raw accelerometer data and therefore "
"is not supported by this script. Please use the official Klipper "
"graph_accelerometer.py script to process it instead." % (logname,))
def setup_klipper_import(kdir):
global shaper_calibrate
kdir = os.path.expanduser(kdir)
sys.path.append(os.path.join(kdir, 'klippy'))
shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras')
def belts_calibration(lognames, klipperdir="~/klipper", max_freq=200.):
-setup_klipper_import(klipperdir)
+set_locale()
+global shaper_calibrate
+shaper_calibrate = setup_klipper_import(klipperdir)
# Parse data
datas = [parse_log(fn) for fn in lognames]
if len(datas) > 2:
-raise ValueError("Incorrect number of .csv files used (this function needs two files to compare them)")
+raise ValueError("Incorrect number of .csv files used (this function needs exactly two files to compare them)!")
# Compute calibration data for the two datasets with automatic peaks detection
signal1 = compute_signal_data(datas[0], max_freq)
signal2 = compute_signal_data(datas[1], max_freq)
+combined_sum, combined_divergent, bins, t = compute_combined_spectrogram(datas[0], datas[1])
+del datas
# Pair the peaks across the two datasets
paired_peaks, unpaired_peaks1, unpaired_peaks2 = pair_peaks(signal1.peaks, signal1.freqs, signal1.psd,
signal2.peaks, signal2.freqs, signal2.psd)
-signal1 = signal1._replace(paired_peaks=paired_peaks, unpaired_peaks=unpaired_peaks1)
-signal2 = signal2._replace(paired_peaks=paired_peaks, unpaired_peaks=unpaired_peaks2)
+signal1 = signal1._replace(paired_peaks = paired_peaks, unpaired_peaks = unpaired_peaks1)
+signal2 = signal2._replace(paired_peaks = paired_peaks, unpaired_peaks = unpaired_peaks2)
-fig = matplotlib.pyplot.figure()
-gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[4, 3])
-ax1 = fig.add_subplot(gs[0])
-ax2 = fig.add_subplot(gs[1])
+# Compute the similarity (using cross-correlation of the PSD signals)
+similarity_factor = compute_curve_similarity_factor(signal1, signal2)
+print_with_c_locale(f"Belts estimated similarity: {similarity_factor:.1f}%")
+# Compute the MHI value from the differential spectrogram sum of gradient, salted with the similarity factor and the number of
+# unpaired peaks from the belts frequency profile. Be careful, this value is highly opinionated and is pretty experimental!
+mhi, textual_mhi = compute_mhi(combined_sum, similarity_factor, len(signal1.unpaired_peaks) + len(signal2.unpaired_peaks))
+print_with_c_locale(f"[experimental] Mechanical Health Indicator: {textual_mhi.lower()} ({mhi:.1f}%)")
+# Create graph layout
+fig, (ax1, ax2) = plt.subplots(2, 1, gridspec_kw={
+    'height_ratios':[4, 3],
+    'bottom':0.050,
+    'top':0.890,
+    'left':0.085,
+    'right':0.966,
+    'hspace':0.169,
+    'wspace':0.200
+})
+fig.set_size_inches(8.3, 11.6)
# Add title
title_line1 = "RELATIVE BELT CALIBRATION TOOL"
@@ -504,23 +432,24 @@ def belts_calibration(lognames, klipperdir="~/klipper", max_freq=200.):
dt = datetime.strptime(f"{filename.split('_')[1]} {filename.split('_')[2]}", "%Y%m%d %H%M%S")
title_line2 = dt.strftime('%x %X')
except:
-print("Warning: CSV filenames look to be different than expected (%s , %s)" % (lognames[0], lognames[1]))
+print_with_c_locale("Warning: CSV filenames look to be different than expected (%s , %s)" % (lognames[0], lognames[1]))
title_line2 = lognames[0].split('/')[-1] + " / " + lognames[1].split('/')[-1]
fig.text(0.12, 0.957, title_line2, ha='left', va='top', fontsize=16, color=KLIPPAIN_COLORS['dark_purple'])
# Plot the graphs
-similarity_factor, _ = plot_compare_frequency(ax1, lognames, signal1, signal2, max_freq)
-plot_difference_spectrogram(ax2, datas[0], datas[1], signal1, signal2, similarity_factor, max_freq)
+plot_compare_frequency(ax1, lognames, signal1, signal2, similarity_factor, max_freq)
+plot_difference_spectrogram(ax2, signal1, signal2, t, bins, combined_divergent, textual_mhi, max_freq)
-fig.set_size_inches(8.3, 11.6)
-fig.tight_layout()
-fig.subplots_adjust(top=0.89)
# Adding a small Klippain logo to the top left corner of the figure
-ax_logo = fig.add_axes([0.001, 0.899, 0.1, 0.1], anchor='NW', zorder=-1)
-ax_logo.imshow(matplotlib.pyplot.imread(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'klippain.png')))
+ax_logo = fig.add_axes([0.001, 0.8995, 0.1, 0.1], anchor='NW')
+ax_logo.imshow(plt.imread(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'klippain.png')))
ax_logo.axis('off')
+# Adding Shake&Tune version in the top right corner
+st_version = get_git_version()
+if st_version is not None:
+    fig.text(0.995, 0.985, st_version, ha='right', va='bottom', fontsize=8, color=KLIPPAIN_COLORS['purple'])
return fig
@@ -541,7 +470,7 @@ def main():
opts.error("You must specify an output file.png to use the script (option -o)") opts.error("You must specify an output file.png to use the script (option -o)")
fig = belts_calibration(args, options.klipperdir, options.max_freq) fig = belts_calibration(args, options.klipperdir, options.max_freq)
fig.savefig(options.output) fig.savefig(options.output, dpi=150)
if __name__ == '__main__': if __name__ == '__main__':
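
For reference, the refactored mhi_lut() above can be exercised on its own: the restatement below copies the new range table from the diff and shows what it returns for a couple of MHI values (illustrative only).

# Standalone restatement of the new mhi_lut() range-table lookup
def mhi_lut(mhi):
    ranges = [
        (0, 30, "Excellent mechanical health"),
        (30, 45, "Good mechanical health"),
        (45, 55, "Acceptable mechanical health"),
        (55, 70, "Potential signs of a mechanical issue"),
        (70, 85, "Likely a mechanical issue"),
        (85, 100, "Mechanical issue detected"),
    ]
    for lower, upper, message in ranges:
        if lower < mhi <= upper:
            return message

print(mhi_lut(42.3))   # -> "Good mechanical health"
print(mhi_lut(88.0))   # -> "Mechanical issue detected"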

View File

@@ -14,17 +14,17 @@
################ !!! DO NOT EDIT BELOW THIS LINE !!! ################
#####################################################################
-import optparse, matplotlib, sys, importlib, os, math
-from textwrap import wrap
-import numpy as np
-import scipy
-import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
-import matplotlib.ticker, matplotlib.gridspec
-import locale
+import optparse, matplotlib, os
from datetime import datetime
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.font_manager, matplotlib.ticker
matplotlib.use('Agg')
+from locale_utils import set_locale, print_with_c_locale
+from common_func import compute_mechanical_parameters, compute_spectrogram, detect_peaks, get_git_version, parse_log, setup_klipper_import
PEAKS_DETECTION_THRESHOLD = 0.05
PEAKS_EFFECT_THRESHOLD = 0.12
@@ -38,131 +38,36 @@ KLIPPAIN_COLORS = {
}
# Set the best locale for time and date formating (generation of the titles)
try:
locale.setlocale(locale.LC_TIME, locale.getdefaultlocale())
except locale.Error:
locale.setlocale(locale.LC_TIME, 'C')
# Override the built-in print function to avoid problem in Klipper due to locale settings
original_print = print
def print_with_c_locale(*args, **kwargs):
original_locale = locale.setlocale(locale.LC_ALL, None)
locale.setlocale(locale.LC_ALL, 'C')
original_print(*args, **kwargs)
locale.setlocale(locale.LC_ALL, original_locale)
print = print_with_c_locale
######################################################################
# Computation
######################################################################
# Find the best shaper parameters using Klipper's official algorithm selection
-def calibrate_shaper_with_damping(datas, max_smoothing):
+def calibrate_shaper(datas, max_smoothing):
helper = shaper_calibrate.ShaperCalibrate(printer=None)
-calibration_data = helper.process_accelerometer_data(datas[0])
-for data in datas[1:]:
-    calibration_data.add_data(helper.process_accelerometer_data(data))
+calibration_data = helper.process_accelerometer_data(datas)
calibration_data.normalize_to_frequencies()
-shaper, all_shapers = helper.find_best_shaper(calibration_data, max_smoothing, print)
+shaper, all_shapers = helper.find_best_shaper(calibration_data, max_smoothing, print_with_c_locale)
-freqs = calibration_data.freq_bins
-psd = calibration_data.psd_sum
-fr, zeta = compute_damping_ratio(psd, freqs)
-print("Recommended shaper is %s @ %.1f Hz" % (shaper.name, shaper.freq))
-print("Axis has a main resonant frequency at %.1fHz with an estimated damping ratio of %.3f" % (fr, zeta))
+fr, zeta, _ = compute_mechanical_parameters(calibration_data.psd_sum, calibration_data.freq_bins)
+print_with_c_locale("Recommended shaper is %s @ %.1f Hz" % (shaper.name, shaper.freq))
+print_with_c_locale("Axis has a main resonant frequency at %.1fHz with an estimated damping ratio of %.3f" % (fr, zeta))
return shaper.name, all_shapers, calibration_data, fr, zeta
# Compute damping ratio by using the half power bandwidth method with interpolated frequencies
def compute_damping_ratio(psd, freqs):
max_power_index = np.argmax(psd)
fr = freqs[max_power_index]
max_power = psd[max_power_index]
half_power = max_power / math.sqrt(2)
idx_below = np.where(psd[:max_power_index] <= half_power)[0][-1]
idx_above = np.where(psd[max_power_index:] <= half_power)[0][0] + max_power_index
freq_below_half_power = freqs[idx_below] + (half_power - psd[idx_below]) * (freqs[idx_below + 1] - freqs[idx_below]) / (psd[idx_below + 1] - psd[idx_below])
freq_above_half_power = freqs[idx_above - 1] + (half_power - psd[idx_above - 1]) * (freqs[idx_above] - freqs[idx_above - 1]) / (psd[idx_above] - psd[idx_above - 1])
bandwidth = freq_above_half_power - freq_below_half_power
zeta = bandwidth / (2 * fr)
return fr, zeta
def compute_spectrogram(data):
N = data.shape[0]
Fs = N / (data[-1, 0] - data[0, 0])
# Round up to a power of 2 for faster FFT
M = 1 << int(.5 * Fs - 1).bit_length()
window = np.kaiser(M, 6.)
def _specgram(x):
x_detrended = x - np.mean(x) # Detrending by subtracting the mean value
return scipy.signal.spectrogram(
x_detrended, fs=Fs, window=window, nperseg=M, noverlap=M//2,
detrend='constant', scaling='density', mode='psd')
d = {'x': data[:, 1], 'y': data[:, 2], 'z': data[:, 3]}
f, t, pdata = _specgram(d['x'])
for axis in 'yz':
pdata += _specgram(d[axis])[2]
return pdata, t, f
# This find all the peaks in a curve by looking at when the derivative term goes from positive to negative
# Then only the peaks found above a threshold are kept to avoid capturing peaks in the low amplitude noise of a signal
# An added "virtual" threshold allow me to quantify in an opiniated way the peaks that "could have" effect on the printer
# behavior and are likely known to produce or contribute to the ringing/ghosting in printed parts
def detect_peaks(psd, freqs, window_size=5, vicinity=3):
# Smooth the curve using a moving average to avoid catching peaks everywhere in noisy signals
kernel = np.ones(window_size) / window_size
smoothed_psd = np.convolve(psd, kernel, mode='valid')
mean_pad = [np.mean(psd[:window_size])] * (window_size // 2)
smoothed_psd = np.concatenate((mean_pad, smoothed_psd))
# Find peaks on the smoothed curve
smoothed_peaks = np.where((smoothed_psd[:-2] < smoothed_psd[1:-1]) & (smoothed_psd[1:-1] > smoothed_psd[2:]))[0] + 1
detection_threshold = PEAKS_DETECTION_THRESHOLD * psd.max()
effect_threshold = PEAKS_EFFECT_THRESHOLD * psd.max()
smoothed_peaks = smoothed_peaks[smoothed_psd[smoothed_peaks] > detection_threshold]
# Refine peak positions on the original curve
refined_peaks = []
for peak in smoothed_peaks:
local_max = peak + np.argmax(psd[max(0, peak-vicinity):min(len(psd), peak+vicinity+1)]) - vicinity
refined_peaks.append(local_max)
peak_freqs = ["{:.1f}".format(f) for f in freqs[refined_peaks]]
num_peaks = len(refined_peaks)
num_peaks_above_effect_threshold = np.sum(psd[refined_peaks] > effect_threshold)
print("Peaks detected on the graph: %d @ %s Hz (%d above effect threshold)" % (num_peaks, ", ".join(map(str, peak_freqs)), num_peaks_above_effect_threshold))
return np.array(refined_peaks), num_peaks, num_peaks_above_effect_threshold
######################################################################
# Graphing
######################################################################
-def plot_freq_response_with_damping(ax, calibration_data, shapers, performance_shaper, fr, zeta, max_freq):
-    freqs = calibration_data.freq_bins
-    psd = calibration_data.psd_sum[freqs <= max_freq]
-    px = calibration_data.psd_x[freqs <= max_freq]
-    py = calibration_data.psd_y[freqs <= max_freq]
-    pz = calibration_data.psd_z[freqs <= max_freq]
-    freqs = freqs[freqs <= max_freq]
+def plot_freq_response(ax, calibration_data, shapers, performance_shaper, peaks, peaks_freqs, peaks_threshold, fr, zeta, max_freq):
+    freqs = calibration_data.freqs
+    psd = calibration_data.psd_sum
+    px = calibration_data.psd_x
+    py = calibration_data.psd_y
+    pz = calibration_data.psd_z
fontP = matplotlib.font_manager.FontProperties()
fontP.set_size('x-small')
@@ -171,7 +76,7 @@ def plot_freq_response_with_damping(ax, calibration_data, shapers, performance_s
ax.set_ylabel('Power spectral density')
ax.set_ylim([0, psd.max() + psd.max() * 0.05])
-ax.plot(freqs, psd, label='X+Y+Z', color='purple')
+ax.plot(freqs, psd, label='X+Y+Z', color='purple', zorder=5)
ax.plot(freqs, px, label='X', color='red')
ax.plot(freqs, py, label='Y', color='green')
ax.plot(freqs, pz, label='Z', color='blue')
@@ -232,13 +137,9 @@ def plot_freq_response_with_damping(ax, calibration_data, shapers, performance_s
# Draw the detected peaks and name them
# This also draw the detection threshold and warning threshold (aka "effect zone")
-peaks, _, _ = detect_peaks(psd, freqs)
-peaks_warning_threshold = PEAKS_DETECTION_THRESHOLD * psd.max()
-peaks_effect_threshold = PEAKS_EFFECT_THRESHOLD * psd.max()
-ax.plot(freqs[peaks], psd[peaks], "x", color='black', markersize=8)
+ax.plot(peaks_freqs, psd[peaks], "x", color='black', markersize=8)
for idx, peak in enumerate(peaks):
-if psd[peak] > peaks_effect_threshold:
+if psd[peak] > peaks_threshold[1]:
fontcolor = 'red'
fontweight = 'bold'
else:
@@ -247,46 +148,48 @@ def plot_freq_response_with_damping(ax, calibration_data, shapers, performance_s
ax.annotate(f"{idx+1}", (freqs[peak], psd[peak]),
textcoords="offset points", xytext=(8, 5),
ha='left', fontsize=13, color=fontcolor, weight=fontweight)
-ax.axhline(y=peaks_warning_threshold, color='black', linestyle='--', linewidth=0.5)
-ax.axhline(y=peaks_effect_threshold, color='black', linestyle='--', linewidth=0.5)
-ax.fill_between(freqs, 0, peaks_warning_threshold, color='green', alpha=0.15, label='Relax Region')
-ax.fill_between(freqs, peaks_warning_threshold, peaks_effect_threshold, color='orange', alpha=0.2, label='Warning Region')
+ax.axhline(y=peaks_threshold[0], color='black', linestyle='--', linewidth=0.5)
+ax.axhline(y=peaks_threshold[1], color='black', linestyle='--', linewidth=0.5)
+ax.fill_between(freqs, 0, peaks_threshold[0], color='green', alpha=0.15, label='Relax Region')
+ax.fill_between(freqs, peaks_threshold[0], peaks_threshold[1], color='orange', alpha=0.2, label='Warning Region')
# Add the main resonant frequency and damping ratio of the axis to the graph title
ax.set_title("Axis Frequency Profile (ω0=%.1fHz, ζ=%.3f)" % (fr, zeta), fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold')
ax.legend(loc='upper left', prop=fontP)
ax2.legend(loc='upper right', prop=fontP)
-return freqs[peaks]
+return
# Plot a time-frequency spectrogram to see how the system respond over time during the # Plot a time-frequency spectrogram to see how the system respond over time during the
# resonnance test. This can highlight hidden spots from the standard PSD graph from other harmonics # resonnance test. This can highlight hidden spots from the standard PSD graph from other harmonics
def plot_spectrogram(ax, data, peaks, max_freq): def plot_spectrogram(ax, t, bins, pdata, peaks, max_freq):
pdata, bins, t = compute_spectrogram(data) ax.set_title("Time-Frequency Spectrogram", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold')
# We need to normalize the data to get a proper signal on the spectrogram # We need to normalize the data to get a proper signal on the spectrogram
# However, while using "LogNorm" provide too much background noise, using # However, while using "LogNorm" provide too much background noise, using
# "Normalize" make only the resonnance appearing and hide interesting elements # "Normalize" make only the resonnance appearing and hide interesting elements
# So we need to filter out the lower part of the data (ie. find the proper vmin for LogNorm) # So we need to filter out the lower part of the data (ie. find the proper vmin for LogNorm)
vmin_value = np.percentile(pdata, SPECTROGRAM_LOW_PERCENTILE_FILTER) vmin_value = np.percentile(pdata, SPECTROGRAM_LOW_PERCENTILE_FILTER)
ax.set_title("Time-Frequency Spectrogram", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold') # Draw the spectrogram using imgshow that is better suited here than pcolormesh since its result is already rasterized and
ax.pcolormesh(t, bins, pdata.T, norm=matplotlib.colors.LogNorm(vmin=vmin_value), # we doesn't need to keep vector graphics when saving to a final .png file. Using it also allow to
cmap='inferno', shading='gouraud') # save ~150-200MB of RAM during the "fig.savefig" operation.
cm = 'inferno'
# Add peaks lines in the spectrogram to get hint from peaks found in the first graph norm = matplotlib.colors.LogNorm(vmin=vmin_value)
if peaks is not None: ax.imshow(pdata.T, norm=norm, cmap=cm, aspect='auto', extent=[t[0], t[-1], bins[0], bins[-1]], origin='lower', interpolation='antialiased')
for idx, peak in enumerate(peaks):
ax.axvline(peak, color='cyan', linestyle='dotted', linewidth=0.75)
ax.annotate(f"Peak {idx+1}", (peak, t[-1]*0.9),
textcoords="data", color='cyan', rotation=90, fontsize=10,
verticalalignment='top', horizontalalignment='right')
ax.set_xlim([0., max_freq]) ax.set_xlim([0., max_freq])
ax.set_ylabel('Time (s)') ax.set_ylabel('Time (s)')
ax.set_xlabel('Frequency (Hz)') ax.set_xlabel('Frequency (Hz)')
# Add peaks lines in the spectrogram to get hint from peaks found in the first graph
if peaks is not None:
for idx, peak in enumerate(peaks):
ax.axvline(peak, color='cyan', linestyle='dotted', linewidth=1)
ax.annotate(f"Peak {idx+1}", (peak, bins[-1]*0.9),
textcoords="data", color='cyan', rotation=90, fontsize=10,
verticalalignment='top', horizontalalignment='right')
return return
@@ -295,40 +198,52 @@ def plot_spectrogram(ax, data, peaks, max_freq):
# Startup and main routines # Startup and main routines
###################################################################### ######################################################################
def parse_log(logname):
with open(logname) as f:
for header in f:
if not header.startswith('#'):
break
if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
# Raw accelerometer data
return np.loadtxt(logname, comments='#', delimiter=',')
# Power spectral density data or shaper calibration data
raise ValueError("File %s does not contain raw accelerometer data and therefore "
"is not supported by this script. Please use the official Klipper "
"calibrate_shaper.py script to process it instead." % (logname,))
def setup_klipper_import(kdir):
global shaper_calibrate
kdir = os.path.expanduser(kdir)
sys.path.append(os.path.join(kdir, 'klippy'))
shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras')
def shaper_calibration(lognames, klipperdir="~/klipper", max_smoothing=None, max_freq=200.): def shaper_calibration(lognames, klipperdir="~/klipper", max_smoothing=None, max_freq=200.):
setup_klipper_import(klipperdir) set_locale()
global shaper_calibrate
shaper_calibrate = setup_klipper_import(klipperdir)
# Parse data # Parse data
datas = [parse_log(fn) for fn in lognames] datas = [parse_log(fn) for fn in lognames]
if len(datas) > 1:
print_with_c_locale("Warning: incorrect number of .csv files detected. Only the first one will be used!")
# Calibrate shaper and generate outputs # Compute shapers, PSD outputs and spectrogram
performance_shaper, shapers, calibration_data, fr, zeta = calibrate_shaper_with_damping(datas, max_smoothing) performance_shaper, shapers, calibration_data, fr, zeta = calibrate_shaper(datas[0], max_smoothing)
pdata, bins, t = compute_spectrogram(datas[0])
del datas
fig = matplotlib.pyplot.figure() # Select only the relevant part of the PSD data
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[4, 3]) freqs = calibration_data.freq_bins
ax1 = fig.add_subplot(gs[0]) calibration_data.psd_sum = calibration_data.psd_sum[freqs <= max_freq]
ax2 = fig.add_subplot(gs[1]) calibration_data.psd_x = calibration_data.psd_x[freqs <= max_freq]
calibration_data.psd_y = calibration_data.psd_y[freqs <= max_freq]
calibration_data.psd_z = calibration_data.psd_z[freqs <= max_freq]
calibration_data.freqs = freqs[freqs <= max_freq]
# Peak detection algorithm
peaks_threshold = [
PEAKS_DETECTION_THRESHOLD * calibration_data.psd_sum.max(),
PEAKS_EFFECT_THRESHOLD * calibration_data.psd_sum.max()
]
num_peaks, peaks, peaks_freqs = detect_peaks(calibration_data.psd_sum, calibration_data.freqs, peaks_threshold[0])
# Print the peaks info in the console
peak_freqs_formated = ["{:.1f}".format(f) for f in peaks_freqs]
num_peaks_above_effect_threshold = np.sum(calibration_data.psd_sum[peaks] > peaks_threshold[1])
print_with_c_locale("Peaks detected on the graph: %d @ %s Hz (%d above effect threshold)" % (num_peaks, ", ".join(map(str, peak_freqs_formated)), num_peaks_above_effect_threshold))
# Create graph layout
fig, (ax1, ax2) = plt.subplots(2, 1, gridspec_kw={
'height_ratios':[4, 3],
'bottom':0.050,
'top':0.890,
'left':0.085,
'right':0.966,
'hspace':0.169,
'wspace':0.200
})
fig.set_size_inches(8.3, 11.6)
# Add title # Add title
title_line1 = "INPUT SHAPER CALIBRATION TOOL" title_line1 = "INPUT SHAPER CALIBRATION TOOL"
@@ -338,23 +253,24 @@ def shaper_calibration(lognames, klipperdir="~/klipper", max_smoothing=None, max
dt = datetime.strptime(f"{filename_parts[1]} {filename_parts[2]}", "%Y%m%d %H%M%S") dt = datetime.strptime(f"{filename_parts[1]} {filename_parts[2]}", "%Y%m%d %H%M%S")
title_line2 = dt.strftime('%x %X') + ' -- ' + filename_parts[3].upper().split('.')[0] + ' axis' title_line2 = dt.strftime('%x %X') + ' -- ' + filename_parts[3].upper().split('.')[0] + ' axis'
except: except:
print("Warning: CSV filename look to be different than expected (%s)" % (lognames[0])) print_with_c_locale("Warning: CSV filename look to be different than expected (%s)" % (lognames[0]))
title_line2 = lognames[0].split('/')[-1] title_line2 = lognames[0].split('/')[-1]
fig.text(0.12, 0.957, title_line2, ha='left', va='top', fontsize=16, color=KLIPPAIN_COLORS['dark_purple']) fig.text(0.12, 0.957, title_line2, ha='left', va='top', fontsize=16, color=KLIPPAIN_COLORS['dark_purple'])
# Plot the graphs # Plot the graphs
peaks = plot_freq_response_with_damping(ax1, calibration_data, shapers, performance_shaper, fr, zeta, max_freq) plot_freq_response(ax1, calibration_data, shapers, performance_shaper, peaks, peaks_freqs, peaks_threshold, fr, zeta, max_freq)
plot_spectrogram(ax2, datas[0], peaks, max_freq) plot_spectrogram(ax2, t, bins, pdata, peaks_freqs, max_freq)
fig.set_size_inches(8.3, 11.6)
fig.tight_layout()
fig.subplots_adjust(top=0.89)
# Adding a small Klippain logo to the top left corner of the figure # Adding a small Klippain logo to the top left corner of the figure
ax_logo = fig.add_axes([0.001, 0.899, 0.1, 0.1], anchor='NW', zorder=-1) ax_logo = fig.add_axes([0.001, 0.8995, 0.1, 0.1], anchor='NW')
ax_logo.imshow(matplotlib.pyplot.imread(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'klippain.png'))) ax_logo.imshow(plt.imread(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'klippain.png')))
ax_logo.axis('off') ax_logo.axis('off')
# Adding Shake&Tune version in the top right corner
st_version = get_git_version()
if st_version is not None:
fig.text(0.995, 0.985, st_version, ha='right', va='bottom', fontsize=8, color=KLIPPAIN_COLORS['purple'])
return fig return fig
@@ -379,7 +295,7 @@ def main():
opts.error("Too small max_smoothing specified (must be at least 0.05)") opts.error("Too small max_smoothing specified (must be at least 0.05)")
fig = shaper_calibration(args, options.klipperdir, options.max_smoothing, options.max_freq) fig = shaper_calibration(args, options.klipperdir, options.max_smoothing, options.max_freq)
fig.savefig(options.output) fig.savefig(options.output, dpi=150)
if __name__ == '__main__': if __name__ == '__main__':
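
The shaper script now slices the PSD arrays to max_freq once in shaper_calibration() and passes the detection and "effect" thresholds around as a two-element peaks_threshold list. A tiny numpy sketch of that pattern, on synthetic data rather than Klipper's calibration_data object:

import numpy as np

max_freq = 200.
freqs = np.linspace(0, 400, 1000)
psd_sum = np.exp(-((freqs - 52.0) ** 2) / 50.0)   # fake resonance around 52 Hz

# Keep only the part of the spectrum below max_freq (same boolean mask for every PSD array)
mask = freqs <= max_freq
psd_sum, freqs = psd_sum[mask], freqs[mask]

# Detection and effect thresholds are now carried around together as one list
PEAKS_DETECTION_THRESHOLD = 0.05
PEAKS_EFFECT_THRESHOLD = 0.12
peaks_threshold = [
    PEAKS_DETECTION_THRESHOLD * psd_sum.max(),
    PEAKS_EFFECT_THRESHOLD * psd_sum.max(),
]
print("thresholds:", peaks_threshold)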

View File

@@ -11,16 +11,18 @@
################ !!! DO NOT EDIT BELOW THIS LINE !!! ################
#####################################################################
-import optparse, matplotlib, re, sys, importlib, os, operator
+import optparse, matplotlib, re, os, operator
+from datetime import datetime
from collections import OrderedDict
import numpy as np
-import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
-import matplotlib.ticker, matplotlib.gridspec
-import locale
-from datetime import datetime
+import matplotlib.pyplot as plt
+import matplotlib.font_manager, matplotlib.ticker, matplotlib.gridspec
matplotlib.use('Agg')
+from locale_utils import set_locale, print_with_c_locale
+from common_func import compute_mechanical_parameters, detect_peaks, get_git_version, parse_log, setup_klipper_import
PEAKS_DETECTION_THRESHOLD = 0.05
PEAKS_RELATIVE_HEIGHT_THRESHOLD = 0.04
@@ -28,44 +30,29 @@ VALLEY_DETECTION_THRESHOLD = 0.1 # Lower is more sensitive
KLIPPAIN_COLORS = {
"purple": "#70088C",
-"orange": "#FF8D32",
"dark_purple": "#150140",
-"dark_orange": "#F24130"
+"dark_orange": "#F24130",
+"red_pink": "#F2055C"
}
# Set the best locale for time and date formating (generation of the titles)
try:
locale.setlocale(locale.LC_TIME, locale.getdefaultlocale())
except locale.Error:
locale.setlocale(locale.LC_TIME, 'C')
# Override the built-in print function to avoid problem in Klipper due to locale settings
original_print = print
def print_with_c_locale(*args, **kwargs):
original_locale = locale.setlocale(locale.LC_ALL, None)
locale.setlocale(locale.LC_ALL, 'C')
original_print(*args, **kwargs)
locale.setlocale(locale.LC_ALL, original_locale)
print = print_with_c_locale
###################################################################### ######################################################################
# Computation # Computation
###################################################################### ######################################################################
# Call to the official Klipper input shaper object to do the PSD computation
def calc_freq_response(data): def calc_freq_response(data):
# Use Klipper standard input shaper objects to do the computation
helper = shaper_calibrate.ShaperCalibrate(printer=None) helper = shaper_calibrate.ShaperCalibrate(printer=None)
return helper.process_accelerometer_data(data) return helper.process_accelerometer_data(data)
def calc_psd(datas, group, max_freq): def compute_vibration_spectrogram(datas, group, max_freq):
psd_list = [] psd_list = []
first_freqs = None first_freqs = None
signal_axes = ['x', 'y', 'z', 'all'] signal_axes = ['x', 'y', 'z', 'all']
for i in range(0, len(datas), group): for i in range(0, len(datas), group):
# Round up to the nearest power of 2 for faster FFT # Round up to the nearest power of 2 for faster FFT
N = datas[i].shape[0] N = datas[i].shape[0]
T = datas[i][-1,0] - datas[i][0,0] T = datas[i][-1,0] - datas[i][0,0]
@@ -116,56 +103,42 @@ def calc_psd(datas, group, max_freq):
pz = signal_normalized['z'][first_freqs <= max_freq] pz = signal_normalized['z'][first_freqs <= max_freq]
psd_list.append([psd, px, py, pz]) psd_list.append([psd, px, py, pz])
return first_freqs[first_freqs <= max_freq], psd_list return np.array(first_freqs[first_freqs <= max_freq]), np.array(psd_list)
def calc_powertot(psd_list, freqs): def compute_speed_profile(speeds, freqs, psd_list):
pwrtot_sum = [] # Preallocate arrays as psd_list is known and consistent
pwrtot_x = [] pwrtot_sum = np.zeros(len(psd_list))
pwrtot_y = [] pwrtot_x = np.zeros(len(psd_list))
pwrtot_z = [] pwrtot_y = np.zeros(len(psd_list))
pwrtot_z = np.zeros(len(psd_list))
for psd in psd_list: for i, psd in enumerate(psd_list):
pwrtot_sum.append(np.trapz(psd[0], freqs)) pwrtot_sum[i] = np.trapz(psd[0], freqs)
pwrtot_x.append(np.trapz(psd[1], freqs)) pwrtot_x[i] = np.trapz(psd[1], freqs)
pwrtot_y.append(np.trapz(psd[2], freqs)) pwrtot_y[i] = np.trapz(psd[2], freqs)
pwrtot_z.append(np.trapz(psd[3], freqs)) pwrtot_z[i] = np.trapz(psd[3], freqs)
return [pwrtot_sum, pwrtot_x, pwrtot_y, pwrtot_z]
# This finds all the peaks in a curve by looking at when the derivative term goes from positive to negative
# Then only the peaks found above a threshold are kept to avoid capturing peaks in the low amplitude noise of a signal
# Additionally, we validate that a peak is a real peak based on its neighbors, as we can have pretty flat zones in vibration
# graphs with a lot of false positives due to small "noise" in these flat zones
def detect_peaks(power_total, speeds, window_size=10, vicinity=10):
# Smooth the curve using a moving average to avoid catching peaks everywhere in noisy signals
kernel = np.ones(window_size) / window_size
smoothed_psd = np.convolve(power_total, kernel, mode='valid')
mean_pad = [np.mean(power_total[:window_size])] * (window_size // 2)
smoothed_psd = np.concatenate((mean_pad, smoothed_psd))
# Find peaks on the smoothed curve (and excluding the last value of the serie often detected when in a flat zone)
smoothed_peaks = np.where((smoothed_psd[:-3] < smoothed_psd[1:-2]) & (smoothed_psd[1:-2] > smoothed_psd[2:-1]))[0] + 1
detection_threshold = PEAKS_DETECTION_THRESHOLD * power_total.max()
valid_peaks = [] # Resample the signals to get a better detection of the valleys of low energy
for peak in smoothed_peaks: # and avoid getting limited by the speed increment defined by the user
peak_height = smoothed_psd[peak] - np.min(smoothed_psd[max(0, peak-vicinity):min(len(smoothed_psd), peak+vicinity+1)]) resampled_speeds, resampled_power_sum = resample_signal(speeds, pwrtot_sum)
if peak_height > PEAKS_RELATIVE_HEIGHT_THRESHOLD * smoothed_psd[peak] and smoothed_psd[peak] > detection_threshold: _, resampled_pwrtot_x = resample_signal(speeds, pwrtot_x)
valid_peaks.append(peak) _, resampled_pwrtot_y = resample_signal(speeds, pwrtot_y)
_, resampled_pwrtot_z = resample_signal(speeds, pwrtot_z)
# Refine peak positions on the original curve
refined_peaks = []
for peak in valid_peaks:
local_max = peak + np.argmax(power_total[max(0, peak-vicinity):min(len(power_total), peak+vicinity+1)]) - vicinity
refined_peaks.append(local_max)
peak_speeds = ["{:.1f}".format(speeds[i]) for i in refined_peaks] return resampled_speeds, [resampled_power_sum, resampled_pwrtot_x, resampled_pwrtot_y, resampled_pwrtot_z]
num_peaks = len(refined_peaks)
print("Vibrations peaks detected: %d @ %s mm/s (avoid running these speeds in your slicer profile)" % (num_peaks, ", ".join(map(str, peak_speeds))))
return np.array(refined_peaks), num_peaks
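The in-file implementation above is removed in favor of a shared `detect_peaks` helper imported from `common_func.py` (see the new import list and the call site further down in this diff). The body of that shared helper is not part of this diff; the snippet below is only a minimal sketch of what it could look like, with the signature inferred from the new call site and the behavior carried over from the removed code.

```python
import numpy as np

# Hedged sketch only: the real helper lives in common_func.py and its exact body is
# not shown in this diff. Signature assumed from the new call site:
# detect_peaks(psd, x, detection_threshold, relative_height_threshold, window_size, vicinity)
def detect_peaks(psd, x, detection_threshold, relative_height_threshold, window_size=10, vicinity=10):
    # Smooth the curve with a moving average so that noise is not detected as peaks
    kernel = np.ones(window_size) / window_size
    smoothed = np.convolve(psd, kernel, mode='valid')
    pad = np.full(window_size // 2, np.mean(psd[:window_size]))
    smoothed = np.concatenate((pad, smoothed))

    # Local maxima of the smoothed curve (derivative sign change from + to -)
    candidates = np.where((smoothed[:-2] < smoothed[1:-1]) & (smoothed[1:-1] > smoothed[2:]))[0] + 1

    refined_peaks = []
    for peak in candidates:
        lo, hi = max(0, peak - vicinity), min(len(smoothed), peak + vicinity + 1)
        height = smoothed[peak] - smoothed[lo:hi].min()
        # Keep only peaks above the absolute threshold that are also prominent enough
        # compared to their neighborhood (avoids false positives in flat zones)
        if smoothed[peak] > detection_threshold and height > relative_height_threshold * smoothed[peak]:
            lo, hi = max(0, peak - vicinity), min(len(psd), peak + vicinity + 1)
            refined_peaks.append(lo + int(np.argmax(psd[lo:hi])))  # refine on the raw curve

    refined_peaks = np.unique(refined_peaks).astype(int)
    return len(refined_peaks), refined_peaks, x[refined_peaks]
```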
def compute_motor_profile(power_spectral_densities):
# Sum the PSD across all speeds for each frequency of the spectrogram. Basically this
# is equivalent to sum up all the spectrogram column by column to plot the total on the right
motor_total_vibration = np.sum([psd[0] for psd in power_spectral_densities], axis=0)
# Then a very little smoothing of the signal is applied to avoid too much noise and sharp peaks on it and simplify
# the resonance frequency and damping ratio estimation later on. Also, too much smoothing is bad and would alter the results
smoothed_motor_total_vibration = np.convolve(motor_total_vibration, np.ones(10)/10, mode='same')
return smoothed_motor_total_vibration
# The goal is to find zones outside of the peaks (flat low-energy zones) to advise them as good speed ranges to use in the slicer # The goal is to find zones outside of the peaks (flat low-energy zones) to advise them as good speed ranges to use in the slicer
@@ -213,44 +186,39 @@ def identify_low_energy_zones(power_total):
def resample_signal(speeds, power_total, new_spacing=0.1): def resample_signal(speeds, power_total, new_spacing=0.1):
new_speeds = np.arange(speeds[0], speeds[-1] + new_spacing, new_spacing) new_speeds = np.arange(speeds[0], speeds[-1] + new_spacing, new_spacing)
new_power_total = np.interp(new_speeds, speeds, power_total) new_power_total = np.interp(new_speeds, speeds, power_total)
return new_speeds, new_power_total return np.array(new_speeds), np.array(new_power_total)
###################################################################### ######################################################################
# Graphing # Graphing
###################################################################### ######################################################################
def plot_total_power(ax, speeds, power_total): def plot_speed_profile(ax, speeds, power_total, num_peaks, peaks, low_energy_zones):
resampled_speeds, resampled_power_total = resample_signal(speeds, power_total[0]) # For this function, we have two arrays for the speeds. Indeed, since the power total sum was resampled to better detect
# the valleys of low energy later on, we also need the resampled speed array to plot it. For the rest
ax.set_title("Vibrations decomposition", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold') ax.set_title("Machine speed profile", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold')
ax.set_xlabel('Speed (mm/s)') ax.set_xlabel('Speed (mm/s)')
ax.set_ylabel('Energy') ax.set_ylabel('Energy')
ax2 = ax.twinx() ax2 = ax.twinx()
ax2.yaxis.set_visible(False) ax2.yaxis.set_visible(False)
power_total_sum = np.array(resampled_power_total) max_y = power_total[0].max() + power_total[0].max() * 0.05
speed_array = np.array(resampled_speeds) ax.set_xlim([speeds.min(), speeds.max()])
max_y = power_total_sum.max() + power_total_sum.max() * 0.05
ax.set_xlim([speed_array.min(), speed_array.max()])
ax.set_ylim([0, max_y]) ax.set_ylim([0, max_y])
ax2.set_ylim([0, max_y]) ax2.set_ylim([0, max_y])
ax.plot(resampled_speeds, resampled_power_total, label="X+Y+Z", color='purple') ax.plot(speeds, power_total[0], label="X+Y+Z", color='purple', zorder=5)
ax.plot(speeds, power_total[1], label="X", color='red') ax.plot(speeds, power_total[1], label="X", color='red')
ax.plot(speeds, power_total[2], label="Y", color='green') ax.plot(speeds, power_total[2], label="Y", color='green')
ax.plot(speeds, power_total[3], label="Z", color='blue') ax.plot(speeds, power_total[3], label="Z", color='blue')
peaks, num_peaks = detect_peaks(resampled_power_total, resampled_speeds)
low_energy_zones = identify_low_energy_zones(resampled_power_total)
if peaks.size: if peaks.size:
ax.plot(speed_array[peaks], power_total_sum[peaks], "x", color='black', markersize=8) ax.plot(speeds[peaks], power_total[0][peaks], "x", color='black', markersize=8)
for idx, peak in enumerate(peaks): for idx, peak in enumerate(peaks):
fontcolor = 'red' fontcolor = 'red'
fontweight = 'bold' fontweight = 'bold'
ax.annotate(f"{idx+1}", (speed_array[peak], power_total_sum[peak]), ax.annotate(f"{idx+1}", (speeds[peak], power_total[0][peak]),
textcoords="offset points", xytext=(8, 5), textcoords="offset points", xytext=(8, 5),
ha='left', fontsize=13, color=fontcolor, weight=fontweight) ha='left', fontsize=13, color=fontcolor, weight=fontweight)
ax2.plot([], [], ' ', label=f'Number of peaks: {num_peaks}') ax2.plot([], [], ' ', label=f'Number of peaks: {num_peaks}')
@@ -258,9 +226,9 @@ def plot_total_power(ax, speeds, power_total):
ax2.plot([], [], ' ', label=f'No peaks detected') ax2.plot([], [], ' ', label=f'No peaks detected')
for idx, (start, end, energy) in enumerate(low_energy_zones): for idx, (start, end, energy) in enumerate(low_energy_zones):
ax.axvline(speed_array[start], color='red', linestyle='dotted', linewidth=1.5) ax.axvline(speeds[start], color='red', linestyle='dotted', linewidth=1.5)
ax.axvline(speed_array[end], color='red', linestyle='dotted', linewidth=1.5) ax.axvline(speeds[end], color='red', linestyle='dotted', linewidth=1.5)
ax2.fill_between(speed_array[start:end], 0, power_total_sum[start:end], color='green', alpha=0.2, label=f'Zone {idx+1}: {speed_array[start]:.1f} to {speed_array[end]:.1f} mm/s (mean energy: {energy:.2f}%)') ax2.fill_between(speeds[start:end], 0, power_total[0][start:end], color='green', alpha=0.2, label=f'Zone {idx+1}: {speeds[start]:.1f} to {speeds[end]:.1f} mm/s (mean energy: {energy:.2f}%)')
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator()) ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
@@ -271,22 +239,23 @@ def plot_total_power(ax, speeds, power_total):
ax.legend(loc='upper left', prop=fontP) ax.legend(loc='upper left', prop=fontP)
ax2.legend(loc='upper right', prop=fontP) ax2.legend(loc='upper right', prop=fontP)
if peaks.size: return
return speed_array[peaks]
else:
return None
def plot_spectrogram(ax, speeds, freqs, power_spectral_densities, peaks, max_freq): def plot_vibration_spectrogram(ax, speeds, freqs, power_spectral_densities, peaks, fr, max_freq):
# Prepare the spectrum data
spectrum = np.empty([len(freqs), len(speeds)]) spectrum = np.empty([len(freqs), len(speeds)])
for i in range(len(speeds)): for i in range(len(speeds)):
for j in range(len(freqs)): for j in range(len(freqs)):
spectrum[j, i] = power_spectral_densities[i][0][j] spectrum[j, i] = power_spectral_densities[i][0][j]
ax.set_title("Vibrations spectrogram", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold') ax.set_title("Vibrations spectrogram", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold')
ax.pcolormesh(speeds, freqs, spectrum, norm=matplotlib.colors.LogNorm(), # ax.pcolormesh(speeds, freqs, spectrum, norm=matplotlib.colors.LogNorm(),
cmap='inferno', shading='gouraud') # cmap='inferno', shading='gouraud')
ax.imshow(spectrum, norm=matplotlib.colors.LogNorm(), cmap='inferno',
aspect='auto', extent=[speeds[0], speeds[-1], freqs[0], freqs[-1]],
origin='lower', interpolation='antialiased')
# Add peaks lines in the spectrogram to get hint from peaks found in the first graph # Add peaks lines in the spectrogram to get hint from peaks found in the first graph
if peaks is not None: if peaks is not None:
@@ -296,6 +265,13 @@ def plot_spectrogram(ax, speeds, freqs, power_spectral_densities, peaks, max_fre
textcoords="data", color='cyan', rotation=90, fontsize=10, textcoords="data", color='cyan', rotation=90, fontsize=10,
verticalalignment='top', horizontalalignment='right') verticalalignment='top', horizontalalignment='right')
# Add motor resonance line
if fr is not None and fr > 25:
ax.axhline(fr, color='cyan', linestyle='dotted', linewidth=1)
ax.annotate(f"Motor resonance", (speeds[-1]*0.95, fr+2),
textcoords="data", color='cyan', fontsize=10,
verticalalignment='bottom', horizontalalignment='right')
ax.set_ylim([0., max_freq]) ax.set_ylim([0., max_freq])
ax.set_ylabel('Frequency (hz)') ax.set_ylabel('Frequency (hz)')
ax.set_xlabel('Speed (mm/s)') ax.set_xlabel('Speed (mm/s)')
@@ -303,24 +279,47 @@ def plot_spectrogram(ax, speeds, freqs, power_spectral_densities, peaks, max_fre
return return
def plot_motor_profile(ax, freqs, motor_vibration_power, motor_fr, motor_zeta, motor_max_power_index):
ax.set_title("Motors frequency profile", fontsize=14, color=KLIPPAIN_COLORS['dark_orange'], weight='bold')
ax.set_xlabel('Energy')
ax.set_ylabel('Frequency (hz)')
ax2 = ax.twinx()
ax2.yaxis.set_visible(False)
ax.set_ylim([freqs.min(), freqs.max()])
ax.set_xlim([0, motor_vibration_power.max() + motor_vibration_power.max() * 0.1])
# Plot the profile curve
ax.plot(motor_vibration_power, freqs, color=KLIPPAIN_COLORS['orange'])
# Tag the resonance peak
ax.plot(motor_vibration_power[motor_max_power_index], freqs[motor_max_power_index], "x", color='black', markersize=8)
fontcolor = KLIPPAIN_COLORS['purple']
fontweight = 'bold'
ax.annotate(f"R", (motor_vibration_power[motor_max_power_index], freqs[motor_max_power_index]),
textcoords="offset points", xytext=(8, 8),
ha='right', fontsize=13, color=fontcolor, weight=fontweight)
# Add the legend
ax2.plot([], [], ' ', label="Motor resonant frequency (ω0): %.1fHz" % (motor_fr))
ax2.plot([], [], ' ', label="Motor damping ratio (ζ): %.3f" % (motor_zeta))
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
ax.grid(which='major', color='grey')
ax.grid(which='minor', color='lightgrey')
fontP = matplotlib.font_manager.FontProperties()
fontP.set_size('small')
ax2.legend(loc='upper right', prop=fontP)
return
###################################################################### ######################################################################
# Startup and main routines # Startup and main routines
###################################################################### ######################################################################
def parse_log(logname):
with open(logname) as f:
for header in f:
if not header.startswith('#'):
break
if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
# Raw accelerometer data
return np.loadtxt(logname, comments='#', delimiter=',')
# Power spectral density data or shaper calibration data
raise ValueError("File %s does not contain raw accelerometer data and therefore "
"is not supported by this script. Please use the official Klipper"
"calibrate_shaper.py script to process it instead." % (logname,))
def extract_speed(logname): def extract_speed(logname):
try: try:
speed = re.search('sp(.+?)n', os.path.basename(logname)).group(1).replace('_','.') speed = re.search('sp(.+?)n', os.path.basename(logname)).group(1).replace('_','.')
@@ -334,69 +333,104 @@ def sort_and_slice(raw_speeds, raw_datas, remove):
# Sort to get the speeds and their datas aligned and in ascending order # Sort to get the speeds and their datas aligned and in ascending order
raw_speeds, raw_datas = zip(*sorted(zip(raw_speeds, raw_datas), key=operator.itemgetter(0))) raw_speeds, raw_datas = zip(*sorted(zip(raw_speeds, raw_datas), key=operator.itemgetter(0)))
# Remove beginning and end of the datas for each file to get only # Optionally remove the beginning and end of each data file to get only
# constant speed data and remove the start/stop phase of the movements # the constant speed part of the segments and remove the start/stop phase
datas = [] sliced_datas = []
for data in raw_datas: for data in raw_datas:
sliced = round((len(data) * remove / 100) / 2) sliced = round((len(data) * remove / 100) / 2)
datas.append(data[sliced:len(data)-sliced]) sliced_datas.append(data[sliced:len(data)-sliced])
return raw_speeds, datas return raw_speeds, sliced_datas
def setup_klipper_import(kdir): def vibrations_calibration(lognames, klipperdir="~/klipper", axisname=None, accel=None, max_freq=1000., remove=0):
set_locale()
global shaper_calibrate global shaper_calibrate
kdir = os.path.expanduser(kdir) shaper_calibrate = setup_klipper_import(klipperdir)
sys.path.append(os.path.join(kdir, 'klippy'))
shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras')
def vibrations_calibration(lognames, klipperdir="~/klipper", axisname=None, max_freq=1000., remove=0):
setup_klipper_import(klipperdir)
# Parse the raw data and get them ready for analysis # Parse the raw data and get them ready for analysis
raw_datas = [parse_log(filename) for filename in lognames] raw_datas = [parse_log(filename) for filename in lognames]
raw_speeds = [extract_speed(filename) for filename in lognames] raw_speeds = [extract_speed(filename) for filename in lognames]
speeds, datas = sort_and_slice(raw_speeds, raw_datas, remove) speeds, datas = sort_and_slice(raw_speeds, raw_datas, remove)
del raw_datas, raw_speeds
# As we assume that we have the same number of file for each speeds. We can group # As we assume that we have the same number of files for each speed increment, we can group
# the PSD results by this number (to combine vibrations at given speed on all movements) # the PSD results by this number (to combine all the segments of the pattern at a constant speed)
group_by = speeds.count(speeds[0]) group_by = speeds.count(speeds[0])
# Compute psd and total power of the signal
freqs, power_spectral_densities = calc_psd(datas, group_by, max_freq)
power_total = calc_powertot(power_spectral_densities, freqs)
fig = matplotlib.pyplot.figure()
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[4, 3])
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
title_line1 = "VIBRATIONS MEASUREMENT TOOL"
fig.text(0.12, 0.965, title_line1, ha='left', va='bottom', fontsize=20, color=KLIPPAIN_COLORS['purple'], weight='bold')
try:
filename_parts = (lognames[0].split('/')[-1]).split('_')
dt = datetime.strptime(f"{filename_parts[1]} {filename_parts[2].split('-')[0]}", "%Y%m%d %H%M%S")
title_line2 = dt.strftime('%x %X') + ' -- ' + axisname.upper() + ' axis'
except:
print("Warning: CSV filename look to be different than expected (%s)" % (lognames[0]))
title_line2 = lognames[0].split('/')[-1]
fig.text(0.12, 0.957, title_line2, ha='left', va='top', fontsize=16, color=KLIPPAIN_COLORS['dark_purple'])
# Remove speeds duplicates and graph the processed datas # Remove speeds duplicates and graph the processed datas
speeds = list(OrderedDict((x, True) for x in speeds).keys()) speeds = list(OrderedDict((x, True) for x in speeds).keys())
peaks = plot_total_power(ax1, speeds, power_total) # Compute speed profile, vibration spectrogram and motor resonance profile
plot_spectrogram(ax2, speeds, freqs, power_spectral_densities, peaks, max_freq) freqs, psd = compute_vibration_spectrogram(datas, group_by, max_freq)
upsampled_speeds, speeds_powers = compute_speed_profile(speeds, freqs, psd)
motor_vibration_power = compute_motor_profile(psd)
fig.set_size_inches(8.3, 11.6) # Peak detection and low energy valleys (good speeds) identification between the peaks
fig.tight_layout() num_peaks, vibration_peaks, peaks_speeds = detect_peaks(
fig.subplots_adjust(top=0.89) speeds_powers[0], upsampled_speeds,
PEAKS_DETECTION_THRESHOLD * speeds_powers[0].max(),
PEAKS_RELATIVE_HEIGHT_THRESHOLD, 10, 10
)
low_energy_zones = identify_low_energy_zones(speeds_powers[0])
# Print the vibration peaks info in the console
formated_peaks_speeds = ["{:.1f}".format(pspeed) for pspeed in peaks_speeds]
print_with_c_locale("Vibrations peaks detected: %d @ %s mm/s (avoid setting a speed near these values in your slicer print profile)" % (num_peaks, ", ".join(map(str, formated_peaks_speeds))))
# Motor resonance estimation
motor_fr, motor_zeta, motor_max_power_index = compute_mechanical_parameters(motor_vibration_power, freqs)
if motor_fr > 25:
print_with_c_locale("Motors have a main resonant frequency at %.1fHz with an estimated damping ratio of %.3f" % (motor_fr, motor_zeta))
else:
print_with_c_locale("The detected resonance frequency of the motors is too low (%.1fHz). This is probably due to the test run with too high acceleration!" % motor_fr)
print_with_c_locale("Try lowering the ACCEL value before restarting the macro to ensure that only constant speeds are recorded and that the dynamic behavior in the corners is not impacting the measurements.")
# Create graph layout
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, gridspec_kw={
'height_ratios':[4, 3],
'width_ratios':[5, 3],
'bottom':0.050,
'top':0.890,
'left':0.057,
'right':0.985,
'hspace':0.166,
'wspace':0.138
})
ax2.remove() # top right graph is not used and left blank for now...
fig.set_size_inches(14, 11.6)
# Add title
title_line1 = "VIBRATIONS MEASUREMENT TOOL"
fig.text(0.075, 0.965, title_line1, ha='left', va='bottom', fontsize=20, color=KLIPPAIN_COLORS['purple'], weight='bold')
try:
filename_parts = (lognames[0].split('/')[-1]).split('_')
dt = datetime.strptime(f"{filename_parts[1]} {filename_parts[2].split('-')[0]}", "%Y%m%d %H%M%S")
title_line2 = dt.strftime('%x %X')
if axisname is not None:
title_line2 += ' -- ' + str(axisname).upper() + ' axis'
if accel is not None:
title_line2 += ' at ' + str(accel) + ' mm/s²'
except:
print_with_c_locale("Warning: CSV filename look to be different than expected (%s)" % (lognames[0]))
title_line2 = lognames[0].split('/')[-1]
fig.text(0.075, 0.957, title_line2, ha='left', va='top', fontsize=16, color=KLIPPAIN_COLORS['dark_purple'])
# Plot the graphs
plot_speed_profile(ax1, upsampled_speeds, speeds_powers, num_peaks, vibration_peaks, low_energy_zones)
plot_motor_profile(ax4, freqs, motor_vibration_power, motor_fr, motor_zeta, motor_max_power_index)
plot_vibration_spectrogram(ax3, speeds, freqs, psd, peaks_speeds, motor_fr, max_freq)
# Adding a small Klippain logo to the top left corner of the figure # Adding a small Klippain logo to the top left corner of the figure
ax_logo = fig.add_axes([0.001, 0.899, 0.1, 0.1], anchor='NW', zorder=-1) ax_logo = fig.add_axes([0.001, 0.924, 0.075, 0.075], anchor='NW')
ax_logo.imshow(matplotlib.pyplot.imread(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'klippain.png'))) ax_logo.imshow(plt.imread(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'klippain.png')))
ax_logo.axis('off') ax_logo.axis('off')
# Adding Shake&Tune version in the top right corner
st_version = get_git_version()
if st_version is not None:
fig.text(0.995, 0.985, st_version, ha='right', va='bottom', fontsize=8, color=KLIPPAIN_COLORS['purple'])
return fig return fig
@@ -407,11 +441,13 @@ def main():
opts.add_option("-o", "--output", type="string", dest="output", opts.add_option("-o", "--output", type="string", dest="output",
default=None, help="filename of output graph") default=None, help="filename of output graph")
opts.add_option("-a", "--axis", type="string", dest="axisname", opts.add_option("-a", "--axis", type="string", dest="axisname",
default=None, help="axis name to be shown on the side of the graph") default=None, help="axis name to be printed on the graph")
opts.add_option("-c", "--accel", type="int", dest="accel",
default=None, help="accel value to be printed on the graph")
opts.add_option("-f", "--max_freq", type="float", default=1000., opts.add_option("-f", "--max_freq", type="float", default=1000.,
help="maximum frequency to graph") help="maximum frequency to graph")
opts.add_option("-r", "--remove", type="int", default=0, opts.add_option("-r", "--remove", type="int", default=0,
help="percentage of data removed at start/end of each files") help="percentage of data removed at start/end of each CSV files")
opts.add_option("-k", "--klipper_dir", type="string", dest="klipperdir", opts.add_option("-k", "--klipper_dir", type="string", dest="klipperdir",
default="~/klipper", help="main klipper directory") default="~/klipper", help="main klipper directory")
options, args = opts.parse_args() options, args = opts.parse_args()
@@ -422,8 +458,8 @@ def main():
if options.remove > 50 or options.remove < 0: if options.remove > 50 or options.remove < 0:
opts.error("You must specify a correct percentage (option -r) in the 0-50 range") opts.error("You must specify a correct percentage (option -r) in the 0-50 range")
fig = vibrations_calibration(args, options.klipperdir, options.axisname, options.max_freq, options.remove) fig = vibrations_calibration(args, options.klipperdir, options.axisname, options.accel, options.max_freq, options.remove)
fig.savefig(options.output) fig.savefig(options.output, dpi=150)
if __name__ == '__main__': if __name__ == '__main__':

View File

@@ -5,14 +5,11 @@
############################################ ############################################
# Written by Frix_x#0161 # # Written by Frix_x#0161 #
# Usage: # This script is designed to be used with gcode_shell_commands directly from Klipper
# This script was designed to be used with gcode_shell_commands directly from Klipper # Use the provided Shake&Tune macros instead!
# Parameters availables:
# BELTS - To generate belts diagrams after calling the Klipper TEST_RESONANCES AXIS=1,(-)1 OUTPUT=raw_data
# SHAPER - To generate input shaper diagrams after calling the Klipper TEST_RESONANCES AXIS=X/Y OUTPUT=raw_data
# VIBRATIONS - To generate vibration diagram after calling the custom (Frix_x#0161) VIBRATIONS_CALIBRATION macro
import optparse
import os import os
import time import time
import glob import glob
@@ -24,7 +21,6 @@ from datetime import datetime
################################################################################################################# #################################################################################################################
RESULTS_FOLDER = os.path.expanduser('~/printer_data/config/K-ShakeTune_results') RESULTS_FOLDER = os.path.expanduser('~/printer_data/config/K-ShakeTune_results')
KLIPPER_FOLDER = os.path.expanduser('~/klipper') KLIPPER_FOLDER = os.path.expanduser('~/klipper')
STORE_RESULTS = 3
################################################################################################################# #################################################################################################################
from graph_belts import belts_calibration from graph_belts import belts_calibration
@@ -51,7 +47,7 @@ def is_file_open(filepath):
return False return False
def create_belts_graph(): def create_belts_graph(keep_csv):
current_date = datetime.now().strftime('%Y%m%d_%H%M%S') current_date = datetime.now().strftime('%Y%m%d_%H%M%S')
lognames = [] lognames = []
@@ -86,12 +82,18 @@ def create_belts_graph():
# Generate the belts graph and its name # Generate the belts graph and its name
fig = belts_calibration(lognames, KLIPPER_FOLDER) fig = belts_calibration(lognames, KLIPPER_FOLDER)
png_filename = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[0], f'belts_{current_date}.png') png_filename = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[0], f'belts_{current_date}.png')
fig.savefig(png_filename, dpi=150)
# Remove the CSV files if the user doesn't want to keep them
if not keep_csv:
for csv in lognames:
if os.path.exists(csv):
os.remove(csv)
fig.savefig(png_filename)
return return
def create_shaper_graph(): def create_shaper_graph(keep_csv):
current_date = datetime.now().strftime('%Y%m%d_%H%M%S') current_date = datetime.now().strftime('%Y%m%d_%H%M%S')
# Get all the files and sort them based on last modified time to select the most recent one # Get all the files and sort them based on last modified time to select the most recent one
@@ -120,16 +122,21 @@ def create_shaper_graph():
# Generate the shaper graph and its name # Generate the shaper graph and its name
fig = shaper_calibration([new_file], KLIPPER_FOLDER) fig = shaper_calibration([new_file], KLIPPER_FOLDER)
png_filename = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[1], f'resonances_{current_date}_{axis}.png') png_filename = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[1], f'resonances_{current_date}_{axis}.png')
fig.savefig(png_filename, dpi=150)
# Remove the CSV file if the user doesn't want to keep it
if not keep_csv:
if os.path.exists(new_file):
os.remove(new_file)
fig.savefig(png_filename) return axis
return
def create_vibrations_graph(axis_name): def create_vibrations_graph(axis_name, accel, chip_name, keep_csv):
current_date = datetime.now().strftime('%Y%m%d_%H%M%S') current_date = datetime.now().strftime('%Y%m%d_%H%M%S')
lognames = [] lognames = []
globbed_files = glob.glob('/tmp/adxl345-*.csv') globbed_files = glob.glob(f'/tmp/{chip_name}-*.csv')
if not globbed_files: if not globbed_files:
print("No CSV files found in the /tmp folder to create the vibration graphs!") print("No CSV files found in the /tmp folder to create the vibration graphs!")
sys.exit(1) sys.exit(1)
@@ -143,7 +150,7 @@ def create_vibrations_graph(axis_name):
time.sleep(2) time.sleep(2)
# Cleanup of the filename and moving it in the result folder # Cleanup of the filename and moving it in the result folder
cleanfilename = os.path.basename(filename).replace('adxl345', f'vibr_{current_date}') cleanfilename = os.path.basename(filename).replace(chip_name, f'vibr_{current_date}')
new_file = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[2], cleanfilename) new_file = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[2], cleanfilename)
shutil.move(filename, new_file) shutil.move(filename, new_file)
@@ -155,25 +162,30 @@ def create_vibrations_graph(axis_name):
time.sleep(5) time.sleep(5)
# Generate the vibration graph and its name # Generate the vibration graph and its name
fig = vibrations_calibration(lognames, KLIPPER_FOLDER, axis_name) fig = vibrations_calibration(lognames, KLIPPER_FOLDER, axis_name, accel)
png_filename = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[2], f'vibrations_{current_date}_{axis_name}.png') png_filename = os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[2], f'vibrations_{current_date}_{axis_name}.png')
fig.savefig(png_filename, dpi=150)
# Archive all the csv files in a tarball and remove them to clean up the results folder # Archive all the csv files in a tarball in case the user wants to keep them
with tarfile.open(os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[2], f'vibrations_{current_date}_{axis_name}.tar.gz'), 'w:gz') as tar: if keep_csv:
for csv_file in glob.glob(os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[2], f'vibr_{current_date}*.csv')): with tarfile.open(os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[2], f'vibrations_{current_date}_{axis_name}.tar.gz'), 'w:gz') as tar:
tar.add(csv_file, recursive=False) for csv_file in lognames:
tar.add(csv_file, recursive=False)
# Remove the remaining CSV files not needed anymore (tarball is safe if it was created)
for csv_file in lognames:
if os.path.exists(csv_file):
os.remove(csv_file) os.remove(csv_file)
fig.savefig(png_filename)
return return
def find_axesmap(accel): def find_axesmap(accel, chip_name):
current_date = datetime.now().strftime('%Y%m%d_%H%M%S') current_date = datetime.now().strftime('%Y%m%d_%H%M%S')
result_filename = os.path.join(RESULTS_FOLDER, f'axes_map_{current_date}.txt') result_filename = os.path.join(RESULTS_FOLDER, f'axes_map_{current_date}.txt')
lognames = [] lognames = []
globbed_files = glob.glob('/tmp/adxl345-*.csv') globbed_files = glob.glob(f'/tmp/{chip_name}-*.csv')
if not globbed_files: if not globbed_files:
print("No CSV files found in the /tmp folder to analyze and find the axes_map!") print("No CSV files found in the /tmp folder to analyze and find the axes_map!")
sys.exit(1) sys.exit(1)
@@ -201,10 +213,10 @@ def get_old_files(folder, extension, limit):
files.sort(key=lambda x: os.path.getmtime(x), reverse=True) files.sort(key=lambda x: os.path.getmtime(x), reverse=True)
return files[limit:] return files[limit:]
def clean_files(): def clean_files(keep_results):
# Define limits based on STORE_RESULTS # Define limits based on STORE_RESULTS
keep1 = STORE_RESULTS + 1 keep1 = keep_results + 1
keep2 = 2 * STORE_RESULTS + 1 keep2 = 2 * keep_results + 1
# Find old files in each directory # Find old files in each directory
old_belts_files = get_old_files(os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[0]), '.png', keep1) old_belts_files = get_old_files(os.path.join(RESULTS_FOLDER, RESULTS_SUBFOLDERS[0]), '.png', keep1)
@@ -236,31 +248,51 @@ def clean_files():
def main(): def main():
# Check if results folders are there or create them # Parse command-line arguments
usage = "%prog [options] <logs>"
opts = optparse.OptionParser(usage)
opts.add_option("-t", "--type", type="string", dest="type",
default=None, help="type of output graph to produce")
opts.add_option("--accel", type="int", default=None, dest="accel_used",
help="acceleration used during the vibration macro or axesmap macro")
opts.add_option("--axis_name", type="string", default=None, dest="axis_name",
help="axis tested during the vibration macro")
opts.add_option("--chip_name", type="string", default="adxl345", dest="chip_name",
help="accelerometer chip name in klipper used during the vibration macro or the axesmap macro")
opts.add_option("-n", "--keep_results", type="int", default=3, dest="keep_results",
help="number of results to keep in the result folder after each run of the script")
opts.add_option("-c", "--keep_csv", action="store_true", default=False, dest="keep_csv",
help="weither or not to keep the CSV files alongside the PNG graphs image results")
options, args = opts.parse_args()
if options.type is None:
opts.error("You must specify the type of output graph you want to produce (option -t)")
elif options.type.lower() is None or options.type.lower() not in ['belts', 'shaper', 'vibrations', 'axesmap', 'clean']:
opts.error("Type of output graph need to be in the list of 'belts', 'shaper', 'vibrations', 'axesmap' or 'clean'")
else:
graph_mode = options.type
# Check if results folders are there or create them before doing anything else
for result_subfolder in RESULTS_SUBFOLDERS: for result_subfolder in RESULTS_SUBFOLDERS:
folder = os.path.join(RESULTS_FOLDER, result_subfolder) folder = os.path.join(RESULTS_FOLDER, result_subfolder)
if not os.path.exists(folder): if not os.path.exists(folder):
os.makedirs(folder) os.makedirs(folder)
if len(sys.argv) < 2: if graph_mode.lower() == 'belts':
print("Usage: is_workflow.py [BELTS|SHAPER|VIBRATIONS|AXESMAP]") create_belts_graph(keep_csv=options.keep_csv)
sys.exit(1) print(f"Belt graph created. You will find the results in {RESULTS_FOLDER}/{RESULTS_SUBFOLDERS[0]}")
elif graph_mode.lower() == 'shaper':
if sys.argv[1].lower() == 'belts': axis = create_shaper_graph(keep_csv=options.keep_csv)
create_belts_graph() print(f"{axis} input shaper graph created. You will find the results in {RESULTS_FOLDER}/{RESULTS_SUBFOLDERS[1]}")
elif sys.argv[1].lower() == 'shaper': elif graph_mode.lower() == 'vibrations':
create_shaper_graph() create_vibrations_graph(axis_name=options.axis_name, accel=options.accel_used, chip_name=options.chip_name, keep_csv=options.keep_csv)
elif sys.argv[1].lower() == 'vibrations': print(f"{options.axis_name} vibration graph created. You will find the results in {RESULTS_FOLDER}/{RESULTS_SUBFOLDERS[2]}")
create_vibrations_graph(axis_name=sys.argv[2]) elif graph_mode.lower() == 'axesmap':
elif sys.argv[1].lower() == 'axesmap': print(f"WARNING: AXES_MAP_CALIBRATION is currently very experimental and may produce incorrect results... Please validate the output!")
find_axesmap(accel=sys.argv[2]) find_axesmap(accel=options.accel_used, chip_name=options.chip_name)
else: elif graph_mode.lower() == 'clean':
print("Usage: is_workflow.py [BELTS|SHAPER|VIBRATIONS|AXESMAP]") print(f"Cleaning output folder to keep only the last {options.keep_results} results...")
sys.exit(1) clean_files(keep_results=options.keep_results)
clean_files()
print(f"Graphs created. You will find the results in {RESULTS_FOLDER}")
if __name__ == '__main__': if __name__ == '__main__':

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
# Special utility functions to manage locale settings and printing
# Written by Frix_x#0161 #
import locale
# Set the best locale for time and date formatting (generation of the titles)
def set_locale():
try:
current_locale = locale.getlocale(locale.LC_TIME)
if current_locale is None or current_locale[0] is None:
locale.setlocale(locale.LC_TIME, 'C')
except locale.Error:
locale.setlocale(locale.LC_TIME, 'C')
# Print function to avoid problems in the Klipper console (which doesn't support special characters) due to locale settings
def print_with_c_locale(*args, **kwargs):
try:
original_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, 'C')
except locale.Error as e:
print("Warning: Failed to set a basic locale. Special characters may not display correctly in Klipper console:", e)
finally:
print(*args, **kwargs) # Proceed with printing regardless of locale setting success
try:
locale.setlocale(locale.LC_ALL, original_locale)
except locale.Error as e:
print("Warning: Failed to restore the original locale setting:", e)

View File

@@ -21,19 +21,15 @@ Check out the **[detailed documentation of the Shake&Tune module here](./docs/RE
Follow these steps to install the Shake&Tune module in your printer: Follow these steps to install the Shake&Tune module in your printer:
1. Be sure to have a working accelerometer on your machine. You can follow the official [Measuring Resonances Klipper documentation](https://www.klipper3d.org/Measuring_Resonances.html) to configure one. Validate with an `ACCELEROMETER_QUERY` command that everything works correctly. 1. Be sure to have a working accelerometer on your machine. You can follow the official [Measuring Resonances Klipper documentation](https://www.klipper3d.org/Measuring_Resonances.html) to configure one. Validate with an `ACCELEROMETER_QUERY` command that everything works correctly.
1. Install the system libraries that are needed to run the custom Python scripts: 1. Install the Shake&Tune package by running over SSH on your printer:
```bash
sudo apt update && sudo apt install python3-venv libopenblas-dev libatlas-base-dev -y
```
1. Then, you can install the Shake&Tune package by running over SSH on your printer:
```bash ```bash
wget -O - https://raw.githubusercontent.com/Frix-x/klippain-shaketune/main/install.sh | bash wget -O - https://raw.githubusercontent.com/Frix-x/klippain-shaketune/main/install.sh | bash
``` ```
1. Finally, append the following to your `printer.cfg` file and restart Klipper (if preferred, you can include only the needed macros: using `*.cfg` is a convenient way to include them all at once): 1. Then, append the following to your `printer.cfg` file and restart Klipper (if preferred, you can include only the needed macros: using `*.cfg` is a convenient way to include them all at once):
``` ```
[include K-ShakeTune/*.cfg] [include K-ShakeTune/*.cfg]
``` ```
1. Optionally, if you want to get automatic updates, add the following to your `moonraker.cfg` file: 1. Finally, if you want to get automatic updates, add the following to your `moonraker.cfg` file:
``` ```
[update_manager Klippain-ShakeTune] [update_manager Klippain-ShakeTune]
type: git_repo type: git_repo
@@ -44,10 +40,7 @@ Follow these steps to install the Shake&Tune module in your printer:
managed_services: klipper managed_services: klipper
install_script: install.sh install_script: install.sh
``` ```
> **Note**:
>
> If already using my old IS workflow scripts, please remove everything before installing this new module. This include the macros, the Python scripts, the `plot_graph.sh` and the `[gcode_shell_command plot_graph]` section that are not needed anymore.
## Usage ## Usage

Binary file not shown (existing image updated: 389 KiB → 404 KiB).

Binary file not shown (new image added: 76 KiB).

View File

@@ -15,6 +15,8 @@ Then, call the `AXES_SHAPER_CALIBRATION` macro and look for the graphs in the re
|FREQ_END|133|Maximum excitation frequency| |FREQ_END|133|Maximum excitation frequency|
|HZ_PER_SEC|1|Number of Hz per seconds for the test| |HZ_PER_SEC|1|Number of Hz per seconds for the test|
|AXIS|"all"|Axis you want to test in the list of "all", "X" or "Y"| |AXIS|"all"|Axis you want to test in the list of "all", "X" or "Y"|
|KEEP_N_RESULTS|3|Total number of results to keep in the result folder after running the test. The older results are automatically cleaned up|
|KEEP_CSV|True|Whether or not to keep the CSV data file alongside the PNG graphs|
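For example, a call like `AXES_SHAPER_CALIBRATION AXIS=X KEEP_CSV=False` should run the sweep on the X axis only and discard the CSV file once the PNG graph has been generated (parameter names as listed in the table above).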
## Graphs description ## Graphs description

View File

@@ -14,6 +14,8 @@ Then, call the `BELTS_SHAPER_CALIBRATION` macro and look for the graphs in the r
|FREQ_START|5|Starting excitation frequency| |FREQ_START|5|Starting excitation frequency|
|FREQ_END|133|Maximum excitation frequency| |FREQ_END|133|Maximum excitation frequency|
|HZ_PER_SEC|1|Number of Hz per seconds for the test| |HZ_PER_SEC|1|Number of Hz per seconds for the test|
|KEEP_N_RESULTS|3|Total number of results to keep in the result folder after running the test. The older results are automatically cleaned up|
|KEEP_CSV|True|Whether or not to keep the CSV data files alongside the PNG graphs|
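For example, a call like `BELTS_SHAPER_CALIBRATION FREQ_END=100 KEEP_N_RESULTS=5` should limit the excitation sweep to 100 Hz and keep the last five results in the result folder.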
## Graphs description ## Graphs description

View File

@@ -22,11 +22,14 @@ Call the `VIBRATIONS_CALIBRATION` macro with the direction and speed range you w
|SPEED_INCREMENT|2|speed increment of the toolhead in mm/s between each movement| |SPEED_INCREMENT|2|speed increment of the toolhead in mm/s between each movement|
|TRAVEL_SPEED|200|speed in mm/s used for all the travel moves| |TRAVEL_SPEED|200|speed in mm/s used for all the travel moves|
|ACCEL_CHIP|"adxl345"|accelerometer chip name in the config| |ACCEL_CHIP|"adxl345"|accelerometer chip name in the config|
|KEEP_N_RESULTS|3|Total number of results to keep in the result folder after running the test. The older results are automatically cleaned up|
|KEEP_CSV|True|Whether or not to keep the CSV data files alongside the PNG graphs (archived in a tarball)|
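For example, adding `SPEED_INCREMENT=5 KEEP_CSV=False` to your `VIBRATIONS_CALIBRATION` call should use coarser 5 mm/s speed increments and skip the CSV tarball archiving.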
## Graphs description ## Graphs description
![](../images/vibrations_graphs/vibration_graph_explanation.png) ![](../images/vibrations_graphs/vibration_graph_explanation.png)
![](../images/vibrations_graphs/vibration_graph_explanation2.png)
## Improving the results ## Improving the results

View File

@@ -27,6 +27,32 @@ function preflight_checks {
echo "[ERROR] Klipper service not found, please install Klipper first!" echo "[ERROR] Klipper service not found, please install Klipper first!"
exit -1 exit -1
fi fi
install_package_requirements
}
# Function to check if a package is installed
function is_package_installed {
dpkg -s "$1" &> /dev/null
return $?
}
function install_package_requirements {
packages=("python3-venv" "libopenblas-dev" "libatlas-base-dev")
packages_to_install=""
for package in "${packages[@]}"; do
if is_package_installed "$package"; then
echo "$package is already installed"
else
packages_to_install="$packages_to_install $package"
fi
done
if [ -n "$packages_to_install" ]; then
echo "Installing missing packages: $packages_to_install"
sudo apt update && sudo apt install -y $packages_to_install
fi
} }
function check_download { function check_download {

View File

@@ -1,12 +1,4 @@
contourpy==1.2.0 GitPython==3.1.40
cycler==0.12.1
fonttools==4.45.1
kiwisolver==1.4.5
matplotlib==3.8.2 matplotlib==3.8.2
numpy==1.26.2 numpy==1.26.2
packaging==23.2
Pillow==10.1.0
pyparsing==3.1.1
python-dateutil==2.8.2
scipy==1.11.4 scipy==1.11.4
six==1.16.0