The apparatus and its analysis were introduced in the previous section. The procedure for the experiment is as follows:
Within the normal sections of the report, please include the following:
First, we import the necessary libraries and configure the plotting parameters. Please install the following packages: - numpy - matplotlib - lvm_read. (The `simulation` module is the local file `simulation.py` provided with this experiment — it is not installed with pip; keep it in the same directory as this script.)
Here is a command to install the packages:
pip install numpy matplotlib lvm_read
Now import them into the script:
import numpy as np
import matplotlib.pyplot as plt
# Fix for NumPy compatibility with lvm_read
import sys
import numpy.core.numeric as _numeric
sys.modules['numpy._core.numeric'] = _numeric
import lvm_read
from simulation import simulate, simulation_interpolate, L, dx, n, get_sensor_positionsSensor configuration (example with 8 sensors):
Sensor spacing: 15 mm
Sensor positions: [ 97.5 112.5 127.5 142.5 157.5 172.5 187.5 202.5] mm
Load the experimental data from the LVM file format and extract the relevant measurements. The data contains temperature readings from multiple sensors along the rod during heating and cooling.
# Load experimental data
data = lvm_read.read('test.lvm')
data = data[0]['data']
# The data is duplicated because the entire array was saved at each step.
# Find the last occurrence of iteration 0 to get the start of clean data
i_0 = np.where(data[:, 0] == 0)[0][-1]
pre_points = 6 # Skip initial points before heating started
data = data[i_0 + pre_points:, 2:] # Remove iteration column, keep 8 sensor columns
# Create time array
dt = 30 # (s) Time step between measurements
t_data = np.arange(0, len(data) * dt, dt)Analyze the data to determine the heating and cooling phases. This is done by finding the peak temperature which indicates the end of heating.
# Find heating and cooling times from data
# Assume sensor 5 (middle sensor) represents overall heating pattern
reference_sensor = 4 # 0-indexed, so sensor 5
i_peak = np.argmax(data[:, reference_sensor])
heat_time = t_data[i_peak]
cool_time = t_data[-1] - heat_time
print(f"Experimental timing:")
print(f"Heat time: {heat_time/60:.1f} min ({heat_time:.0f} s)")
print(f"Cool time: {cool_time/60:.1f} min ({cool_time:.0f} s)")
print(f"Total experiment time: {t_data[-1]/60:.1f} min")Experimental timing:
Heat time: 101.5 min (6090 s)
Cool time: 88.0 min (5280 s)
Total experiment time: 189.5 min
Define the physical positions of temperature sensors along the rod. These positions are based on the experimental setup geometry.
# Physical parameters
n_sensors = data.shape[1] # Number of sensors from data
print(f"\nData shape after processing: {data.shape}")
print(f"Number of columns (sensors): {n_sensors}")
# Calculate sensor positions using simulation function
sensor_positions = get_sensor_positions(n_sensors)
print(f"\nSensor configuration:")
print(f"Number of sensors: {n_sensors}")
print(f"Sensor spacing: 15 mm")
print(f"Sensor positions: {sensor_positions*1000} mm")
# Verify sensors are within rod bounds
if sensor_positions[0] < 0 or sensor_positions[-1] > L:
raise ValueError("Some sensors are outside rod bounds!")Data shape after processing: (380, 8)
Number of columns (sensors): 8
Sensor configuration:
Number of sensors: 8
Sensor spacing: 15 mm
Sensor positions: [ 97.5 112.5 127.5 142.5 157.5 172.5 187.5 202.5] mm
Run the simulation using the same heating and cooling times as the experiment. This ensures a fair comparison between theoretical predictions and measured data.
# Run simulation with experimental timing
t_sim, y_sim = simulate(heat_time, cool_time, nt=201)
print(f"\nSimulation parameters:")
print(f"Simulation time points: {len(t_sim)}")
print(f"Simulation elements: {y_sim.shape[0]}")Simulation parameters:
Simulation time points: 201
Simulation elements: 50
Use interpolation to predict temperatures at the exact sensor positions. This accounts for the fact that sensors may not be located at finite element centers.
# Predict temperatures at sensor positions
T_predicted = np.zeros((len(t_data), n_sensors))
for i, t in enumerate(t_data):
if t <= t_sim[-1]: # Only interpolate within simulation time range
T_predicted[i, :] = simulation_interpolate(t, sensor_positions, t_sim, y_sim)
else:
T_predicted[i, :] = np.nan
print(f"Predictions generated for {n_sensors} sensors over {len(t_data)} time points")Predictions generated for 8 sensors over 380 time points
Examine the experimental data characteristics and compare with simulation predictions.
print(f"\nData analysis:")
print(f"Experimental temperature range: {np.nanmin(data):.1f} to {np.nanmax(data):.1f} °C")
print(f"Predicted temperature range: {np.nanmin(T_predicted):.1f} to {np.nanmax(T_predicted):.1f} °C")
print(f"Temperature rise (experimental): {np.nanmax(data) - np.nanmin(data):.1f} °C")
print(f"Temperature rise (predicted): {np.nanmax(T_predicted) - np.nanmin(T_predicted):.1f} °C")Data analysis:
Experimental temperature range: 20.3 to 55.5 °C
Predicted temperature range: 20.0 to 54.7 °C
Temperature rise (experimental): 35.2 °C
Temperature rise (predicted): 34.7 °C
Display the arrangement of sensors relative to the finite element grid. This helps understand the spatial resolution and measurement locations.
fig, ax = plt.subplots(figsize=(12, 6))
# Show finite element positions (from simulation)
element_positions = np.linspace(dx/2, L - dx/2, n)
ax.scatter(element_positions * 1000, np.zeros_like(element_positions),
marker='|', s=100, c='lightblue', alpha=0.6, label=f'{n} Finite Elements')
# Show sensor positions with different colors
colors = plt.cm.tab10(np.linspace(0, 1, n_sensors))
for i, pos in enumerate(sensor_positions):
ax.scatter(pos * 1000, 0, marker='o', s=20, c=[colors[i]],
label=f'Sensor {i+1}', alpha=0.9, edgecolors='black', linewidth=1)
ax.set_xlabel('Position (mm)')
ax.set_title('Finite Element Grid and Sensor Positions')
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
ax.grid(True, alpha=0.3)
ax.set_ylim(-0.5, 0.5)
ax.set_ylabel('')
ax.set_yticks([])
plt.tight_layout()
plt.show()Compare experimental measurements with simulation predictions side by side. Measurements are shown as markers while simulations use lines for clear distinction.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))
# Define colors for each sensor (same for data and simulation)
colors = plt.cm.tab10(np.linspace(0, 1, n_sensors))
# Plot experimental data (markers only)
for i in range(n_sensors):
ax1.plot(t_data / 60, data[:, i], 'o', markersize=3, color=colors[i],
label=f'Sensor {i+1}', alpha=0.7)
ax1.axvline(heat_time / 60, color='red', linestyle='--', alpha=0.7, label='Heating ends')
ax1.set_xlabel('Time (min)')
ax1.set_ylabel('Temperature (°C)')
ax1.set_title('Experimental Data')
ax1.grid(True, alpha=0.3)
ax1.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# Plot simulation predictions (lines only)
for i in range(n_sensors):
ax2.plot(t_data / 60, T_predicted[:, i], '-', linewidth=2, color=colors[i],
label=f'Sensor {i+1}', alpha=0.8)
ax2.axvline(heat_time / 60, color='red', linestyle='--', alpha=0.7, label='Heating ends')
ax2.set_xlabel('Time (min)')
ax2.set_ylabel('Temperature (°C)')
ax2.set_title('Simulation Predictions')
ax2.grid(True, alpha=0.3)
ax2.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.tight_layout()
plt.show()Overlay experimental data and simulation predictions on the same plot. This allows direct comparison of trends and identification of discrepancies.
fig, ax = plt.subplots(figsize=(14, 8))
# Plot experimental data (markers only, same colors as simulation)
for i in range(n_sensors):
ax.plot(t_data / 60, data[:, i], 'o', markersize=4, color=colors[i],
label=f'Sensor {i+1} (Data)', alpha=0.6)
# Plot simulation predictions (lines only, matching colors)
for i in range(n_sensors):
ax.plot(t_data / 60, T_predicted[:, i], '-', linewidth=2.5, color=colors[i],
label=f'Sensor {i+1} (Sim)', alpha=0.9)
ax.axvline(heat_time / 60, color='red', linestyle='--', alpha=0.7,
linewidth=2, label='Heating ends')
ax.set_xlabel('Time (min)')
ax.set_ylabel('Temperature (°C)')
ax.set_title('Experimental Data vs Simulation Predictions')
ax.grid(True, alpha=0.3)
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.tight_layout()
plt.show()Examine the spatial temperature distribution at key time points. This shows how well the simulation captures the temperature gradients along the rod.
# Select key time points for profile comparison
key_times = [heat_time/4, heat_time/2, heat_time, heat_time + cool_time/2]
key_labels = ['25% Heat', '50% Heat', 'Heat End', '50% Cool']fig, axes = plt.subplots(2, 2, figsize=(14, 10))
axes = axes.flatten()
for idx, (time_point, label) in enumerate(zip(key_times, key_labels)):
ax = axes[idx]
# Find closest data point
data_idx = np.argmin(np.abs(t_data - time_point))
actual_time = t_data[data_idx]
# Get simulation prediction at this time
T_profile_sim = simulation_interpolate(actual_time, sensor_positions, t_sim, y_sim)
# Plot comparison
ax.plot(sensor_positions * 1000, data[data_idx, :], 'ro', markersize=8,
label='Experimental', alpha=0.7)
ax.plot(sensor_positions * 1000, T_profile_sim, 'b-', linewidth=2,
label='Simulation', alpha=0.8)
ax.set_xlabel('Position (mm)')
ax.set_ylabel('Temperature (°C)')
ax.set_title(f'{label} (t = {actual_time/60:.1f} min)')
ax.grid(True, alpha=0.3)
ax.legend()
plt.tight_layout()
plt.show()Calculate quantitative metrics to assess the agreement between simulation and experiment.
# Calculate residuals (difference between prediction and measurement)
residuals = T_predicted - data
# Remove NaN values for statistics
valid_mask = ~np.isnan(residuals)
residuals_clean = residuals[valid_mask]
# Calculate statistics
mean_error = np.mean(residuals_clean)
rms_error = np.sqrt(np.mean(residuals_clean**2))
max_error = np.max(np.abs(residuals_clean))
std_error = np.std(residuals_clean)
print(f"\nStatistical Analysis:")
print(f"Mean error (bias): {mean_error:.2f} °C")
print(f"RMS error: {rms_error:.2f} °C")
print(f"Maximum absolute error: {max_error:.2f} °C")
print(f"Standard deviation of errors: {std_error:.2f} °C")Statistical Analysis:
Mean error (bias): -1.25 °C
RMS error: 1.94 °C
Maximum absolute error: 6.06 °C
Standard deviation of errors: 1.48 °C
Plot the residuals to identify systematic patterns or sensor-specific biases.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))
# Plot residuals over time for each sensor
for i in range(n_sensors):
ax1.plot(t_data / 60, residuals[:, i], 'o-', markersize=2, color=colors[i],
label=f'Sensor {i+1}', alpha=0.7)
ax1.axhline(0, color='black', linestyle='-', alpha=0.5)
ax1.axvline(heat_time / 60, color='red', linestyle='--', alpha=0.7, label='Heating ends')
ax1.set_xlabel('Time (min)')
ax1.set_ylabel('Residual (Sim - Data) [°C]')
ax1.set_title('Temperature Residuals vs Time')
ax1.grid(True, alpha=0.3)
ax1.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
# Plot residuals vs position (averaged over time)
mean_residuals_by_sensor = np.nanmean(residuals, axis=0)
ax2.bar(range(1, n_sensors+1), mean_residuals_by_sensor, color=colors, alpha=0.7)
ax2.axhline(0, color='black', linestyle='-', alpha=0.5)
ax2.set_xlabel('Sensor Number')
ax2.set_ylabel('Mean Residual [°C]')
ax2.set_title('Average Temperature Residuals by Sensor')
ax2.grid(True, alpha=0.3, axis='y')
plt.tight_layout()
plt.show()This analysis provides:
Key Findings: - Model captures overall heating/cooling trends - Systematic biases may indicate model parameter refinement opportunities
Recommendations for Students: - Compare your results with these metrics - Investigate sensors with largest residuals - Consider model parameter sensitivity analysis - Validate results with uncertainty analysis
print(f"\nAnalysis complete!")
print(f"Files used: simulation.py (for model), test.lvm (for data)")
print(f"RMS Error: {rms_error:.2f} °C, Max Error: {max_error:.2f} °C")Analysis complete!
Files used: simulation.py (for model), test.lvm (for data)
RMS Error: 1.94 °C, Max Error: 6.06 °C