Electronics – Reading the internal temperature sensor on an STM32

Tags: adc, conversion, stm32, temperature

I'm trying to read the internal temperature sensor. Each time, the value from the ADC conversion is 296, which results in a negative temperature. Should I add something to the code below, enable some peripheral, or are my calculations wrong?

// Datasheet constants for the internal temperature sensor (avg slope, V at 25 °C).
#define TEMP_SENSOR_AVG_SLOPE_MV_PER_CELSIUS                        2.5f
#define TEMP_SENSOR_VOLTAGE_MV_AT_25                                760.0f
// NOTE(review): 1210 mV is the nominal V_REFINT (internal reference), NOT the
// ADC reference voltage. The ADC reference is VREF+/VDDA (see the answer below),
// so this constant makes every converted voltage far too small.
#define ADC_REFERENCE_VOLTAGE_MV                                    1210.0f
#define ADC_MAX_OUTPUT_VALUE                                        4095.0f

int32_t sensorValue, temperature;

__HAL_ADC_ENABLE(&hadc1);

// Disable Vbat signal from input channel and wake up temp sensor from power down mode
// BUG (per the accepted answer): this CLEARS TSVREFE and therefore leaves the
// temperature sensor powered down; the bit must be SET to enable the sensor:
//     ADC->CCR |= ADC_CCR_TSVREFE;
ADC->CCR &= ~(ADC_CCR_TSVREFE);


// Software-triggered single conversion; wait up to 100 ms for end-of-conversion.
HAL_ADC_Start(&hadc1);
if(HAL_ADC_PollForConversion(&hadc1, 100) == HAL_OK)
{
    sensorValue = (int32_t)HAL_ADC_GetValue(&hadc1);
    HAL_ADC_Stop(&hadc1);
    // Raw 12-bit code -> millivolts (integer math truncates toward zero).
    sensorValue = sensorValue * ADC_REFERENCE_VOLTAGE_MV / ADC_MAX_OUTPUT_VALUE;
    // Datasheet linear formula: T = (V_sense - V_25) / avg_slope + 25.
    temperature = (int32_t)((sensorValue - TEMP_SENSOR_VOLTAGE_MV_AT_25) / TEMP_SENSOR_AVG_SLOPE_MV_PER_CELSIUS + 25);
}
else
{
    // Sentinel on conversion timeout: physically impossible absolute-zero value.
    temperature = -273;
}

return temperature;

–EDIT

// Average-slope constants kept from the first attempt (unused by the
// calibration-based formula below).
#define TEMP_SENSOR_AVG_SLOPE_MV_PER_CELSIUS                        2.5f
#define TEMP_SENSOR_VOLTAGE_MV_AT_25                                760.0f
#define ADC_REFERENCE_VOLTAGE_MV                                    3300.0f
#define ADC_MAX_OUTPUT_VALUE                                        4095.0f
// Factory calibration raw ADC readings, stored in system memory
// (addresses for STM32F4xx): sensor sampled at 110 °C and 30 °C with VDDA = 3.3 V.
#define TEMP110_CAL_VALUE                                           ((uint16_t*)((uint32_t)0x1FFF7A2E))
#define TEMP30_CAL_VALUE                                            ((uint16_t*)((uint32_t)0x1FFF7A2C))
#define TEMP110                                                     110.0f
#define TEMP30                                                      30.0f

int32_t temperature;
float sensorValue;
// NOTE(review): these two locals are initialized but never used — the formula
// below dereferences the calibration pointers again directly.
float adcCalValue30 = (float)(*TEMP30_CAL_VALUE);
float adcCalValue110 = (float)(*TEMP110_CAL_VALUE);

__HAL_ADC_ENABLE(&hadc1);

// Disable Vbat signal from input channel and wake up temp sensor from power down mode
// (TSVREFE is now correctly SET, fixing the bug from the first version.)
ADC->CCR |= ADC_CCR_TSVREFE;
ADC->CCR &= ~ADC_CCR_VBATE ;


HAL_ADC_Start(&hadc1);
if(HAL_ADC_PollForConversion(&hadc1, 100) == HAL_OK)
{
    sensorValue = (float)HAL_ADC_GetValue(&hadc1);
    HAL_ADC_Stop(&hadc1);
    // Linear interpolation between the two factory calibration points:
    // T = (110 - 30) / (CAL110 - CAL30) * (raw - CAL30) + 30.
    // NOTE(review): the cal values were taken at VDDA = 3.3 V; if the actual
    // VREF+ differs, the raw reading should be scaled first (see the answer).
    temperature = (int32_t)((TEMP110 - TEMP30) / ((float)(*TEMP110_CAL_VALUE) - (float)(*TEMP30_CAL_VALUE)) * (sensorValue - (float)(*TEMP30_CAL_VALUE)) + TEMP30);
}
else
{
    // Sentinel on conversion timeout.
    temperature = -273;
}

return temperature;

(Figure: temperature calculation formula from the reference manual)
(Figure: factory calibration value addresses from the datasheet)

ADC configuration:

  ADC_ChannelConfTypeDef sConfig;

    /**Configure the global features of the ADC (Clock, Resolution, Data Alignment and number of conversion) 
    */
  hadc1.Instance = ADC1;
  hadc1.Init.ClockPrescaler = ADC_CLOCK_SYNC_PCLK_DIV4;
  hadc1.Init.Resolution = ADC_RESOLUTION_12B;
  hadc1.Init.ScanConvMode = DISABLE;            // single channel, no sequence scan
  hadc1.Init.ContinuousConvMode = DISABLE;      // one conversion per software start
  hadc1.Init.DiscontinuousConvMode = DISABLE;
  hadc1.Init.ExternalTrigConvEdge = ADC_EXTERNALTRIGCONVEDGE_NONE;
  hadc1.Init.ExternalTrigConv = ADC_SOFTWARE_START;  // triggered by HAL_ADC_Start
  hadc1.Init.DataAlign = ADC_DATAALIGN_RIGHT;
  hadc1.Init.NbrOfConversion = 1;
  hadc1.Init.DMAContinuousRequests = DISABLE;
  hadc1.Init.EOCSelection = ADC_EOC_SINGLE_CONV;
  if (HAL_ADC_Init(&hadc1) != HAL_OK)
  {
    Error_Handler();
  }

    /**Configure for the selected ADC regular channel its corresponding rank in the sequencer and its sample time. 
    */
  sConfig.Channel = ADC_CHANNEL_TEMPSENSOR;
  sConfig.Rank = 1;
  // Longest available sample time; the temperature sensor needs a long
  // sampling time (the answer cites >= 10 µs — whether 480 ADC clock cycles
  // satisfies that depends on the actual ADC clock; verify for this board).
  sConfig.SamplingTime = ADC_SAMPLETIME_480CYCLES;
  sConfig.Offset = 0;
  if (HAL_ADC_ConfigChannel(&hadc1, &sConfig) != HAL_OK)
  {
    Error_Handler();
  }

I'm getting the output around 35 degrees. Is it okay to have such a big offset?

Best Answer

Like PeterJ pointed out, the first flaw is that you have to set the ADC_CCR_TSVREFE bit and not reset it.

I've no idea how you set the sample and hold time, but I hope it is correct. It has to be at least 10 µs for an accurate measurement (datasheet section about temperature sensor).

Your next big flaw is in your thinking that the reference voltage is 1.21 V. That is the nominal value for the \$V_{refint}\$. This voltage is not the reference voltage for the ADC. The reference voltage is usually \$V_{DDA}\$ or a different externally supplied voltage on the \$V_{REF+}\$-pin. But it can't be higher than \$V_{DDA}\$ nor can it be lower than \$V_{DDA}\$-1.2 V.

With your conversion result of 950, and taking 3.3 V you would end up with 27.2 °C, which seems like a good start. If you have 3 V it'd be -0.6 °C, which also seems okay.

Why do I consider -0.6 °C a good value? Because calculation using the average values is crap.

I don't know why, but STM doesn't promote their calibrated values much. Every device has two ADC raw values taken at 30 °C and 110 °C at 3.3 V stored internally. Using those values, you end up with more reasonable temperature values without performing calibration.


Something along these lines should do:

// see datasheet for position of the calibration values, this is for STM32F429
// see datasheet for position of the calibration values, this is for STM32F429
// Factory-stored raw ADC readings of the temp sensor at 30 °C / 110 °C, VDDA = 3.3 V.
const uint16_t* const ADC_TEMP_3V3_30C =  reinterpret_cast<uint16_t*>(0x1FFF7A2C);
const uint16_t* const ADC_TEMP_3V3_110C =  reinterpret_cast<uint16_t*>(0x1FFF7A2E);
const float CALIBRATION_REFERENCE_VOLTAGE = 3.3F;

const float REFERENCE_VOLTAGE = 3.0F; // supplied with Vref+ or VDDA

// scale constants to current reference voltage
// The cal values were measured against a 3.3 V reference; scaling by
// Vref_actual/Vref_cal converts them to the raw codes this board would read.
float adcCalTemp30C = static_cast<float>(*ADC_TEMP_3V3_30C) * (REFERENCE_VOLTAGE/CALIBRATION_REFERENCE_VOLTAGE);
float adcCalTemp110C = static_cast<float>(*ADC_TEMP_3V3_110C) * (REFERENCE_VOLTAGE/CALIBRATION_REFERENCE_VOLTAGE);

// Placeholder: substitute the raw conversion result (e.g. HAL_ADC_GetValue).
uint16_t adcTempValue = SAMPLED VALUE;

// Two-point linear interpolation between the scaled calibration readings.
float temperature = (static_cast<float>(adcTempValue) - adcCalTemp30C)/(adcCalTemp110C - adcCalTemp30C) * (110.0F - 30.0F) + 30.0F;

I'm used to C++, so maybe not your coding style, but that shouldn't be a big problem.