type
stringclasses
5 values
content
stringlengths
9
163k
includes
#include <linux/msm_tsens.h>
includes
#include <linux/workqueue.h>
includes
#include <linux/completion.h>
includes
#include <linux/cpu.h>
includes
#include <linux/cpufreq.h>
includes
#include <linux/msm_tsens.h>
includes
#include <linux/msm_thermal.h>
includes
#include <linux/platform_device.h>
includes
#include <linux/of.h>
includes
#include <linux/err.h>
includes
#include <linux/slab.h>
includes
#include <linux/of.h>
includes
#include <linux/sysfs.h>
includes
#include <linux/types.h>
includes
#include <linux/thermal.h>
includes
#include <linux/regulator/rpm-smd-regulator.h>
includes
#include <linux/regulator/consumer.h>
includes
#include <linux/regulator/driver.h>
includes
#include <linux/msm_thermal_ioctl.h>
includes
#include <soc/qcom/rpm-smd.h>
includes
#include <soc/qcom/scm.h>
includes
#include <linux/sched/rt.h>
includes
#include <trace/trace_thermal.h>
defines
#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
defines
#define CREATE_TRACE_POINTS
defines
#define TRACE_MSM_THERMAL
defines
#define MAX_CURRENT_UA 100000
defines
#define MAX_RAILS 5
defines
#define MAX_THRESHOLD 2
defines
#define MONITOR_ALL_TSENS -1
defines
#define TSENS_NAME_MAX 20
defines
#define TSENS_NAME_FORMAT "tsens_tz_sensor%d"
defines
#define THERM_SECURE_BITE_CMD 8
defines
#define SENSOR_SCALING_FACTOR 1
defines
#define CPU_DEVICE "cpu%d"
defines
#define POLLING_DELAY 100
defines
#define SYNC_CORE(_cpu) \
defines
#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
defines
#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
defines
#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
defines
#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
defines
#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
defines
#define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \
defines
#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
defines
#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
defines
#define PHASE_RW_ATTR(_phase, _name, _attr, j, _attr_gr) \
defines
#define MX_RW_ATTR(ko_attr, _name, _attr_gp) \
defines
#define show_phase(_name, _variable) \
defines
#define store_phase(_name, _variable, _iscx) \
structs
struct cluster_info { int cluster_id; uint32_t entity_count; struct cluster_info *child_entity_ptr; struct cluster_info *parent_ptr; struct cpufreq_frequency_table *freq_table; int freq_idx; int freq_idx_low; int freq_idx_high; cpumask_t cluster_cores; bool sync_cluster; uint32_t limited_max_freq; uint32_t ...
structs
struct cpu_info { uint32_t cpu; const char *sensor_type; enum sensor_id_type id_type; uint32_t sensor_id; bool offline; bool user_offline; bool hotplug_thresh_clear; struct sensor_threshold threshold[THRESHOLD_MAX_NR]; bool max_freq; uint32_t user_max_freq; uint32_t user_min_freq; uint32_t limited_max_freq;...
structs
/*
 * Per-sensor threshold descriptor: one monitored sensor plus its high/low
 * trip points and the callback fired when a trip occurs.
 * NOTE(review): field semantics inferred from names where the rest of the
 * driver is not visible in this extract — confirm against full source.
 */
struct therm_threshold {
	int32_t sensor_id;
	enum sensor_id_type id_type;
	struct sensor_threshold threshold[MAX_THRESHOLD];	/* MAX_THRESHOLD is 2 (see defines) */
	int32_t trip_triggered;
	void (*notify)(struct therm_threshold *);	/* callback; receives this descriptor */
	struct threshold_info *parent;	/* back-pointer to the owning group */
};
structs
/*
 * A group of therm_threshold entries that are configured/monitored together.
 */
struct threshold_info {
	uint32_t thresh_ct;			/* presumably the number of entries in thresh_list — TODO confirm */
	bool thresh_triggered;
	struct therm_threshold *thresh_list;	/* array of thresh_ct descriptors */
};
structs
/*
 * A voltage rail subject to VDD restriction, with its sysfs attributes.
 */
struct rail {
	const char *name;
	uint32_t freq_req;
	uint32_t min_level;
	uint32_t num_levels;
	/*
	 * Negative values are status codes, not levels:
	 * -1 = disabled, -2 = failed to get regulator handle
	 * (see vdd_rstr_reg_value_show); 0..n index into levels[].
	 */
	int32_t curr_level;
	uint32_t levels[3];
	struct kobj_attribute value_attr;
	struct kobj_attribute level_attr;
	struct regulator *reg;
	struct attribute_group attr_gp;
};
structs
/*
 * Static description of a thermal sensor: its name, optional alias, type
 * string, and reading scale factor (see sensor_info_show for the format
 * in which these are exported to sysfs).
 */
struct msm_sensor_info {
	const char *name;
	const char *alias;	/* may be NULL; sensor_info_show substitutes "" */
	const char *type;
	uint32_t scaling_factor;
};
structs
/*
 * A PMIC rail controlled for PSM / OCR / phase mitigation, with its
 * sysfs "mode" attribute. NOTE(review): this struct is shared by both
 * psm_rails and ocr_rails users in the visible code — confirm which
 * fields each path uses.
 */
struct psm_rail {
	const char *name;
	uint8_t init;
	uint8_t mode;				/* current mode, as shown by psm/ocr_reg_mode_show */
	struct kobj_attribute mode_attr;
	struct rpm_regulator *reg;		/* RPM-owned regulator handle */
	struct regulator *phase_reg;		/* regular framework handle for phase control */
	struct attribute_group attr_gp;
};
structs
/*
 * Container for all mitigation device managers: one hotplug manager and
 * one cpufreq manager per possible CPU.
 */
struct devmgr_devices {
	struct device_manager_data *hotplug_dev;
	struct device_manager_data *cpufreq_dev[NR_CPUS];
};
structs
/*
 * Sysfs-exposed enable flag for VDD restriction
 * (read/written via vdd_rstr_en_show / vdd_rstr_en_store).
 */
struct vdd_rstr_enable {
	struct kobj_attribute ko_attr;
	uint32_t enabled;
};
functions
int validate_client(struct device_clnt_data *clnt) { int ret = 0; struct device_manager_data *dev_mgr = NULL; struct device_clnt_data *client_ptr = NULL; if (!clnt || !clnt->dev_mgr) { pr_err("Invalid client\n"); ret = -EINVAL; goto validate_exit; }
functions
int devmgr_client_cpufreq_update(struct device_manager_data *dev_mgr) { int ret = 0; struct device_clnt_data *clnt = NULL; uint32_t max_freq = UINT_MAX; uint32_t min_freq = 0; mutex_lock(&dev_mgr->clnt_lock); list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) { if (!clnt->req_active) continue; max...
functions
int devmgr_client_hotplug_update(struct device_manager_data *dev_mgr) { int ret = 0; struct device_clnt_data *clnt = NULL; cpumask_t offline_mask = CPU_MASK_NONE; mutex_lock(&dev_mgr->clnt_lock); list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) { if (!clnt->req_active) continue; cpumask_or(&offli...
functions
int devmgr_hotplug_client_request_validate_and_update( struct device_clnt_data *clnt, union device_request *req, enum device_req_type type) { if (type != HOTPLUG_MITIGATION_REQ) return -EINVAL; cpumask_copy(&clnt->request.offline_mask, &req->offline_mask); if (!cpumask_empty(&req->offline_mask)) cl...
functions
int devmgr_cpufreq_client_request_validate_and_update( struct device_clnt_data *clnt, union device_request *req, enum device_req_type type) { if (type != CPUFREQ_MITIGATION_REQ) return -EINVAL; if (req->freq.max_freq < req->freq.min_freq) { pr_err("Invalid Max and Min freq req. max:%u min:%u\n"...
functions
int devmgr_client_request_mitigation(struct device_clnt_data *clnt, enum device_req_type type, union device_request *req) { int ret = 0; struct device_manager_data *dev_mgr = NULL; if (!clnt || !req) { pr_err("Invalid inputs for mitigation.\n"); ret = -EINVAL; goto req_exit; }
functions
void devmgr_unregister_mitigation_client(struct device *dev, struct device_clnt_data *clnt) { int ret = 0; struct device_manager_data *dev_mgr = NULL; if (!clnt) { pr_err("Invalid input\n"); return; }
functions
int msm_thermal_cpufreq_callback(struct notifier_block *nfb, unsigned long event, void *data) { struct cpufreq_policy *policy = data; uint32_t max_freq_req, min_freq_req; switch (event) { case CPUFREQ_INCOMPATIBLE: if (SYNC_CORE(policy->cpu)) { max_freq_req = cpus[policy->cpu].parent_ptr->limited_max_f...
functions
void update_cpu_freq(int cpu) { int ret = 0; if (cpu_online(cpu)) { trace_thermal_pre_frequency_mit(cpu, cpus[cpu].limited_max_freq, cpus[cpu].limited_min_freq); ret = cpufreq_update_policy(cpu); trace_thermal_post_frequency_mit(cpu, cpufreq_quick_get_max(cpu), cpus[cpu].limited_min_freq); if (re...
functions
__init get_sync_cluster(struct device *dev, int *cnt) { int *sync_cluster = NULL, cluster_cnt = 0, ret = 0; char *key = "qcom,synchronous-cluster-id"; if (!of_get_property(dev->of_node, key, &cluster_cnt) || cluster_cnt <= 0 || !core_ptr) return NULL; cluster_cnt /= sizeof(__be32); if (cluster_cnt > core_ptr...
functions
void update_cpu_datastructure(struct cluster_info *cluster_ptr, int *sync_cluster, int sync_cluster_cnt) { int i = 0; bool is_sync_cluster = false; for (i = 0; (sync_cluster) && (i < sync_cluster_cnt); i++) { if (cluster_ptr->cluster_id != sync_cluster[i]) continue; is_sync_cluster = true; break; }
functions
ssize_t cluster_info_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { uint32_t i = 0; ssize_t tot_size = 0, size = 0; for (; i < core_ptr->entity_count; i++) { struct cluster_info *cluster_ptr = &core_ptr->child_entity_ptr[i]; size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size, "...
functions
int create_cpu_topology_sysfs(void) { int ret = 0; struct kobject *module_kobj = NULL; if (!cluster_info_probed) { cluster_info_nodes_called = true; return ret; }
functions
int get_device_tree_cluster_info(struct device *dev, int *cluster_id, cpumask_t *cluster_cpus) { int i, cluster_cnt = 0, ret = 0; uint32_t val = 0; char *key = "qcom,synchronous-cluster-map"; if (!of_get_property(dev->of_node, key, &cluster_cnt) || cluster_cnt <= 0) { pr_debug("Property %s not defined.\n", ...
functions
int get_kernel_cluster_info(int *cluster_id, cpumask_t *cluster_cpus) { uint32_t _cpu, cluster_index, cluster_cnt; for (_cpu = 0, cluster_cnt = 0; _cpu < num_possible_cpus(); _cpu++) { if (topology_physical_package_id(_cpu) < 0) { pr_err("CPU%d topology not initialized.\n", _cpu); return -ENODEV; }
functions
void update_cpu_topology(struct device *dev) { int cluster_id[NR_CPUS] = {[0 ... NR_CPUS-1] = -1}
functions
__ref init_cluster_freq_table(void) { uint32_t _cluster = 0, _cpu = 0, table_len = 0, idx = 0; int ret = 0; struct cluster_info *cluster_ptr = NULL; struct cpufreq_policy *policy = NULL; struct cpufreq_frequency_table *freq_table_ptr = NULL; for (; _cluster < core_ptr->entity_count; _cluster++, table_len = 0, ...
functions
void update_cluster_freq(void) { int online_cpu = -1; struct cluster_info *cluster_ptr = NULL; uint32_t _cluster = 0, _cpu = 0, max = UINT_MAX, min = 0; if (!core_ptr) return; for (; _cluster < core_ptr->entity_count; _cluster++, _cpu = 0, online_cpu = -1, max = UINT_MAX, min = 0) { /* ** If a cluster i...
functions
void do_cluster_freq_ctrl(long temp) { uint32_t _cluster = 0; int _cpu = -1, freq_idx = 0; bool mitigate = false; struct cluster_info *cluster_ptr = NULL; if (temp >= msm_thermal_info.limit_temp_degC) mitigate = true; else if (temp < msm_thermal_info.limit_temp_degC - msm_thermal_info.temp_hysteresis_degC) ...
functions
int check_freq_table(void) { int ret = 0; uint32_t i = 0; static bool invalid_table; if (invalid_table) return -EINVAL; if (freq_table_get) return 0; if (core_ptr) { ret = init_cluster_freq_table(); if (!ret) freq_table_get = 1; else if (ret == -EINVAL) invalid_table = true; return ret; }
functions
int update_cpu_min_freq_all(uint32_t min) { uint32_t cpu = 0, _cluster = 0; int ret = 0; struct cluster_info *cluster_ptr = NULL; bool valid_table = false; if (!freq_table_get) { ret = check_freq_table(); if (ret && !core_ptr) { pr_err("Fail to get freq table. err:%d\n", ret); return ret; }
functions
int vdd_restriction_apply_freq(struct rail *r, int level) { int ret = 0; if (level == r->curr_level) return ret; /* level = -1: disable, level = 0,1,2..n: enable */ if (level == -1) { ret = update_cpu_min_freq_all(r->min_level); if (ret) return ret; else r->curr_level = -1; }
functions
int vdd_restriction_apply_voltage(struct rail *r, int level) { int ret = 0; if (r->reg == NULL) { pr_err("%s don't have regulator handle. can't apply vdd\n", r->name); return -EFAULT; }
functions
int psm_set_mode_all(int mode) { int i = 0; int fail_cnt = 0; int ret = 0; pr_debug("Requesting PMIC Mode: %d\n", mode); for (i = 0; i < psm_rails_cnt; i++) { if (psm_rails[i].mode != mode) { ret = rpm_regulator_set_mode(psm_rails[i].reg, mode); if (ret) { pr_err("Cannot set mode:%d for %s. err:%d", ...
functions
/*
 * Sysfs "show" handler for the VDD-restriction enable flag.
 * Writes the flag into @buf and returns the number of bytes written.
 */
ssize_t vdd_rstr_en_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);

	/* 'enabled' is uint32_t: use %u — the original %d was a format mismatch */
	return snprintf(buf, PAGE_SIZE, "%u\n", en->enabled);
}
functions
ssize_t vdd_rstr_en_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int i = 0; uint8_t en_cnt = 0; uint8_t dis_cnt = 0; uint32_t val = 0; struct kernel_param kp; struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr); mutex_lock(&vdd_rstr_mutex);...
functions
int send_temperature_band(enum msm_thermal_phase_ctrl phase, enum msm_temp_band req_band) { int ret = 0; uint32_t msg_id; struct msm_rpm_request *rpm_req; unsigned int band = req_band; uint32_t key, resource, resource_id; if (phase < 0 || phase >= MSM_PHASE_CTRL_NR || req_band <= 0 || req_band >= MSM_TEMP_MAX...
functions
/*
 * Pack up to sizeof(uint32_t) leading characters of @inp into a uint32_t,
 * first character in the least significant byte (little-endian packing).
 * A shorter string simply leaves the remaining high bytes zero.
 */
uint32_t msm_thermal_str_to_int(const char *inp)
{
	int i, len;
	uint32_t output = 0;

	len = strnlen(inp, sizeof(uint32_t));
	for (i = 0; i < len; i++)
		/*
		 * Promote through unsigned char: left-shifting a
		 * sign-extended (negative) plain char is undefined behavior
		 * and would also smear 1-bits into the higher bytes for
		 * characters >= 0x80.
		 */
		output |= (uint32_t)(unsigned char)inp[i] << (i * 8);

	return output;
}
functions
ssize_t sensor_info_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int i; ssize_t tot_size = 0, size = 0; for (i = 0; i < sensor_cnt; i++) { size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size, "%s:%s:%s:%d ", sensors[i].type, sensors[i].name, sensors[i].alias ? : "", sensors[...
functions
ssize_t vdd_rstr_reg_value_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int val = 0; struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr); /* -1:disabled, -2:fail to get regualtor handle */ if (reg->curr_level < 0) val = reg->curr_level; else val = reg->levels[reg->curr_level]; ...
functions
/*
 * Sysfs "show" handler: report the rail's current restriction level
 * (may be negative — a status code rather than a level index).
 */
ssize_t vdd_rstr_reg_level_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct rail *rail_ptr = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
	int level = rail_ptr->curr_level;

	return snprintf(buf, PAGE_SIZE, "%d\n", level);
}
functions
ssize_t vdd_rstr_reg_level_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int val = 0; struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr); mutex_lock(&vdd_rstr_mutex); if (vdd_rstr_en.enabled == 0) goto done_store_level; ret = kstrtouint(buf, 10,...
functions
int request_optimum_current(struct psm_rail *rail, enum ocr_request req) { int ret = 0; if ((!rail) || (req >= OPTIMUM_CURRENT_NR) || (req < 0)) { pr_err("Invalid input %d\n", req); ret = -EINVAL; goto request_ocr_exit; }
functions
int ocr_set_mode_all(enum ocr_request req) { int ret = 0, i; for (i = 0; i < ocr_rail_cnt; i++) { if (ocr_rails[i].mode == req) continue; ret = request_optimum_current(&ocr_rails[i], req); if (ret) goto ocr_set_mode_exit; ocr_rails[i].mode = req; }
functions
/*
 * Sysfs "show" handler: report the current optimum-current-request mode
 * of this rail.
 */
ssize_t ocr_reg_mode_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	struct psm_rail *rail_ptr = PSM_REG_MODE_FROM_ATTRIBS(attr);
	int mode = rail_ptr->mode;

	return snprintf(buf, PAGE_SIZE, "%d\n", mode);
}
functions
ssize_t ocr_reg_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int val = 0; struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); if (!ocr_enabled) return count; mutex_lock(&ocr_mutex); ret = kstrtoint(buf, 10, &val); if (ret) { pr_err("Invali...
functions
ssize_t store_phase_request(const char *buf, size_t count, bool is_cx) { int ret = 0, val; struct mutex *phase_mutex = (is_cx) ? (&cx_mutex) : (&gfx_mutex); enum msm_thermal_phase_ctrl phase_req = (is_cx) ? MSM_CX_PHASE_CTRL : MSM_GFX_PHASE_CTRL; ret = kstrtoint(buf, 10, &val); if (ret) { pr_err("Invalid inpu...
functions
/*
 * Sysfs "show" handler: report the current PSM mode of this rail.
 */
ssize_t psm_reg_mode_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct psm_rail *rail_ptr = PSM_REG_MODE_FROM_ATTRIBS(attr);
	int mode = rail_ptr->mode;

	return snprintf(buf, PAGE_SIZE, "%d\n", mode);
}
functions
ssize_t psm_reg_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; int val = 0; struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr); mutex_lock(&psm_mutex); ret = kstrtoint(buf, 10, &val); if (ret) { pr_err("Invalid input %s for mode\n", buf); goto...
functions
int check_sensor_id(int sensor_id) { int i = 0; bool hw_id_found = false; int ret = 0; for (i = 0; i < max_tsens_num; i++) { if (sensor_id == tsens_id_map[i]) { hw_id_found = true; break; }
functions
int create_sensor_id_map(void) { int i = 0; int ret = 0; tsens_id_map = kzalloc(sizeof(int) * max_tsens_num, GFP_KERNEL); if (!tsens_id_map) { pr_err("Cannot allocate memory for tsens_id_map\n"); return -ENOMEM; }
functions
int vdd_restriction_apply_all(int en) { int i = 0; int en_cnt = 0; int dis_cnt = 0; int fail_cnt = 0; int ret = 0; for (i = 0; i < rails_cnt; i++) { if (rails[i].freq_req == 1) if (freq_table_get) ret = vdd_restriction_apply_freq(&rails[i], en ? 0 : -1); else continue; else ret = vdd_re...