Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | range / classes | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 2 – 1.02M | no |
| ext | string | 10 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4 – 245 | no |
| max_stars_repo_name | string | length 6 – 130 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | list | length 1 – 10 | no |
| max_stars_count | int64 | 1 – 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 4 – 245 | no |
| max_issues_repo_name | string | length 6 – 130 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | list | length 1 – 10 | no |
| max_issues_count | int64 | 1 – 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 4 – 245 | no |
| max_forks_repo_name | string | length 6 – 130 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | list | length 1 – 10 | no |
| max_forks_count | int64 | 1 – 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 2 – 1.02M | no |
| avg_line_length | float64 | 1 – 417k | no |
| max_line_length | int64 | 1 – 987k | no |
| alphanum_fraction | float64 | 0 – 1 | no |
| content_no_comment | string | length 0 – 1.01M | no |
| is_comment_constant_removed | bool | 1 class | no |
| is_sharp_comment_removed | bool | 1 class | no |
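The rows below can also be consumed programmatically. A minimal sketch, assuming the table is published as a Hugging Face `datasets` dataset; the dataset id is purely illustrative:

```python
from itertools import islice

from datasets import load_dataset  # assumes the `datasets` library is installed

# Hypothetical dataset id; substitute the real identifier for this table.
ds = load_dataset("example-org/python-files", split="train", streaming=True)

for row in islice(ds, 2):
    # Column names follow the schema above.
    print(row["hexsha"], row["size"], row["max_stars_repo_path"])
```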
Row 1
- hexsha: f70930266a9947b1e4d78fd8cb4f0ee3fd4e8801 · size: 7,397 · ext: py · lang: Python
- path: src/api/dataflow/shared/datamanage/datamanage_helper.py
- repo: Chromico/bk-base @ be822d9bbee544a958bed4831348185a75604791 · licenses: ["MIT"]
  (path, repo, head hexsha, and licenses are identical across the stars/issues/forks column groups)
- stars: 84 (2021-06-30T06:20:23.000Z – 2022-03-22T03:05:49.000Z)
- issues: 7 (2021-06-30T06:21:16.000Z – 2022-03-29T07:36:13.000Z)
- forks: 40 (2021-06-30T06:21:26.000Z – 2022-03-29T12:42:26.000Z)

content:
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from django.utils.translation import ugettext as _
from dataflow.pizza_settings import KAFKA_OP_ROLE_NAME
from dataflow.shared.api.modules.datamanage import DatamanageApi
from dataflow.shared.api.util.api_driver import APIResponseUtil as res_util
from dataflow.shared.databus.databus_helper import DatabusHelper
class DatamanageHelper(object):
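    """Thin wrappers around DatamanageApi endpoints: each helper builds request
    params, calls the endpoint, and (in most cases) validates the response via
    res_util.check_response before returning res.data."""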
@staticmethod
def op_metric_report(message, kafka_topic, tags):
request_params = {
"message": json.dumps(message),
"kafka_topic": kafka_topic,
"tags": tags,
}
res = DatamanageApi.metrics.report(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_result_table_metric(database, sql, geog_area_code):
request_params = {"database": database, "sql": sql, "tags": [geog_area_code]}
res = DatamanageApi.metrics.query(request_params)
res_util.check_response(res)
return res.data
    # Optimized metrics query: no longer fetched via raw SQL
@staticmethod
def get_result_table_metric_v2(request_params):
res = DatamanageApi.metrics_v2.list(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_alert_detail_v2(request_params):
res = DatamanageApi.alert_detail.list(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_batch_executions(processing_ids):
"""
{
'interval': 3600,
'execute_history': []
}
@param processing_ids:
@return:
"""
request_params = {"processing_ids": processing_ids}
res = DatamanageApi.dmonitor.batch_executions(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_alert_details(flow_id, start_time, end_time):
"""
[{
'message': 'xxx',
'message_en': 'xxx',
'full_message': 'xxx',
'full_message_en': 'xxx'
}]
@param flow_id:
@param start_time:
@param end_time:
@return:
"""
request_params = {
"flow_id": flow_id,
"start_time": start_time,
"end_time": end_time,
"dimensions": json.dumps({"generate_type": "user"}),
}
res = DatamanageApi.dmonitor.alert_details(request_params)
res_util.check_response(res)
return res.data
@staticmethod
def get_metric_kafka_server(geog_area_code):
channels_info = DatabusHelper.list_channel_info([KAFKA_OP_ROLE_NAME, geog_area_code])
if len(channels_info) != 1:
            raise Exception(_("kafka-op connection info is not unique; please contact an administrator."))
metric_kafka_server = "{}:{}".format(
channels_info[0]["cluster_domain"],
channels_info[0]["cluster_port"],
)
return metric_kafka_server
@staticmethod
def create_alert_config(flow_id, dmonitor_type="alert_configs"):
request_params = {
"flow_id": flow_id,
"dmonitor_type": dmonitor_type,
}
res = DatamanageApi.dmonitor_dataflow.create(request_params)
res_util.check_response(res)
return res.data
    # Data correction endpoints
@staticmethod
def create_data_correct(params):
res = DatamanageApi.data_correct.create(params)
res_util.check_response(res)
return res.data
@staticmethod
def update_data_correct(params):
res = DatamanageApi.data_correct.update(params)
res_util.check_response(res)
return res.data
@staticmethod
def get_data_correct(params):
res = DatamanageApi.data_correct.retrieve(params)
res_util.check_response(res)
return res.data
@staticmethod
def del_data_correct(params):
res = DatamanageApi.data_correct.delete(params)
res_util.check_response(res)
return res.data
    # Data model instance (application) endpoints
@staticmethod
def create_data_model_instance(params):
res = DatamanageApi.data_model_instance.create(params)
res_util.check_response(res)
return res.data
@staticmethod
def update_data_model_instance(params):
res = DatamanageApi.data_model_instance.update(params)
res_util.check_response(res)
return res.data
@staticmethod
def get_data_model_instance(params):
res = DatamanageApi.data_model_instance.retrieve(params)
res_util.check_response(res)
return res.data
@staticmethod
def del_data_model_instance(params):
res = DatamanageApi.data_model_instance.delete(params)
res_util.check_response(res)
return res.data
@staticmethod
def rollback_data_model_instance(params):
res = DatamanageApi.data_model_instance.rollback(params)
res_util.check_response(res)
return res.data
    # Data model indicator endpoints
@staticmethod
def create_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.create(params)
res_util.check_response(res)
return res.data
@staticmethod
def update_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.update(params)
res_util.check_response(res)
return res.data
@staticmethod
def get_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.retrieve(params)
res_util.check_response(res)
return res.data
@staticmethod
def del_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.delete(params)
res_util.check_response(res)
return res.data
@staticmethod
def rollback_data_model_indicator(params):
res = DatamanageApi.data_model_indicator.rollback(params)
res_util.check_response(res)
return res.data
@staticmethod
def check_data_model_instance(params):
res = DatamanageApi.data_model_instance_check.list(params)
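        # Unlike the other helpers, this returns the raw API response without check_response.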
return res
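For orientation, a hedged usage sketch (assumes a configured BK-BASE Django environment and a reachable Datamanage API; all argument values are hypothetical):

```python
# Hypothetical values throughout; requires the BK-BASE settings to be importable.
from dataflow.shared.datamanage.datamanage_helper import DatamanageHelper

metric = DatamanageHelper.get_result_table_metric(
    database="monitor_data_metrics",   # hypothetical database name
    sql="SELECT * FROM demo LIMIT 1",  # hypothetical query
    geog_area_code="inland",           # hypothetical geog area tag
)
```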
avg_line_length: 33.931193 · max_line_length: 111 · alphanum_fraction: 0.680681
content_no_comment: identical to the content above with comments and docstrings stripped (near-verbatim duplicate, omitted)
is_comment_constant_removed: true · is_sharp_comment_removed: true
Row 2
- hexsha: f709302f607362b2ef2384ba1503f4713840f6d2 · size: 3,390 · ext: py · lang: Python
- path: tic-tac.py
- repo: kobe81venum/tic-tac @ c34841f31655bbfa85b3ae3ec548e99d0555589a · licenses: ["MIT"]
- stars/issues/forks counts and event datetimes: null

content:
# main game section
# %%
plansza_do_gry = {'7':' ','8':' ','9':' ',
'4':' ','5':' ','6':' ',
'1':' ','2':' ','3':' '}
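# The keys mirror a numeric keypad: 7-8-9 on the top row, 1-2-3 on the bottom.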
klawisze_gry = list(plansza_do_gry)
# print(klawisze_gry)
def drukuj_plansze(pole):
print(f"{pole['7']} | {pole['8']} | {pole['9']}")
print('- + - + -')
print(f"{pole['4']} | {pole['5']} | {pole['6']}")
print('- + - + -')
print(f"{pole['1']} | {pole['2']} | {pole['3']}")
# drukuj_plansze(plansza_do_gry)
def gra():
gracz = 'X'
licznik=0
for i in range(10):
drukuj_plansze(plansza_do_gry)
        move = input(f"It is {gracz}'s move. Choose where to place your mark: ")
        if move in plansza_do_gry and plansza_do_gry[move] == ' ':
            plansza_do_gry[move] = gracz
            licznik += 1
        else:
            print('That square is taken or invalid\nplace your mark on another square')
            continue
        # Check the eight winning lines once at least five marks have been placed.
        if licznik >= 5:
            linie_wygrywajace = [
                ('7', '8', '9'), ('4', '5', '6'), ('1', '2', '3'),  # rows
                ('1', '4', '7'), ('2', '5', '8'), ('3', '6', '9'),  # columns
                ('1', '5', '9'), ('3', '5', '7'),                   # diagonals
            ]
            wygrana = False
            for a, b, c in linie_wygrywajace:
                if plansza_do_gry[a] == plansza_do_gry[b] == plansza_do_gry[c] != ' ':
                    wygrana = True
                    break
            if wygrana:
                drukuj_plansze(plansza_do_gry)
                print("\nGame over")
                print(f"Player {gracz} wins")
                break
        if licznik == 9:
            print("\nGame over")
            print("draw")
            break
if gracz == 'X':
gracz = 'O'
else:
gracz = 'X'
    restart = input('Play again?\n(t/n) ')
    if restart == 't' or restart == 'T':
        for key in klawisze_gry:
            plansza_do_gry[key] = ' '
        gra()  # recursive call to restart the game

# entry point
if __name__ == '__main__':  # relevant when this file is imported as part of a package
    gra()
# %%
avg_line_length: 32.912621 · max_line_length: 90 · alphanum_fraction: 0.484956
content_no_comment: identical to the content above with comments stripped (near-verbatim duplicate, omitted)
is_comment_constant_removed: true · is_sharp_comment_removed: true
Row 3
- hexsha: f70931652a71d14009a5d59d657e6473e79290df · size: 401 · ext: py · lang: Python
- path: backend/thread_33988/wsgi.py
- repo: crowdbotics-apps/thread-33988 @ 4a80fd631efce5785fcfadc057b4f90ac9a4749e · licenses: ["FTL", "AML", "RSA-MD"]
- stars/issues/forks counts and event datetimes: null

content:
"""
WSGI config for thread_33988 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'thread_33988.settings')
application = get_wsgi_application()
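As a quick sanity check, the exposed callable can be imported directly (a minimal sketch; assumes Django is installed and the project directory containing `thread_33988` is on the path):

```python
# Not part of the original file; requires the project's Django settings to be importable.
from thread_33988.wsgi import application

assert callable(application)  # WSGI servers invoke this callable per request
```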
avg_line_length: 23.588235 · max_line_length: 78 · alphanum_fraction: 0.790524
content_no_comment: identical to the content above with the docstring stripped (near-verbatim duplicate, omitted)
is_comment_constant_removed: true · is_sharp_comment_removed: true
Row 4
- hexsha: f709328daa9880d8b3313f573584ecdd16f15dea · size: 30,599 · ext: py · lang: Python
- path: run_classifier.py
- repo: ksboy/superglue @ 12b5bf6d729ba5b95b8a29682f6bfa584131ae9c · licenses: ["Apache-2.0"]
- stars/issues/forks counts and event datetimes: null

content:
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import sys
import random
from tqdm import tqdm, trange
import numpy as np
from scipy.special import softmax
# from sklearn.utils.extmath import softmax
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss, MSELoss
from tensorboardX import SummaryWriter
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--loss_weight",
default=None,
type=str,
help="The Loss Weight.")
parser.add_argument("--pop_classifier_layer",
action='store_true',
help="pop classifier layer")
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict",
action='store_true',
help="Whether to run predict on the test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--predict_batch_size",
default=8,
type=int,
help="Total batch size for predict.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--overwrite_output_dir',
action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
args.device = device
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval and not args.do_predict:
raise ValueError("At least one of `do_train`, `do_eval` or `do_predict` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
print("pop_classifier_layer", args.pop_classifier_layer)
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels, pop_classifier_layer=args.pop_classifier_layer)
if args.local_rank == 0:
torch.distributed.barrier()
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
print("loss_weight", args.loss_weight)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
# Prepare data loader
train_examples = processor.get_train_examples(args.data_dir)
cached_train_features_file = os.path.join(args.data_dir, 'train_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
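        # Load features from the on-disk cache when present; otherwise build them and
        # (on the main process) write the cache. A stale cache is reused silently.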
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
        except Exception:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
# define a new function to compute loss values for both output_modes
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
# print(input_ids)
# print(logits)
# print(label_ids)
if output_mode == "classification":
                if args.loss_weight is None:
loss_fct = CrossEntropyLoss()
else:
loss_weight= [int(_) for _ in args.loss_weight.split(",")]
loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())
loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), label_ids.view(-1))
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
# modify learning rate with special warm up BERT uses
# if args.fp16 is False, BertAdam is used that handles this automatically
lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.local_rank in [-1, 0]:
tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
tb_writer.add_scalar('loss', loss.item(), global_step)
### Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
### Example:
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
# Good practice: save your training arguments together with the trained model
output_args_file = os.path.join(args.output_dir, 'training_args.bin')
torch.save(args, output_args_file)
else:
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
model.to(device)
### Evaluation
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_dev_examples(args.data_dir)
cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_eval_features_file, "rb") as reader:
eval_features = pickle.load(reader)
        except Exception:
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving eval features into cached file %s", cached_eval_features_file)
with open(cached_eval_features_file, "wb") as writer:
pickle.dump(eval_features, writer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data) # Note that this sampler samples randomly
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
out_label_ids = None
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
            print(logits)
print(label_ids)
print(logits.view(-1, num_labels), label_ids.view(-1))
# create eval loss and other metric required by the task
if output_mode == "classification":
                if args.loss_weight is None:
loss_fct = CrossEntropyLoss()
else:
loss_weight= [int(_) for _ in args.loss_weight.split(",")]
loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
out_label_ids = label_ids.detach().cpu().numpy()
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
print(preds)
        def swap_value(a):
            a[0], a[1] = a[1], a[0]
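        # COPA supplies two alternatives per premise as consecutive rows. After the
        # softmax below, the pairwise comparison decides which alternative wins and
        # swaps that row's two class scores where needed, so that a per-row argmax
        # matches the pairwise decision.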
if task_name == "copa":
preds = softmax(preds,axis=1)
print(preds)
for i in range(int(len(preds)/2)):
if preds[2*i][0]>=preds[2*i+1][0]:
if preds[2*i][0]<preds[2*i][1]:
# print(preds[2*i][0], preds[2*i][1])
swap_value(preds[2*i])
# print(preds[2*i][0], preds[2*i][1])
if preds[2*i+1][0]>preds[2*i+1][1]:
swap_value(preds[2*i+1])
else:
if preds[2*i][0]>preds[2*i][1]:
swap_value(preds[2*i])
if preds[2*i+1][0]<preds[2*i+1][1]:
swap_value(preds[2*i+1])
print(preds)
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
print(preds,out_label_ids)
result = compute_metrics(task_name, preds, out_label_ids)
loss = tr_loss/global_step if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
### Prediction
if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
predict_examples = processor.get_test_examples(args.data_dir)
cached_predict_features_file = os.path.join(args.data_dir, 'predict_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_predict_features_file, "rb") as reader:
predict_features = pickle.load(reader)
        except Exception:
predict_features = convert_examples_to_features(
predict_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving predict features into cached file %s", cached_predict_features_file)
with open(cached_predict_features_file, "wb") as writer:
pickle.dump(predict_features, writer)
logger.info("***** Running prediction *****")
logger.info(" Num examples = %d", len(predict_examples))
logger.info(" Batch size = %d", args.predict_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in predict_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in predict_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in predict_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.float)
predict_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# Run prediction for full data
if args.local_rank == -1:
predict_sampler = SequentialSampler(predict_data)
else:
predict_sampler = DistributedSampler(predict_data) # Note that this sampler samples randomly
predict_dataloader = DataLoader(predict_data, sampler=predict_sampler, batch_size=args.predict_batch_size)
model.eval()
# predict_loss = 0
# nb_predict_steps = 0
preds = []
out_label_ids = None
for input_ids, input_mask, segment_ids, label_ids in tqdm(predict_dataloader, desc="predicting"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
            print(logits)
print(label_ids)
# create eval loss and other metric required by the task
# if output_mode == "classification":
# loss_fct = CrossEntropyLoss()
# tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
# elif output_mode == "regression":
# loss_fct = MSELoss()
# tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
#
# eval_loss += tmp_eval_loss.mean().item()
# nb_predict_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
# out_label_ids = label_ids.detach().cpu().numpy()
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(
# out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
#
# eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
print(preds)
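        # For COPA, rows are (alternative 1, alternative 2) pairs: emit 0 if the
        # first alternative's class-0 score is higher, else 1.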
if task_name == "copa":
preds = softmax(preds,axis=1)
print(preds)
results=[]
for i in range(int(len(preds)/2)):
if preds[2*i][0]>=preds[2*i+1][0]:
results.append(0)
else:
results.append(1)
preds= results
label_map = {i : i for i in range(2)}
else:
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
label_map = {i : label for i, label in enumerate(label_list)}
print(preds)
# result = compute_metrics(task_name, preds, out_label_ids)
# loss = tr_loss/global_step if args.do_train else None
# result['eval_loss'] = eval_loss
# result['global_step'] = global_step
# result['loss'] = loss
output_predict_file = os.path.join(args.output_dir, "predict_results.txt")
with open(output_predict_file, "w") as writer:
logger.info("***** Predict results *****")
for i in range(len(preds)):
label_i = label_map[preds[i]]
# json_i= "\"idx: %d, \"label\": \"label_i\""
writer.write("{\"idx\": %d, \"label\": \"%s\"}\n"%(i,label_i))
# for key in sorted(result.keys()):
# logger.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
avg_line_length: 47.513975 · max_line_length: 145 · alphanum_fraction: 0.587013
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import sys
import random
from tqdm import tqdm, trange
import numpy as np
from scipy.special import softmax
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss, MSELoss
from tensorboardX import SummaryWriter
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
"bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--loss_weight",
default=None,
type=str,
help="The Loss Weight.")
parser.add_argument("--pop_classifier_layer",
action='store_true',
help="pop classifier layer")
parser.add_argument("--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict",
action='store_true',
help="Whether to run predict on the test set.")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--predict_batch_size",
default=8,
type=int,
help="Total batch size for predict.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--overwrite_output_dir',
action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
args.device = device
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval and not args.do_predict:
raise ValueError("At least one of `do_train`, `do_eval` or `do_predict` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
print("pop_classifier_layer", args.pop_classifier_layer)
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels, pop_classifier_layer=args.pop_classifier_layer)
if args.local_rank == 0:
torch.distributed.barrier()
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
print("loss_weight", args.loss_weight)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
if args.do_train:
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
train_examples = processor.get_train_examples(args.data_dir)
cached_train_features_file = os.path.join(args.data_dir, 'train_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
except:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
train_sampler = RandomSampler(train_data)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
if output_mode == "classification":
if args.loss_weight == None:
loss_fct = CrossEntropyLoss()
else:
loss_weight= [int(_) for _ in args.loss_weight.split(",")]
loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())
loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), label_ids.view(-1))
if n_gpu > 1:
loss = loss.mean() if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.local_rank in [-1, 0]:
tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
tb_writer.add_scalar('loss', loss.item(), global_step)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
output_args_file = os.path.join(args.output_dir, 'training_args.bin')
torch.save(args, output_args_file)
else:
model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
model.to(device)
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
eval_examples = processor.get_dev_examples(args.data_dir)
cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_eval_features_file, "rb") as reader:
eval_features = pickle.load(reader)
except:
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving eval features into cached file %s", cached_eval_features_file)
with open(cached_eval_features_file, "wb") as writer:
pickle.dump(eval_features, writer)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
eval_sampler = SequentialSampler(eval_data)
else:
eval_sampler = DistributedSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
eval_loss = 0
nb_eval_steps = 0
preds = []
out_label_ids = None
for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
print(logits)
print(label_ids)
print(logits.view(-1, num_labels), label_ids.view(-1))
if output_mode == "classification":
if args.loss_weight is None:
loss_fct = CrossEntropyLoss()
else:
loss_weight = [int(w) for w in args.loss_weight.split(",")]
loss_fct = CrossEntropyLoss(torch.FloatTensor(loss_weight).cuda())
tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
elif output_mode == "regression":
loss_fct = MSELoss()
tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
out_label_ids = label_ids.detach().cpu().numpy()
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds[0]
print(preds)
def swap_value(a):
a[0], a[1] = a[1], a[0]
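# COPA fix-up: examples arrive as consecutive (choice0, choice1) pairs.
# The loop below makes each pair consistent: the alternative with the
# higher class-0 probability is forced to predict class 0 and its sibling
# class 1, swapping the two softmax entries where necessary.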
if task_name == "copa":
preds = softmax(preds,axis=1)
print(preds)
for i in range(int(len(preds)/2)):
if preds[2*i][0]>=preds[2*i+1][0]:
if preds[2*i][0]<preds[2*i][1]:
swap_value(preds[2*i])
if preds[2*i+1][0]>preds[2*i+1][1]:
swap_value(preds[2*i+1])
else:
if preds[2*i][0]>preds[2*i][1]:
swap_value(preds[2*i])
if preds[2*i+1][0]<preds[2*i+1][1]:
swap_value(preds[2*i+1])
print(preds)
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
print(preds,out_label_ids)
result = compute_metrics(task_name, preds, out_label_ids)
loss = tr_loss/global_step if args.do_train else None
result['eval_loss'] = eval_loss
result['global_step'] = global_step
result['loss'] = loss
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
predict_examples = processor.get_test_examples(args.data_dir)
cached_predict_features_file = os.path.join(args.data_dir, 'predict_{0}_{1}_{2}'.format(
list(filter(None, args.bert_model.split('/'))).pop(),
str(args.max_seq_length),
str(task_name)))
try:
with open(cached_predict_features_file, "rb") as reader:
predict_features = pickle.load(reader)
except Exception:
predict_features = convert_examples_to_features(
predict_examples, label_list, args.max_seq_length, tokenizer, output_mode)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
logger.info(" Saving predict features into cached file %s", cached_predict_features_file)
with open(cached_predict_features_file, "wb") as writer:
pickle.dump(predict_features, writer)
logger.info("***** Running prediction *****")
logger.info(" Num examples = %d", len(predict_examples))
logger.info(" Batch size = %d", args.predict_batch_size)
all_input_ids = torch.tensor([f.input_ids for f in predict_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in predict_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in predict_features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in predict_features], dtype=torch.float)
predict_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.local_rank == -1:
predict_sampler = SequentialSampler(predict_data)
else:
predict_sampler = DistributedSampler(predict_data)
predict_dataloader = DataLoader(predict_data, sampler=predict_sampler, batch_size=args.predict_batch_size)
model.eval()
preds = []
out_label_ids = None
for input_ids, input_mask, segment_ids, label_ids in tqdm(predict_dataloader, desc="predicting"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with torch.no_grad():
logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)
print(logits)
print(label_ids)
if len(preds) == 0:
preds.append(logits.detach().cpu().numpy())
else:
preds[0] = np.append(
preds[0], logits.detach().cpu().numpy(), axis=0)
preds = preds[0]
print(preds)
if task_name == "copa":
preds = softmax(preds,axis=1)
print(preds)
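# COPA predictions come in consecutive pairs; emit 0 or 1 depending on
# which alternative scored the higher class-0 probability.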
results=[]
for i in range(int(len(preds)/2)):
if preds[2*i][0]>=preds[2*i+1][0]:
results.append(0)
else:
results.append(1)
preds = results
label_map = {i: i for i in range(2)}
else:
if output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif output_mode == "regression":
preds = np.squeeze(preds)
label_map = {i: label for i, label in enumerate(label_list)}
print(preds)
output_predict_file = os.path.join(args.output_dir, "predict_results.txt")
with open(output_predict_file, "w") as writer:
logger.info("***** Predict results *****")
for i in range(len(preds)):
label_i = label_map[preds[i]]
writer.write("{\"idx\": %d, \"label\": \"%s\"}\n"%(i,label_i))
# for key in sorted(result.keys()):
# logger.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| true | true |
f7093398654a58555f899c5a5f2cf4cb9551af39 | 89 | py | Python |
src/lesson_developer_tools/compileall_recursion_depth.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | ["Apache-2.0"] | 3 | 2018-08-14T09:33:52.000Z | 2022-03-21T12:31:58.000Z |
src/lesson_developer_tools/compileall_recursion_depth.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | ["Apache-2.0"] | null | null | null |
src/lesson_developer_tools/compileall_recursion_depth.py | jasonwee/asus-rt-n14uhp-mrtg | 4fa96c3406e32ea6631ce447db6d19d70b2cd061 | ["Apache-2.0"] | null | null | null |
import compileall
import re
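# maxlevels bounds compile_dir()'s recursion depth; 0 compiles only the
# files directly inside 'examples' and skips every subdirectory.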
compileall.compile_dir(
'examples',
maxlevels=0,
)
| 9.888889 | 23 | 0.707865 |
| true | true |
f70933ecf281da6cda4b19389d3bb7d2e6f3df4b | 9,330 | py | Python |
saleor/graphql/core/tests/test_graphql.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | ["CC-BY-4.0"] | 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z |
saleor/graphql/core/tests/test_graphql.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | ["CC-BY-4.0"] | 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z |
saleor/graphql/core/tests/test_graphql.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | ["CC-BY-4.0"] | 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z |
from functools import partial
from unittest.mock import Mock, patch
import graphene
import pytest
from django.contrib.auth.models import AnonymousUser
from django.db.models import Q
from django.shortcuts import reverse
from graphql.error import GraphQLError
from graphql_relay import to_global_id
from ...core.utils import from_global_id_or_error
from ...product.types import Product
from ...tests.utils import get_graphql_content
from ...utils import get_nodes
from ...utils.filters import filter_by_query_param
def test_middleware_dont_generate_sql_requests(client, settings, assert_num_queries):
"""When requesting on the GraphQL API endpoint, no SQL request should happen
indirectly. This test ensures that."""
# Enables the Graphql playground
settings.DEBUG = True
with assert_num_queries(0):
response = client.get(reverse("api"))
assert response.status_code == 200
def test_jwt_middleware(client, admin_user):
user_details_query = """
{
me {
email
}
}
"""
create_token_query = """
mutation {
tokenCreate(email: "admin@example.com", password: "password") {
token
}
}
"""
api_url = reverse("api")
api_client_post = partial(client.post, api_url, content_type="application/json")
# test setting AnonymousUser on unauthorized request to API
response = api_client_post(data={"query": user_details_query})
repl_data = response.json()
assert response.status_code == 200
assert isinstance(response.wsgi_request.user, AnonymousUser)
assert repl_data["data"]["me"] is None
# test creating a token for admin user
response = api_client_post(data={"query": create_token_query})
repl_data = response.json()
assert response.status_code == 200
assert response.wsgi_request.user == admin_user
token = repl_data["data"]["tokenCreate"]["token"]
assert token is not None
# test request with proper JWT token authorizes the request to API
response = api_client_post(
data={"query": user_details_query}, HTTP_AUTHORIZATION=f"JWT {token}"
)
repl_data = response.json()
assert response.status_code == 200
assert response.wsgi_request.user == admin_user
assert "errors" not in repl_data
assert repl_data["data"]["me"] == {"email": admin_user.email}
def test_real_query(user_api_client, product, channel_USD):
product_attr = product.product_type.product_attributes.first()
category = product.category
attr_value = product_attr.values.first()
query = """
query Root($categoryId: ID!, $sortBy: ProductOrder, $first: Int,
$attributesFilter: [AttributeInput], $channel: String) {
category(id: $categoryId) {
...CategoryPageFragmentQuery
__typename
}
products(first: $first, sortBy: $sortBy, filter: {categories: [$categoryId],
attributes: $attributesFilter}, channel: $channel) {
...ProductListFragmentQuery
__typename
}
attributes(first: 20, filter: {inCategory: $categoryId}, channel: $channel) {
edges {
node {
...ProductFiltersFragmentQuery
__typename
}
}
}
}
fragment CategoryPageFragmentQuery on Category {
id
name
ancestors(first: 20) {
edges {
node {
name
id
__typename
}
}
}
children(first: 20) {
edges {
node {
name
id
slug
__typename
}
}
}
__typename
}
fragment ProductListFragmentQuery on ProductCountableConnection {
edges {
node {
...ProductFragmentQuery
__typename
}
__typename
}
pageInfo {
hasNextPage
__typename
}
__typename
}
fragment ProductFragmentQuery on Product {
id
isAvailable
name
pricing {
...ProductPriceFragmentQuery
__typename
}
thumbnailUrl1x: thumbnail(size: 255){
url
}
thumbnailUrl2x: thumbnail(size: 510){
url
}
__typename
}
fragment ProductPriceFragmentQuery on ProductPricingInfo {
discount {
gross {
amount
currency
__typename
}
__typename
}
priceRange {
stop {
gross {
amount
currency
__typename
}
currency
__typename
}
start {
gross {
amount
currency
__typename
}
currency
__typename
}
__typename
}
__typename
}
fragment ProductFiltersFragmentQuery on Attribute {
id
name
slug
choices(first: 10) {
edges {
node {
id
name
slug
__typename
}
}
}
__typename
}
"""
variables = {
"categoryId": graphene.Node.to_global_id("Category", category.id),
"sortBy": {"field": "NAME", "direction": "ASC"},
"first": 1,
"attributesFilter": [
{"slug": f"{product_attr.slug}", "values": [f"{attr_value.slug}"]}
],
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
get_graphql_content(response)
def test_get_nodes(product_list):
global_ids = [to_global_id("Product", product.pk) for product in product_list]
# Make sure function works even if duplicated ids are provided
global_ids.append(to_global_id("Product", product_list[0].pk))
# Return products corresponding to global ids
products = get_nodes(global_ids, Product)
assert products == product_list
# Raise an error if requested id has no related database object
nonexistent_item = Mock(type="Product", pk=-1)
nonexistent_item_global_id = to_global_id(
nonexistent_item.type, nonexistent_item.pk
)
global_ids.append(nonexistent_item_global_id)
msg = "There is no node of type {} with pk {}".format(
nonexistent_item.type, nonexistent_item.pk
)
with pytest.raises(AssertionError) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
global_ids.pop()
# Raise an error if one of the node is of wrong type
invalid_item = Mock(type="test", pk=-1)
invalid_item_global_id = to_global_id(invalid_item.type, invalid_item.pk)
global_ids.append(invalid_item_global_id)
with pytest.raises(GraphQLError) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (f"Must receive Product id: {invalid_item_global_id}.",)
# Raise an error if no nodes were found
global_ids = []
msg = f"Could not resolve to a node with the global id list of '{global_ids}'."
with pytest.raises(Exception) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
# Raise an error if pass wrong ids
global_ids = ["a", "bb"]
msg = f"Could not resolve to a node with the global id list of '{global_ids}'."
with pytest.raises(Exception) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
@patch("saleor.product.models.Product.objects")
def test_filter_by_query_param(qs):
qs.filter.return_value = qs
qs = filter_by_query_param(qs, "test", ["name", "force"])
test_kwargs = {"name__icontains": "test", "force__icontains": "test"}
q_objects = Q()
for q in test_kwargs:
q_objects |= Q(**{q: test_kwargs[q]})
# FIXME: django 1.11 fails on called_once_with(q_objects)
assert qs.filter.call_count == 1
def test_from_global_id_or_error(product):
invalid_id = "invalid"
message = f"Couldn't resolve id: {invalid_id}."
with pytest.raises(GraphQLError) as error:
from_global_id_or_error(invalid_id)
assert str(error.value) == message
def test_from_global_id_or_error_wth_invalid_type(product):
product_id = graphene.Node.to_global_id("Product", product.id)
message = "Must receive a ProductVariant id."
with pytest.raises(GraphQLError) as error:
from_global_id_or_error(product_id, "ProductVariant", raise_error=True)
assert str(error.value) == message
def test_from_global_id_or_error_wth_type(product):
expected_product_type = str(Product)
expected_product_id = graphene.Node.to_global_id(expected_product_type, product.id)
product_type, product_id = from_global_id_or_error(
expected_product_id, expected_product_type
)
assert product_id == str(product.id)
assert product_type == expected_product_type
| 29.432177 | 87 | 0.602572 |
| true | true |
f70937bd4d44bb239528e4d0a4eeca186cddd1d3 | 6,375 | py | Python |
sales_forecast/scoring/score.py | AghaSaad04/mlops-v2 | d312ae108c93bacfb3541968bb913874af060ab2 | ["MIT"] | null | null | null |
sales_forecast/scoring/score.py | AghaSaad04/mlops-v2 | d312ae108c93bacfb3541968bb913874af060ab2 | ["MIT"] | null | null | null |
sales_forecast/scoring/score.py | AghaSaad04/mlops-v2 | d312ae108c93bacfb3541968bb913874af060ab2 | ["MIT"] | null | null | null |
import numpy
import os
import math
from azureml.core.model import Model
from azureml.core.dataset import Dataset
from inference_schema.schema_decorators \
import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type \
import NumpyParameterType
import keras
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from azureml.core.run import Run
from azureml.core import Dataset, Datastore, Workspace
import argparse
import json
import pandas as pd
import numpy as np
from azureml.core.authentication import ServicePrincipalAuthentication
# from azureml.core.authentication import InteractiveLoginAuthentication
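# Time-ordered train/test split: dates become Unix epoch seconds and the
# last 2000 rows are held out as the test set.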
def tts(data):
data['date'] = pd.to_datetime(data['date'])
data['date'] = (data['date'] - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
(train, test) = data[0:-2000].values, data[-2000:].values
return (train, test)
def scale_data(train_set, test_set):
# apply Min Max Scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train_set[:, :4])
# reshape training set
train_set = train_set.reshape(train_set.shape[0], train_set.shape[1])
train_set_scaled = scaler.transform(train_set[:, :4])
# reshape test set
test_set = test_set.reshape(test_set.shape[0], test_set.shape[1])
test_set_scaled = scaler.transform(test_set[:, :4])
X_train, y_train = train_set[:, :4], train_set[:, 4:].ravel()
X_test, y_test = test_set[:, :4], test_set[:, 4:].ravel()
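# Note: the *_scaled arrays computed above are never returned; callers get
# the raw feature/label splits plus the fitted scaler and apply
# scaler.transform() themselves (see run() below).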
return X_train, y_train, X_test, y_test, scaler
def init():
# load the model from file into a global object
global model
model_path = Model.get_model_path(
os.getenv("AZUREML_MODEL_DIR").split('/')[-2])
print ("model path", model_path)
# try:
# print ("try")
# dataset = pd.read_csv('/var/azureml-app/train.csv')
# original_df = dataset.to_pandas_dataframe()
# except:
# print ("except")
# train_dataset = original_df.to_csv('train.csv', index=False)
# interactive_auth = InteractiveLoginAuthentication(tenant_id="def44f5f-0783-4b05-8f2f-dd615c5dfec4")
# ws = Workspace(subscription_id="6542067a-127a-43ff-b7f2-007fe21a37f0",
# resource_group="sales-mlops-rg",
# workspace_name="sales-mlops-ws",
# auth=interactive_auth)
# ws.get_details()
# print(original_df)
model = keras.models.load_model(model_path)
print("Current directory:", os.getcwd())
print("Model is loaded")
# date = '6/25/2020'
# store = 3
# item = 105
# price = 990
# date = pd.to_datetime(date)
# date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
# input_sample = numpy.array([[date, store, item, price]])
# output_sample = numpy.array([4])
input_sample = numpy.array([[1591833600,34,759,690]])
output_sample = numpy.array([10])
@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data, request_headers):
global original_df
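# NOTE: service-principal credentials are hardcoded below; in a real
# deployment they belong in environment variables or a secret store.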
sp = ServicePrincipalAuthentication(tenant_id="def44f5f-0783-4b05-8f2f-dd615c5dfec4", service_principal_id="add8f304-2d88-45e3-94fa-ac6cf335d5df", service_principal_password="If2-.7Wlno57NW6v9~nE~xNIj~naD-DL5f")
ws = Workspace.get(name="sales-mlops-ws", auth = sp, subscription_id="6542067a-127a-43ff-b7f2-007fe21a37f0")
ws.get_details()
dataset = ws.datasets['salesforecast_ds']
original_df = dataset.to_pandas_dataframe()
# date = '6/25/2020'
# store = 34
# item = 759
# price = 690
# date = pd.to_datetime(date)
# date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
date = data[0][0]
prev_sales = []
(train, test) = tts(original_df)
X_train, y_train, X_test, y_test, scaler_object = scale_data(train, test)
first_date = original_df["date"][0]
for x in original_df.index:
last_date = original_df["date"][x]
print("last date", last_date)
days_diff = (int(date) - int(last_date)) / (60 * 60 * 24)
total_data_days = (int(last_date) - int(first_date)) / (60 * 60 * 24)
print("days:", days_diff)
print("total_data_days:", total_data_days)
for i in original_df.index:
if (original_df["item"][i] == data[0][2] and original_df["store"][i] == data[0][1]):
prev_sales.append(original_df["sales"][i])
prev_sales_avg = 0
prev_sales_avg = (sum(prev_sales)) / total_data_days
forecast_result_array = []
test_set = data
test_set_scaled = scaler_object.transform(test_set)
X_test = test_set_scaled[:, :4]
X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
y_pred = model.predict(X_test)
print("y_pred:",y_pred)
result = y_pred[0][0][0]
result = round(result)
print("result:",result)
prev_sales_avg = round(prev_sales_avg)
next_day_prediction = math.ceil(result + prev_sales_avg)
prev_sales.append(next_day_prediction)
forecast_result_array.append(next_day_prediction)
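# For horizons longer than one day, extend the forecast iteratively: each
# additional day is predicted as the running average of all sales seen (or
# forecast) so far, and the final answer is the sum over the horizon.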
if days_diff > 1:
for day in range(round(days_diff)):
total_data_days += 1
prev_sales_avg = sum(prev_sales) / total_data_days
prev_sales_avg = round(prev_sales_avg)
prev_sales.append(prev_sales_avg)
forecast_result_array.append(prev_sales_avg)
end_result = sum(forecast_result_array)
print("end result: ", end_result)
print(('{{"RequestId":"{0}", '
'"TraceParent":"{1}", '
'"NumberOfPredictions":{2}}}'
).format(
request_headers.get("X-Ms-Request-Id", ""),
request_headers.get("Traceparent", ""),
end_result
))
return {"result": end_result}
if __name__ == "__main__":
init()
# Smoke-test inputs for a local run of the scoring entry point.
date = '6/25/2020'
store = 34
item = 759
price = 690
date = pd.to_datetime(date)
date = (date - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
test = numpy.array([[date, store, item, price]])
prediction = run(test, {})
print("Test result: ", prediction)
| 34.646739 | 217 | 0.642353 |
| true | true |
f70938f0d7e8390db2b65b8b2287c08bd4a5e663 | 16,026 | py | Python |
plata/shop/migrations/0007_auto__add_field_order_billing_call_prefix__add_field_order_shipping_ca.py | DjangoAdminHackers/plata | 9f09431ef30a1e2022baa47cc89796c6d0879a25 | ["BSD-3-Clause"] | null | null | null |
plata/shop/migrations/0007_auto__add_field_order_billing_call_prefix__add_field_order_shipping_ca.py | DjangoAdminHackers/plata | 9f09431ef30a1e2022baa47cc89796c6d0879a25 | ["BSD-3-Clause"] | null | null | null |
plata/shop/migrations/0007_auto__add_field_order_billing_call_prefix__add_field_order_shipping_ca.py | DjangoAdminHackers/plata | 9f09431ef30a1e2022baa47cc89796c6d0879a25 | ["BSD-3-Clause"] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.billing_call_prefix'
db.add_column(u'shop_order', 'billing_call_prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False)
# Adding field 'Order.shipping_call_prefix'
db.add_column(u'shop_order', 'shipping_call_prefix', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False)
def backwards(self, orm):
# Deleting field 'Order.billing_call_prefix'
db.delete_column(u'shop_order', 'billing_call_prefix')
# Deleting field 'Order.shipping_call_prefix'
db.delete_column(u'shop_order', 'shipping_call_prefix')
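# Frozen ORM definitions: South snapshots every model this migration may
# touch so forwards()/backwards() can run without importing the current,
# possibly newer, models.py.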
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 13, 7, 33, 36, 439968)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 13, 7, 33, 36, 439200)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'shop.order': {
'Meta': {'object_name': 'Order'},
'_order_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.TextField', [], {}),
'billing_call_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'billing_country': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'billing_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'billing_zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'items_subtotal': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'items_tax': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'paid': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'shipping_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'shipping_call_prefix': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_company': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'shipping_country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'shipping_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'shipping_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'shipping_phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'shipping_same_as_billing': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'shipping_tax': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'shipping_zip_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'total': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '18', 'decimal_places': '4'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'shop.orderitem': {
'Meta': {'unique_together': "(('order', 'product'),)", 'object_name': 'OrderItem'},
'_line_item_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '18', 'decimal_places': '4', 'blank': 'True'}),
'_line_item_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '18', 'decimal_places': '4'}),
'_line_item_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '18', 'decimal_places': '4'}),
'_unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'_unit_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_sale': ('django.db.models.fields.BooleanField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['shop.Order']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zai_products.ProductVariant']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tax_class': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.TaxClass']", 'null': 'True', 'blank': 'True'}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
u'shop.orderpayment': {
'Meta': {'object_name': 'OrderPayment'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'authorized': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'data': ('plata.fields.JSONField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['shop.Order']"}),
'payment_method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'payment_module': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'payment_module_key': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'shop.orderstatus': {
'Meta': {'object_name': 'OrderStatus'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': u"orm['shop.Order']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '20'})
},
u'shop.taxclass': {
'Meta': {'object_name': 'TaxClass'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
},
u'zai_products.product': {
'Meta': {'object_name': 'Product'},
'_unit_price': ('django.db.models.fields.DecimalField', [], {'max_digits': '18', 'decimal_places': '4'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_de': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_it': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_de': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_it': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'tax_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['shop.TaxClass']"}),
'tax_included': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
u'zai_products.productvariant': {
'Meta': {'object_name': 'ProductVariant'},
'grip': ('django.db.models.fields.CharField', [], {'default': "'chocolate'", 'max_length': '64'}),
'hand': ('django.db.models.fields.CharField', [], {'default': "'left'", 'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.CharField', [], {'default': "'small'", 'max_length': '64'}),
'lie': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zai_products.Product']"}),
'shaft': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': '64'}),
'special_requirements': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
}
}
complete_apps = ['shop']
| 83.905759 | 195 | 0.567328 |
}
}
complete_apps = ['shop']
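The frozen definition above pins OrderItem to one row per (order, product) pair. A minimal sketch of the live model fragment such a frozen entry corresponds to — an illustration reconstructed from the migration, not the shop app's actual source, and written with a modern on_delete signature that South-era Django did not require:

from django.db import models

class OrderItem(models.Model):
    # Illustrative fragment reconstructed from the frozen South definition above;
    # the price, tax and quantity fields frozen there are omitted for brevity.
    order = models.ForeignKey('shop.Order', related_name='items',
                              on_delete=models.CASCADE)
    product = models.ForeignKey('zai_products.ProductVariant', null=True, blank=True,
                                on_delete=models.CASCADE)

    class Meta:
        unique_together = (('order', 'product'),)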
| true
| true
|
f70939b28be639a86193981881a264d901c31701
| 481
|
py
|
Python
|
tests/test_signal/test_generator.py
|
mcanatalay/SIMULOC
|
af32c522887dec08a0815052a5878b5a595b8d44
|
[
"MIT"
] | null | null | null |
tests/test_signal/test_generator.py
|
mcanatalay/SIMULOC
|
af32c522887dec08a0815052a5878b5a595b8d44
|
[
"MIT"
] | null | null | null |
tests/test_signal/test_generator.py
|
mcanatalay/SIMULOC
|
af32c522887dec08a0815052a5878b5a595b8d44
|
[
"MIT"
] | null | null | null |
import unittest
from simuloc.signal import Generator
class GeneratorTestCase(unittest.TestCase):
"""GeneratorTestCase tests the generator class."""
def setUp(self):
"""Creates a instance of the generator class."""
self.cinst = Generator()
def tearDown(self):
pass
def test_noise(self):
"""Tests the boundry of noise generator."""
self.assertTrue(self.cinst.noise(0.1) < 4)
if __name__ == '__main__':
unittest.main()
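The test above only bounds noise(0.1) from above. A sketch of a two-sided variant, under the assumption that Generator.noise(scale) returns a single float, could look like this:

import unittest
from simuloc.signal import Generator

class GeneratorBoundaryTestCase(unittest.TestCase):
    """Illustrative two-sided boundary test (not part of the file above)."""
    def setUp(self):
        self.cinst = Generator()

    def test_noise_bounds(self):
        # Assumption: the noise stays inside (-4, 4) for a small scale argument.
        sample = self.cinst.noise(0.1)
        self.assertGreater(sample, -4)
        self.assertLess(sample, 4)

if __name__ == '__main__':
    unittest.main()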
| 24.05
| 56
| 0.656965
|
import unittest
from simuloc.signal import Generator
class GeneratorTestCase(unittest.TestCase):
def setUp(self):
self.cinst = Generator()
def tearDown(self):
pass
def test_noise(self):
self.assertTrue(self.cinst.noise(0.1) < 4)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70939c6c4c8363f7dcf9a0f1d8507a2c83f495d
| 4,882
|
py
|
Python
|
source/Project.py
|
victorchen276/CarND-Advanced-Lane-Lines
|
436d81150107c181e3f328adfd3f1c31d6a5cb15
|
[
"MIT"
] | 1
|
2019-01-20T10:59:36.000Z
|
2019-01-20T10:59:36.000Z
|
source/Project.py
|
victorchen276/CarND-Advanced-Lane-Lines
|
436d81150107c181e3f328adfd3f1c31d6a5cb15
|
[
"MIT"
] | 4
|
2021-03-18T21:58:46.000Z
|
2022-01-13T01:04:22.000Z
|
source/Project.py
|
victorchen276/CarND-Advanced-Lane-Lines
|
436d81150107c181e3f328adfd3f1c31d6a5cb15
|
[
"MIT"
] | 2
|
2019-01-20T10:59:39.000Z
|
2019-08-04T13:08:14.000Z
|
from source.camera import camera
from source.LaneDetect import LaneDetect
from moviepy.editor import VideoFileClip
import glob
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.image as mpimg
import numpy as np
import cv2
#
# def process_video(input_video_file):
# clip1 = VideoFileClip(input_video_file);
# outputclip = clip1.fl_image(process_vid)
# outputclip.write_videofile('output_'+input_video_file, audio=False);
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
if __name__ == "__main__":
print('main')
# images = glob.glob('../camera_cal/calibration*.jpg')
# print(images)
camera = camera()
# camera.calibration(images, x_cor=9, y_cor=6, outputfilename='./camera_calibration_data_1')
camera.load_calibration_data('./camera_calibration_data.p')
# # images = sorted(images, key=lambda x: float(re.findall("(\d+)", x)[0]))
#
# print('Correction images (successfully detected corners):')
# plt.figure(figsize=(11.5, 9))
# gridspec.GridSpec(6, 3)
# # Step through the list and search for chessboard corners
# for i, image in enumerate(camera_calibrate.calibration_images_success):
# plt.subplot2grid((6, 3), (i // 3, i % 3), colspan=1, rowspan=1)
# plt.imshow(image)
# plt.axis('off')
# plt.show()
#
# plt.figure(figsize=(12, 4))
# plt.figtext(.5, .8, 'Images in which cv2 failed to find desired corners', fontsize=22, ha='center')
# for i, p in enumerate(camera_calibrate.calibration_images_fail):
# plt.subplot(1, 3, i + 1)
# plt.imshow(mpimg.imread(p)) # draw the first image of each class
# plt.title(p)
# plt.axis('off')
# plt.tight_layout(pad=0, h_pad=0, w_pad=0)
# plt.show()
# plt.savefig('fail.png')
# camera_calibrate.load_calibration_data('./camera_calibration_data.p')
# orig_img = mpimg.imread('../test_images/test1.jpg')
# undist_img = camera_calibrate.undistort(orig_img)
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
# ax1.imshow(orig_img)
# ax1.set_title('Original', fontsize=20)
# ax2.imshow(undist_img)
# ax2.set_title('Undistorted', fontsize=20)
# # plt.show()
# plt.savefig('undistort2.png')
# Perspective transform
# for image in glob.glob('../test_images/*.jpg'):
# orig_img = cv2.imread(image)
# birdeye_img, _ = camera.birds_eye(orig_img)
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
# f.tight_layout()
# ax1.imshow(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
# ax1.set_title('Original', fontsize=20)
# ax2.imshow(cv2.cvtColor(birdeye_img, cv2.COLOR_BGR2RGB))
# ax2.set_title('Undistorted and Warped Image', fontsize=20)
# plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# plt.show()
# # plt.savefig('../output_images/warp_' + str(i) + '.png')
# # edge
# image = mpimg.imread('../test_images/test6.jpg')
# lane_detecter = LaneDetect()
# result = lane_detecter.get_edges(image)
#
# # Plot the result
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
# # f.tight_layout()
# ax1.axis('off')
# ax1.imshow(image)
# ax1.set_title('Original', fontsize=18)
# ax2.axis('off')
# ax2.set_title('Edge', fontsize=18)
#
#
# ax2.imshow(result, cmap='gray')
# plt.show()
# plt.savefig('edge.png')
# # Detect Lane line
# for image_name in glob.glob('../test_images/*.jpg'):
# orig_img = mpimg.imread(image_name)
#
# lane_detecter = LaneDetect()
# lane_detecter.initcamera()
# lane_detecter.initlines(orig_img)
# output_img = lane_detecter.process_pipeline(orig_img)
# f, (ax1) = plt.subplots(1, 1, figsize=(9, 6))
# ax1.imshow(output_img)
# ax1.set_title('output_img', fontsize=20)
# plt.axis('off')
# plt.show()
# break
# Applying pipeline to video
clip1 = VideoFileClip('../project_video.mp4')
lane_detecter = LaneDetect()
lane_detecter.initcamera()
lane_detecter.initlines(clip1.get_frame(0))
outputclip = clip1.fl_image(lane_detecter.process_pipeline)
outputclip.write_videofile('../output_videos/output_project_video.mp4', audio=False)
#
# clip1 = VideoFileClip('../harder_challenge_video.mp4');
# lane_detecter = LaneDetect(clip1.get_frame(0))
# outputclip = clip1.fl_image(lane_detecter.process_pipeline)
# outputclip.write_videofile('../output_harder_challenge_video.mp4', audio=False)
#
# clip1 = VideoFileClip('../challenge_video.mp4')
# lane_detecter = LaneDetect(clip1.get_frame(0))
# outputclip = clip1.fl_image(lane_detecter.process_pipeline)
# outputclip.write_videofile('../output_challenge_video.mp4', audio=False)
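The live part of the script is the standard moviepy frame-processing pattern: build a clip, map a per-frame function over it with fl_image, then write the result. A self-contained sketch of that same pattern, with a hypothetical process_frame standing in for LaneDetect.process_pipeline:

from moviepy.editor import VideoFileClip

def process_frame(frame):
    # Hypothetical per-frame transform; it must return an image array
    # with the same shape as its input.
    return frame

clip = VideoFileClip('../project_video.mp4')
output_clip = clip.fl_image(process_frame)  # applied to every frame
output_clip.write_videofile('../output_videos/sketch_output.mp4', audio=False)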
| 35.897059
| 105
| 0.648914
|
from source.camera import camera
from source.LaneDetect import LaneDetect
from moviepy.editor import VideoFileClip
import glob
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.image as mpimg
import numpy as np
import cv2
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
if __name__ == "__main__":
print('main')
camera = camera()
camera.load_calibration_data('./camera_calibration_data.p')
clip1 = VideoFileClip('../project_video.mp4')
lane_detecter = LaneDetect()
lane_detecter.initcamera()
lane_detecter.initlines(clip1.get_frame(0))
outputclip = clip1.fl_image(lane_detecter.process_pipeline)
outputclip.write_videofile('../output_videos/output_project_video.mp4', audio=False)
| true
| true
|
f7093aa21d3221a29bbe4a8c0a4d8dbde9443d8d
| 3,322
|
py
|
Python
|
doc/source/conf.py
|
lostmap/python-mistralclient
|
f77c1d23fb68b24d7406647b489f3f960026c46b
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
lostmap/python-mistralclient
|
f77c1d23fb68b24d7406647b489f3f960026c46b
|
[
"Apache-2.0"
] | null | null | null |
doc/source/conf.py
|
lostmap/python-mistralclient
|
f77c1d23fb68b24d7406647b489f3f960026c46b
|
[
"Apache-2.0"
] | null | null | null |
# Mistral documentation build configuration file
import os
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinxcontrib.apidoc',
'openstackdocstheme',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# sphinxcontrib.apidoc options
apidoc_module_dir = '../../mistralclient'
apidoc_output_dir = 'api'
apidoc_excluded_paths = [
'test',
'tests/*']
apidoc_separate_modules = True
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mistral Client'
copyright = u'2016, Mistral Contributors'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mistralclient.']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'MistralClient'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': [
'sidebarlinks.html', 'localtoc.html', 'searchbox.html',
'sourcelink.html'
],
'**': [
'localtoc.html', 'relations.html',
'searchbox.html', 'sourcelink.html'
]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mistraldoc'
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mistral_client', u'Mistral Client Documentation',
[u'Mistral Contributors'], 1)
]
# -- Options for openstackdocstheme -------------------------------------------
openstackdocs_repo_name = 'openstack/python-mistralclient'
openstackdocs_bug_project = 'python-mistralclient'
openstackdocs_bug_tag = ''
openstackdocs_auto_name = False
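Given this conf.py, the docs build like any other Sphinx project. A minimal sketch of a programmatic build, assuming Sphinx >= 1.7 and the doc/source layout used by this repository:

from sphinx.cmd.build import build_main

# Equivalent to: sphinx-build -b html doc/source doc/build/html
exit_code = build_main(['-b', 'html', 'doc/source', 'doc/build/html'])
raise SystemExit(exit_code)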
| 31.046729
| 79
| 0.689344
|
import os
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
extensions = [
'sphinxcontrib.apidoc',
'openstackdocstheme',
]
apidoc_module_dir = '../../mistralclient'
apidoc_output_dir = 'api'
apidoc_excluded_paths = [
'test',
'tests/*']
apidoc_separate_modules = True
source_suffix = '.rst'
master_doc = 'index'
project = u'Mistral Client'
copyright = u'2016, Mistral Contributors'
exclude_patterns = []
add_function_parentheses = True
add_module_names = True
show_authors = False
pygments_style = 'native'
modindex_common_prefix = ['mistralclient.']
html_theme = 'openstackdocs'
html_title = 'MistralClient'
html_sidebars = {
'index': [
'sidebarlinks.html', 'localtoc.html', 'searchbox.html',
'sourcelink.html'
],
'**': [
'localtoc.html', 'relations.html',
'searchbox.html', 'sourcelink.html'
]
}
htmlhelp_basename = 'Mistraldoc'
man_pages = [
('index', 'mistral_client', u'Mistral Client Documentation',
[u'Mistral Contributors'], 1)
]
openstackdocs_repo_name = 'openstack/python-mistralclient'
openstackdocs_bug_project = 'python-mistralclient'
openstackdocs_bug_tag = ''
openstackdocs_auto_name = False
| true
| true
|
f7093c16a586362247f066ec5e801495ab9cae9d
| 547
|
py
|
Python
|
reviews/migrations/0009_movie_poster_thumbnail.py
|
UrbanBogger/horrorexplosion
|
3698e00a6899a5e8b224cd3d1259c3deb3a2ca80
|
[
"MIT"
] | null | null | null |
reviews/migrations/0009_movie_poster_thumbnail.py
|
UrbanBogger/horrorexplosion
|
3698e00a6899a5e8b224cd3d1259c3deb3a2ca80
|
[
"MIT"
] | 4
|
2020-06-05T18:21:18.000Z
|
2021-06-10T20:17:31.000Z
|
reviews/migrations/0009_movie_poster_thumbnail.py
|
UrbanBogger/horrorexplosion
|
3698e00a6899a5e8b224cd3d1259c3deb3a2ca80
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-08 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews', '0008_auto_20180623_2009'),
]
operations = [
migrations.AddField(
model_name='movie',
name='poster_thumbnail',
field=models.ImageField(blank=True, help_text='Upload the poster thumbnail', null=True, upload_to='movie_posters/thumbnails/'),
),
]
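The AddField operation above is exactly what makemigrations emits for a new ImageField on the Movie model. A sketch of the model fragment it corresponds to (the rest of Movie is omitted and assumed):

from django.db import models

class Movie(models.Model):
    # Field reconstructed from the migration above; other fields omitted.
    poster_thumbnail = models.ImageField(
        blank=True,
        null=True,
        help_text='Upload the poster thumbnail',
        upload_to='movie_posters/thumbnails/',
    )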
| 26.047619
| 139
| 0.650823
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews', '0008_auto_20180623_2009'),
]
operations = [
migrations.AddField(
model_name='movie',
name='poster_thumbnail',
field=models.ImageField(blank=True, help_text='Upload the poster thumbnail', null=True, upload_to='movie_posters/thumbnails/'),
),
]
| true
| true
|
f7093def5323b66ffaaea55ff027d9e85613d2dd
| 42,660
|
py
|
Python
|
model/stage.py
|
Lee-Ft/RHA
|
8a832a9afebc9204148bbd340c31e26c83138024
|
[
"MIT"
] | 3
|
2021-04-23T11:53:13.000Z
|
2022-02-11T12:29:34.000Z
|
model/stage.py
|
Lee-Ft/RHA
|
8a832a9afebc9204148bbd340c31e26c83138024
|
[
"MIT"
] | 2
|
2022-02-11T12:43:03.000Z
|
2022-02-14T15:18:39.000Z
|
model/stage.py
|
Lee-Ft/RHA
|
8a832a9afebc9204148bbd340c31e26c83138024
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pprint
from collections import defaultdict
from .context_query_attention import StructuredAttention
from .encoder import StackedEncoder
from .cnn import DepthwiseSeparableConv
from .model_utils import save_pickle, mask_logits, flat_list_of_lists, \
find_max_triples, get_high_iou_sapns, expand_span
class LinearWrapper(nn.Module):
"""1D conv layer"""
def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True):
super(LinearWrapper, self).__init__()
self.relu = relu
layers = [nn.LayerNorm(in_hsz)] if layer_norm else []
layers += [
nn.Dropout(dropout),
nn.Linear(in_hsz, out_hsz)
]
self.conv = nn.Sequential(*layers)
def forward(self, x):
"""(N, L, D)"""
if self.relu:
return F.relu(self.conv(x), inplace=True) # (N, L, D)
else:
return self.conv(x) # (N, L, D)
class ConvLinear(nn.Module):
"""1D conv layer"""
def __init__(self, in_hsz, out_hsz, kernel_size=3, layer_norm=True, dropout=0.1, relu=True):
super(ConvLinear, self).__init__()
layers = [nn.LayerNorm(in_hsz)] if layer_norm else []
layers += [
nn.Dropout(dropout),
DepthwiseSeparableConv(in_ch=in_hsz,
out_ch=out_hsz,
k=kernel_size,
dim=1,
relu=relu)
]
self.conv = nn.Sequential(*layers)
def forward(self, x):
"""(N, L, D)"""
return self.conv(x) # (N, L, D)
class STAGE(nn.Module):
def __init__(self, opt):
super(STAGE, self).__init__()
self.opt = opt
self.inference_mode = False
self.sub_flag = opt.sub_flag
self.vfeat_flag = opt.vfeat_flag
self.vfeat_size = opt.vfeat_size
self.t_iter = opt.t_iter
self.extra_span_length = opt.extra_span_length
self.add_local = opt.add_local
self.use_sup_att = opt.use_sup_att
self.num_negatives = opt.num_negatives
self.negative_pool_size = opt.negative_pool_size
self.num_hard = opt.num_hard
self.drop_topk = opt.drop_topk
self.margin = opt.margin
self.att_loss_type = opt.att_loss_type
self.scale = opt.scale
self.alpha = opt.alpha
self.dropout = opt.dropout
self.hsz = opt.hsz
self.bsz = None
self.num_seg = None
self.num_a = 5
self.flag_cnt = self.sub_flag + self.vfeat_flag
self.wd_size = opt.embedding_size
self.bridge_hsz = 300
self.bert_word_encoding_fc = nn.Sequential(
nn.LayerNorm(self.wd_size),
nn.Dropout(self.dropout),
nn.Linear(self.wd_size, self.bridge_hsz),
nn.ReLU(True),
nn.LayerNorm(self.bridge_hsz),
)
if self.sub_flag:
print("Activate sub branch")
if self.vfeat_flag:
print("Activate vid branch")
self.vid_fc = nn.Sequential(
nn.LayerNorm(self.vfeat_size),
nn.Dropout(self.dropout),
nn.Linear(self.vfeat_size, self.bridge_hsz),
nn.ReLU(True),
nn.LayerNorm(self.bridge_hsz)
)
if self.flag_cnt == 2:
self.concat_fc = nn.Sequential(
nn.LayerNorm(3 * self.hsz),
nn.Dropout(self.dropout),
nn.Linear(3 * self.hsz, self.hsz),
nn.ReLU(True),
nn.LayerNorm(self.hsz),
)
self.input_embedding = nn.Sequential(
nn.Dropout(self.dropout),
nn.Linear(self.bridge_hsz, self.hsz),
nn.ReLU(True),
nn.LayerNorm(self.hsz),
)
self.input_encoder = StackedEncoder(n_blocks=opt.input_encoder_n_blocks,
n_conv=opt.input_encoder_n_conv,
kernel_size=opt.input_encoder_kernel_size,
num_heads=opt.input_encoder_n_heads,
hidden_size=self.hsz,
dropout=self.dropout)
self.str_attn = StructuredAttention(dropout=self.dropout,
scale=opt.scale,
add_void=opt.add_non_visual) # no parameters inside
self.c2q_down_projection = nn.Sequential(
nn.LayerNorm(3 * self.hsz),
nn.Dropout(self.dropout),
nn.Linear(3*self.hsz, self.hsz),
nn.ReLU(True),
)
self.cls_encoder = StackedEncoder(n_blocks=opt.cls_encoder_n_blocks,
n_conv=opt.cls_encoder_n_conv,
kernel_size=opt.cls_encoder_kernel_size,
num_heads=opt.cls_encoder_n_heads,
hidden_size=self.hsz,
dropout=self.dropout)
self.cls_projection_layers = nn.ModuleList(
[
LinearWrapper(in_hsz=self.hsz,
out_hsz=self.hsz,
layer_norm=True,
dropout=self.dropout,
relu=True)
] +
[
ConvLinear(in_hsz=self.hsz,
out_hsz=self.hsz,
kernel_size=3,
layer_norm=True,
dropout=self.dropout,
relu=True)
for _ in range(self.t_iter)])
self.temporal_scoring_st_layers = nn.ModuleList([
LinearWrapper(in_hsz=self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
for _ in range(self.t_iter+1)])
self.temporal_scoring_ed_layers = nn.ModuleList([
LinearWrapper(in_hsz=self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
for _ in range(self.t_iter+1)])
self.temporal_criterion = nn.CrossEntropyLoss(reduction="sum")
self.classifier = LinearWrapper(in_hsz=self.hsz * 2 if self.add_local else self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
def load_word_embedding(self, pretrained_embedding, requires_grad=False):
self.word_embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))
self.word_embedding.weight.requires_grad = requires_grad
def forward(self, batch):
if self.inference_mode:
return self.forward_main(batch)
else:
out, att_loss, att_predictions, temporal_loss, temporal_predictions, other_outputs = self.forward_main(batch)
return out, att_loss, att_predictions, temporal_loss, temporal_predictions
def forward_main(self, batch):
"""
Args:
batch: edict, keys = qas, qas_mask, qa_noun_masks, sub, sub_mask, vcpt, vcpt_mask, vid, vid_mask,
att_labels, att_labels_mask, qid, target, vid_name, ts_label
qas, qas_mask, qa_noun_masks: (N, 5, Lqa)
sub, sub_mask: (N, #imgs, Ls)
vcpt, vcpt_mask: (N, #imgs, #regions)
vid, vid_mask: (N, #imgs, #regions, D), (N, #imgs, #regions)
att_labels, att_labels_mask: A list of N (#imgs, #qa-words, #regions)
qid: list(int)
vid_name: list(str)
target: torch.LongTensor
use_hard_negatives: bool, true to sample hard negatives
q_l: int, length of the tokenized question
anno_st_idx (list of int): each element is an index (at 0.5fps) of the first image
with spatial annotation.
ts_label: {"st": (N, ), "ed": (N, )} for 'st_ed'. (N, L) for 'frm'
ts_label_mask: (N, L) for both 'st_ed' and 'frm'
Returns:
"""
self.bsz = len(batch.qid)
bsz = self.bsz
num_a = self.num_a
hsz = self.hsz
a_embed = self.base_encoder(batch.qas_bert.view(bsz*num_a, -1, self.wd_size), # (N*5, L, D)
batch.qas_mask.view(bsz * num_a, -1), # (N*5, L)
self.bert_word_encoding_fc,
self.input_embedding,
self.input_encoder) # (N*5, L, D)
a_embed = a_embed.view(bsz, num_a, 1, -1, hsz) # (N, 5, 1, L, D)
a_mask = batch.qas_mask.view(bsz, num_a, 1, -1) # (N, 5, 1, L)
attended_sub, attended_vid, attended_vid_mask, attended_sub_mask = (None, ) * 4
other_outputs = {} # {"pos_noun_mask": batch.qa_noun_masks} # used to visualization and compute att acc
if self.sub_flag:
num_imgs, num_words = batch.sub_bert.shape[1:3]
sub_embed = self.base_encoder(batch.sub_bert.view(bsz*num_imgs, num_words, -1), # (N*Li, Lw)
batch.sub_mask.view(bsz * num_imgs, num_words), # (N*Li, Lw)
self.bert_word_encoding_fc,
self.input_embedding,
self.input_encoder) # (N*Li, Lw, D)
            sub_embed = sub_embed.contiguous().view(bsz, 1, num_imgs, num_words, -1)  # (N, 1, Li, Lw, D)
sub_mask = batch.sub_mask.view(bsz, 1, num_imgs, num_words) # (N, 1, Li, Lw)
attended_sub, attended_sub_mask, sub_raw_s, sub_normalized_s = \
self.qa_ctx_attention(a_embed, sub_embed, a_mask, sub_mask,
noun_mask=None,
non_visual_vectors=None)
other_outputs["sub_normalized_s"] = sub_normalized_s
other_outputs["sub_raw_s"] = sub_raw_s
if self.vfeat_flag:
num_imgs, num_regions = batch.vid.shape[1:3]
vid_embed = F.normalize(batch.vid, p=2, dim=-1) # (N, Li, Lr, D)
vid_embed = self.base_encoder(vid_embed.view(bsz*num_imgs, num_regions, -1), # (N*Li, Lw)
batch.vid_mask.view(bsz * num_imgs, num_regions), # (N*Li, Lr)
self.vid_fc,
self.input_embedding,
self.input_encoder) # (N*Li, L, D)
vid_embed = vid_embed.contiguous().view(bsz, 1, num_imgs, num_regions, -1) # (N, 1, Li, Lr, D)
vid_mask = batch.vid_mask.view(bsz, 1, num_imgs, num_regions) # (N, 1, Li, Lr)
attended_vid, attended_vid_mask, vid_raw_s, vid_normalized_s = \
self.qa_ctx_attention(a_embed, vid_embed, a_mask, vid_mask,
noun_mask=None,
non_visual_vectors=None)
other_outputs["vid_normalized_s"] = vid_normalized_s
other_outputs["vid_raw_s"] = vid_raw_s
if self.flag_cnt == 2:
visual_text_embedding = torch.cat([attended_sub,
attended_vid,
attended_sub * attended_vid], dim=-1) # (N, 5, Li, Lqa, 3D)
visual_text_embedding = self.concat_fc(visual_text_embedding) # (N, 5, Li, Lqa, D)
out, target, t_scores = self.classfier_head_multi_proposal(
visual_text_embedding, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
elif self.sub_flag:
out, target, t_scores = self.classfier_head_multi_proposal(
attended_sub, attended_sub_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
elif self.vfeat_flag:
out, target, t_scores = self.classfier_head_multi_proposal(
attended_vid, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
else:
raise NotImplementedError
assert len(out) == len(target)
other_outputs["temporal_scores"] = t_scores # (N, 5, Li) or (N, 5, Li, 2)
if self.inference_mode:
inference_outputs = {
"answer": out, # (N, 5)
"t_scores": F.softmax(t_scores, dim=2),
"att_predictions": self.get_att_prediction(
scores=other_outputs["vid_raw_s"],
object_vocab=batch.eval_object_word_ids,
words=batch.qas,
vid_names=batch.vid_name,
qids=batch.qid,
img_indices=batch.image_indices,
boxes=batch.boxes,
start_indices=batch.anno_st_idx,
) if self.vfeat_flag else None,
}
return inference_outputs
att_loss = 0
att_predictions = None
# if (self.use_sup_att or not self.training) and self.vfeat_flag:
if self.use_sup_att and self.training and self.vfeat_flag:
start_indices = batch.anno_st_idx
try:
cur_att_loss, cur_att_predictions = \
self.get_att_loss(other_outputs["vid_raw_s"], batch.att_labels, batch.target, batch.qas,
qids=batch.qid,
q_lens=batch.q_l,
vid_names=batch.vid_name,
img_indices=batch.image_indices,
boxes=batch.boxes,
start_indices=start_indices,
num_negatives=self.num_negatives,
use_hard_negatives=batch.use_hard_negatives,
drop_topk=self.drop_topk)
except AssertionError as e:
save_pickle(
{"batch": batch, "start_indices": start_indices, "vid_raw_s": other_outputs["vid_raw_s"]},
"err_dict.pickle"
)
import sys
sys.exit(1)
att_loss += cur_att_loss
att_predictions = cur_att_predictions
temporal_loss = self.get_ts_loss(temporal_scores=t_scores,
ts_labels=batch.ts_label,
answer_indices=batch.target)
if self.training:
return [out, target], att_loss, att_predictions, temporal_loss, t_scores, other_outputs
else:
return out, att_loss, att_predictions, temporal_loss, F.softmax(t_scores, dim=2), other_outputs
@classmethod
def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):
""" Raw data --> higher-level embedding
Args:
data: (N, L) for text, (N, L, D) for video
data_mask: (N, L)
init_encoder: word_embedding layer for text, MLP (downsize) for video
downsize_encoder: MLP, down project to hsz
input_encoder: multiple layer of encoder block, with residual connection, CNN, layernorm, etc
Returns:
encoded_data: (N, L, D)
"""
data = downsize_encoder(init_encoder(data))
return input_encoder(data, data_mask)
def qa_ctx_attention(self, qa_embed, ctx_embed, qa_mask, ctx_mask, noun_mask, non_visual_vectors):
""" Align image regions with QA words
Args:
qa_embed: (N, 5, 1, Lqa, D)
qa_mask: (N, 5, 1, Lqa)
ctx_embed: (N, 1, Li, Lr, D)
ctx_mask: (N, 1, Li, Lr)
noun_mask: (N, 5, Lqa)
non_visual_vectors: (m, D), m is a tunable parameter
Returns:
"""
num_img, num_region = ctx_mask.shape[2:]
u_a, raw_s, s_mask, s_normalized = self.str_attn(
qa_embed, ctx_embed, qa_mask, ctx_mask,
noun_mask=noun_mask, void_vector=non_visual_vectors) # (N, 5, Li, Lqa, D), (N, 5, Li, Lqa, lr) x2
qa_embed = qa_embed.repeat(1, 1, num_img, 1, 1)
mixed = torch.cat([qa_embed,
u_a,
                           qa_embed*u_a], dim=-1)  # (N, 5, Li, Lqa, 3D)
mixed = self.c2q_down_projection(mixed) # (N, 5, Li, Lqa, D)
mixed_mask = (s_mask.sum(-1) != 0).float() # (N, 5, Li, Lqa)
return mixed, mixed_mask, raw_s, s_normalized
def get_proposals(self, max_statement, max_statement_mask, temporal_scores,
targets, ts_labels, max_num_proposal=1, iou_thd=0.5, ce_prob_thd=0.01,
extra_span_length=3):
"""
Args:
max_statement: (N, 5, Li, D)
max_statement_mask: (N, 5, Li, 1)
temporal_scores: (N, 5, Li, 2)
targets: (N, )
ts_labels: (N, Li) for frm or N * (st, ed) for st_ed
max_num_proposal:
iou_thd:
ce_prob_thd:
extra_span_length:
Returns:
"""
bsz, num_a, num_img, _ = max_statement_mask.shape
if self.training:
ca_temporal_scores_st_ed = \
temporal_scores[torch.arange(bsz, dtype=torch.long), targets].data # (N, Li, 2)
ca_temporal_scores_st_ed = F.softmax(ca_temporal_scores_st_ed, dim=1) # (N, Li, 2)
ca_pred_spans = find_max_triples(ca_temporal_scores_st_ed[:, :, 0],
ca_temporal_scores_st_ed[:, :, 1],
topN=max_num_proposal,
prob_thd=ce_prob_thd) # N * [(st_idx, ed_idx, confidence), ...]
            # +1 for the ed index before passing spans into the get_high_iou_sapns func.
ca_pred_spans = [[[sub_e[0], sub_e[1] + 1, sub_e[2]] for sub_e in e] for e in ca_pred_spans]
spans = get_high_iou_sapns(zip(ts_labels["st"].tolist(), (ts_labels["ed"] + 1).tolist()),
ca_pred_spans, iou_thd=iou_thd, add_gt=True) # N * [(st, ed), ...]
local_max_max_statement_list = [] # N_new * (5, D)
global_max_max_statement_list = [] # N_new * (5, D)
span_targets = [] # N_new * (1,)
for idx, (t, span_sublist) in enumerate(zip(targets, spans)):
span_targets.extend([t] * len(span_sublist))
cur_global_max_max_statement = \
torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 1)[0]
global_max_max_statement_list.extend([cur_global_max_max_statement] * len(span_sublist))
for span in span_sublist:
span = expand_span(span, expand_length=extra_span_length)
cur_span_max_statement = mask_logits(
max_statement[idx, :, span[0]:span[1]],
max_statement_mask[idx, :, span[0]:span[1]]) # (5, Li[st:ed], D)
local_max_max_statement_list.append(torch.max(cur_span_max_statement, 1)[0]) # (5, D)
local_max_max_statement = torch.stack(local_max_max_statement_list) # (N_new, 5, D)
global_max_max_statement = torch.stack(global_max_max_statement_list) # (N_new, 5, D)
max_max_statement = torch.cat([
local_max_max_statement,
global_max_max_statement], dim=-1) # (N_new, 5, 2D)
return max_max_statement, targets.new_tensor(span_targets) # (N_new, 5, 2D), (N_new, )
else: # testing
temporal_scores_st_ed = F.softmax(temporal_scores, dim=2) # (N, 5, Li, 2)
temporal_scores_st_ed_reshaped = temporal_scores_st_ed.view(bsz * num_a, -1, 2) # (N*5, Li, 2)
pred_spans = find_max_triples(temporal_scores_st_ed_reshaped[:, :, 0],
temporal_scores_st_ed_reshaped[:, :, 1],
topN=1, prob_thd=None) # (N*5) * [(st, ed, confidence), ]
pred_spans = flat_list_of_lists(pred_spans) # (N*5) * (st, ed, confidence)
pred_spans = torch.FloatTensor(pred_spans).to(temporal_scores_st_ed_reshaped.device) # (N*5, 3)
pred_spans, pred_scores = pred_spans[:, :2].long(), pred_spans[:, 2] # (N*5, 2), (N*5, )
pred_spans = [[e[0], e[1] + 1] for e in pred_spans]
max_statement = max_statement.view(bsz * num_a, num_img, -1) # (N*5, Li, D)
max_statement_mask = max_statement_mask.view(bsz * num_a, num_img, -1) # (N*5, Li, 1)
local_max_max_statement_list = [] # N*5 * (D, )
global_max_max_statement_list = [] # N*5 * (D, )
for idx, span in enumerate(pred_spans):
span = expand_span(span, expand_length=extra_span_length)
cur_global_max_max_statement = \
torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 0)[0]
global_max_max_statement_list.append(cur_global_max_max_statement)
cur_span_max_statement = mask_logits(
max_statement[idx, span[0]:span[1]],
max_statement_mask[idx, span[0]:span[1]]) # (Li[st:ed], D), words for span[0] == span[1]
local_max_max_statement_list.append(torch.max(cur_span_max_statement, 0)[0]) # (D, )
local_max_max_statement = torch.stack(local_max_max_statement_list) # (N*5, D)
global_max_max_statement = torch.stack(global_max_max_statement_list) # (N*5, D)
max_max_statement = torch.cat([
local_max_max_statement,
global_max_max_statement], dim=-1) # (N_new, 5, 2D)
return max_max_statement.view(bsz, num_a, -1), targets # (N, 5, 2D), (N, )
def residual_temporal_predictor(self, layer_idx, input_tensor):
"""
Args:
layer_idx (int):
input_tensor: (N, L, D)
Returns:
temporal_score
"""
input_tensor = input_tensor + self.cls_projection_layers[layer_idx](input_tensor) # (N, L, D)
t_score_st = self.temporal_scoring_st_layers[layer_idx](input_tensor) # (N, L, 1)
t_score_ed = self.temporal_scoring_ed_layers[layer_idx](input_tensor) # (N, L, 1)
t_score = torch.cat([t_score_st, t_score_ed], dim=2) # (N, L, 2)
return input_tensor, t_score
def classfier_head_multi_proposal(self, statement, statement_mask, targets, ts_labels, ts_labels_mask,
max_num_proposal=1, ce_prob_thd=0.01, iou_thd=0.5, extra_span_length=3):
"""Predict the probabilities of each statements being true. Statements = QA + Context.
Args:
statement: (N, 5, Li, Lqa, D)
statement_mask: (N, 5, Li, Lqa)
targets: (N, )
ts_labels: (N, Li) for frm or N * (st, ed) for st_ed
ts_labels_mask: (N, Li)
max_num_proposal (int):
ce_prob_thd (float): threshold for p1*p2 (st, ed)
iou_thd (float): threshold for temporal iou
extra_span_length (int): expand the localized span to give a little bit extra context
Returns:
"""
bsz, num_a, num_img, num_words = statement_mask.shape
statement = statement.view(bsz*num_a*num_img, num_words, -1) # (N*5*Li, Lqa, D)
statement_mask = statement_mask.view(bsz*num_a*num_img, num_words) # (N*5*Li, Lqa)
statement = self.cls_encoder(statement, statement_mask) # (N*5*Li, Lqa, D)
max_statement = torch.max(mask_logits(statement, statement_mask.unsqueeze(2)), 1)[0] # (N*5*Li, D)
max_statement_mask = (statement_mask.sum(1) != 0).float().view(bsz, num_a, num_img, 1) # (N, 5, Li, 1)
        max_statement = max_statement.view(bsz*num_a, num_img, -1)  # (N*5, Li, D)
t_score_container = []
encoded_max_statement_container = []
encoded_max_statement = max_statement # (N*5, Li, D)
for layer_idx in range(self.t_iter+1):
encoded_max_statement, prev_t_score = \
self.residual_temporal_predictor(layer_idx, encoded_max_statement)
t_score_container.append(prev_t_score.view(bsz, num_a, num_img, 2)) # (N, 5, Li, 2)
encoded_max_statement_container.append(encoded_max_statement) # (N*5, Li, D)
if self.t_iter > 0:
temporal_scores_st_ed = 0.5 * (t_score_container[0] + torch.stack(t_score_container[:1]).mean(0))
else:
temporal_scores_st_ed = t_score_container[0] # (N, 5, Li, 2)
# mask before softmax
temporal_scores_st_ed = mask_logits(temporal_scores_st_ed, ts_labels_mask.view(bsz, 1, num_img, 1))
        # when predicting the answer, only consider the 1st-level representation,
        # since the others are all generated from the 1st level
stacked_max_statement = encoded_max_statement_container[0].view(bsz, num_a, num_img, -1) # (N, 5, Li, D)
if self.add_local:
max_max_statement, targets = self.get_proposals(
stacked_max_statement, max_statement_mask, temporal_scores_st_ed,
targets, ts_labels, max_num_proposal=max_num_proposal, iou_thd=iou_thd,
ce_prob_thd=ce_prob_thd, extra_span_length=extra_span_length) # (N, 5, D)
else:
max_max_statement = \
torch.max(mask_logits(stacked_max_statement, max_statement_mask), 2)[0] # (N, 5, D)
# targets = targets
answer_scores = self.classifier(max_max_statement).squeeze(2) # (N, 5)
return answer_scores, targets, temporal_scores_st_ed # (N_new, 5), (N_new, ) (N, 5, Li, 2)
def get_ts_loss(self, temporal_scores, ts_labels, answer_indices):
"""
Args:
temporal_scores: (N, 5, Li, 2)
ts_labels: dict(st=(N, ), ed=(N, ))
answer_indices: (N, )
Returns:
"""
bsz = len(answer_indices)
# compute loss
ca_temporal_scores_st_ed = \
temporal_scores[torch.arange(bsz, dtype=torch.long), answer_indices] # (N, Li, 2)
loss_st = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 0], ts_labels["st"])
loss_ed = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 1], ts_labels["ed"])
return (loss_st + loss_ed) / 2.
@classmethod
def sample_negatives(cls, pred_score, pos_indices, neg_indices, num_negatives=2,
use_hard_negatives=False, negative_pool_size=0, num_hard=2, drop_topk=0):
""" Sample negatives from a set of indices. Several sampling strategies are supported:
1, random; 2, hard negatives; 3, drop_topk hard negatives; 4, mix easy and hard negatives
5, sampling within a pool of hard negatives; 6, sample across images of the same video.
Args:
pred_score: (num_img, num_words, num_region)
            pos_indices: (N_pos, 3) all positive region indices for the same word, not necessarily the same image.
neg_indices: (N_neg, 3) ...
num_negatives (int):
use_hard_negatives (bool):
negative_pool_size (int):
num_hard (int):
drop_topk (int):
Returns:
"""
num_unique_pos = len(pos_indices)
sampled_pos_indices = torch.cat([pos_indices] * num_negatives, dim=0)
if use_hard_negatives:
# print("using use_hard_negatives")
neg_scores = pred_score[neg_indices[:, 0], neg_indices[:, 1], neg_indices[:, 2]] # TODO
max_indices = torch.sort(neg_scores, descending=True)[1].tolist()
if negative_pool_size > num_negatives: # sample from a pool of hard negatives
hard_pool = max_indices[drop_topk:drop_topk + negative_pool_size]
hard_pool_indices = neg_indices[hard_pool]
num_hard_negs = num_negatives
sampled_easy_neg_indices = []
if num_hard < num_negatives:
easy_pool = max_indices[drop_topk + negative_pool_size:]
easy_pool_indices = neg_indices[easy_pool]
num_hard_negs = num_hard
num_easy_negs = num_negatives - num_hard_negs
sampled_easy_neg_indices = easy_pool_indices[
torch.randint(low=0, high=len(easy_pool_indices),
size=(num_easy_negs * num_unique_pos, ), dtype=torch.long)
]
sampled_hard_neg_indices = hard_pool_indices[
torch.randint(low=0, high=len(hard_pool_indices),
size=(num_hard_negs * num_unique_pos, ), dtype=torch.long)
]
if len(sampled_easy_neg_indices) != 0:
sampled_neg_indices = torch.cat([sampled_hard_neg_indices, sampled_easy_neg_indices], dim=0)
else:
sampled_neg_indices = sampled_hard_neg_indices
else: # directly take the top negatives
sampled_neg_indices = neg_indices[max_indices[drop_topk:drop_topk+len(sampled_pos_indices)]]
else:
sampled_neg_indices = neg_indices[
torch.randint(low=0, high=len(neg_indices), size=(len(sampled_pos_indices),), dtype=torch.long)
]
return sampled_pos_indices, sampled_neg_indices
def get_att_loss(self, scores, att_labels, target, words, vid_names, qids, q_lens, img_indices, boxes,
start_indices, num_negatives=2, use_hard_negatives=False, drop_topk=0):
""" compute ranking loss, use for loop to find the indices,
use advanced indexing to perform the real calculation
Build a list contains a quaduple
Args:
scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]
att_labels: list(tensor), each has dimension (#num_imgs, #num_words, #regions), not batched
target: 1D tensor (N, )
words: LongTensor (N, 5, Lqa)
vid_names: list(str) (N,)
qids: list(int), (N, )
q_lens: list(int), (N, )
img_indices: list(list(int)), (N, Li), or None
boxes: list(list(box)) of length N, each sublist represent an image,
each box contains the coordinates of xyxy, or None
num_negatives: number of negatives for each positive region
            use_hard_negatives: use hard negatives, i.e. select negatives with high scores
drop_topk: drop topk highest negatives (since the top negatives might be correct, they are just not labeled)
start_indices (list of int): each element is an index (at 0.5fps) of the first image
with spatial annotation. If with_ts, set to zero
Returns:
att_loss: loss value for the batch
            att_predictions: dict(det_q=list, det_ca=list) of per-word detection data, used to calculate att. accuracy
"""
pos_container = [] # contains tuples of 5 elements, which are (batch_i, ca_i, img_i, word_i, region_i)
neg_container = []
for batch_idx in range(len(target)): # batch
ca_idx = target[batch_idx].cpu().item()
gt_score = att_labels[batch_idx] # num_img * (num_words, num_region)
start_idx = start_indices[batch_idx] # int
num_img = len(gt_score)
sen_l, _ = gt_score[0].shape
pred_score = scores[batch_idx, ca_idx, :num_img, :sen_l] # (num_img, num_words, num_region)
# find positive and negative indices
batch_pos_indices = []
batch_neg_indices = []
for img_idx, img_gt_score in enumerate(gt_score):
img_idx = start_idx + img_idx
img_pos_indices = torch.nonzero(img_gt_score) # (N_pos, 2) ==> (#words, #regions)
if len(img_pos_indices) == 0: # skip if no positive indices
continue
img_pos_indices = torch.cat([img_pos_indices.new_full([len(img_pos_indices), 1], img_idx),
img_pos_indices], dim=1) # (N_pos, 3) ==> (#img, #words, #regions)
img_neg_indices = torch.nonzero(img_gt_score == 0) # (N_neg, 2)
img_neg_indices = torch.cat([img_neg_indices.new_full([len(img_neg_indices), 1], img_idx),
img_neg_indices], dim=1) # (N_neg, 3)
batch_pos_indices.append(img_pos_indices)
batch_neg_indices.append(img_neg_indices)
if len(batch_pos_indices) == 0: # skip if empty ==> no gt label for the video
continue
batch_pos_indices = torch.cat(batch_pos_indices, dim=0) # (N_pos, 3) -->
batch_neg_indices = torch.cat(batch_neg_indices, dim=0) # (N_neg, 3)
# sample positives and negatives
available_img_indices = batch_pos_indices[:, 0].unique().tolist()
for img_idx in available_img_indices:
                # pos_indices for a certain img
img_idx_pos_indices = batch_pos_indices[batch_pos_indices[:, 0] == img_idx]
img_idx_neg_indices = batch_neg_indices[batch_neg_indices[:, 0] == img_idx]
available_word_indices = img_idx_pos_indices[:, 1].unique().tolist()
for word_idx in available_word_indices:
# positives and negatives for a given image-word pair, specified by img_idx-word_idx
img_idx_word_idx_pos_indices = img_idx_pos_indices[img_idx_pos_indices[:, 1] == word_idx]
img_idx_word_idx_neg_indices = img_idx_neg_indices[img_idx_neg_indices[:, 1] == word_idx]
# actually all the positives, not sampled pos
sampled_pos_indices, sampled_neg_indices = \
self.sample_negatives(pred_score,
img_idx_word_idx_pos_indices, img_idx_word_idx_neg_indices,
num_negatives=num_negatives, use_hard_negatives=use_hard_negatives,
negative_pool_size=self.negative_pool_size,
num_hard=self.num_hard, drop_topk=drop_topk)
base_indices = torch.LongTensor([[batch_idx, ca_idx]] * len(sampled_pos_indices)).\
to(sampled_pos_indices.device)
pos_container.append(torch.cat([base_indices, sampled_pos_indices], dim=1))
neg_container.append(torch.cat([base_indices, sampled_neg_indices], dim=1))
pos_container = torch.cat(pos_container, dim=0)
neg_container = torch.cat(neg_container, dim=0)
# contain all the predictions and gt labels in this batch, only consider the ones with gt labels
# also only consider the positive answer.
att_predictions = None
if not self.training and self.vfeat_flag:
att_predictions = dict(det_q=[],
det_ca=[])
unique_pos_container = np.unique(pos_container.cpu().numpy(), axis=0) # unique rows in the array
for row in unique_pos_container:
batch_idx, ca_idx, img_idx, word_idx, region_idx = row
start_idx = start_indices[batch_idx] # int
cur_q_len = q_lens[batch_idx]
num_region = att_labels[batch_idx][img_idx-start_idx].shape[1] # num_img * (num_words, num_region)
if len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()) != \
len(boxes[batch_idx][img_idx-start_idx]):
print("scores[batch_idx, ca_idx, img_idx, word_idx].data.cpu()",
len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()))
print("len(boxes[batch_idx][img_idx-start_idx])", len(boxes[batch_idx][img_idx-start_idx]))
print("boxes, batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx",
batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx)
print(row)
raise AssertionError
cur_det_data = {
"pred": scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu(),
"word": words[batch_idx, ca_idx, word_idx],
"qid": qids[batch_idx],
"vid_name": vid_names[batch_idx],
"img_idx": img_indices[batch_idx][img_idx], # full indices
"boxes": boxes[batch_idx][img_idx-start_idx] # located boxes
}
if word_idx < cur_q_len:
att_predictions["det_q"].append(cur_det_data)
else:
att_predictions["det_ca"].append(cur_det_data)
pos_scores = scores[pos_container[:, 0], pos_container[:, 1], pos_container[:, 2],
pos_container[:, 3], pos_container[:, 4]]
neg_scores = scores[neg_container[:, 0], neg_container[:, 1], neg_container[:, 2],
neg_container[:, 3], neg_container[:, 4]]
if self.att_loss_type == "hinge":
            # max(0, m + S_neg - S_pos)
att_loss = torch.clamp(self.margin + neg_scores - pos_scores, min=0).sum()
elif self.att_loss_type == "lse":
            # log[1 + exp(alpha * (S_neg - S_pos))]
att_loss = torch.log1p(torch.exp(self.alpha * (neg_scores - pos_scores))).sum()
else:
raise NotImplementedError("Only support hinge and lse")
return att_loss, att_predictions
def get_att_prediction(self, scores, object_vocab, words, vid_names, qids, img_indices, boxes,
start_indices, score_thd=0.2):
""" compute ranking loss, use for loop to find the indices,
use advanced indexing to perform the real calculation
Build a list contains a quaduple
Args:
scores: cosine similarity scores (N, 5, Li, Lqa, Lr), in the range [-1, 1]
object_vocab: list, object word ids in the vocabulary
words: LongTensor (N, 5, Lqa)
vid_names: list(str) (N,)
qids: list(int), (N, )
img_indices: list(list(int)), (N, Li), or None
boxes: list(list(box)) of length N, each sublist represent an image,
each box contains the coordinates of xyxy, or None
start_indices (list of int): each element is an index (at 0.5fps) of the first image
with spatial annotation. If with_ts, set to zero
score_thd: only keep boxes with score higher than this value
        Returns:
            att_predictions: (list) one dict per question, mapping each answer index to the
                accepted region scores and boxes; None unless the vid branch is active
"""
# contain all the predictions and gt labels in this batch, only consider the ones with gt labels
# also only consider the positive answer.
att_predictions = None
if self.vfeat_flag:
att_predictions = []
for batch_idx in range(len(scores)):
start_idx = start_indices[batch_idx] # int
q_att_predictions = dict() # predictions associated with this question
for ans_idx in range(5):
q_att_predictions[ans_idx] = []
for img_idx_local in range(len(boxes[batch_idx])):
# img_idx_local: for the imgs with box anno
# img_idx_global: for all the imgs, including ones without box anno
img_idx_global = img_idx_local + start_idx
cur_img_scores = scores[batch_idx, ans_idx, img_idx_global] # (Lqa, Lr)
cur_words = words[batch_idx, ans_idx].tolist() # (Lqa, )
cur_img_boxes = boxes[batch_idx][img_idx_local]
for word_idx, w in enumerate(cur_words):
if w in object_vocab:
cur_word_region_scores = cur_img_scores[word_idx].data.cpu().numpy() # (Lr, )
accepted_region_ids = np.nonzero(cur_word_region_scores >= score_thd)[0].tolist()
accepted_region_scores = [float(cur_word_region_scores[i]) for i in accepted_region_ids]
accepted_region_boxes = [cur_img_boxes[i] for i in accepted_region_ids]
sorted_indices = np.argsort(accepted_region_scores)
accepted_region_scores = [accepted_region_scores[i] for i in sorted_indices]
accepted_region_boxes = [accepted_region_boxes[i] for i in sorted_indices]
cur_det_data = {
"pred": accepted_region_scores,
"bbox": accepted_region_boxes,
"word": int(words[batch_idx, ans_idx, word_idx]),
"qid": int(qids[batch_idx]),
"vid_name": vid_names[batch_idx],
"img_idx": img_indices[batch_idx][img_idx_global], # image file name id
}
q_att_predictions[ans_idx].append(cur_det_data)
att_predictions.append(q_att_predictions)
return att_predictions
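get_att_loss supports two attention-ranking objectives, hinge and log-sum-exp. A standalone sketch of just that scoring step, with illustrative margin and alpha values (the model reads the real ones from opt):

import torch

def ranking_loss_sketch(pos_scores, neg_scores, loss_type='hinge',
                        margin=0.1, alpha=20.0):
    """Illustrative re-statement of the two ranking losses in get_att_loss."""
    if loss_type == 'hinge':
        # max(0, m + S_neg - S_pos), summed over sampled pos/neg pairs
        return torch.clamp(margin + neg_scores - pos_scores, min=0).sum()
    elif loss_type == 'lse':
        # log(1 + exp(alpha * (S_neg - S_pos))), a smooth variant of hinge
        return torch.log1p(torch.exp(alpha * (neg_scores - pos_scores))).sum()
    else:
        raise NotImplementedError("Only support hinge and lse")

pos = torch.tensor([0.8, 0.6])
neg = torch.tensor([0.3, 0.5])
print(ranking_loss_sketch(pos, neg, 'hinge'))  # tensor(0.): both pairs meet the margin
print(ranking_loss_sketch(pos, neg, 'lse'))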
| 52.92804
| 121
| 0.560267
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pprint
from collections import defaultdict
from .context_query_attention import StructuredAttention
from .encoder import StackedEncoder
from .cnn import DepthwiseSeparableConv
from .model_utils import save_pickle, mask_logits, flat_list_of_lists, \
find_max_triples, get_high_iou_sapns, expand_span
class LinearWrapper(nn.Module):
def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True):
super(LinearWrapper, self).__init__()
self.relu = relu
layers = [nn.LayerNorm(in_hsz)] if layer_norm else []
layers += [
nn.Dropout(dropout),
nn.Linear(in_hsz, out_hsz)
]
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.relu:
            return F.relu(self.conv(x), inplace=True)
        else:
            return self.conv(x)
class ConvLinear(nn.Module):
def __init__(self, in_hsz, out_hsz, kernel_size=3, layer_norm=True, dropout=0.1, relu=True):
super(ConvLinear, self).__init__()
layers = [nn.LayerNorm(in_hsz)] if layer_norm else []
layers += [
nn.Dropout(dropout),
DepthwiseSeparableConv(in_ch=in_hsz,
out_ch=out_hsz,
k=kernel_size,
dim=1,
relu=relu)
]
self.conv = nn.Sequential(*layers)
def forward(self, x):
return self.conv(x)
class STAGE(nn.Module):
def __init__(self, opt):
super(STAGE, self).__init__()
self.opt = opt
self.inference_mode = False
self.sub_flag = opt.sub_flag
self.vfeat_flag = opt.vfeat_flag
self.vfeat_size = opt.vfeat_size
self.t_iter = opt.t_iter
self.extra_span_length = opt.extra_span_length
self.add_local = opt.add_local
self.use_sup_att = opt.use_sup_att
self.num_negatives = opt.num_negatives
self.negative_pool_size = opt.negative_pool_size
self.num_hard = opt.num_hard
self.drop_topk = opt.drop_topk
self.margin = opt.margin
self.att_loss_type = opt.att_loss_type
self.scale = opt.scale
self.alpha = opt.alpha
self.dropout = opt.dropout
self.hsz = opt.hsz
self.bsz = None
self.num_seg = None
self.num_a = 5
self.flag_cnt = self.sub_flag + self.vfeat_flag
self.wd_size = opt.embedding_size
self.bridge_hsz = 300
self.bert_word_encoding_fc = nn.Sequential(
nn.LayerNorm(self.wd_size),
nn.Dropout(self.dropout),
nn.Linear(self.wd_size, self.bridge_hsz),
nn.ReLU(True),
nn.LayerNorm(self.bridge_hsz),
)
if self.sub_flag:
print("Activate sub branch")
if self.vfeat_flag:
print("Activate vid branch")
self.vid_fc = nn.Sequential(
nn.LayerNorm(self.vfeat_size),
nn.Dropout(self.dropout),
nn.Linear(self.vfeat_size, self.bridge_hsz),
nn.ReLU(True),
nn.LayerNorm(self.bridge_hsz)
)
if self.flag_cnt == 2:
self.concat_fc = nn.Sequential(
nn.LayerNorm(3 * self.hsz),
nn.Dropout(self.dropout),
nn.Linear(3 * self.hsz, self.hsz),
nn.ReLU(True),
nn.LayerNorm(self.hsz),
)
self.input_embedding = nn.Sequential(
nn.Dropout(self.dropout),
nn.Linear(self.bridge_hsz, self.hsz),
nn.ReLU(True),
nn.LayerNorm(self.hsz),
)
self.input_encoder = StackedEncoder(n_blocks=opt.input_encoder_n_blocks,
n_conv=opt.input_encoder_n_conv,
kernel_size=opt.input_encoder_kernel_size,
num_heads=opt.input_encoder_n_heads,
hidden_size=self.hsz,
dropout=self.dropout)
self.str_attn = StructuredAttention(dropout=self.dropout,
scale=opt.scale,
add_void=opt.add_non_visual)
self.c2q_down_projection = nn.Sequential(
nn.LayerNorm(3 * self.hsz),
nn.Dropout(self.dropout),
nn.Linear(3*self.hsz, self.hsz),
nn.ReLU(True),
)
self.cls_encoder = StackedEncoder(n_blocks=opt.cls_encoder_n_blocks,
n_conv=opt.cls_encoder_n_conv,
kernel_size=opt.cls_encoder_kernel_size,
num_heads=opt.cls_encoder_n_heads,
hidden_size=self.hsz,
dropout=self.dropout)
self.cls_projection_layers = nn.ModuleList(
[
LinearWrapper(in_hsz=self.hsz,
out_hsz=self.hsz,
layer_norm=True,
dropout=self.dropout,
relu=True)
] +
[
ConvLinear(in_hsz=self.hsz,
out_hsz=self.hsz,
kernel_size=3,
layer_norm=True,
dropout=self.dropout,
relu=True)
for _ in range(self.t_iter)])
self.temporal_scoring_st_layers = nn.ModuleList([
LinearWrapper(in_hsz=self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
for _ in range(self.t_iter+1)])
self.temporal_scoring_ed_layers = nn.ModuleList([
LinearWrapper(in_hsz=self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
for _ in range(self.t_iter+1)])
self.temporal_criterion = nn.CrossEntropyLoss(reduction="sum")
self.classifier = LinearWrapper(in_hsz=self.hsz * 2 if self.add_local else self.hsz,
out_hsz=1,
layer_norm=True,
dropout=self.dropout,
relu=False)
def load_word_embedding(self, pretrained_embedding, requires_grad=False):
self.word_embedding.weight.data.copy_(torch.from_numpy(pretrained_embedding))
self.word_embedding.weight.requires_grad = requires_grad
def forward(self, batch):
if self.inference_mode:
return self.forward_main(batch)
else:
out, att_loss, att_predictions, temporal_loss, temporal_predictions, other_outputs = self.forward_main(batch)
return out, att_loss, att_predictions, temporal_loss, temporal_predictions
def forward_main(self, batch):
self.bsz = len(batch.qid)
bsz = self.bsz
num_a = self.num_a
hsz = self.hsz
        a_embed = self.base_encoder(batch.qas_bert.view(bsz*num_a, -1, self.wd_size),
                                    batch.qas_mask.view(bsz * num_a, -1),
                                    self.bert_word_encoding_fc,
                                    self.input_embedding,
                                    self.input_encoder)
        a_embed = a_embed.view(bsz, num_a, 1, -1, hsz)
        a_mask = batch.qas_mask.view(bsz, num_a, 1, -1)
attended_sub, attended_vid, attended_vid_mask, attended_sub_mask = (None, ) * 4
        other_outputs = {}
        if self.sub_flag:
num_imgs, num_words = batch.sub_bert.shape[1:3]
            sub_embed = self.base_encoder(batch.sub_bert.view(bsz*num_imgs, num_words, -1),
                                          batch.sub_mask.view(bsz * num_imgs, num_words),
                                          self.bert_word_encoding_fc,
                                          self.input_embedding,
                                          self.input_encoder)
            sub_embed = sub_embed.contiguous().view(bsz, 1, num_imgs, num_words, -1)
            sub_mask = batch.sub_mask.view(bsz, 1, num_imgs, num_words)
attended_sub, attended_sub_mask, sub_raw_s, sub_normalized_s = \
self.qa_ctx_attention(a_embed, sub_embed, a_mask, sub_mask,
noun_mask=None,
non_visual_vectors=None)
other_outputs["sub_normalized_s"] = sub_normalized_s
other_outputs["sub_raw_s"] = sub_raw_s
if self.vfeat_flag:
num_imgs, num_regions = batch.vid.shape[1:3]
vid_embed = F.normalize(batch.vid, p=2, dim=-1)
            vid_embed = self.base_encoder(vid_embed.view(bsz*num_imgs, num_regions, -1),
                                          batch.vid_mask.view(bsz * num_imgs, num_regions),
                                          self.vid_fc,
                                          self.input_embedding,
                                          self.input_encoder)
            vid_embed = vid_embed.contiguous().view(bsz, 1, num_imgs, num_regions, -1)
            vid_mask = batch.vid_mask.view(bsz, 1, num_imgs, num_regions)
attended_vid, attended_vid_mask, vid_raw_s, vid_normalized_s = \
self.qa_ctx_attention(a_embed, vid_embed, a_mask, vid_mask,
noun_mask=None,
non_visual_vectors=None)
other_outputs["vid_normalized_s"] = vid_normalized_s
other_outputs["vid_raw_s"] = vid_raw_s
if self.flag_cnt == 2:
visual_text_embedding = torch.cat([attended_sub,
attended_vid,
                                               attended_sub * attended_vid], dim=-1)
            visual_text_embedding = self.concat_fc(visual_text_embedding)
            out, target, t_scores = self.classfier_head_multi_proposal(
visual_text_embedding, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
elif self.sub_flag:
out, target, t_scores = self.classfier_head_multi_proposal(
attended_sub, attended_sub_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
elif self.vfeat_flag:
out, target, t_scores = self.classfier_head_multi_proposal(
attended_vid, attended_vid_mask, batch.target, batch.ts_label, batch.ts_label_mask,
extra_span_length=self.extra_span_length)
else:
raise NotImplementedError
assert len(out) == len(target)
other_outputs["temporal_scores"] = t_scores
if self.inference_mode:
inference_outputs = {
"answer": out, "t_scores": F.softmax(t_scores, dim=2),
"att_predictions": self.get_att_prediction(
scores=other_outputs["vid_raw_s"],
object_vocab=batch.eval_object_word_ids,
words=batch.qas,
vid_names=batch.vid_name,
qids=batch.qid,
img_indices=batch.image_indices,
boxes=batch.boxes,
start_indices=batch.anno_st_idx,
) if self.vfeat_flag else None,
}
return inference_outputs
att_loss = 0
att_predictions = None
if self.use_sup_att and self.training and self.vfeat_flag:
start_indices = batch.anno_st_idx
try:
cur_att_loss, cur_att_predictions = \
self.get_att_loss(other_outputs["vid_raw_s"], batch.att_labels, batch.target, batch.qas,
qids=batch.qid,
q_lens=batch.q_l,
vid_names=batch.vid_name,
img_indices=batch.image_indices,
boxes=batch.boxes,
start_indices=start_indices,
num_negatives=self.num_negatives,
use_hard_negatives=batch.use_hard_negatives,
drop_topk=self.drop_topk)
        except AssertionError:
save_pickle(
{"batch": batch, "start_indices": start_indices, "vid_raw_s": other_outputs["vid_raw_s"]},
"err_dict.pickle"
)
import sys
sys.exit(1)
att_loss += cur_att_loss
att_predictions = cur_att_predictions
temporal_loss = self.get_ts_loss(temporal_scores=t_scores,
ts_labels=batch.ts_label,
answer_indices=batch.target)
if self.training:
return [out, target], att_loss, att_predictions, temporal_loss, t_scores, other_outputs
else:
return out, att_loss, att_predictions, temporal_loss, F.softmax(t_scores, dim=2), other_outputs
@classmethod
def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):
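        # Shared encoding pipeline for all input streams: project the raw
        # features (init_encoder followed by downsize_encoder), then
        # contextualize them with the mask-aware input_encoder.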
data = downsize_encoder(init_encoder(data))
return input_encoder(data, data_mask)
def qa_ctx_attention(self, qa_embed, ctx_embed, qa_mask, ctx_mask, noun_mask, non_visual_vectors):
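        # Structured attention between the QA hypotheses and one context stream
        # (subtitles or video regions): attend, fuse [qa; attended; qa * attended],
        # and down-project; the returned mask marks images with >= 1 valid token.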
num_img, num_region = ctx_mask.shape[2:]
        u_a, raw_s, s_mask, s_normalized = self.str_attn(
            qa_embed, ctx_embed, qa_mask, ctx_mask,
            noun_mask=noun_mask, void_vector=non_visual_vectors)
        qa_embed = qa_embed.repeat(1, 1, num_img, 1, 1)
        mixed = torch.cat([qa_embed,
                           u_a,
                           qa_embed * u_a], dim=-1)
        mixed = self.c2q_down_projection(mixed)
        mixed_mask = (s_mask.sum(-1) != 0).float()
        return mixed, mixed_mask, raw_s, s_normalized
def get_proposals(self, max_statement, max_statement_mask, temporal_scores,
targets, ts_labels, max_num_proposal=1, iou_thd=0.5, ce_prob_thd=0.01,
extra_span_length=3):
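        # Training: build candidate spans around the ground-truth answer's
        # start/end scores (plus the GT span itself via add_gt=True) and
        # max-pool the statement representation inside each span and globally.
        # Inference: use the single best predicted span per answer instead.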
bsz, num_a, num_img, _ = max_statement_mask.shape
        if self.training:
            ca_temporal_scores_st_ed = \
                temporal_scores[torch.arange(bsz, dtype=torch.long), targets].data
            ca_temporal_scores_st_ed = F.softmax(ca_temporal_scores_st_ed, dim=1)
            ca_pred_spans = find_max_triples(ca_temporal_scores_st_ed[:, :, 0],
                                             ca_temporal_scores_st_ed[:, :, 1],
                                             topN=max_num_proposal,
                                             prob_thd=ce_prob_thd)
            ca_pred_spans = [[[sub_e[0], sub_e[1] + 1, sub_e[2]] for sub_e in e] for e in ca_pred_spans]
            spans = get_high_iou_sapns(zip(ts_labels["st"].tolist(), (ts_labels["ed"] + 1).tolist()),
                                       ca_pred_spans, iou_thd=iou_thd, add_gt=True)
            local_max_max_statement_list = []
            global_max_max_statement_list = []
            span_targets = []
            for idx, (t, span_sublist) in enumerate(zip(targets, spans)):
                span_targets.extend([t] * len(span_sublist))
                cur_global_max_max_statement = \
                    torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 1)[0]
                global_max_max_statement_list.extend([cur_global_max_max_statement] * len(span_sublist))
                for span in span_sublist:
                    span = expand_span(span, expand_length=extra_span_length)
                    cur_span_max_statement = mask_logits(
                        max_statement[idx, :, span[0]:span[1]],
                        max_statement_mask[idx, :, span[0]:span[1]])
                    local_max_max_statement_list.append(torch.max(cur_span_max_statement, 1)[0])
            local_max_max_statement = torch.stack(local_max_max_statement_list)
            global_max_max_statement = torch.stack(global_max_max_statement_list)
            max_max_statement = torch.cat([
                local_max_max_statement,
                global_max_max_statement], dim=-1)
            return max_max_statement, targets.new_tensor(span_targets)
        else:
            temporal_scores_st_ed = F.softmax(temporal_scores, dim=2)
            temporal_scores_st_ed_reshaped = temporal_scores_st_ed.view(bsz * num_a, -1, 2)
            pred_spans = find_max_triples(temporal_scores_st_ed_reshaped[:, :, 0],
                                          temporal_scores_st_ed_reshaped[:, :, 1],
                                          topN=1, prob_thd=None)
            pred_spans = flat_list_of_lists(pred_spans)
            pred_spans = torch.FloatTensor(pred_spans).to(temporal_scores_st_ed_reshaped.device)
            pred_spans, pred_scores = pred_spans[:, :2].long(), pred_spans[:, 2]
            pred_spans = [[e[0], e[1] + 1] for e in pred_spans]
            max_statement = max_statement.view(bsz * num_a, num_img, -1)
            max_statement_mask = max_statement_mask.view(bsz * num_a, num_img, -1)
            local_max_max_statement_list = []
            global_max_max_statement_list = []
            for idx, span in enumerate(pred_spans):
                span = expand_span(span, expand_length=extra_span_length)
                cur_global_max_max_statement = \
                    torch.max(mask_logits(max_statement[idx], max_statement_mask[idx]), 0)[0]
                global_max_max_statement_list.append(cur_global_max_max_statement)
                cur_span_max_statement = mask_logits(
                    max_statement[idx, span[0]:span[1]],
                    max_statement_mask[idx, span[0]:span[1]])
                local_max_max_statement_list.append(torch.max(cur_span_max_statement, 0)[0])
            local_max_max_statement = torch.stack(local_max_max_statement_list)
            global_max_max_statement = torch.stack(global_max_max_statement_list)
            max_max_statement = torch.cat([
                local_max_max_statement,
                global_max_max_statement], dim=-1)
            return max_max_statement.view(bsz, num_a, -1), targets
def residual_temporal_predictor(self, layer_idx, input_tensor):
        input_tensor = input_tensor + self.cls_projection_layers[layer_idx](input_tensor)
        t_score_st = self.temporal_scoring_st_layers[layer_idx](input_tensor)
        t_score_ed = self.temporal_scoring_ed_layers[layer_idx](input_tensor)
        t_score = torch.cat([t_score_st, t_score_ed], dim=2)
        return input_tensor, t_score
def classfier_head_multi_proposal(self, statement, statement_mask, targets, ts_labels, ts_labels_mask,
max_num_proposal=1, ce_prob_thd=0.01, iou_thd=0.5, extra_span_length=3):
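        # Encode the per-image statements, predict start/end scores with
        # (t_iter + 1) residual temporal predictors, then classify the answers
        # from span-pooled (or globally pooled) statement features.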
bsz, num_a, num_img, num_words = statement_mask.shape
        statement = statement.view(bsz * num_a * num_img, num_words, -1)
        statement_mask = statement_mask.view(bsz * num_a * num_img, num_words)
        statement = self.cls_encoder(statement, statement_mask)
        max_statement = torch.max(mask_logits(statement, statement_mask.unsqueeze(2)), 1)[0]
        max_statement_mask = (statement_mask.sum(1) != 0).float().view(bsz, num_a, num_img, 1)
        max_statement = max_statement.view(bsz * num_a, num_img, -1)
        t_score_container = []
        encoded_max_statement_container = []
        encoded_max_statement = max_statement
        for layer_idx in range(self.t_iter + 1):
            encoded_max_statement, prev_t_score = \
                self.residual_temporal_predictor(layer_idx, encoded_max_statement)
            t_score_container.append(prev_t_score.view(bsz, num_a, num_img, 2))
            encoded_max_statement_container.append(encoded_max_statement)
        if self.t_iter > 0:
            # average the base prediction with the refined predictions (layers 1..t_iter)
            temporal_scores_st_ed = 0.5 * (t_score_container[0] + torch.stack(t_score_container[1:]).mean(0))
        else:
            temporal_scores_st_ed = t_score_container[0]
        temporal_scores_st_ed = mask_logits(temporal_scores_st_ed, ts_labels_mask.view(bsz, 1, num_img, 1))
        stacked_max_statement = encoded_max_statement_container[0].view(bsz, num_a, num_img, -1)
        if self.add_local:
            max_max_statement, targets = self.get_proposals(
                stacked_max_statement, max_statement_mask, temporal_scores_st_ed,
                targets, ts_labels, max_num_proposal=max_num_proposal, iou_thd=iou_thd,
                ce_prob_thd=ce_prob_thd, extra_span_length=extra_span_length)
        else:
            max_max_statement = \
                torch.max(mask_logits(stacked_max_statement, max_statement_mask), 2)[0]
        answer_scores = self.classifier(max_max_statement).squeeze(2)
        return answer_scores, targets, temporal_scores_st_ed
def get_ts_loss(self, temporal_scores, ts_labels, answer_indices):
bsz = len(answer_indices)
        ca_temporal_scores_st_ed = \
            temporal_scores[torch.arange(bsz, dtype=torch.long), answer_indices]
        loss_st = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 0], ts_labels["st"])
loss_ed = self.temporal_criterion(ca_temporal_scores_st_ed[:, :, 1], ts_labels["ed"])
return (loss_st + loss_ed) / 2.
@classmethod
def sample_negatives(cls, pred_score, pos_indices, neg_indices, num_negatives=2,
use_hard_negatives=False, negative_pool_size=0, num_hard=2, drop_topk=0):
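        # Each positive index is repeated num_negatives times. Negatives are
        # drawn uniformly, or, with use_hard_negatives, from the highest-scoring
        # negatives (optionally dropping the top-k and mixing in easy negatives
        # when num_hard < num_negatives).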
num_unique_pos = len(pos_indices)
sampled_pos_indices = torch.cat([pos_indices] * num_negatives, dim=0)
if use_hard_negatives:
            neg_scores = pred_score[neg_indices[:, 0], neg_indices[:, 1], neg_indices[:, 2]]
            max_indices = torch.sort(neg_scores, descending=True)[1].tolist()
            if negative_pool_size > num_negatives:
                hard_pool = max_indices[drop_topk:drop_topk + negative_pool_size]
hard_pool_indices = neg_indices[hard_pool]
num_hard_negs = num_negatives
sampled_easy_neg_indices = []
if num_hard < num_negatives:
easy_pool = max_indices[drop_topk + negative_pool_size:]
easy_pool_indices = neg_indices[easy_pool]
num_hard_negs = num_hard
num_easy_negs = num_negatives - num_hard_negs
sampled_easy_neg_indices = easy_pool_indices[
torch.randint(low=0, high=len(easy_pool_indices),
size=(num_easy_negs * num_unique_pos, ), dtype=torch.long)
]
sampled_hard_neg_indices = hard_pool_indices[
torch.randint(low=0, high=len(hard_pool_indices),
size=(num_hard_negs * num_unique_pos, ), dtype=torch.long)
]
if len(sampled_easy_neg_indices) != 0:
sampled_neg_indices = torch.cat([sampled_hard_neg_indices, sampled_easy_neg_indices], dim=0)
else:
sampled_neg_indices = sampled_hard_neg_indices
            else:
                sampled_neg_indices = neg_indices[max_indices[drop_topk:drop_topk + len(sampled_pos_indices)]]
else:
sampled_neg_indices = neg_indices[
torch.randint(low=0, high=len(neg_indices), size=(len(sampled_pos_indices),), dtype=torch.long)
]
return sampled_pos_indices, sampled_neg_indices
def get_att_loss(self, scores, att_labels, target, words, vid_names, qids, q_lens, img_indices, boxes,
start_indices, num_negatives=2, use_hard_negatives=False, drop_topk=0):
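        # Build (batch, answer, image, word, region) index tuples for supervised
        # attention: positives come from the annotated regions, negatives are
        # sampled per word, and both sets are scored and fed to a hinge or
        # LSE-style ranking loss.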
        pos_container = []
        neg_container = []
        for batch_idx in range(len(target)):
            ca_idx = target[batch_idx].cpu().item()
            gt_score = att_labels[batch_idx]
            start_idx = start_indices[batch_idx]
            num_img = len(gt_score)
sen_l, _ = gt_score[0].shape
pred_score = scores[batch_idx, ca_idx, :num_img, :sen_l]
batch_pos_indices = []
batch_neg_indices = []
for img_idx, img_gt_score in enumerate(gt_score):
img_idx = start_idx + img_idx
                img_pos_indices = torch.nonzero(img_gt_score)
                if len(img_pos_indices) == 0:
                    continue
img_pos_indices = torch.cat([img_pos_indices.new_full([len(img_pos_indices), 1], img_idx),
img_pos_indices], dim=1)
                img_neg_indices = torch.nonzero(img_gt_score == 0)
                img_neg_indices = torch.cat([img_neg_indices.new_full([len(img_neg_indices), 1], img_idx),
                                             img_neg_indices], dim=1)
batch_pos_indices.append(img_pos_indices)
batch_neg_indices.append(img_neg_indices)
            if len(batch_pos_indices) == 0:
                continue
            batch_pos_indices = torch.cat(batch_pos_indices, dim=0)
            batch_neg_indices = torch.cat(batch_neg_indices, dim=0)
available_img_indices = batch_pos_indices[:, 0].unique().tolist()
for img_idx in available_img_indices:
img_idx_pos_indices = batch_pos_indices[batch_pos_indices[:, 0] == img_idx]
img_idx_neg_indices = batch_neg_indices[batch_neg_indices[:, 0] == img_idx]
available_word_indices = img_idx_pos_indices[:, 1].unique().tolist()
for word_idx in available_word_indices:
img_idx_word_idx_pos_indices = img_idx_pos_indices[img_idx_pos_indices[:, 1] == word_idx]
img_idx_word_idx_neg_indices = img_idx_neg_indices[img_idx_neg_indices[:, 1] == word_idx]
sampled_pos_indices, sampled_neg_indices = \
self.sample_negatives(pred_score,
img_idx_word_idx_pos_indices, img_idx_word_idx_neg_indices,
num_negatives=num_negatives, use_hard_negatives=use_hard_negatives,
negative_pool_size=self.negative_pool_size,
num_hard=self.num_hard, drop_topk=drop_topk)
base_indices = torch.LongTensor([[batch_idx, ca_idx]] * len(sampled_pos_indices)).\
to(sampled_pos_indices.device)
pos_container.append(torch.cat([base_indices, sampled_pos_indices], dim=1))
neg_container.append(torch.cat([base_indices, sampled_neg_indices], dim=1))
pos_container = torch.cat(pos_container, dim=0)
neg_container = torch.cat(neg_container, dim=0)
att_predictions = None
if not self.training and self.vfeat_flag:
att_predictions = dict(det_q=[],
det_ca=[])
            unique_pos_container = np.unique(pos_container.cpu().numpy(), axis=0)
            for row in unique_pos_container:
                batch_idx, ca_idx, img_idx, word_idx, region_idx = row
                start_idx = start_indices[batch_idx]
                cur_q_len = q_lens[batch_idx]
                num_region = att_labels[batch_idx][img_idx - start_idx].shape[1]
                if len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()) != \
                        len(boxes[batch_idx][img_idx - start_idx]):
                    print("scores[batch_idx, ca_idx, img_idx, word_idx].data.cpu()",
                          len(scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu()))
                    print("len(boxes[batch_idx][img_idx-start_idx])", len(boxes[batch_idx][img_idx - start_idx]))
                    print("boxes, batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx",
                          batch_idx, img_idx, start_idx, img_idx - start_idx, word_idx)
                    print(row)
                    raise AssertionError
cur_det_data = {
"pred": scores[batch_idx, ca_idx, img_idx, word_idx, :num_region].data.cpu(),
"word": words[batch_idx, ca_idx, word_idx],
"qid": qids[batch_idx],
"vid_name": vid_names[batch_idx],
"img_idx": img_indices[batch_idx][img_idx], "boxes": boxes[batch_idx][img_idx-start_idx] }
if word_idx < cur_q_len:
att_predictions["det_q"].append(cur_det_data)
else:
att_predictions["det_ca"].append(cur_det_data)
pos_scores = scores[pos_container[:, 0], pos_container[:, 1], pos_container[:, 2],
pos_container[:, 3], pos_container[:, 4]]
neg_scores = scores[neg_container[:, 0], neg_container[:, 1], neg_container[:, 2],
neg_container[:, 3], neg_container[:, 4]]
if self.att_loss_type == "hinge":
att_loss = torch.clamp(self.margin + neg_scores - pos_scores, min=0).sum()
elif self.att_loss_type == "lse":
att_loss = torch.log1p(torch.exp(self.alpha * (neg_scores - pos_scores))).sum()
else:
raise NotImplementedError("Only support hinge and lse")
return att_loss, att_predictions
def get_att_prediction(self, scores, object_vocab, words, vid_names, qids, img_indices, boxes,
start_indices, score_thd=0.2):
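        # For every answer option and image, keep the regions whose attention
        # score for an object-vocabulary word reaches score_thd, sorted by score.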
att_predictions = None
if self.vfeat_flag:
att_predictions = []
for batch_idx in range(len(scores)):
                start_idx = start_indices[batch_idx]
                q_att_predictions = dict()
                for ans_idx in range(5):
q_att_predictions[ans_idx] = []
for img_idx_local in range(len(boxes[batch_idx])):
img_idx_global = img_idx_local + start_idx
                        cur_img_scores = scores[batch_idx, ans_idx, img_idx_global]
                        cur_words = words[batch_idx, ans_idx].tolist()
                        cur_img_boxes = boxes[batch_idx][img_idx_local]
for word_idx, w in enumerate(cur_words):
if w in object_vocab:
                                cur_word_region_scores = cur_img_scores[word_idx].data.cpu().numpy()
                                accepted_region_ids = np.nonzero(cur_word_region_scores >= score_thd)[0].tolist()
accepted_region_scores = [float(cur_word_region_scores[i]) for i in accepted_region_ids]
accepted_region_boxes = [cur_img_boxes[i] for i in accepted_region_ids]
sorted_indices = np.argsort(accepted_region_scores)
accepted_region_scores = [accepted_region_scores[i] for i in sorted_indices]
accepted_region_boxes = [accepted_region_boxes[i] for i in sorted_indices]
cur_det_data = {
"pred": accepted_region_scores,
"bbox": accepted_region_boxes,
"word": int(words[batch_idx, ans_idx, word_idx]),
"qid": int(qids[batch_idx]),
"vid_name": vid_names[batch_idx],
"img_idx": img_indices[batch_idx][img_idx_global], }
q_att_predictions[ans_idx].append(cur_det_data)
att_predictions.append(q_att_predictions)
return att_predictions
| true
| true
|
f7093e4a3f79f87ff0d626827bd64f15c5c476cf
| 8,085
|
py
|
Python
|
legacy/20210120/prun_table.py
|
artec-kk/Studth
|
52cfa9c61355852163e1e4b42832480f51fda410
|
[
"MIT"
] | 1
|
2020-12-24T08:17:52.000Z
|
2020-12-24T08:17:52.000Z
|
legacy/20210120/prun_table.py
|
Nyanyan/Studth
|
d603f209d3125b3eafa875d47bd72c3607454391
|
[
"MIT"
] | 1
|
2021-04-19T09:08:02.000Z
|
2021-04-19T09:08:02.000Z
|
legacy/20210120/prun_table.py
|
artec-kk/Studth
|
52cfa9c61355852163e1e4b42832480f51fda410
|
[
"MIT"
] | 1
|
2021-03-11T09:26:49.000Z
|
2021-03-11T09:26:49.000Z
|
from basic_functions import *
import csv
from collections import deque
inf = 1000
def table_phase0():
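    # Breadth-first search over (edge-permutation index, corner-orientation index)
    # state pairs, seeded from the solved state in what appear to be all 24 cube
    # orientations. Since the search is breadth-first, the first cost written to a
    # cell is minimal, so the tables are usable as admissible pruning heuristics
    # for a two-phase solver. can_rotate, dir_type, rev_move_dir and candidate are
    # provided by basic_functions and are not shown here.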
trans_ep = []
with open('trans_ep_phase0.csv', mode='r') as f:
for line in map(str.strip, f):
trans_ep.append([int(i) for i in line.replace('\n', '').split(',')])
trans = []
with open('trans_co.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(2187)] for _ in range(495)] for _ in range(3)]
solved1 = ep2idx_phase0(list(range(12)))
solved2 = co2idx([0 for _ in range(8)])
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
sorted_candidate = sorted(list(candidate[0]))
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, False])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, True])
for i in range(3):
with open('prun_phase0_co_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
trans = []
with open('trans_eo.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(2048)] for _ in range(495)] for _ in range(3)]
solved1 = ep2idx_phase0(list(range(12)))
solved2 = eo2idx([0 for _ in range(12)])
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
n_last_rotated = False
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, n_last_rotated])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
n_last_rotated = True
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, n_last_rotated])
for i in range(3):
with open('prun_phase0_eo_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
def table_phase1():
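    # Same BFS construction as table_phase0, but over the phase-1 coordinates:
    # first (edge permutation, corner permutation) pairs, then a pair of edge
    # permutation indices, each dumped as three direction-typed CSV tables.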
trans_ep = []
with open('trans_ep_phase1_2.csv', mode='r') as f:
for line in map(str.strip, f):
trans_ep.append([int(i) for i in line.replace('\n', '').split(',')])
trans = []
with open('trans_cp.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(40320)] for _ in range(24)] for _ in range(3)]
solved1 = ep2idx_phase1_2(list(range(12)))
solved2 = cp2idx(list(range(8)))
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
sorted_candidate = sorted(list(candidate[1]))
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, False])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, True])
for i in range(3):
with open('prun_phase1_cp_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
trans = []
with open('trans_ep_phase1_1.csv', mode='r') as f:
for line in map(str.strip, f):
trans.append([int(i) for i in line.replace('\n', '').split(',')])
table = [[[inf for _ in range(40320)] for _ in range(24)] for _ in range(3)]
solved1 = ep2idx_phase1_2(list(range(12)))
solved2 = ep2idx_phase1_1(list(range(12)))
que = deque([[solved1, solved2, 0, i, True] for i in range(24)])
for i in range(3):
table[i][solved1][solved2] = 0
cnt = 0
while que:
cnt += 1
if cnt % 10000 == 0:
print(cnt, len(que))
idx1, idx2, cost, direction, last_rotated = que.popleft()
n_cost = cost + 1
for twist_idx, twist in enumerate(sorted_candidate):
if not can_rotate[direction][twist // 6]:
continue
n_dirs = [direction]
n_idx1 = trans_ep[idx1][twist_idx]
n_idx2 = trans[idx2][twist_idx]
for n_direction in n_dirs:
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, False])
if last_rotated:
continue
for rotate in range(12, 14):
n_idx1 = idx1
n_idx2 = idx2
n_direction = rev_move_dir(direction, rotate)
if table[dir_type[n_direction]][n_idx1][n_idx2] > n_cost:
table[dir_type[n_direction]][n_idx1][n_idx2] = n_cost
que.append([n_idx1, n_idx2, n_cost, n_direction, True])
for i in range(3):
with open('prun_phase1_ep_ep_' + str(i) + '.csv', mode='w') as f:
writer = csv.writer(f, lineterminator='\n')
for arr in table[i]:
writer.writerow(arr)
table_phase0()
table_phase1()
| 42.109375
| 85
| 0.557699
|
| true
| true
|
f7093e4ef346f30cf5169badac19ffab90a46d22
| 1,314
|
py
|
Python
|
past_archive/swexpert/1979(whereToIn).py
|
DongHyunByun/algorithm_practice
|
c726c69d35306d23467f4af6e10f2db6fdc68234
|
[
"MIT"
] | null | null | null |
past_archive/swexpert/1979(whereToIn).py
|
DongHyunByun/algorithm_practice
|
c726c69d35306d23467f4af6e10f2db6fdc68234
|
[
"MIT"
] | null | null | null |
past_archive/swexpert/1979(whereToIn).py
|
DongHyunByun/algorithm_practice
|
c726c69d35306d23467f4af6e10f2db6fdc68234
|
[
"MIT"
] | null | null | null |
def numOfAbled(L,k):
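    # Counts grid slots where a word of length k fits exactly: for each row and
    # each column, a run of k consecutive 1s is counted only when it is not part
    # of a longer run (the bordering cells, where they exist, must be 0). Note
    # the hori/verti names are swapped relative to the scan direction, but only
    # their sum is returned, so the result is unaffected.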
hori=0
verti=0
for i in range(len(L)):
for j in range(len(L)-k+1):
num=0
for m in range(k):
if L[i][j+m]!=1:
break
else :
num+=1
if num==k:
if j==len(L)-k:
if L[i][j-1]==0 :
verti+=1
elif j==0:
if L[i][j+k]==0:
verti+=1
elif L[i][j+k]==0 and L[i][j-1]==0:
verti+=1
for j in range(len(L)):
for i in range(len(L)-k+1):
num=0
for m in range(k):
if L[i+m][j]!=1:
break
else :
num+=1
if num==k:
if i==len(L)-k:
if L[i-1][j]==0:
hori+=1
elif i==0 :
if L[i+k][j]==0:
hori+=1
elif L[i-1][j]==0 and L[i+k][j]==0:
hori+=1
return verti+hori
for t in range(int(input())):
L=[]
    N, K = map(int, input().split())
for n in range(N):
L.append(list(map(int,input().split())))
print(f"#{t+1} {numOfAbled(L,K)}")
| 25.269231
| 51
| 0.314307
|
| true
| true
|
f7093f4912a687d4e7ec0a6c86c92b455f7b055d
| 274,474
|
py
|
Python
|
rsl_comm_py/shearwater_registers.py
|
RedshiftLabsPtyLtd/rsl_comm_py
|
e53b4e85079898c894dac25842a08bcc303edfbb
|
[
"MIT"
] | null | null | null |
rsl_comm_py/shearwater_registers.py
|
RedshiftLabsPtyLtd/rsl_comm_py
|
e53b4e85079898c894dac25842a08bcc303edfbb
|
[
"MIT"
] | null | null | null |
rsl_comm_py/shearwater_registers.py
|
RedshiftLabsPtyLtd/rsl_comm_py
|
e53b4e85079898c894dac25842a08bcc303edfbb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Author: Dr. Konstantin Selyunin
# License: MIT
# Created: 2020.08.19
import logging
import os.path
import struct
from abc import abstractmethod, ABC
from typing import Union, Tuple
from .rsl_xml_svd.rsl_svd_parser import RslSvdParser
class ShearWaterRegisters(ABC):
def __init__(self, **kwargs):
self.svd_parser = RslSvdParser(svd_file=ShearWaterRegisters.find_svd('shearwater.svd'))
@staticmethod
def find_svd(svd_file_name: str):
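        # Walk the package's parent directory tree and return the first path whose
        # basename matches the requested SVD file (implicitly None if not found).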
parent_dir = os.path.join(os.path.dirname(__file__), os.pardir)
for root, dirs, files in os.walk(parent_dir):
if svd_file_name in files:
return os.path.join(root, svd_file_name)
@abstractmethod
def connect(self, *args, **kwargs):
pass
@abstractmethod
def read_register(self, reg_addr: int, **kw) -> Tuple[bool, bytes]:
pass
@abstractmethod
def write_register(self, reg_addr: int, reg_value: Union[int, bytes, float, str], **kw):
pass
@property
def creg_com_settings(self):
"""
        The CREG_COM_SETTINGS register is used to set the board's serial port baud rate and to enable (or disable)
        the automatic transmission of sensor data and estimated states (telemetry).
Payload structure:
        [31:28] : BAUD_RATE -- Sets the baud rate of the board's main serial port:
:return: BAUD_RATE as bitField;
"""
addr = 0x00
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for BAUD_RATE bit field
baud_rate_val = (reg.raw_value >> 28) & 0x000F
baud_rate_enum = reg.find_field_by(name='BAUD_RATE').find_enum_entry_by(value=baud_rate_val)
return reg, baud_rate_enum
@creg_com_settings.setter
def creg_com_settings(self, new_value):
addr = 0x00
self.write_register(addr, new_value)
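    # Usage sketch (hypothetical; requires a concrete subclass implementing the
    # abstract connect/read_register/write_register transport methods):
    #   device = MyShearwaterDevice(port='/dev/ttyUSB0')   # hypothetical subclass
    #   reg, baud_rate_enum = device.creg_com_settings     # read + decode BAUD_RATE
    #   device.creg_com_settings = reg.raw_value           # write raw 32-bit value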
@property
def creg_com_rates1(self):
"""
The CREG_COM_RATES1 register sets desired telemetry transmission rates in Hz for raw accelerometer 1, gyro 1,
gyro 2 and magnetometer 1 data. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : RAW_ACCEL_1_RATE -- Specifies the desired raw accelerometer 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_GYRO_1_RATE -- Specifies the desired raw gyro 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz
[15:8] : RAW_GYRO_2_RATE -- Specifies the desired raw gyro 2 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : RAW_MAG_1_RATE -- Specifies the desired raw magnetometer 1 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: RAW_ACCEL_1_RATE as uint8_t; RAW_GYRO_1_RATE as uint8_t; RAW_GYRO_2_RATE as uint8_t; RAW_MAG_1_RATE as uint8_t;
"""
addr = 0x01
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate
@creg_com_rates1.setter
def creg_com_rates1(self, new_value):
addr = 0x01
self.write_register(addr, new_value)
@property
def creg_com_rates2(self):
"""
The CREG_COM_RATES2 register sets desired telemetry transmission rates for the magnetometer 2, all raw data,
        and temperature data rate. The ALL_RAW_RATE setting takes priority over the individual raw sensor data
        settings, i.e. whenever this bitfield is set, the individual raw sensor settings are ignored. If the
        specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : TEMP_RATE -- Specifies the desired broadcast rate for temperature data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : RAW_MAG_2_RATE -- Specifies the desired raw magnetometer 2 data broadcast rate in Hz. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : ALL_RAW_RATE -- Specifies the desired broadcast rate for all raw sensor data. If set, this overrides the broadcast rate setting for individual raw data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: TEMP_RATE as uint8_t; RAW_MAG_2_RATE as uint8_t; ALL_RAW_RATE as uint8_t;
"""
addr = 0x02
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
temp_rate, raw_mag_2_rate, all_raw_rate = struct.unpack('>BBxB', payload[0:4])
return reg, temp_rate, raw_mag_2_rate, all_raw_rate
@creg_com_rates2.setter
def creg_com_rates2(self, new_value):
addr = 0x02
self.write_register(addr, new_value)
@property
def creg_com_rates3(self):
"""
The CREG_COM_RATES3 register sets desired telemetry transmission rates for processed sensor data for the
sensors: the accelerometer 1, gyro 1, gyro 2, and magnetometer 1. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : PROC_ACCEL_1_RATE -- Specifies the desired broadcast rate for processed accelerometer 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : PROC_GYRO_1_RATE -- Specifies the desired broadcast rate for processed rate gyro 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
        [15:8] : PROC_GYRO_2_RATE -- Specifies the desired broadcast rate for processed rate gyro 2 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : PROC_MAG_1_RATE -- Specifies the desired broadcast rate for processed magnetometer 1 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_ACCEL_1_RATE as uint8_t; PROC_GYRO_1_RATE as uint8_t; PROC_GYRO_2_RATE as uint8_t; PROC_MAG_1_RATE as uint8_t;
"""
addr = 0x03
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES3')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate
@creg_com_rates3.setter
def creg_com_rates3(self, new_value):
addr = 0x03
self.write_register(addr, new_value)
@property
def creg_com_rates4(self):
"""
The CREG_COM_RATES4 register defines the desired telemetry transmission rates for the processed data for the
        magnetometer 2, and for all processed data. The ALL_PROC_RATE setting takes priority over the individual
        processed sensor data settings, i.e. whenever this bitfield is set, the individual processed sensor
        transmission rate settings are ignored. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : PROC_MAG_2_RATE -- Specifies the desired broadcast rate for processed magnetometer 2 data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
        [7:0] : ALL_PROC_RATE -- Specifies the desired broadcast rate for all processed sensor data. If set, this overrides the broadcast rate setting for individual processed data broadcast rates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: PROC_MAG_2_RATE as uint8_t; ALL_PROC_RATE as uint8_t;
"""
addr = 0x04
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES4')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_mag_2_rate, all_proc_rate = struct.unpack('>BxxB', payload[0:4])
return reg, proc_mag_2_rate, all_proc_rate
@creg_com_rates4.setter
def creg_com_rates4(self, new_value):
addr = 0x04
self.write_register(addr, new_value)
@property
def creg_com_rates5(self):
"""
The CREG_COM_RATES5 register sets desired telemetry transmission rates for quaternions, Euler Angles,
position, and velocity estimates. If the specified rate is 0, then no data is transmitted.
Payload structure:
[31:24] : QUAT_RATE -- Specifies the desired broadcast rate for quaternion data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[23:16] : EULER_RATE -- Specifies the desired broadcast rate for Euler Angle data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
        [15:8] : POSITION_RATE -- Specifies the desired broadcast rate for position data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : VELOCITY_RATE -- Specifies the desired broadcast rate for velocity. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: QUAT_RATE as uint8_t; EULER_RATE as uint8_t; POSITION_RATE as uint8_t; VELOCITY_RATE as uint8_t;
"""
addr = 0x05
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES5')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_rate, euler_rate, position_rate, velocity_rate = struct.unpack('>BBBB', payload[0:4])
return reg, quat_rate, euler_rate, position_rate, velocity_rate
@creg_com_rates5.setter
def creg_com_rates5(self, new_value):
addr = 0x05
self.write_register(addr, new_value)
@property
def creg_com_rates6(self):
"""
The CREG_COM_RATES6 register sets desired telemetry transmission rates for pose (Euler/position packet),
health, and gyro bias estimates for the gyro 1 and gyro 2. If the specified rate is 0, then no data is
transmitted.
Payload structure:
[31:24] : POSE_RATE -- Specifies the desired broadcast rate for pose (Euler Angle and position) data. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[19:16] : HEALTH_RATE -- Specifies the desired broadcast rate for the sensor health packet.
[15:8] : GYRO_BIAS_1_RATE -- Specifies the desired broadcast rate for gyro 1 bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
[7:0] : GYRO_BIAS_2_RATE -- Specifies the desired broadcast rate for gyro 2 bias estimates. The data is stored as an unsigned 8-bit integer, yielding a maximum rate of 255 Hz.
:return: POSE_RATE as uint8_t; HEALTH_RATE as bitField; GYRO_BIAS_1_RATE as uint8_t; GYRO_BIAS_2_RATE as uint8_t;
"""
addr = 0x06
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES6')
reg.raw_value, = struct.unpack('>I', payload[0:4])
pose_rate, gyro_bias_1_rate, gyro_bias_2_rate = struct.unpack('>BxBB', payload[0:4])
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for HEALTH_RATE bit field
health_rate_val = (reg.raw_value >> 16) & 0x000F
health_rate_enum = reg.find_field_by(name='HEALTH_RATE').find_enum_entry_by(value=health_rate_val)
return reg, pose_rate, gyro_bias_1_rate, gyro_bias_2_rate, reg, health_rate_enum
@creg_com_rates6.setter
def creg_com_rates6(self, new_value):
addr = 0x06
self.write_register(addr, new_value)
@property
def creg_com_rates7(self):
"""
The CREG_COM_RATES7 register sets desired telemetry transmission rates in Hz for NMEA packets.
Payload structure:
[31:28] : NMEA_HEALTH_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style health packet.
[27:24] : NMEA_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style pose (Euler Angle/position) packet.
[23:20] : NMEA_ATTITUDE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style attitude packet.
[19:16] : NMEA_SENSOR_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style sensor data packet.
[15:12] : NMEA_RATES_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style rate data packet.
[11:8] : NMEA_GPS_POSE_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style GPS pose packet.
[7:4] : NMEA_QUAT_RATE -- Specifies the desired broadcast rate for Redshift Labs NMEA-style quaternion packet.
:return: NMEA_HEALTH_RATE as bitField; NMEA_POSE_RATE as bitField; NMEA_ATTITUDE_RATE as bitField; NMEA_SENSOR_RATE as bitField; NMEA_RATES_RATE as bitField; NMEA_GPS_POSE_RATE as bitField; NMEA_QUAT_RATE as bitField;
"""
addr = 0x07
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES7')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for NMEA_HEALTH_RATE bit field
nmea_health_rate_val = (reg.raw_value >> 28) & 0x000F
nmea_health_rate_enum = reg.find_field_by(name='NMEA_HEALTH_RATE').find_enum_entry_by(value=nmea_health_rate_val)
# find value for NMEA_POSE_RATE bit field
nmea_pose_rate_val = (reg.raw_value >> 24) & 0x000F
nmea_pose_rate_enum = reg.find_field_by(name='NMEA_POSE_RATE').find_enum_entry_by(value=nmea_pose_rate_val)
# find value for NMEA_ATTITUDE_RATE bit field
nmea_attitude_rate_val = (reg.raw_value >> 20) & 0x000F
nmea_attitude_rate_enum = reg.find_field_by(name='NMEA_ATTITUDE_RATE').find_enum_entry_by(value=nmea_attitude_rate_val)
# find value for NMEA_SENSOR_RATE bit field
nmea_sensor_rate_val = (reg.raw_value >> 16) & 0x000F
nmea_sensor_rate_enum = reg.find_field_by(name='NMEA_SENSOR_RATE').find_enum_entry_by(value=nmea_sensor_rate_val)
# find value for NMEA_RATES_RATE bit field
nmea_rates_rate_val = (reg.raw_value >> 12) & 0x000F
nmea_rates_rate_enum = reg.find_field_by(name='NMEA_RATES_RATE').find_enum_entry_by(value=nmea_rates_rate_val)
# find value for NMEA_GPS_POSE_RATE bit field
nmea_gps_pose_rate_val = (reg.raw_value >> 8) & 0x000F
nmea_gps_pose_rate_enum = reg.find_field_by(name='NMEA_GPS_POSE_RATE').find_enum_entry_by(value=nmea_gps_pose_rate_val)
# find value for NMEA_QUAT_RATE bit field
nmea_quat_rate_val = (reg.raw_value >> 4) & 0x000F
nmea_quat_rate_enum = reg.find_field_by(name='NMEA_QUAT_RATE').find_enum_entry_by(value=nmea_quat_rate_val)
return reg, nmea_health_rate_enum, nmea_pose_rate_enum, nmea_attitude_rate_enum, nmea_sensor_rate_enum, nmea_rates_rate_enum, nmea_gps_pose_rate_enum, nmea_quat_rate_enum
@creg_com_rates7.setter
def creg_com_rates7(self, new_value):
addr = 0x07
self.write_register(addr, new_value)
@property
def creg_misc_settings(self):
"""
This register contains miscellaneous filter and sensor control options.
Payload structure:
[8] : PPS -- If set, this bit causes the TX2 pin on the IO Expansion header to be used as the PPS input from an external GPS module. PPS pulses will then be used to synchronize the system clock to UTC time of day.
        [3] : ZG -- If set, this bit causes the device to attempt to measure the rate gyro bias on startup. The sensor must be stationary on startup for this feature to work properly.
[2] : Q -- If this bit is set, the sensor will run in quaternion mode instead of Euler Angle mode.
[1] : MAG1 -- If set, the magnetometer 1 will be used in state updates.
[0] : MAG2 -- If set, the magnetometer 2 will be used in state updates.
:return: PPS as bitField; ZG as bitField; Q as bitField; MAG1 as bitField; MAG2 as bitField;
"""
addr = 0x08
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MISC_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for PPS bit field
pps_val = (reg.raw_value >> 8) & 0x0001
pps_enum = reg.find_field_by(name='PPS').find_enum_entry_by(value=pps_val)
# find value for ZG bit field
zg_val = (reg.raw_value >> 3) & 0x0001
zg_enum = reg.find_field_by(name='ZG').find_enum_entry_by(value=zg_val)
# find value for Q bit field
q_val = (reg.raw_value >> 2) & 0x0001
q_enum = reg.find_field_by(name='Q').find_enum_entry_by(value=q_val)
# find value for MAG1 bit field
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
# find value for MAG2 bit field
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, pps_enum, zg_enum, q_enum, mag1_enum, mag2_enum
@creg_misc_settings.setter
def creg_misc_settings(self, new_value):
addr = 0x08
self.write_register(addr, new_value)
@property
def creg_gyro_1_meas_range(self):
"""
The CREG_GYRO_1_MEAS_RANGE register sets the desired measurement range for the gyro 1 sensor. If the rate is
not set, then the default value of 2000 deg/s will be used as a measurement range.
Payload structure:
[1:0] : MEAS_GYRO1 -- Specifies the desired measurement range for the gyro 1 measurements.
:return: MEAS_GYRO1 as bitField;
"""
addr = 0x09
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_GYRO1 bit field
meas_gyro1_val = (reg.raw_value >> 0) & 0x0003
meas_gyro1_enum = reg.find_field_by(name='MEAS_GYRO1').find_enum_entry_by(value=meas_gyro1_val)
return reg, meas_gyro1_enum
@creg_gyro_1_meas_range.setter
def creg_gyro_1_meas_range(self, new_value):
addr = 0x09
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_x(self):
"""
This register sets the x-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_X as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_x,
@creg_gyro_1_trim_x.setter
def creg_gyro_1_trim_x(self, new_value):
addr = 0x0A
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_y(self):
"""
This register sets the y-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_Y as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_y,
@creg_gyro_1_trim_y.setter
def creg_gyro_1_trim_y(self, new_value):
addr = 0x0B
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_z(self):
"""
This register sets the z-axis rate gyro 1 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_1_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_1_TRIM_Z as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_z,
@creg_gyro_1_trim_z.setter
def creg_gyro_1_trim_z(self, new_value):
addr = 0x0C
self.write_register(addr, new_value)
@property
def creg_gyro_2_meas_range(self):
"""
The CREG_GYRO_2_MEAS_RANGE register sets the desired measurement range for the gyro 2 sensor. If the rate is
not set, then the default value of 2000 deg/s will be used as a measurement range.
Payload structure:
[1:0] : MEAS_GYRO2 -- Specifies the desired measurement range for the gyro 2 measurements.
:return: MEAS_GYRO2 as bitField;
"""
addr = 0x0D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_GYRO2 bit field
meas_gyro2_val = (reg.raw_value >> 0) & 0x0003
meas_gyro2_enum = reg.find_field_by(name='MEAS_GYRO2').find_enum_entry_by(value=meas_gyro2_val)
return reg, meas_gyro2_enum
@creg_gyro_2_meas_range.setter
def creg_gyro_2_meas_range(self, new_value):
addr = 0x0D
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_x(self):
"""
This register sets the x-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_X -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_X as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_x,
@creg_gyro_2_trim_x.setter
def creg_gyro_2_trim_x(self, new_value):
addr = 0x0E
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_y(self):
"""
This register sets the y-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_Y -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_Y as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_y,
@creg_gyro_2_trim_y.setter
def creg_gyro_2_trim_y(self, new_value):
addr = 0x0F
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_z(self):
"""
This register sets the z-axis rate gyro 2 trim, which is used to add additional bias compensation for the rate
gyros during calls to the ZERO_GYRO_BIAS command.
Payload structure:
[31:0] : GYRO_2_TRIM_Z -- 32-bit IEEE Floating Point Value
:return: GYRO_2_TRIM_Z as float;
"""
addr = 0x10
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_z,
@creg_gyro_2_trim_z.setter
def creg_gyro_2_trim_z(self, new_value):
addr = 0x10
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_1(self):
"""
Row 1, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_1 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_1,
@creg_mag_1_cal1_1.setter
def creg_mag_1_cal1_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_2(self):
"""
Row 1, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_2 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_2,
@creg_mag_1_cal1_2.setter
def creg_mag_1_cal1_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_3(self):
"""
Row 1, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL1_3 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_3,
@creg_mag_1_cal1_3.setter
def creg_mag_1_cal1_3(self, new_value):
addr = 0x13
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_1(self):
"""
Row 2, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_1 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_1,
@creg_mag_1_cal2_1.setter
def creg_mag_1_cal2_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_2(self):
"""
Row 2, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_2 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_2,
@creg_mag_1_cal2_2.setter
def creg_mag_1_cal2_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_3(self):
"""
Row 2, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL2_3 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_3,
@creg_mag_1_cal2_3.setter
def creg_mag_1_cal2_3(self, new_value):
addr = 0x16
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_1(self):
"""
Row 3, Column 1 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_1 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_1,
@creg_mag_1_cal3_1.setter
def creg_mag_1_cal3_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_2(self):
"""
Row 3, Column 2 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_2 as float;
"""
addr = 0x18
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_2,
@creg_mag_1_cal3_2.setter
def creg_mag_1_cal3_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_3(self):
"""
Row 3, Column 3 of magnetometer 1 calibration matrix.
Payload structure:
[31:0] : MAG_1_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_1_CAL3_3 as float;
"""
addr = 0x19
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_3,
@creg_mag_1_cal3_3.setter
def creg_mag_1_cal3_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_x(self):
"""
This register stores a bias term for the magnetometer 1 x-axis for hard-iron calibration. This term can be
        computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_X as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_x,
@creg_mag_1_bias_x.setter
def creg_mag_1_bias_x(self, new_value):
addr = 0x1A
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_y(self):
"""
This register stores a bias term for the magnetometer 1 y-axis for hard-iron calibration. This term can be
        computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_Y as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_y,
@creg_mag_1_bias_y.setter
def creg_mag_1_bias_y(self, new_value):
addr = 0x1B
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_z(self):
"""
This register stores a bias term for the magnetometer 1 z-axis for hard-iron calibration. This term can be
        computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_1_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_1_BIAS_Z as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_z,
@creg_mag_1_bias_z.setter
def creg_mag_1_bias_z(self, new_value):
addr = 0x1C
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_1(self):
"""
Row 1, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_1 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_1,
@creg_mag_2_cal1_1.setter
def creg_mag_2_cal1_1(self, new_value):
addr = 0x1D
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_2(self):
"""
Row 1, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_2 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_2,
@creg_mag_2_cal1_2.setter
def creg_mag_2_cal1_2(self, new_value):
addr = 0x1E
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_3(self):
"""
Row 1, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL1_3 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_3,
@creg_mag_2_cal1_3.setter
def creg_mag_2_cal1_3(self, new_value):
addr = 0x1F
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_1(self):
"""
Row 2, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_1 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_1,
@creg_mag_2_cal2_1.setter
def creg_mag_2_cal2_1(self, new_value):
addr = 0x20
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_2(self):
"""
Row 2, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_2 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_2,
@creg_mag_2_cal2_2.setter
def creg_mag_2_cal2_2(self, new_value):
addr = 0x21
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_3(self):
"""
Row 2, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL2_3 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_3,
@creg_mag_2_cal2_3.setter
def creg_mag_2_cal2_3(self, new_value):
addr = 0x22
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_1(self):
"""
Row 3, Column 1 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_1 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_1,
@creg_mag_2_cal3_1.setter
def creg_mag_2_cal3_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_2(self):
"""
Row 3, Column 2 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_2 as float;
"""
addr = 0x24
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_2,
@creg_mag_2_cal3_2.setter
def creg_mag_2_cal3_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_3(self):
"""
Row 3, Column 3 of magnetometer 2 calibration matrix.
Payload structure:
[31:0] : MAG_2_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: MAG_2_CAL3_3 as float;
"""
addr = 0x25
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_3,
@creg_mag_2_cal3_3.setter
def creg_mag_2_cal3_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_x(self):
"""
This register stores a bias term for the magnetometer 2 x-axis for hard-iron calibration. This term can be
        computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_X -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_X as float;
"""
addr = 0x26
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_x,
@creg_mag_2_bias_x.setter
def creg_mag_2_bias_x(self, new_value):
addr = 0x26
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_y(self):
"""
This register stores a bias term for the magnetometer 2 y-axis for hard-iron calibration. This term can be
        computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_Y as float;
"""
addr = 0x27
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_y,
@creg_mag_2_bias_y.setter
def creg_mag_2_bias_y(self, new_value):
addr = 0x27
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_z(self):
"""
This register stores a bias term for the magnetometer 2 z-axis for hard-iron calibration. This term can be
        computed by performing magnetometer calibration with the Redshift Labs Serial Interface.
Payload structure:
[31:0] : MAG_2_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: MAG_2_BIAS_Z as float;
"""
addr = 0x28
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_z,
@creg_mag_2_bias_z.setter
def creg_mag_2_bias_z(self, new_value):
addr = 0x28
self.write_register(addr, new_value)
@property
def creg_accel_1_meas_range(self):
"""
        The CREG_ACCEL_1_MEAS_RANGE register sets the desired measurement range for accelerometer 1. If the range
        is not set, the default measurement range of +-2 g is used.
Payload structure:
[1:0] : MEAS_ACC1 -- Specifies the desired measurement range for the accelerometer 1 measurements.
:return: MEAS_ACC1 as bitField;
"""
addr = 0x29
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for MEAS_ACC1 bit field
meas_acc1_val = (reg.raw_value >> 0) & 0x0003
meas_acc1_enum = reg.find_field_by(name='MEAS_ACC1').find_enum_entry_by(value=meas_acc1_val)
return reg, meas_acc1_enum
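    # The getter above shows the bit-field decoding pattern used for enumerated
    # registers: the 32-bit word is shifted right by the field's LSB offset and
    # masked to the field width (a 2-bit field here, hence the 0x0003 mask),
    # and the numeric value is then mapped to its SVD enum entry.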
@creg_accel_1_meas_range.setter
def creg_accel_1_meas_range(self, new_value):
addr = 0x29
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_1(self):
"""
Row 1, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_1 as float;
"""
addr = 0x2A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_1,
@creg_accel_1_cal1_1.setter
def creg_accel_1_cal1_1(self, new_value):
addr = 0x2A
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_2(self):
"""
Row 1, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_2 as float;
"""
addr = 0x2B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_2,
@creg_accel_1_cal1_2.setter
def creg_accel_1_cal1_2(self, new_value):
addr = 0x2B
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_3(self):
"""
Row 1, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL1_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL1_3 as float;
"""
addr = 0x2C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_3,
@creg_accel_1_cal1_3.setter
def creg_accel_1_cal1_3(self, new_value):
addr = 0x2C
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_1(self):
"""
Row 2, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_1 as float;
"""
addr = 0x2D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_1,
@creg_accel_1_cal2_1.setter
def creg_accel_1_cal2_1(self, new_value):
addr = 0x2D
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_2(self):
"""
Row 2, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_2 as float;
"""
addr = 0x2E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_2,
@creg_accel_1_cal2_2.setter
def creg_accel_1_cal2_2(self, new_value):
addr = 0x2E
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_3(self):
"""
Row 2, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL2_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL2_3 as float;
"""
addr = 0x2F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_3,
@creg_accel_1_cal2_3.setter
def creg_accel_1_cal2_3(self, new_value):
addr = 0x2F
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_1(self):
"""
Row 3, Column 1 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_1 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_1 as float;
"""
addr = 0x30
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_1,
@creg_accel_1_cal3_1.setter
def creg_accel_1_cal3_1(self, new_value):
addr = 0x30
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_2(self):
"""
Row 3, Column 2 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_2 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_2 as float;
"""
addr = 0x31
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_2,
@creg_accel_1_cal3_2.setter
def creg_accel_1_cal3_2(self, new_value):
addr = 0x31
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_3(self):
"""
Row 3, Column 3 of accelerometer 1 calibration matrix.
Payload structure:
[31:0] : ACCEL_1_CAL3_3 -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_CAL3_3 as float;
"""
addr = 0x32
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_3,
@creg_accel_1_cal3_3.setter
def creg_accel_1_cal3_3(self, new_value):
addr = 0x32
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_x(self):
"""
This register stores a bias term for the accelerometer 1 x-axis for bias calibration. This term can be
        computed by performing the calibrate accelerometers command within the Redshift Labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_X -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_X as float;
"""
addr = 0x33
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_x,
@creg_accel_1_bias_x.setter
def creg_accel_1_bias_x(self, new_value):
addr = 0x33
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_y(self):
"""
This register stores a bias term for the accelerometer 1 y-axis for bias calibration. This term can be
        computed by performing the calibrate accelerometers command within the Redshift Labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_Y -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_Y as float;
"""
addr = 0x34
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_y,
@creg_accel_1_bias_y.setter
def creg_accel_1_bias_y(self, new_value):
addr = 0x34
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_z(self):
"""
This register stores a bias term for the accelerometer 1 z-axis for bias calibration. This term can be
        computed by performing the calibrate accelerometers command within the Redshift Labs Serial Interface.
Payload structure:
[31:0] : ACCEL_1_BIAS_Z -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_BIAS_Z as float;
"""
addr = 0x35
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_z,
@creg_accel_1_bias_z.setter
def creg_accel_1_bias_z(self, new_value):
addr = 0x35
self.write_register(addr, new_value)
@property
def dreg_health(self):
"""
        The health register reports the current status of the sensors on the board. Monitoring the health register is
        the easiest way to watch for problems that could affect the behavior of the board or the status of the
        sensors. Analogous to the health register, the status of the GPS signal can be monitored in the
        DREG_GPS_HEALTH register.
Payload structure:
[8] : OVF -- Overflow bit. This bit is set if the board is attempting to transmit data over the serial port faster than is allowed given the baud-rate. If this bit is set, reduce broadcast rates in the COM_RATES registers.
[7] : ACC1_N -- This bit is set if the sensor detects that the norm of the accelerometer measurement is too far away from 1G to be used (i.e. during aggressive acceleration or high vibration).
[6] : MAG1_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement for the magnetometer 1 is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[5] : MAG2_N -- This bit is set if the sensor detects that the norm of the magnetometer measurement for the magnetometer 2 is too far away from 1.0 to be trusted. Usually indicates bad calibration, local field distortions, or both.
[4] : ACCEL1 -- This bit will be set if the accelerometer 1 fails to initialize on startup.
[3] : GYRO1 -- This bit will be set if the rate gyro 1 fails to initialize on startup.
[2] : GYRO2 -- This bit will be set if the rate gyro 2 fails to initialize on startup.
[1] : MAG1 -- This bit will be set if the magnetometer 1 fails to initialize on startup.
[0] : MAG2 -- This bit will be set if the magnetometer 2 fails to initialize on startup.
:return: OVF as bitField; ACC1_N as bitField; MAG1_N as bitField; MAG2_N as bitField; ACCEL1 as bitField; GYRO1 as bitField; GYRO2 as bitField; MAG1 as bitField; MAG2 as bitField;
"""
addr = 0x55
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_HEALTH')
reg.raw_value, = struct.unpack('>I', payload[0:4])
# find value for OVF bit field
ovf_val = (reg.raw_value >> 8) & 0x0001
ovf_enum = reg.find_field_by(name='OVF').find_enum_entry_by(value=ovf_val)
# find value for ACC1_N bit field
acc1_n_val = (reg.raw_value >> 7) & 0x0001
acc1_n_enum = reg.find_field_by(name='ACC1_N').find_enum_entry_by(value=acc1_n_val)
# find value for MAG1_N bit field
mag1_n_val = (reg.raw_value >> 6) & 0x0001
mag1_n_enum = reg.find_field_by(name='MAG1_N').find_enum_entry_by(value=mag1_n_val)
# find value for MAG2_N bit field
mag2_n_val = (reg.raw_value >> 5) & 0x0001
mag2_n_enum = reg.find_field_by(name='MAG2_N').find_enum_entry_by(value=mag2_n_val)
# find value for ACCEL1 bit field
accel1_val = (reg.raw_value >> 4) & 0x0001
accel1_enum = reg.find_field_by(name='ACCEL1').find_enum_entry_by(value=accel1_val)
# find value for GYRO1 bit field
gyro1_val = (reg.raw_value >> 3) & 0x0001
gyro1_enum = reg.find_field_by(name='GYRO1').find_enum_entry_by(value=gyro1_val)
# find value for GYRO2 bit field
gyro2_val = (reg.raw_value >> 2) & 0x0001
gyro2_enum = reg.find_field_by(name='GYRO2').find_enum_entry_by(value=gyro2_val)
# find value for MAG1 bit field
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
# find value for MAG2 bit field
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, ovf_enum, acc1_n_enum, mag1_n_enum, mag2_n_enum, accel1_enum, gyro1_enum, gyro2_enum, mag1_enum, mag2_enum
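    # Hypothetical usage sketch (``sensor`` and the numeric ``value`` attribute
    # on enum entries are assumptions for illustration, not guarantees of this
    # driver's API):
    #   reg, ovf, *flags = sensor.dreg_health
    #   if ovf.value:
    #       pass  # overflow: reduce broadcast rates in the COM_RATES registers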
@property
def dreg_gyro_1_raw_xy(self):
"""
Contains raw X and Y axis rate gyro 1 data.
Payload structure:
[31:16] : GYRO_1_RAW_X -- Gyro X (2s complement 16-bit integer)
[15:0] : GYRO_1_RAW_Y -- Gyro Y (2s complement 16-bit integer)
:return: GYRO_1_RAW_X as int16_t; GYRO_1_RAW_Y as int16_t;
"""
addr = 0x56
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
gyro_1_raw_x, gyro_1_raw_y = struct.unpack('>hh', payload[0:4])
return reg, gyro_1_raw_x, gyro_1_raw_y
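    # Packed two-axis registers are decoded with '>hh' (two big-endian signed
    # 16-bit integers), matching the [31:16]/[15:0] payload split; the
    # single-axis Z registers below use '>hxx' to read the high half-word and
    # skip the two unused padding bytes.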
@property
def dreg_gyro_1_raw_z(self):
"""
Contains raw Z axis rate gyro 1 data.
Payload structure:
[31:16] : GYRO_1_RAW_Z -- Gyro Z (2s complement 16-bit integer)
:return: GYRO_1_RAW_Z as int16_t;
"""
addr = 0x57
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
gyro_1_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, gyro_1_raw_z,
@property
def dreg_gyro_1_raw_time(self):
"""
        Contains the time at which the last rate gyro 1 data was acquired.
Payload structure:
[31:0] : GYRO_1_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: GYRO_1_RAW_TIME as float;
"""
addr = 0x58
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_raw_time,
@property
def dreg_gyro_2_raw_xy(self):
"""
Contains raw X and Y axis rate gyro 2 data.
Payload structure:
[31:16] : GYRO_2_RAW_X -- Gyro X (2s complement 16-bit integer)
[15:0] : GYRO_2_RAW_Y -- Gyro Y (2s complement 16-bit integer)
:return: GYRO_2_RAW_X as int16_t; GYRO_2_RAW_Y as int16_t;
"""
addr = 0x59
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
gyro_2_raw_x, gyro_2_raw_y = struct.unpack('>hh', payload[0:4])
return reg, gyro_2_raw_x, gyro_2_raw_y
@property
def dreg_gyro_2_raw_z(self):
"""
Contains raw Z axis rate gyro 2 data.
Payload structure:
[31:16] : GYRO_2_RAW_Z -- Gyro Z (2s complement 16-bit integer)
:return: GYRO_2_RAW_Z as int16_t;
"""
addr = 0x5A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
gyro_2_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, gyro_2_raw_z,
@property
def dreg_gyro_2_raw_time(self):
"""
        Contains the time at which the last rate gyro 2 data was acquired.
Payload structure:
[31:0] : GYRO_2_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: GYRO_2_RAW_TIME as float;
"""
addr = 0x5B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_raw_time,
@property
def dreg_accel_1_raw_xy(self):
"""
Contains raw X and Y axis accelerometer 1 data.
Payload structure:
[31:16] : ACCEL_1_RAW_X -- Accel X (2s complement 16-bit integer)
[15:0] : ACCEL_1_RAW_Y -- Accel Y (2s complement 16-bit integer)
:return: ACCEL_1_RAW_X as int16_t; ACCEL_1_RAW_Y as int16_t;
"""
addr = 0x5C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
accel_1_raw_x, accel_1_raw_y = struct.unpack('>hh', payload[0:4])
return reg, accel_1_raw_x, accel_1_raw_y
@property
def dreg_accel_1_raw_z(self):
"""
Contains raw Z axis accelerometer 1 data.
Payload structure:
[31:16] : ACCEL_1_RAW_Z -- Accel Z (2s complement 16-bit integer)
:return: ACCEL_1_RAW_Z as int16_t;
"""
addr = 0x5D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
accel_1_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, accel_1_raw_z,
@property
def dreg_accel_1_raw_time(self):
"""
        Contains the time at which the last raw data sample for the accelerometer 1 was acquired.
Payload structure:
[31:0] : ACCEL_1_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: ACCEL_1_RAW_TIME as float;
"""
addr = 0x5E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, accel_1_raw_time,
@property
def dreg_mag_1_raw_x(self):
"""
        Contains raw X axis magnetometer 1 data.
Payload structure:
[31:0] : MAG_1_RAW_X -- 32-bit signed integer value
:return: MAG_1_RAW_X as int32_t;
"""
addr = 0x5F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_X')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_x, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_x,
@property
def dreg_mag_1_raw_y(self):
"""
        Contains raw Y axis magnetometer 1 data.
Payload structure:
[31:0] : MAG_1_RAW_Y -- 32-bit signed integer value
:return: MAG_1_RAW_Y as int32_t;
"""
addr = 0x60
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Y')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_y, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_y,
@property
def dreg_mag_1_raw_z(self):
"""
        Contains raw Z axis magnetometer 1 data.
Payload structure:
[31:0] : MAG_1_RAW_Z -- 32-bit signed integer value
:return: MAG_1_RAW_Z as int32_t;
"""
addr = 0x61
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Z')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_z, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_z,
@property
def dreg_mag_1_raw_time(self):
"""
        Contains the time at which the last magnetometer data from the magnetometer 1 was acquired.
Payload structure:
[31:0] : MAG_1_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: MAG_1_RAW_TIME as float;
"""
addr = 0x62
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_1_raw_time,
@property
def dreg_mag_2_raw_xy(self):
"""
Contains raw X and Y axis magnetometer 2 data.
Payload structure:
[31:16] : MAG_2_RAW_X -- Magnetometer X (2s complement 16-bit integer)
[15:0] : MAG_2_RAW_Y -- Magnetometer Y (2s complement 16-bit integer)
:return: MAG_2_RAW_X as int16_t; MAG_2_RAW_Y as int16_t;
"""
addr = 0x63
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
mag_2_raw_x, mag_2_raw_y = struct.unpack('>hh', payload[0:4])
return reg, mag_2_raw_x, mag_2_raw_y
@property
def dreg_mag_2_raw_z(self):
"""
Contains raw Z axis magnetometer 2 data.
Payload structure:
[31:16] : MAG_2_RAW_Z -- Magnetometer Z (2s complement 16-bit integer)
:return: MAG_2_RAW_Z as int16_t;
"""
addr = 0x64
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
mag_2_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, mag_2_raw_z,
@property
def dreg_mag_2_raw_time(self):
"""
        Contains the time at which the last magnetometer data from the magnetometer 2 was acquired.
Payload structure:
[31:0] : MAG_2_RAW_TIME -- 32-bit IEEE Floating Point Value
:return: MAG_2_RAW_TIME as float;
"""
addr = 0x65
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_2_raw_time,
@property
def dreg_temperature(self):
"""
Contains the temperature output of the onboard temperature sensor.
Payload structure:
        [31:0] : TEMPERATURE -- Temperature in degrees Celsius (32-bit IEEE Floating Point Value)
:return: TEMPERATURE as float;
"""
addr = 0x66
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature, = struct.unpack('>f', payload[0:4])
return reg, temperature,
@property
def dreg_temperature_time(self):
"""
        Contains the time at which the last temperature was acquired.
Payload structure:
[31:0] : TEMPERATURE_TIME -- 32-bit IEEE Floating Point Value
:return: TEMPERATURE_TIME as float;
"""
addr = 0x67
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature_time, = struct.unpack('>f', payload[0:4])
return reg, temperature_time,
@property
def dreg_gyro_1_proc_x(self):
"""
Contains the actual measured angular rate from the gyro 1 for the x axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_1_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_X as float;
"""
addr = 0x68
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_x,
@property
def dreg_gyro_1_proc_y(self):
"""
Contains the actual measured angular rate from the gyro 1 for the y axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_1_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_Y as float;
"""
addr = 0x69
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_y,
@property
def dreg_gyro_1_proc_z(self):
"""
Contains the actual measured angular rate from the gyro 1 for the z axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_1_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_Z as float;
"""
addr = 0x6A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_z,
@property
def dreg_gyro_1_proc_time(self):
"""
Contains the time at which the last rate gyro data from the gyro 1 was measured.
Payload structure:
[31:0] : GYRO_1_PROC_TIME -- Gyro 1 time stamp (32-bit IEEE Floating Point Value)
:return: GYRO_1_PROC_TIME as float;
"""
addr = 0x6B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_time,
@property
def dreg_gyro_2_proc_x(self):
"""
Contains the actual measured angular rate from the gyro 2 for the x axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_2_PROC_X -- Gyro X in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_X as float;
"""
addr = 0x6C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_x,
@property
def dreg_gyro_2_proc_y(self):
"""
Contains the actual measured angular rate from the gyro 2 for the y axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_2_PROC_Y -- Gyro Y in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_Y as float;
"""
addr = 0x6D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_y,
@property
def dreg_gyro_2_proc_z(self):
"""
Contains the actual measured angular rate from the gyro 2 for the z axis in degrees/sec after calibration has
been applied.
Payload structure:
[31:0] : GYRO_2_PROC_Z -- Gyro Z in degrees / sec (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_Z as float;
"""
addr = 0x6E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_z,
@property
def dreg_gyro_2_proc_time(self):
"""
Contains the time at which the last rate gyro data from the gyro 2 was measured.
Payload structure:
[31:0] : GYRO_2_PROC_TIME -- Gyro 2 time stamp (32-bit IEEE Floating Point Value)
:return: GYRO_2_PROC_TIME as float;
"""
addr = 0x6F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_time,
@property
def dreg_accel_1_proc_x(self):
"""
Contains the actual measured acceleration from the accelerometer 1 for the x axis in m/s2 after calibration
has been applied.
Payload structure:
[31:0] : ACCEL_1_PROC_X -- Acceleration X in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_X as float;
"""
addr = 0x70
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_x,
@property
def dreg_accel_1_proc_y(self):
"""
Contains the actual measured acceleration from the accelerometer 1 for the y axis in m/s2 after calibration
has been applied.
Payload structure:
[31:0] : ACCEL_1_PROC_Y -- Acceleration Y in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_Y as float;
"""
addr = 0x71
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_y,
@property
def dreg_accel_1_proc_z(self):
"""
Contains the actual measured acceleration from the accelerometer 1 for the z axis in m/s2 after calibration
has been applied.
Payload structure:
[31:0] : ACCEL_1_PROC_Z -- Acceleration Z in m/s2 (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_Z as float;
"""
addr = 0x72
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_z,
@property
def dreg_accel_1_proc_time(self):
"""
Contains the time at which the last acceleration data from the accelerometer 1 was measured.
Payload structure:
[31:0] : ACCEL_1_PROC_TIME -- Accelerometer 1 time stamp (32-bit IEEE Floating Point Value)
:return: ACCEL_1_PROC_TIME as float;
"""
addr = 0x73
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_time,
@property
def dreg_mag_1_proc_x(self):
"""
Contains the actual measured magnetic field from the magnetometer 1 for the x axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_1_PROC_X -- Magnetometer X in mT (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_X as float;
"""
addr = 0x74
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_x,
@property
def dreg_mag_1_proc_y(self):
"""
Contains the actual measured magnetic field from the magnetometer 1 for the y axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_1_PROC_Y -- Magnetometer Y in mT (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_Y as float;
"""
addr = 0x75
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_y,
@property
def dreg_mag_1_proc_z(self):
"""
Contains the actual measured magnetic field from the magnetometer 1 for the z axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_1_PROC_Z -- Magnetometer Z in mT (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_Z as float;
"""
addr = 0x76
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_z,
@property
def dreg_mag_1_norm(self):
"""
Contains the L2-norm (magnetic norm) for the measured magnetic field from the magnetometer 1 computed over the
calibrated values.
Payload structure:
[31:0] : MAG_1_NORM -- Magnetic norm (32-bit IEEE Floating Point Value)
:return: MAG_1_NORM as float;
"""
addr = 0x77
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_NORM')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_norm, = struct.unpack('>f', payload[0:4])
return reg, mag_1_norm,
@property
def dreg_mag_1_proc_time(self):
"""
Contains the time stamp at which the calibrated magnetometer 1 data was acquired.
Payload structure:
[31:0] : MAG_1_PROC_TIME -- Magnetometer 1 time stamp (32-bit IEEE Floating Point Value)
:return: MAG_1_PROC_TIME as float;
"""
addr = 0x78
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_time,
@property
def dreg_mag_2_proc_x(self):
"""
Contains the actual measured magnetic field from the magnetometer 2 for the x axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_2_PROC_X -- Magnetometer X in mT (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_X as float;
"""
addr = 0x79
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_x, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_x,
@property
def dreg_mag_2_proc_y(self):
"""
Contains the actual measured magnetic field from the magnetometer 2 for the y axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_2_PROC_Y -- Magnetometer Y in mT (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_Y as float;
"""
addr = 0x7A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_y, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_y,
@property
def dreg_mag_2_proc_z(self):
"""
Contains the actual measured magnetic field from the magnetometer 2 for the z axis in mT after calibration has
been applied.
Payload structure:
[31:0] : MAG_2_PROC_Z -- Magnetometer Z in mT (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_Z as float;
"""
addr = 0x7B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_z, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_z,
@property
def dreg_mag_2_norm(self):
"""
Contains the L2-norm (magnetic norm) for the measured magnetic field from the magnetometer 2 computed over the
calibrated values.
Payload structure:
[31:0] : MAG_2_NORM -- Magnetic norm (32-bit IEEE Floating Point Value)
:return: MAG_2_NORM as float;
"""
addr = 0x7C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_NORM')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_norm, = struct.unpack('>f', payload[0:4])
return reg, mag_2_norm,
@property
def dreg_mag_2_proc_time(self):
"""
Contains the time stamp at which the calibrated magnetometer 2 data was acquired.
Payload structure:
[31:0] : MAG_2_PROC_TIME -- Magnetometer 2 time stamp (32-bit IEEE Floating Point Value)
:return: MAG_2_PROC_TIME as float;
"""
addr = 0x7D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_time, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_time,
@property
def dreg_quat_ab(self):
"""
Contains the first two components (a and b) of the estimated quaternion attitude.
Payload structure:
[31:16] : QUAT_A -- First quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
[15:0] : QUAT_B -- Second quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
:return: QUAT_A as int16_t; QUAT_B as int16_t;
"""
addr = 0x7E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_AB')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_a, quat_b = struct.unpack('>hh', payload[0:4])
return reg, quat_a, quat_b
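    # The quaternion components are fixed-point int16 values; dividing by
    # 29789.09091 (which equals 32768 / 1.1) recovers the floating point
    # component, e.g. a = quat_a / 29789.09091.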
@property
def dreg_quat_cd(self):
"""
Contains the second two components (c and d) of the estimated quaternion attitude.
Payload structure:
[31:16] : QUAT_C -- Third quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
[15:0] : QUAT_D -- Fourth quaternion component. Stored as a 16-bit signed integer. To get the actual value, divide by 29789.09091.
:return: QUAT_C as int16_t; QUAT_D as int16_t;
"""
addr = 0x7F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_CD')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_c, quat_d = struct.unpack('>hh', payload[0:4])
return reg, quat_c, quat_d
@property
def dreg_quat_time(self):
"""
Contains the time that the quaternion attitude was estimated.
Payload structure:
[31:0] : QUAT_TIME -- Quaternion time (32-bit IEEE Floating Point Value)
:return: QUAT_TIME as float;
"""
addr = 0x80
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
quat_time, = struct.unpack('>f', payload[0:4])
return reg, quat_time,
@property
def dreg_euler_phi_theta(self):
"""
Contains the pitch and roll angle estimates.
Payload structure:
[31:16] : PHI -- Roll angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
[15:0] : THETA -- Pitch angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
:return: PHI as int16_t; THETA as int16_t;
"""
addr = 0x81
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi, theta = struct.unpack('>hh', payload[0:4])
return reg, phi, theta
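    # The Euler angles are fixed-point int16 values in degrees; dividing by
    # 91.02222 (which equals 32768 / 360) recovers the angle, e.g.
    # roll_deg = phi / 91.02222.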
@property
def dreg_euler_psi(self):
"""
Contains the yaw angle estimate.
Payload structure:
[31:16] : PSI -- Yaw angle. Stored as a 16-bit signed integer. To get the actual value, divide by 91.02222.
:return: PSI as int16_t;
"""
addr = 0x82
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi, = struct.unpack('>hxx', payload[0:4])
return reg, psi,
@property
def dreg_euler_phi_theta_dot(self):
"""
Contains the pitch and roll rate estimates.
Payload structure:
[31:16] : PHI_DOT -- Roll rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
[15:0] : THETA_DOT -- Pitch rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
:return: PHI_DOT as int16_t; THETA_DOT as int16_t;
"""
addr = 0x83
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA_DOT')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi_dot, theta_dot = struct.unpack('>hh', payload[0:4])
return reg, phi_dot, theta_dot
@property
def dreg_euler_psi_dot(self):
"""
Contains the yaw rate estimate.
Payload structure:
[31:16] : PSI_DOT -- Yaw rate. Stored as a 16-bit signed integer. To get the actual value, divide by 16.0.
:return: PSI_DOT as int16_t;
"""
addr = 0x84
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI_DOT')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi_dot, = struct.unpack('>hxx', payload[0:4])
return reg, psi_dot,
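    # The Euler angle rates use a coarser fixed-point scale: dividing the int16
    # value by 16.0 recovers the rate, e.g. yaw_rate = psi_dot / 16.0, per the
    # payload descriptions above.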
@property
def dreg_euler_time(self):
"""
Contains the time that the Euler Angles were estimated.
Payload structure:
[31:0] : EULER_TIME -- Euler time (32-bit IEEE Floating Point Value)
:return: EULER_TIME as float;
"""
addr = 0x85
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
euler_time, = struct.unpack('>f', payload[0:4])
return reg, euler_time,
@property
def dreg_position_north(self):
"""
Contains the measured north position in meters from the latitude specified in CREG_HOME_NORTH.
Payload structure:
[31:0] : POSITION_NORTH -- North Position (32-bit IEEE Floating Point Value)
:return: POSITION_NORTH as float;
"""
addr = 0x86
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_north, = struct.unpack('>f', payload[0:4])
return reg, position_north,
@property
def dreg_position_east(self):
"""
Contains the measured east position in meters from the longitude specified in CREG_HOME_EAST.
Payload structure:
[31:0] : POSITION_EAST -- East Position (32-bit IEEE Floating Point Value)
:return: POSITION_EAST as float;
"""
addr = 0x87
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_east, = struct.unpack('>f', payload[0:4])
return reg, position_east,
@property
def dreg_position_up(self):
"""
Contains the measured altitude in meters from the altitude specified in CREG_HOME_UP.
Payload structure:
[31:0] : POSITION_UP -- Altitude (32-bit IEEE Floating Point Value)
:return: POSITION_UP as float;
"""
addr = 0x88
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_up, = struct.unpack('>f', payload[0:4])
return reg, position_up,
@property
def dreg_position_time(self):
"""
Contains the time at which the position was acquired.
Payload structure:
[31:0] : POSITION_TIME -- Position Time (32-bit IEEE Floating Point Value)
:return: POSITION_TIME as float;
"""
addr = 0x89
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_time, = struct.unpack('>f', payload[0:4])
return reg, position_time,
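    # Hypothetical usage sketch (``sensor`` is an assumed instance of this
    # class): the three position registers together give a local north/east/up
    # offset in meters from the home point configured in CREG_HOME_NORTH,
    # CREG_HOME_EAST, and CREG_HOME_UP:
    #   _, north = sensor.dreg_position_north
    #   _, east = sensor.dreg_position_east
    #   _, up = sensor.dreg_position_up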
@property
def dreg_velocity_north(self):
"""
Contains the measured north velocity in m/s.
Payload structure:
[31:0] : VELOCITY_NORTH -- North Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_NORTH as float;
"""
addr = 0x8A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_north, = struct.unpack('>f', payload[0:4])
return reg, velocity_north,
@property
def dreg_velocity_east(self):
"""
Contains the measured east velocity in m/s.
Payload structure:
[31:0] : VELOCITY_EAST -- East Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_EAST as float;
"""
addr = 0x8B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_east, = struct.unpack('>f', payload[0:4])
return reg, velocity_east,
@property
def dreg_velocity_up(self):
"""
Contains the measured altitude velocity in m/s.
Payload structure:
[31:0] : VELOCITY_UP -- Altitude Velocity (32-bit IEEE Floating Point Value)
:return: VELOCITY_UP as float;
"""
addr = 0x8C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_up, = struct.unpack('>f', payload[0:4])
return reg, velocity_up,
@property
def dreg_velocity_time(self):
"""
Contains the time at which the velocity was measured.
Payload structure:
[31:0] : VELOCITY_TIME -- Velocity time (32-bit IEEE Floating Point Value)
:return: VELOCITY_TIME as float;
"""
addr = 0x8D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_time, = struct.unpack('>f', payload[0:4])
return reg, velocity_time,
@property
def dreg_gyro_1_bias_x(self):
"""
Contains the estimated x-axis bias for the gyro 1 in degrees/s.
Payload structure:
[31:0] : GYRO_1_BIAS_X -- Gyro 1 bias X (32-bit IEEE Floating Point Value)
:return: GYRO_1_BIAS_X as float;
"""
addr = 0x8E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_x,
@property
def dreg_gyro_1_bias_y(self):
"""
Contains the estimated y-axis bias for the gyro 1 in degrees/s.
Payload structure:
[31:0] : GYRO_1_BIAS_Y -- Gyro 1 bias Y (32-bit IEEE Floating Point Value)
:return: GYRO_1_BIAS_Y as float;
"""
addr = 0x8F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_y,
@property
def dreg_gyro_1_bias_z(self):
"""
Contains the estimated z-axis bias for the gyro 1 in degrees/s.
Payload structure:
[31:0] : GYRO_1_BIAS_Z -- Gyro 1 bias Z (32-bit IEEE Floating Point Value)
:return: GYRO_1_BIAS_Z as float;
"""
addr = 0x90
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_z,
@property
def dreg_gyro_2_bias_x(self):
"""
Contains the estimated x-axis bias for the gyro 2 in degrees/s.
Payload structure:
[31:0] : GYRO_2_BIAS_X -- Gyro 2 bias X (32-bit IEEE Floating Point Value)
:return: GYRO_2_BIAS_X as float;
"""
addr = 0x91
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_x,
@property
def dreg_gyro_2_bias_y(self):
"""
Contains the estimated y-axis bias for the gyro 2 in degrees/s.
Payload structure:
[31:0] : GYRO_2_BIAS_Y -- Gyro 2 bias Y (32-bit IEEE Floating Point Value)
:return: GYRO_2_BIAS_Y as float;
"""
addr = 0x92
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_y,
@property
def dreg_gyro_2_bias_z(self):
"""
Contains the estimated z-axis bias for the gyro 2 in degrees/s.
Payload structure:
[31:0] : GYRO_2_BIAS_Z -- Gyro 2 bias Z (32-bit IEEE Floating Point Value)
:return: GYRO_2_BIAS_Z as float;
"""
addr = 0x93
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_z,
@property
def get_fw_build_id(self):
"""
        Firmware build identification string: a four-byte ASCII character sequence that corresponds to a firmware
        series.
Payload structure:
[31:0] : FW_BUILD_ID -- Firmware Build ID string
:return: FW_BUILD_ID as string;
"""
addr = 0xAA
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_ID')
reg.raw_value, = struct.unpack('>I', payload[0:4])
fw_build_id = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return fw_build_id
@property
def get_fw_build_version(self):
"""
        The firmware build version provides the unique identifier of the firmware programmed into the board. The
        response is four bytes long and identifies the major and minor build versions and the build number.
Payload structure:
[31:24] : VERSION_MAJOR -- 8-bit unsigned integer major version number
[23:16] : VERSION_MINOR -- 8-bit unsigned integer minor version number
[15:0] : BUILD_ID -- 16-bit unsigned integer build ID number
:return: VERSION_MAJOR as uint8_t; VERSION_MINOR as uint8_t; BUILD_ID as uint16_t;
"""
addr = 0xAB
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_VERSION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
version_major, version_minor, build_id = struct.unpack('>BBH', payload[0:4])
return reg, version_major, version_minor, build_id
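    # Hypothetical usage sketch (``sensor`` is an assumed instance of this
    # class): the version fields can be joined into a human-readable string:
    #   _, major, minor, build = sensor.get_fw_build_version
    #   print('firmware v{}.{}.{}'.format(major, minor, build))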
@property
def flash_commit(self):
raise RuntimeError('flash_commit has no getter! The register flash_commit is write-only!')
@flash_commit.setter
def flash_commit(self, new_value):
addr = 0xAC
self.write_register(addr, new_value)
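    # This register and those that follow are write-only command registers:
    # reading raises RuntimeError, while writing via write_register()
    # presumably triggers the corresponding action on the board.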
@property
def reset_to_factory(self):
raise RuntimeError('reset_to_factory has no getter! The register reset_to_factory is write-only!')
@reset_to_factory.setter
def reset_to_factory(self, new_value):
addr = 0xAD
self.write_register(addr, new_value)
@property
def zero_gyros(self):
raise RuntimeError('zero_gyros has no getter! The register zero_gyros is write-only!')
@zero_gyros.setter
def zero_gyros(self, new_value):
addr = 0xAE
self.write_register(addr, new_value)
@property
def set_home_position(self):
raise RuntimeError('set_home_position has no getter! The register set_home_position is write-only!')
@set_home_position.setter
def set_home_position(self, new_value):
addr = 0xB0
self.write_register(addr, new_value)
@property
def set_mag_reference(self):
raise RuntimeError('set_mag_reference has no getter! The register set_mag_reference is write-only!')
@set_mag_reference.setter
def set_mag_reference(self, new_value):
addr = 0xB1
self.write_register(addr, new_value)
@property
def calibrate_accelerometers(self):
raise RuntimeError('calibrate_accelerometers has no getter! The register calibrate_accelerometers is write-only!')
@calibrate_accelerometers.setter
def calibrate_accelerometers(self, new_value):
addr = 0xB2
self.write_register(addr, new_value)
@property
def reset_fusion(self):
raise RuntimeError('reset_fusion has no getter! The register reset_fusion is write-only!')
@reset_fusion.setter
def reset_fusion(self, new_value):
addr = 0xB3
self.write_register(addr, new_value)
@property
def enable_zupt(self):
raise RuntimeError('enable_zupt has no getter! The register enable_zupt is write-only!')
@enable_zupt.setter
def enable_zupt(self, new_value):
addr = 0xB4
self.write_register(addr, new_value)
@property
def euler_mode(self):
raise RuntimeError('euler_mode has no getter! The register euler_mode is write-only!')
@euler_mode.setter
def euler_mode(self, new_value):
addr = 0xB5
self.write_register(addr, new_value)
@property
def quaternion_mode(self):
raise RuntimeError('quaternion_mode has no getter! The register quaternion_mode is write-only!')
@quaternion_mode.setter
def quaternion_mode(self, new_value):
addr = 0xB6
self.write_register(addr, new_value)
@property
def enable_rt_calibration(self):
raise RuntimeError('enable_rt_calibration has no getter! The register enable_rt_calibration is write-only!')
@enable_rt_calibration.setter
def enable_rt_calibration(self, new_value):
addr = 0xB7
self.write_register(addr, new_value)
@property
def en_mag_anomaly_detection(self):
raise RuntimeError('en_mag_anomaly_detection has no getter! The register en_mag_anomaly_detection is write-only!')
@en_mag_anomaly_detection.setter
def en_mag_anomaly_detection(self, new_value):
addr = 0xB8
self.write_register(addr, new_value)
@property
def run_self_tests(self):
raise RuntimeError('run_self_tests has no getter! The register run_self_tests is write-only!')
@run_self_tests.setter
def run_self_tests(self, new_value):
addr = 0xB9
self.write_register(addr, new_value)
@property
def enable_external_event(self):
raise RuntimeError('enable_external_event has no getter! The register enable_external_event is write-only!')
@enable_external_event.setter
def enable_external_event(self, new_value):
addr = 0xBA
self.write_register(addr, new_value)
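    # Note: 'gnns' in the register name below is kept as-is for API
    # compatibility; it presumably means GNSS.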
@property
def enable_gnns_fusion(self):
raise RuntimeError('enable_gnns_fusion has no getter! The register enable_gnns_fusion is write-only!')
@enable_gnns_fusion.setter
def enable_gnns_fusion(self, new_value):
addr = 0xBB
self.write_register(addr, new_value)
@property
def enable_usr_euler_output(self):
raise RuntimeError('enable_usr_euler_output has no getter! The register enable_usr_euler_output is write-only!')
@enable_usr_euler_output.setter
def enable_usr_euler_output(self, new_value):
addr = 0xBC
self.write_register(addr, new_value)
@property
def enable_dead_reckoning(self):
raise RuntimeError('enable_dead_reckoning has no getter! The register enable_dead_reckoning is write-only!')
@enable_dead_reckoning.setter
def enable_dead_reckoning(self, new_value):
addr = 0xBD
self.write_register(addr, new_value)
@property
def enable_heave_sway_surge(self):
raise RuntimeError('enable_heave_sway_surge has no getter! The register enable_heave_sway_surge is write-only!')
@enable_heave_sway_surge.setter
def enable_heave_sway_surge(self, new_value):
addr = 0xBE
self.write_register(addr, new_value)
@property
def enable_ukf(self):
raise RuntimeError('enable_ukf has no getter! The register enable_ukf is write-only!')
@enable_ukf.setter
def enable_ukf(self, new_value):
addr = 0xBF
self.write_register(addr, new_value)
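    # End of the write-only command/configuration block (addresses 0xAC-0xBF).
    # None of these registers can be read back; accessing a getter raises
    # RuntimeError by design, so any state that matters later must be tracked
    # on the host side.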
@property
def board_unique_id_1(self):
"""
First 32 bits of the 64-bit board unique identifier. The bits of the unique identifier cannot be modified
by the user.
Payload structure:
[31:0] : BOARD_UNIQUE_ID_1_BITS -- Board unique ID bits
:return: BOARD_UNIQUE_ID_1_BITS as uint32_t;
"""
addr = 0xFD
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_1_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_1_bits,
@property
def board_unique_id_2(self):
"""
Last 32 bits of the 64-bit board unique identifier. The bits of the unique identifier cannot be modified
by the user.
Payload structure:
[31:0] : BOARD_UNIQUE_ID_2_BITS -- Board unique ID bits
:return: BOARD_UNIQUE_ID_2_BITS as uint32_t;
"""
addr = 0xFE
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_2_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_2_bits,
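    # Sketch for assembling the full 64-bit unique ID from the two halves. The
    # ordering (ID_1 as the high word) follows the "first"/"last" wording in
    # the docstrings above and is an assumption, not a tested guarantee:
    #
    #     _, hi = svd.board_unique_id_1
    #     _, lo = svd.board_unique_id_2
    #     unique_id = (hi << 32) | lo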
@property
def protocol_version(self):
"""
Version of the communication protocol, encoded as a four-byte ASCII string.
Payload structure:
[31:0] : PROTOCOL_VERSION_STR -- Protocol version string
:return: PROTOCOL_VERSION_STR as string;
"""
addr = 0xFF
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='PROTOCOL_VERSION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
protocol_version_str = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return protocol_version_str
@property
def hidden_gyro_1_variance(self):
"""
Variance of the gyro 1 measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_GYRO_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_VARIANCE as float;
"""
addr = 0x00
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_variance,
@hidden_gyro_1_variance.setter
def hidden_gyro_1_variance(self, new_value):
addr = 0x00
self.write_register(addr, new_value, hidden=True)
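    # The HIDDEN_* registers live in a separate address space selected by the
    # hidden=True flag, so address 0x00 here does not collide with the regular
    # register map. Read/write usage mirrors the normal registers (again
    # assuming a connected `svd` instance):
    #
    #     reg, var = svd.hidden_gyro_1_variance   # read
    #     svd.hidden_gyro_1_variance = var * 2.0  # write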
@property
def hidden_gyro_2_variance(self):
"""
Variance of the gyro 2 measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_GYRO_2_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_VARIANCE as float;
"""
addr = 0x01
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_variance,
@hidden_gyro_2_variance.setter
def hidden_gyro_2_variance(self, new_value):
addr = 0x01
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_variance(self):
"""
Variance of the accelerometer 1 measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_ACCEL_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_VARIANCE as float;
"""
addr = 0x02
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_variance,
@hidden_accel_1_variance.setter
def hidden_accel_1_variance(self, new_value):
addr = 0x02
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_variance(self):
"""
Variance of the magnetometer 1 measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_MAG_1_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_VARIANCE as float;
"""
addr = 0x03
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_variance,
@hidden_mag_1_variance.setter
def hidden_mag_1_variance(self, new_value):
addr = 0x03
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_variance(self):
"""
Variance of the magnetometer 2 measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_MAG_2_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_VARIANCE as float;
"""
addr = 0x04
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_variance,
@hidden_mag_2_variance.setter
def hidden_mag_2_variance(self, new_value):
addr = 0x04
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_course_variance(self):
"""
Variance of the GPS course measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_GPS_COURSE_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_COURSE_VARIANCE as float;
"""
addr = 0x05
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_COURSE_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_course_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_course_variance,
@hidden_gps_course_variance.setter
def hidden_gps_course_variance(self, new_value):
addr = 0x05
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_position_variance(self):
"""
Variance of the GPS position measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_GPS_POSITION_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_POSITION_VARIANCE as float;
"""
addr = 0x06
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_POSITION_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_position_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_position_variance,
@hidden_gps_position_variance.setter
def hidden_gps_position_variance(self, new_value):
addr = 0x06
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_velocity_variance(self):
"""
Variance of the GPS velocity measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_GPS_VELOCITY_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GPS_VELOCITY_VARIANCE as float;
"""
addr = 0x07
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_VELOCITY_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_velocity_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_velocity_variance,
@hidden_gps_velocity_variance.setter
def hidden_gps_velocity_variance(self, new_value):
addr = 0x07
self.write_register(addr, new_value, hidden=True)
@property
def hidden_static_press_variance(self):
"""
Variance of the static pressure measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_STATIC_PRESS_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_STATIC_PRESS_VARIANCE as float;
"""
addr = 0x08
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_STATIC_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_static_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_static_press_variance,
@hidden_static_press_variance.setter
def hidden_static_press_variance(self, new_value):
addr = 0x08
self.write_register(addr, new_value, hidden=True)
@property
def hidden_diff_press_variance(self):
"""
Variance of the differential pressure measurements used by the sensor fusion filter (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_DIFF_PRESS_VARIANCE -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_DIFF_PRESS_VARIANCE as float;
"""
addr = 0x09
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_DIFF_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_diff_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_diff_press_variance,
@hidden_diff_press_variance.setter
def hidden_diff_press_variance(self, new_value):
addr = 0x09
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_uvw(self):
"""
Process noise (Q) term for the body-frame velocity (u, v, w) states of the fusion filter (interpretation inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_Q_UVW -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_UVW as float;
"""
addr = 0x0A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_UVW')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_uvw, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_uvw,
@hidden_q_uvw.setter
def hidden_q_uvw(self, new_value):
addr = 0x0A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_quaternion(self):
"""
Process noise (Q) term for the attitude quaternion states of the fusion filter (interpretation inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_Q_QUATERNION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_QUATERNION as float;
"""
addr = 0x0B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_QUATERNION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_quaternion, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_quaternion,
@hidden_q_quaternion.setter
def hidden_q_quaternion(self, new_value):
addr = 0x0B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_gps_position(self):
"""
Process noise (Q) term for the GPS position states of the fusion filter (interpretation inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_Q_GPS_POSITION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_GPS_POSITION as float;
"""
addr = 0x0C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_GPS_POSITION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_gps_position, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_gps_position,
@hidden_q_gps_position.setter
def hidden_q_gps_position(self, new_value):
addr = 0x0C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_bias(self):
"""
Process noise (Q) term for the sensor bias states of the fusion filter (interpretation inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_Q_BIAS -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_BIAS as float;
"""
addr = 0x0D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_BIAS')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_bias, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_bias,
@hidden_q_bias.setter
def hidden_q_bias(self, new_value):
addr = 0x0D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_euler_angles(self):
"""
Process noise (Q) term for the Euler angle states of the fusion filter (interpretation inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_Q_EULER_ANGLES -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_Q_EULER_ANGLES as float;
"""
addr = 0x0E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_EULER_ANGLES')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_euler_angles, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_euler_angles,
@hidden_q_euler_angles.setter
def hidden_q_euler_angles(self, new_value):
addr = 0x0E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_low_vg_accel_noise_factor(self):
"""
Scaling factor applied to the accelerometer noise under low-dynamics conditions (interpretation inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR as float;
"""
addr = 0x0F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_low_vg_accel_noise_factor, = struct.unpack('>f', payload[0:4])
return reg, hidden_low_vg_accel_noise_factor,
@hidden_low_vg_accel_noise_factor.setter
def hidden_low_vg_accel_noise_factor(self, new_value):
addr = 0x0F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_groundspeed(self):
"""
Low-pass filter time constant (tau) for the ground speed estimate (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GROUNDSPEED -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GROUNDSPEED as float;
"""
addr = 0x10
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GROUNDSPEED')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_groundspeed, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_groundspeed,
@hidden_lpf_tau_groundspeed.setter
def hidden_lpf_tau_groundspeed(self, new_value):
addr = 0x10
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_1(self):
"""
Low-pass filter time constant (tau) for the gyro 1 measurements (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GYRO_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GYRO_1 as float;
"""
addr = 0x11
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_1,
@hidden_lpf_tau_gyro_1.setter
def hidden_lpf_tau_gyro_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_2(self):
"""
Low-pass filter time constant (tau) for the gyro 2 measurements (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_LPF_TAU_GYRO_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_GYRO_2 as float;
"""
addr = 0x12
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_2,
@hidden_lpf_tau_gyro_2.setter
def hidden_lpf_tau_gyro_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_accel_1(self):
"""
Low-pass filter time constant (tau) for the accelerometer 1 measurements (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_LPF_TAU_ACCEL_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_ACCEL_1 as float;
"""
addr = 0x13
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_ACCEL_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_accel_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_accel_1,
@hidden_lpf_tau_accel_1.setter
def hidden_lpf_tau_accel_1(self, new_value):
addr = 0x13
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_1(self):
"""
Low-pass filter time constant (tau) for the magnetometer 1 measurements (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_LPF_TAU_MAG_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_MAG_1 as float;
"""
addr = 0x14
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_1,
@hidden_lpf_tau_mag_1.setter
def hidden_lpf_tau_mag_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_2(self):
"""
Low-pass filter time constant (tau) for the magnetometer 2 measurements (description inferred from the register name; TODO: confirm).
Payload structure:
[31:0] : HIDDEN_LPF_TAU_MAG_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_LPF_TAU_MAG_2 as float;
"""
addr = 0x15
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_2,
@hidden_lpf_tau_mag_2.setter
def hidden_lpf_tau_mag_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value, hidden=True)
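    # The HIDDEN_C_<sensor>_<BIAS|SCALE>_<axis>_POW_0..POW_3 registers below
    # appear to hold the coefficients of a third-order calibration polynomial,
    # with POW_k holding the coefficient of the k-th power term. The
    # independent variable is not documented here; temperature is a plausible
    # guess for a bias/scale compensation curve, but that is an assumption.
    # Evaluation sketch using Horner's scheme:
    #
    #     _, c0 = svd.hidden_c_gyro_1_bias_x_pow_0
    #     _, c1 = svd.hidden_c_gyro_1_bias_x_pow_1
    #     _, c2 = svd.hidden_c_gyro_1_bias_x_pow_2
    #     _, c3 = svd.hidden_c_gyro_1_bias_x_pow_3
    #     bias_x = c0 + t * (c1 + t * (c2 + t * c3))  # t: the (assumed) input, e.g. temperature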
@property
def hidden_c_gyro_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_0 as float;
"""
addr = 0x16
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_0,
@hidden_c_gyro_1_bias_x_pow_0.setter
def hidden_c_gyro_1_bias_x_pow_0(self, new_value):
addr = 0x16
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_1 as float;
"""
addr = 0x17
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_1,
@hidden_c_gyro_1_bias_x_pow_1.setter
def hidden_c_gyro_1_bias_x_pow_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_2 as float;
"""
addr = 0x18
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_2,
@hidden_c_gyro_1_bias_x_pow_2.setter
def hidden_c_gyro_1_bias_x_pow_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_X_POW_3 as float;
"""
addr = 0x19
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_3,
@hidden_c_gyro_1_bias_x_pow_3.setter
def hidden_c_gyro_1_bias_x_pow_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_0 as float;
"""
addr = 0x1A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_0,
@hidden_c_gyro_1_bias_y_pow_0.setter
def hidden_c_gyro_1_bias_y_pow_0(self, new_value):
addr = 0x1A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_1 as float;
"""
addr = 0x1B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_1,
@hidden_c_gyro_1_bias_y_pow_1.setter
def hidden_c_gyro_1_bias_y_pow_1(self, new_value):
addr = 0x1B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_2 as float;
"""
addr = 0x1C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_2,
@hidden_c_gyro_1_bias_y_pow_2.setter
def hidden_c_gyro_1_bias_y_pow_2(self, new_value):
addr = 0x1C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Y_POW_3 as float;
"""
addr = 0x1D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_3,
@hidden_c_gyro_1_bias_y_pow_3.setter
def hidden_c_gyro_1_bias_y_pow_3(self, new_value):
addr = 0x1D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_0 as float;
"""
addr = 0x1E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_0,
@hidden_c_gyro_1_bias_z_pow_0.setter
def hidden_c_gyro_1_bias_z_pow_0(self, new_value):
addr = 0x1E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_1 as float;
"""
addr = 0x1F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_1,
@hidden_c_gyro_1_bias_z_pow_1.setter
def hidden_c_gyro_1_bias_z_pow_1(self, new_value):
addr = 0x1F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_2 as float;
"""
addr = 0x20
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_2,
@hidden_c_gyro_1_bias_z_pow_2.setter
def hidden_c_gyro_1_bias_z_pow_2(self, new_value):
addr = 0x20
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_BIAS_Z_POW_3 as float;
"""
addr = 0x21
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_3,
@hidden_c_gyro_1_bias_z_pow_3.setter
def hidden_c_gyro_1_bias_z_pow_3(self, new_value):
addr = 0x21
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_0 as float;
"""
addr = 0x22
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_0,
@hidden_c_gyro_1_scale_x_pow_0.setter
def hidden_c_gyro_1_scale_x_pow_0(self, new_value):
addr = 0x22
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_1 as float;
"""
addr = 0x23
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_1,
@hidden_c_gyro_1_scale_x_pow_1.setter
def hidden_c_gyro_1_scale_x_pow_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_2 as float;
"""
addr = 0x24
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_2,
@hidden_c_gyro_1_scale_x_pow_2.setter
def hidden_c_gyro_1_scale_x_pow_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_X_POW_3 as float;
"""
addr = 0x25
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_3,
@hidden_c_gyro_1_scale_x_pow_3.setter
def hidden_c_gyro_1_scale_x_pow_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_0 as float;
"""
addr = 0x26
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_0,
@hidden_c_gyro_1_scale_y_pow_0.setter
def hidden_c_gyro_1_scale_y_pow_0(self, new_value):
addr = 0x26
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_1 as float;
"""
addr = 0x27
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_1,
@hidden_c_gyro_1_scale_y_pow_1.setter
def hidden_c_gyro_1_scale_y_pow_1(self, new_value):
addr = 0x27
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_2 as float;
"""
addr = 0x28
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_2,
@hidden_c_gyro_1_scale_y_pow_2.setter
def hidden_c_gyro_1_scale_y_pow_2(self, new_value):
addr = 0x28
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Y_POW_3 as float;
"""
addr = 0x29
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_3,
@hidden_c_gyro_1_scale_y_pow_3.setter
def hidden_c_gyro_1_scale_y_pow_3(self, new_value):
addr = 0x29
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_0 as float;
"""
addr = 0x2A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_0,
@hidden_c_gyro_1_scale_z_pow_0.setter
def hidden_c_gyro_1_scale_z_pow_0(self, new_value):
addr = 0x2A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_1 as float;
"""
addr = 0x2B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_1,
@hidden_c_gyro_1_scale_z_pow_1.setter
def hidden_c_gyro_1_scale_z_pow_1(self, new_value):
addr = 0x2B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_2 as float;
"""
addr = 0x2C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_2,
@hidden_c_gyro_1_scale_z_pow_2.setter
def hidden_c_gyro_1_scale_z_pow_2(self, new_value):
addr = 0x2C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_1_SCALE_Z_POW_3 as float;
"""
addr = 0x2D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_3,
@hidden_c_gyro_1_scale_z_pow_3.setter
def hidden_c_gyro_1_scale_z_pow_3(self, new_value):
addr = 0x2D
self.write_register(addr, new_value, hidden=True)
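    # The HIDDEN_GYRO_1_ALIGNMENT<r>_<c> registers below look like the elements
    # of a 3x3 alignment (cross-axis correction) matrix, indexed row_column;
    # this interpretation is inferred from the naming only. Assembly sketch,
    # taking element [1] of each (register, value) tuple:
    #
    #     m = [[svd.hidden_gyro_1_alignment1_1[1], svd.hidden_gyro_1_alignment1_2[1], svd.hidden_gyro_1_alignment1_3[1]],
    #          [svd.hidden_gyro_1_alignment2_1[1], svd.hidden_gyro_1_alignment2_2[1], svd.hidden_gyro_1_alignment2_3[1]],
    #          [svd.hidden_gyro_1_alignment3_1[1], svd.hidden_gyro_1_alignment3_2[1], svd.hidden_gyro_1_alignment3_3[1]]]
    #     # corrected = m @ raw_gyro_vector (e.g. via numpy), assuming row-major storage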
@property
def hidden_gyro_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_1 as float;
"""
addr = 0x2E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_1,
@hidden_gyro_1_alignment1_1.setter
def hidden_gyro_1_alignment1_1(self, new_value):
addr = 0x2E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_2 as float;
"""
addr = 0x2F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_2,
@hidden_gyro_1_alignment1_2.setter
def hidden_gyro_1_alignment1_2(self, new_value):
addr = 0x2F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT1_3 as float;
"""
addr = 0x30
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_3,
@hidden_gyro_1_alignment1_3.setter
def hidden_gyro_1_alignment1_3(self, new_value):
addr = 0x30
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_1 as float;
"""
addr = 0x31
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_1,
@hidden_gyro_1_alignment2_1.setter
def hidden_gyro_1_alignment2_1(self, new_value):
addr = 0x31
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_2 as float;
"""
addr = 0x32
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_2,
@hidden_gyro_1_alignment2_2.setter
def hidden_gyro_1_alignment2_2(self, new_value):
addr = 0x32
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT2_3 as float;
"""
addr = 0x33
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_3,
@hidden_gyro_1_alignment2_3.setter
def hidden_gyro_1_alignment2_3(self, new_value):
addr = 0x33
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_1 as float;
"""
addr = 0x34
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_1,
@hidden_gyro_1_alignment3_1.setter
def hidden_gyro_1_alignment3_1(self, new_value):
addr = 0x34
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_2 as float;
"""
addr = 0x35
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_2,
@hidden_gyro_1_alignment3_2.setter
def hidden_gyro_1_alignment3_2(self, new_value):
addr = 0x35
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_ALIGNMENT3_3 as float;
"""
addr = 0x36
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_3,
@hidden_gyro_1_alignment3_3.setter
def hidden_gyro_1_alignment3_3(self, new_value):
addr = 0x36
self.write_register(addr, new_value, hidden=True)
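    # The bias-polynomial / scale-polynomial / alignment layout above repeats
    # for gyro 2 starting at hidden address 0x37 below, with the same register
    # semantics assumed throughout.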
@property
def hidden_c_gyro_2_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_0 as float;
"""
addr = 0x37
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_0,
@hidden_c_gyro_2_bias_x_pow_0.setter
def hidden_c_gyro_2_bias_x_pow_0(self, new_value):
addr = 0x37
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_1 as float;
"""
addr = 0x38
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_1,
@hidden_c_gyro_2_bias_x_pow_1.setter
def hidden_c_gyro_2_bias_x_pow_1(self, new_value):
addr = 0x38
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_2 as float;
"""
addr = 0x39
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_2,
@hidden_c_gyro_2_bias_x_pow_2.setter
def hidden_c_gyro_2_bias_x_pow_2(self, new_value):
addr = 0x39
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_X_POW_3 as float;
"""
addr = 0x3A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_3,
@hidden_c_gyro_2_bias_x_pow_3.setter
def hidden_c_gyro_2_bias_x_pow_3(self, new_value):
addr = 0x3A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_0 as float;
"""
addr = 0x3B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_0,
@hidden_c_gyro_2_bias_y_pow_0.setter
def hidden_c_gyro_2_bias_y_pow_0(self, new_value):
addr = 0x3B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_1 as float;
"""
addr = 0x3C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_1,
@hidden_c_gyro_2_bias_y_pow_1.setter
def hidden_c_gyro_2_bias_y_pow_1(self, new_value):
addr = 0x3C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_2 as float;
"""
addr = 0x3D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_2,
@hidden_c_gyro_2_bias_y_pow_2.setter
def hidden_c_gyro_2_bias_y_pow_2(self, new_value):
addr = 0x3D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Y_POW_3 as float;
"""
addr = 0x3E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_3,
@hidden_c_gyro_2_bias_y_pow_3.setter
def hidden_c_gyro_2_bias_y_pow_3(self, new_value):
addr = 0x3E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_0 as float;
"""
addr = 0x3F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_0,
@hidden_c_gyro_2_bias_z_pow_0.setter
def hidden_c_gyro_2_bias_z_pow_0(self, new_value):
addr = 0x3F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_1 as float;
"""
addr = 0x40
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_1,
@hidden_c_gyro_2_bias_z_pow_1.setter
def hidden_c_gyro_2_bias_z_pow_1(self, new_value):
addr = 0x40
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_2 as float;
"""
addr = 0x41
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_2,
@hidden_c_gyro_2_bias_z_pow_2.setter
def hidden_c_gyro_2_bias_z_pow_2(self, new_value):
addr = 0x41
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_BIAS_Z_POW_3 as float;
"""
addr = 0x42
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_3,
@hidden_c_gyro_2_bias_z_pow_3.setter
def hidden_c_gyro_2_bias_z_pow_3(self, new_value):
addr = 0x42
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_0 as float;
"""
addr = 0x43
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_0,
@hidden_c_gyro_2_scale_x_pow_0.setter
def hidden_c_gyro_2_scale_x_pow_0(self, new_value):
addr = 0x43
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_1 as float;
"""
addr = 0x44
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_1,
@hidden_c_gyro_2_scale_x_pow_1.setter
def hidden_c_gyro_2_scale_x_pow_1(self, new_value):
addr = 0x44
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_2 as float;
"""
addr = 0x45
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_2,
@hidden_c_gyro_2_scale_x_pow_2.setter
def hidden_c_gyro_2_scale_x_pow_2(self, new_value):
addr = 0x45
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_X_POW_3 as float;
"""
addr = 0x46
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_3,
@hidden_c_gyro_2_scale_x_pow_3.setter
def hidden_c_gyro_2_scale_x_pow_3(self, new_value):
addr = 0x46
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_0 as float;
"""
addr = 0x47
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_0,
@hidden_c_gyro_2_scale_y_pow_0.setter
def hidden_c_gyro_2_scale_y_pow_0(self, new_value):
addr = 0x47
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_1 as float;
"""
addr = 0x48
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_1,
@hidden_c_gyro_2_scale_y_pow_1.setter
def hidden_c_gyro_2_scale_y_pow_1(self, new_value):
addr = 0x48
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_2 as float;
"""
addr = 0x49
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_2,
@hidden_c_gyro_2_scale_y_pow_2.setter
def hidden_c_gyro_2_scale_y_pow_2(self, new_value):
addr = 0x49
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Y_POW_3 as float;
"""
addr = 0x4A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_3,
@hidden_c_gyro_2_scale_y_pow_3.setter
def hidden_c_gyro_2_scale_y_pow_3(self, new_value):
addr = 0x4A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_0 as float;
"""
addr = 0x4B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_0,
@hidden_c_gyro_2_scale_z_pow_0.setter
def hidden_c_gyro_2_scale_z_pow_0(self, new_value):
addr = 0x4B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_1 as float;
"""
addr = 0x4C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_1,
@hidden_c_gyro_2_scale_z_pow_1.setter
def hidden_c_gyro_2_scale_z_pow_1(self, new_value):
addr = 0x4C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_2 as float;
"""
addr = 0x4D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_2,
@hidden_c_gyro_2_scale_z_pow_2.setter
def hidden_c_gyro_2_scale_z_pow_2(self, new_value):
addr = 0x4D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_GYRO_2_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_GYRO_2_SCALE_Z_POW_3 as float;
"""
addr = 0x4E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_3,
@hidden_c_gyro_2_scale_z_pow_3.setter
def hidden_c_gyro_2_scale_z_pow_3(self, new_value):
addr = 0x4E
self.write_register(addr, new_value, hidden=True)
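    # --- Gyro 2 alignment block (hidden registers 0x4F..0x57). The nine
    # ALIGNMENT<row>_<col> entries form a 3x3 sensor-mounting alignment matrix,
    # one IEEE 754 float per register (grouping inferred from register names). ---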
@property
def hidden_gyro_2_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_1 as float;
"""
addr = 0x4F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_1,
@hidden_gyro_2_alignment1_1.setter
def hidden_gyro_2_alignment1_1(self, new_value):
addr = 0x4F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_2 as float;
"""
addr = 0x50
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_2,
@hidden_gyro_2_alignment1_2.setter
def hidden_gyro_2_alignment1_2(self, new_value):
addr = 0x50
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT1_3 as float;
"""
addr = 0x51
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_3,
@hidden_gyro_2_alignment1_3.setter
def hidden_gyro_2_alignment1_3(self, new_value):
addr = 0x51
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_1 as float;
"""
addr = 0x52
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_1,
@hidden_gyro_2_alignment2_1.setter
def hidden_gyro_2_alignment2_1(self, new_value):
addr = 0x52
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_2 as float;
"""
addr = 0x53
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_2,
@hidden_gyro_2_alignment2_2.setter
def hidden_gyro_2_alignment2_2(self, new_value):
addr = 0x53
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT2_3 as float;
"""
addr = 0x54
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_3,
@hidden_gyro_2_alignment2_3.setter
def hidden_gyro_2_alignment2_3(self, new_value):
addr = 0x54
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_1 as float;
"""
addr = 0x55
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_1,
@hidden_gyro_2_alignment3_1.setter
def hidden_gyro_2_alignment3_1(self, new_value):
addr = 0x55
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_2 as float;
"""
addr = 0x56
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_2,
@hidden_gyro_2_alignment3_2.setter
def hidden_gyro_2_alignment3_2(self, new_value):
addr = 0x56
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_ALIGNMENT3_3 as float;
"""
addr = 0x57
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_3,
@hidden_gyro_2_alignment3_3.setter
def hidden_gyro_2_alignment3_3(self, new_value):
addr = 0x57
self.write_register(addr, new_value, hidden=True)
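    # --- Accel 1 bias calibration block (hidden registers 0x58..0x63).
    # BIAS_<axis>_POW_0..3 read as the four coefficients of a third-order
    # correction polynomial per axis (inferred from the register names). ---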
@property
def hidden_c_accel_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_0 as float;
"""
addr = 0x58
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_0,
@hidden_c_accel_1_bias_x_pow_0.setter
def hidden_c_accel_1_bias_x_pow_0(self, new_value):
addr = 0x58
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_1 as float;
"""
addr = 0x59
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_1,
@hidden_c_accel_1_bias_x_pow_1.setter
def hidden_c_accel_1_bias_x_pow_1(self, new_value):
addr = 0x59
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_2 as float;
"""
addr = 0x5A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_2,
@hidden_c_accel_1_bias_x_pow_2.setter
def hidden_c_accel_1_bias_x_pow_2(self, new_value):
addr = 0x5A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_X_POW_3 as float;
"""
addr = 0x5B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_3,
@hidden_c_accel_1_bias_x_pow_3.setter
def hidden_c_accel_1_bias_x_pow_3(self, new_value):
addr = 0x5B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_0 as float;
"""
addr = 0x5C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_0,
@hidden_c_accel_1_bias_y_pow_0.setter
def hidden_c_accel_1_bias_y_pow_0(self, new_value):
addr = 0x5C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_1 as float;
"""
addr = 0x5D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_1,
@hidden_c_accel_1_bias_y_pow_1.setter
def hidden_c_accel_1_bias_y_pow_1(self, new_value):
addr = 0x5D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_2 as float;
"""
addr = 0x5E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_2,
@hidden_c_accel_1_bias_y_pow_2.setter
def hidden_c_accel_1_bias_y_pow_2(self, new_value):
addr = 0x5E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Y_POW_3 as float;
"""
addr = 0x5F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_3,
@hidden_c_accel_1_bias_y_pow_3.setter
def hidden_c_accel_1_bias_y_pow_3(self, new_value):
addr = 0x5F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_0 as float;
"""
addr = 0x60
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_0,
@hidden_c_accel_1_bias_z_pow_0.setter
def hidden_c_accel_1_bias_z_pow_0(self, new_value):
addr = 0x60
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_1 as float;
"""
addr = 0x61
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_1,
@hidden_c_accel_1_bias_z_pow_1.setter
def hidden_c_accel_1_bias_z_pow_1(self, new_value):
addr = 0x61
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_2 as float;
"""
addr = 0x62
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_2,
@hidden_c_accel_1_bias_z_pow_2.setter
def hidden_c_accel_1_bias_z_pow_2(self, new_value):
addr = 0x62
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_BIAS_Z_POW_3 as float;
"""
addr = 0x63
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_3,
@hidden_c_accel_1_bias_z_pow_3.setter
def hidden_c_accel_1_bias_z_pow_3(self, new_value):
addr = 0x63
self.write_register(addr, new_value, hidden=True)
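    # --- Accel 1 scale calibration block (hidden registers 0x64..0x6F),
    # laid out like the bias block above: POW_0..3 polynomial coefficients
    # per axis. ---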
@property
def hidden_c_accel_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_0 as float;
"""
addr = 0x64
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_0,
@hidden_c_accel_1_scale_x_pow_0.setter
def hidden_c_accel_1_scale_x_pow_0(self, new_value):
addr = 0x64
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_1 as float;
"""
addr = 0x65
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_1,
@hidden_c_accel_1_scale_x_pow_1.setter
def hidden_c_accel_1_scale_x_pow_1(self, new_value):
addr = 0x65
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_2 as float;
"""
addr = 0x66
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_2,
@hidden_c_accel_1_scale_x_pow_2.setter
def hidden_c_accel_1_scale_x_pow_2(self, new_value):
addr = 0x66
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_X_POW_3 as float;
"""
addr = 0x67
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_3,
@hidden_c_accel_1_scale_x_pow_3.setter
def hidden_c_accel_1_scale_x_pow_3(self, new_value):
addr = 0x67
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_0 as float;
"""
addr = 0x68
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_0,
@hidden_c_accel_1_scale_y_pow_0.setter
def hidden_c_accel_1_scale_y_pow_0(self, new_value):
addr = 0x68
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_1 as float;
"""
addr = 0x69
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_1,
@hidden_c_accel_1_scale_y_pow_1.setter
def hidden_c_accel_1_scale_y_pow_1(self, new_value):
addr = 0x69
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_2 as float;
"""
addr = 0x6A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_2,
@hidden_c_accel_1_scale_y_pow_2.setter
def hidden_c_accel_1_scale_y_pow_2(self, new_value):
addr = 0x6A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Y_POW_3 as float;
"""
addr = 0x6B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_3,
@hidden_c_accel_1_scale_y_pow_3.setter
def hidden_c_accel_1_scale_y_pow_3(self, new_value):
addr = 0x6B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_0 as float;
"""
addr = 0x6C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_0,
@hidden_c_accel_1_scale_z_pow_0.setter
def hidden_c_accel_1_scale_z_pow_0(self, new_value):
addr = 0x6C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_1 as float;
"""
addr = 0x6D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_1,
@hidden_c_accel_1_scale_z_pow_1.setter
def hidden_c_accel_1_scale_z_pow_1(self, new_value):
addr = 0x6D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_2 as float;
"""
addr = 0x6E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_2,
@hidden_c_accel_1_scale_z_pow_2.setter
def hidden_c_accel_1_scale_z_pow_2(self, new_value):
addr = 0x6E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_ACCEL_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_ACCEL_1_SCALE_Z_POW_3 as float;
"""
addr = 0x6F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_3,
@hidden_c_accel_1_scale_z_pow_3.setter
def hidden_c_accel_1_scale_z_pow_3(self, new_value):
addr = 0x6F
self.write_register(addr, new_value, hidden=True)
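    # --- Accel 1 alignment block (hidden registers 0x70..0x78): 3x3
    # mounting-alignment matrix, mirroring the gyro 2 alignment layout. ---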
@property
def hidden_accel_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_1 as float;
"""
addr = 0x70
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_1,
@hidden_accel_1_alignment1_1.setter
def hidden_accel_1_alignment1_1(self, new_value):
addr = 0x70
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_2 as float;
"""
addr = 0x71
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_2,
@hidden_accel_1_alignment1_2.setter
def hidden_accel_1_alignment1_2(self, new_value):
addr = 0x71
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT1_3 as float;
"""
addr = 0x72
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_3,
@hidden_accel_1_alignment1_3.setter
def hidden_accel_1_alignment1_3(self, new_value):
addr = 0x72
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_1 as float;
"""
addr = 0x73
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_1,
@hidden_accel_1_alignment2_1.setter
def hidden_accel_1_alignment2_1(self, new_value):
addr = 0x73
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_2 as float;
"""
addr = 0x74
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_2,
@hidden_accel_1_alignment2_2.setter
def hidden_accel_1_alignment2_2(self, new_value):
addr = 0x74
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT2_3 as float;
"""
addr = 0x75
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_3,
@hidden_accel_1_alignment2_3.setter
def hidden_accel_1_alignment2_3(self, new_value):
addr = 0x75
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_1 as float;
"""
addr = 0x76
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_1,
@hidden_accel_1_alignment3_1.setter
def hidden_accel_1_alignment3_1(self, new_value):
addr = 0x76
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_2 as float;
"""
addr = 0x77
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_2,
@hidden_accel_1_alignment3_2.setter
def hidden_accel_1_alignment3_2(self, new_value):
addr = 0x77
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_ALIGNMENT3_3 as float;
"""
addr = 0x78
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_3,
@hidden_accel_1_alignment3_3.setter
def hidden_accel_1_alignment3_3(self, new_value):
addr = 0x78
self.write_register(addr, new_value, hidden=True)
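    # --- Mag 1 bias calibration block (hidden registers 0x79..0x84):
    # POW_0..3 polynomial coefficients per axis, following the same scheme
    # as the accelerometer blocks. ---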
@property
def hidden_c_mag_1_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_0 as float;
"""
addr = 0x79
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_0,
@hidden_c_mag_1_bias_x_pow_0.setter
def hidden_c_mag_1_bias_x_pow_0(self, new_value):
addr = 0x79
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_1 as float;
"""
addr = 0x7A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_1,
@hidden_c_mag_1_bias_x_pow_1.setter
def hidden_c_mag_1_bias_x_pow_1(self, new_value):
addr = 0x7A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_2 as float;
"""
addr = 0x7B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_2,
@hidden_c_mag_1_bias_x_pow_2.setter
def hidden_c_mag_1_bias_x_pow_2(self, new_value):
addr = 0x7B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_X_POW_3 as float;
"""
addr = 0x7C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_3,
@hidden_c_mag_1_bias_x_pow_3.setter
def hidden_c_mag_1_bias_x_pow_3(self, new_value):
addr = 0x7C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_0 as float;
"""
addr = 0x7D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_0,
@hidden_c_mag_1_bias_y_pow_0.setter
def hidden_c_mag_1_bias_y_pow_0(self, new_value):
addr = 0x7D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_1 as float;
"""
addr = 0x7E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_1,
@hidden_c_mag_1_bias_y_pow_1.setter
def hidden_c_mag_1_bias_y_pow_1(self, new_value):
addr = 0x7E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_2 as float;
"""
addr = 0x7F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_2,
@hidden_c_mag_1_bias_y_pow_2.setter
def hidden_c_mag_1_bias_y_pow_2(self, new_value):
addr = 0x7F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Y_POW_3 as float;
"""
addr = 0x80
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_3,
@hidden_c_mag_1_bias_y_pow_3.setter
def hidden_c_mag_1_bias_y_pow_3(self, new_value):
addr = 0x80
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_0 as float;
"""
addr = 0x81
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_0,
@hidden_c_mag_1_bias_z_pow_0.setter
def hidden_c_mag_1_bias_z_pow_0(self, new_value):
addr = 0x81
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_1 as float;
"""
addr = 0x82
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_1,
@hidden_c_mag_1_bias_z_pow_1.setter
def hidden_c_mag_1_bias_z_pow_1(self, new_value):
addr = 0x82
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_2 as float;
"""
addr = 0x83
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_2,
@hidden_c_mag_1_bias_z_pow_2.setter
def hidden_c_mag_1_bias_z_pow_2(self, new_value):
addr = 0x83
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_BIAS_Z_POW_3 as float;
"""
addr = 0x84
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_3,
@hidden_c_mag_1_bias_z_pow_3.setter
def hidden_c_mag_1_bias_z_pow_3(self, new_value):
addr = 0x84
self.write_register(addr, new_value, hidden=True)
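    # --- Mag 1 scale calibration block (hidden registers 0x85..0x90). ---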
@property
def hidden_c_mag_1_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_0 as float;
"""
addr = 0x85
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_0,
@hidden_c_mag_1_scale_x_pow_0.setter
def hidden_c_mag_1_scale_x_pow_0(self, new_value):
addr = 0x85
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_1 as float;
"""
addr = 0x86
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_1,
@hidden_c_mag_1_scale_x_pow_1.setter
def hidden_c_mag_1_scale_x_pow_1(self, new_value):
addr = 0x86
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_2 as float;
"""
addr = 0x87
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_2,
@hidden_c_mag_1_scale_x_pow_2.setter
def hidden_c_mag_1_scale_x_pow_2(self, new_value):
addr = 0x87
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_X_POW_3 as float;
"""
addr = 0x88
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_3,
@hidden_c_mag_1_scale_x_pow_3.setter
def hidden_c_mag_1_scale_x_pow_3(self, new_value):
addr = 0x88
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_0 as float;
"""
addr = 0x89
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_0,
@hidden_c_mag_1_scale_y_pow_0.setter
def hidden_c_mag_1_scale_y_pow_0(self, new_value):
addr = 0x89
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_1 as float;
"""
addr = 0x8A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_1,
@hidden_c_mag_1_scale_y_pow_1.setter
def hidden_c_mag_1_scale_y_pow_1(self, new_value):
addr = 0x8A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_2 as float;
"""
addr = 0x8B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_2,
@hidden_c_mag_1_scale_y_pow_2.setter
def hidden_c_mag_1_scale_y_pow_2(self, new_value):
addr = 0x8B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Y_POW_3 as float;
"""
addr = 0x8C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_3,
@hidden_c_mag_1_scale_y_pow_3.setter
def hidden_c_mag_1_scale_y_pow_3(self, new_value):
addr = 0x8C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_0 as float;
"""
addr = 0x8D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_0,
@hidden_c_mag_1_scale_z_pow_0.setter
def hidden_c_mag_1_scale_z_pow_0(self, new_value):
addr = 0x8D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_1 as float;
"""
addr = 0x8E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_1,
@hidden_c_mag_1_scale_z_pow_1.setter
def hidden_c_mag_1_scale_z_pow_1(self, new_value):
addr = 0x8E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_2 as float;
"""
addr = 0x8F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_2,
@hidden_c_mag_1_scale_z_pow_2.setter
def hidden_c_mag_1_scale_z_pow_2(self, new_value):
addr = 0x8F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_1_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_1_SCALE_Z_POW_3 as float;
"""
addr = 0x90
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_3,
@hidden_c_mag_1_scale_z_pow_3.setter
def hidden_c_mag_1_scale_z_pow_3(self, new_value):
addr = 0x90
self.write_register(addr, new_value, hidden=True)
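    # --- Mag 1 alignment block (hidden registers 0x91..0x99): 3x3
    # mounting-alignment matrix. ---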
@property
def hidden_mag_1_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_1 as float;
"""
addr = 0x91
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_1,
@hidden_mag_1_alignment1_1.setter
def hidden_mag_1_alignment1_1(self, new_value):
addr = 0x91
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_2 as float;
"""
addr = 0x92
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_2,
@hidden_mag_1_alignment1_2.setter
def hidden_mag_1_alignment1_2(self, new_value):
addr = 0x92
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT1_3 as float;
"""
addr = 0x93
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_3,
@hidden_mag_1_alignment1_3.setter
def hidden_mag_1_alignment1_3(self, new_value):
addr = 0x93
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_1 as float;
"""
addr = 0x94
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_1,
@hidden_mag_1_alignment2_1.setter
def hidden_mag_1_alignment2_1(self, new_value):
addr = 0x94
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_2 as float;
"""
addr = 0x95
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_2,
@hidden_mag_1_alignment2_2.setter
def hidden_mag_1_alignment2_2(self, new_value):
addr = 0x95
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT2_3 as float;
"""
addr = 0x96
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_3,
@hidden_mag_1_alignment2_3.setter
def hidden_mag_1_alignment2_3(self, new_value):
addr = 0x96
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_1 as float;
"""
addr = 0x97
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_1,
@hidden_mag_1_alignment3_1.setter
def hidden_mag_1_alignment3_1(self, new_value):
addr = 0x97
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_2 as float;
"""
addr = 0x98
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_2,
@hidden_mag_1_alignment3_2.setter
def hidden_mag_1_alignment3_2(self, new_value):
addr = 0x98
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_ALIGNMENT3_3 as float;
"""
addr = 0x99
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_3,
@hidden_mag_1_alignment3_3.setter
def hidden_mag_1_alignment3_3(self, new_value):
addr = 0x99
self.write_register(addr, new_value, hidden=True)
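    # --- Mag 1 reference vector (hidden registers 0x9A..0x9C): X/Y/Z
    # components, presumably the expected magnetic-field reference
    # (interpretation inferred from the register names). ---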
@property
def hidden_mag_1_reference_x(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_X -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_X as float;
"""
addr = 0x9A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_x,
@hidden_mag_1_reference_x.setter
def hidden_mag_1_reference_x(self, new_value):
addr = 0x9A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_y(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_Y -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_Y as float;
"""
addr = 0x9B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_y,
@hidden_mag_1_reference_y.setter
def hidden_mag_1_reference_y(self, new_value):
addr = 0x9B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_z(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_REFERENCE_Z -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_REFERENCE_Z as float;
"""
addr = 0x9C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_z,
@hidden_mag_1_reference_z.setter
def hidden_mag_1_reference_z(self, new_value):
addr = 0x9C
self.write_register(addr, new_value, hidden=True)
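    # --- Mag 2 bias calibration block (starting at hidden register 0x9D),
    # repeating the per-axis POW_0..3 coefficient layout. ---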
@property
def hidden_c_mag_2_bias_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_0 as float;
"""
addr = 0x9D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_0,
@hidden_c_mag_2_bias_x_pow_0.setter
def hidden_c_mag_2_bias_x_pow_0(self, new_value):
addr = 0x9D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_1 as float;
"""
addr = 0x9E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_1,
@hidden_c_mag_2_bias_x_pow_1.setter
def hidden_c_mag_2_bias_x_pow_1(self, new_value):
addr = 0x9E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_2 as float;
"""
addr = 0x9F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_2,
@hidden_c_mag_2_bias_x_pow_2.setter
def hidden_c_mag_2_bias_x_pow_2(self, new_value):
addr = 0x9F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_X_POW_3 as float;
"""
addr = 0xA0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_3,
@hidden_c_mag_2_bias_x_pow_3.setter
def hidden_c_mag_2_bias_x_pow_3(self, new_value):
addr = 0xA0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_0 as float;
"""
addr = 0xA1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_0,
@hidden_c_mag_2_bias_y_pow_0.setter
def hidden_c_mag_2_bias_y_pow_0(self, new_value):
addr = 0xA1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_1 as float;
"""
addr = 0xA2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_1,
@hidden_c_mag_2_bias_y_pow_1.setter
def hidden_c_mag_2_bias_y_pow_1(self, new_value):
addr = 0xA2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_2 as float;
"""
addr = 0xA3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_2,
@hidden_c_mag_2_bias_y_pow_2.setter
def hidden_c_mag_2_bias_y_pow_2(self, new_value):
addr = 0xA3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Y_POW_3 as float;
"""
addr = 0xA4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_3,
@hidden_c_mag_2_bias_y_pow_3.setter
def hidden_c_mag_2_bias_y_pow_3(self, new_value):
addr = 0xA4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_0 as float;
"""
addr = 0xA5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_0,
@hidden_c_mag_2_bias_z_pow_0.setter
def hidden_c_mag_2_bias_z_pow_0(self, new_value):
addr = 0xA5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_1 as float;
"""
addr = 0xA6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_1,
@hidden_c_mag_2_bias_z_pow_1.setter
def hidden_c_mag_2_bias_z_pow_1(self, new_value):
addr = 0xA6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_2 as float;
"""
addr = 0xA7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_2,
@hidden_c_mag_2_bias_z_pow_2.setter
def hidden_c_mag_2_bias_z_pow_2(self, new_value):
addr = 0xA7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_BIAS_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_BIAS_Z_POW_3 as float;
"""
addr = 0xA8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_3,
@hidden_c_mag_2_bias_z_pow_3.setter
def hidden_c_mag_2_bias_z_pow_3(self, new_value):
addr = 0xA8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_0 as float;
"""
addr = 0xA9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_0,
@hidden_c_mag_2_scale_x_pow_0.setter
def hidden_c_mag_2_scale_x_pow_0(self, new_value):
addr = 0xA9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_1 as float;
"""
addr = 0xAA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_1,
@hidden_c_mag_2_scale_x_pow_1.setter
def hidden_c_mag_2_scale_x_pow_1(self, new_value):
addr = 0xAA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_2 as float;
"""
addr = 0xAB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_2,
@hidden_c_mag_2_scale_x_pow_2.setter
def hidden_c_mag_2_scale_x_pow_2(self, new_value):
addr = 0xAB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_X_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_X_POW_3 as float;
"""
addr = 0xAC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_3,
@hidden_c_mag_2_scale_x_pow_3.setter
def hidden_c_mag_2_scale_x_pow_3(self, new_value):
addr = 0xAC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_0 as float;
"""
addr = 0xAD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_0,
@hidden_c_mag_2_scale_y_pow_0.setter
def hidden_c_mag_2_scale_y_pow_0(self, new_value):
addr = 0xAD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_1 as float;
"""
addr = 0xAE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_1,
@hidden_c_mag_2_scale_y_pow_1.setter
def hidden_c_mag_2_scale_y_pow_1(self, new_value):
addr = 0xAE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_2 as float;
"""
addr = 0xAF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_2,
@hidden_c_mag_2_scale_y_pow_2.setter
def hidden_c_mag_2_scale_y_pow_2(self, new_value):
addr = 0xAF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Y_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Y_POW_3 as float;
"""
addr = 0xB0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_3,
@hidden_c_mag_2_scale_y_pow_3.setter
def hidden_c_mag_2_scale_y_pow_3(self, new_value):
addr = 0xB0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_0(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_0 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_0 as float;
"""
addr = 0xB1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_0,
@hidden_c_mag_2_scale_z_pow_0.setter
def hidden_c_mag_2_scale_z_pow_0(self, new_value):
addr = 0xB1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_1 as float;
"""
addr = 0xB2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_1,
@hidden_c_mag_2_scale_z_pow_1.setter
def hidden_c_mag_2_scale_z_pow_1(self, new_value):
addr = 0xB2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_2 as float;
"""
addr = 0xB3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_2,
@hidden_c_mag_2_scale_z_pow_2.setter
def hidden_c_mag_2_scale_z_pow_2(self, new_value):
addr = 0xB3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_C_MAG_2_SCALE_Z_POW_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_C_MAG_2_SCALE_Z_POW_3 as float;
"""
addr = 0xB4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_3,
@hidden_c_mag_2_scale_z_pow_3.setter
def hidden_c_mag_2_scale_z_pow_3(self, new_value):
addr = 0xB4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_1 as float;
"""
addr = 0xB5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_1,
@hidden_mag_2_alignment1_1.setter
def hidden_mag_2_alignment1_1(self, new_value):
addr = 0xB5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_2 as float;
"""
addr = 0xB6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_2,
@hidden_mag_2_alignment1_2.setter
def hidden_mag_2_alignment1_2(self, new_value):
addr = 0xB6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT1_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT1_3 as float;
"""
addr = 0xB7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_3,
@hidden_mag_2_alignment1_3.setter
def hidden_mag_2_alignment1_3(self, new_value):
addr = 0xB7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_1 as float;
"""
addr = 0xB8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_1,
@hidden_mag_2_alignment2_1.setter
def hidden_mag_2_alignment2_1(self, new_value):
addr = 0xB8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_2 as float;
"""
addr = 0xB9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_2,
@hidden_mag_2_alignment2_2.setter
def hidden_mag_2_alignment2_2(self, new_value):
addr = 0xB9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT2_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT2_3 as float;
"""
addr = 0xBA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_3,
@hidden_mag_2_alignment2_3.setter
def hidden_mag_2_alignment2_3(self, new_value):
addr = 0xBA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_1(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_1 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_1 as float;
"""
addr = 0xBB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_1,
@hidden_mag_2_alignment3_1.setter
def hidden_mag_2_alignment3_1(self, new_value):
addr = 0xBB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_2(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_2 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_2 as float;
"""
addr = 0xBC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_2,
@hidden_mag_2_alignment3_2.setter
def hidden_mag_2_alignment3_2(self, new_value):
addr = 0xBC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_3(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_ALIGNMENT3_3 -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_ALIGNMENT3_3 as float;
"""
addr = 0xBD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_3,
@hidden_mag_2_alignment3_3.setter
def hidden_mag_2_alignment3_3(self, new_value):
addr = 0xBD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_x(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_X -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_X as float;
"""
addr = 0xBE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_x,
@hidden_mag_2_reference_x.setter
def hidden_mag_2_reference_x(self, new_value):
addr = 0xBE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_y(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_Y -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_Y as float;
"""
addr = 0xBF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_y,
@hidden_mag_2_reference_y.setter
def hidden_mag_2_reference_y(self, new_value):
addr = 0xBF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_z(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_REFERENCE_Z -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_REFERENCE_Z as float;
"""
addr = 0xC0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_z,
@hidden_mag_2_reference_z.setter
def hidden_mag_2_reference_z(self, new_value):
addr = 0xC0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_1_CONVERSION as float;
"""
addr = 0xC1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_conversion,
@property
def hidden_gyro_2_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_GYRO_2_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_GYRO_2_CONVERSION as float;
"""
addr = 0xC2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_conversion,
@property
def hidden_accel_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_ACCEL_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_ACCEL_1_CONVERSION as float;
"""
addr = 0xC3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_conversion,
@property
def hidden_mag_1_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_1_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_1_CONVERSION as float;
"""
addr = 0xC4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_conversion,
@property
def hidden_mag_2_conversion(self):
"""
TODO: add description
Payload structure:
[31:0] : HIDDEN_MAG_2_CONVERSION -- 32-bit IEEE 754 Floating Point Value
:return: HIDDEN_MAG_2_CONVERSION as float;
"""
addr = 0xC5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_conversion,
if __name__ == '__main__':
pass
import logging
import os.path
import struct
from abc import abstractmethod, ABC
from typing import Union, Tuple
from .rsl_xml_svd.rsl_svd_parser import RslSvdParser
class ShearWaterRegisters(ABC):
    """Register map for the Shearwater device, generated from its SVD description.

    Each register is exposed as a property: the getter reads the device,
    stores the raw 32-bit word on the parsed SVD register object, and returns
    that object together with the decoded field values (falling through to
    ``None`` if the read fails); the setter writes the new value back.
    """

    def __init__(self, **kwargs):
        self.svd_parser = RslSvdParser(svd_file=ShearWaterRegisters.find_svd('shearwater.svd'))
    @staticmethod
    def find_svd(svd_file_name: str):
        """Walk the package tree for the named SVD file; return its path, or None if not found."""
        parent_dir = os.path.join(os.path.dirname(__file__), os.pardir)
        for root, dirs, files in os.walk(parent_dir):
            if svd_file_name in files:
                return os.path.join(root, svd_file_name)
    @abstractmethod
    def connect(self, *args, **kwargs):
        """Open the transport to the device; implemented by concrete subclasses."""

    @abstractmethod
    def read_register(self, reg_addr: int, **kw) -> Tuple[bool, bytes]:
        """Read one register; return (success, payload) where payload is 4 big-endian bytes."""

    @abstractmethod
    def write_register(self, reg_addr: int, reg_value: Union[int, bytes, float, str], **kw):
        """Write one register value to the device."""
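    # Minimal usage sketch (the transport subclass and port name below are
    # illustrative assumptions, not part of this module):
    #
    #     class ShearWaterSerial(ShearWaterRegisters):
    #         def connect(self, port='/dev/ttyUSB0'): ...
    #         def read_register(self, reg_addr, **kw): ...
    #         def write_register(self, reg_addr, reg_value, **kw): ...
    #
    #     dev = ShearWaterSerial()
    #     dev.connect()
    #     reg, baud_rate = dev.creg_com_settings   # decoded read
    #     dev.creg_gyro_1_trim_x = 0.0             # write raw value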
@property
def creg_com_settings(self):
addr = 0x00
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
baud_rate_val = (reg.raw_value >> 28) & 0x000F
baud_rate_enum = reg.find_field_by(name='BAUD_RATE').find_enum_entry_by(value=baud_rate_val)
return reg, baud_rate_enum
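            # Field-decode pattern used by all bit-field registers: shift the
            # raw 32-bit word so the field's LSB lands at bit 0, then mask to
            # the field width; BAUD_RATE sits in bits [31:28], hence
            # (raw_value >> 28) & 0x000F.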
@creg_com_settings.setter
def creg_com_settings(self, new_value):
addr = 0x00
self.write_register(addr, new_value)
@property
def creg_com_rates1(self):
addr = 0x01
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, raw_accel_1_rate, raw_gyro_1_rate, raw_gyro_2_rate, raw_mag_1_rate
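        # The same 4-byte payload is decoded twice: once as a big-endian
        # 32-bit word for reg.raw_value, then as four unsigned bytes, one
        # per individual rate field.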
@creg_com_rates1.setter
def creg_com_rates1(self, new_value):
addr = 0x01
self.write_register(addr, new_value)
@property
def creg_com_rates2(self):
addr = 0x02
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
temp_rate, raw_mag_2_rate, all_raw_rate = struct.unpack('>BBxB', payload[0:4])
return reg, temp_rate, raw_mag_2_rate, all_raw_rate
@creg_com_rates2.setter
def creg_com_rates2(self, new_value):
addr = 0x02
self.write_register(addr, new_value)
@property
def creg_com_rates3(self):
addr = 0x03
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES3')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate = struct.unpack('>BBBB', payload[0:4])
return reg, proc_accel_1_rate, proc_gyro_1_rate, proc_gyro_2_rate, proc_mag_1_rate
@creg_com_rates3.setter
def creg_com_rates3(self, new_value):
addr = 0x03
self.write_register(addr, new_value)
@property
def creg_com_rates4(self):
addr = 0x04
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES4')
reg.raw_value, = struct.unpack('>I', payload[0:4])
proc_mag_2_rate, all_proc_rate = struct.unpack('>BxxB', payload[0:4])
return reg, proc_mag_2_rate, all_proc_rate
@creg_com_rates4.setter
def creg_com_rates4(self, new_value):
addr = 0x04
self.write_register(addr, new_value)
@property
def creg_com_rates5(self):
addr = 0x05
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES5')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_rate, euler_rate, position_rate, velocity_rate = struct.unpack('>BBBB', payload[0:4])
return reg, quat_rate, euler_rate, position_rate, velocity_rate
@creg_com_rates5.setter
def creg_com_rates5(self, new_value):
addr = 0x05
self.write_register(addr, new_value)
@property
def creg_com_rates6(self):
addr = 0x06
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES6')
reg.raw_value, = struct.unpack('>I', payload[0:4])
pose_rate, gyro_bias_1_rate, gyro_bias_2_rate = struct.unpack('>BxBB', payload[0:4])
            health_rate_val = (reg.raw_value >> 16) & 0x000F
            health_rate_enum = reg.find_field_by(name='HEALTH_RATE').find_enum_entry_by(value=health_rate_val)
            return reg, pose_rate, gyro_bias_1_rate, gyro_bias_2_rate, health_rate_enum
@creg_com_rates6.setter
def creg_com_rates6(self, new_value):
addr = 0x06
self.write_register(addr, new_value)
@property
def creg_com_rates7(self):
addr = 0x07
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_COM_RATES7')
reg.raw_value, = struct.unpack('>I', payload[0:4])
nmea_health_rate_val = (reg.raw_value >> 28) & 0x000F
nmea_health_rate_enum = reg.find_field_by(name='NMEA_HEALTH_RATE').find_enum_entry_by(value=nmea_health_rate_val)
nmea_pose_rate_val = (reg.raw_value >> 24) & 0x000F
nmea_pose_rate_enum = reg.find_field_by(name='NMEA_POSE_RATE').find_enum_entry_by(value=nmea_pose_rate_val)
nmea_attitude_rate_val = (reg.raw_value >> 20) & 0x000F
nmea_attitude_rate_enum = reg.find_field_by(name='NMEA_ATTITUDE_RATE').find_enum_entry_by(value=nmea_attitude_rate_val)
nmea_sensor_rate_val = (reg.raw_value >> 16) & 0x000F
nmea_sensor_rate_enum = reg.find_field_by(name='NMEA_SENSOR_RATE').find_enum_entry_by(value=nmea_sensor_rate_val)
nmea_rates_rate_val = (reg.raw_value >> 12) & 0x000F
nmea_rates_rate_enum = reg.find_field_by(name='NMEA_RATES_RATE').find_enum_entry_by(value=nmea_rates_rate_val)
nmea_gps_pose_rate_val = (reg.raw_value >> 8) & 0x000F
nmea_gps_pose_rate_enum = reg.find_field_by(name='NMEA_GPS_POSE_RATE').find_enum_entry_by(value=nmea_gps_pose_rate_val)
nmea_quat_rate_val = (reg.raw_value >> 4) & 0x000F
nmea_quat_rate_enum = reg.find_field_by(name='NMEA_QUAT_RATE').find_enum_entry_by(value=nmea_quat_rate_val)
return reg, nmea_health_rate_enum, nmea_pose_rate_enum, nmea_attitude_rate_enum, nmea_sensor_rate_enum, nmea_rates_rate_enum, nmea_gps_pose_rate_enum, nmea_quat_rate_enum
@creg_com_rates7.setter
def creg_com_rates7(self, new_value):
addr = 0x07
self.write_register(addr, new_value)
@property
def creg_misc_settings(self):
addr = 0x08
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MISC_SETTINGS')
reg.raw_value, = struct.unpack('>I', payload[0:4])
pps_val = (reg.raw_value >> 8) & 0x0001
pps_enum = reg.find_field_by(name='PPS').find_enum_entry_by(value=pps_val)
zg_val = (reg.raw_value >> 3) & 0x0001
zg_enum = reg.find_field_by(name='ZG').find_enum_entry_by(value=zg_val)
q_val = (reg.raw_value >> 2) & 0x0001
q_enum = reg.find_field_by(name='Q').find_enum_entry_by(value=q_val)
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, pps_enum, zg_enum, q_enum, mag1_enum, mag2_enum
@creg_misc_settings.setter
def creg_misc_settings(self, new_value):
addr = 0x08
self.write_register(addr, new_value)
@property
def creg_gyro_1_meas_range(self):
addr = 0x09
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
meas_gyro1_val = (reg.raw_value >> 0) & 0x0003
meas_gyro1_enum = reg.find_field_by(name='MEAS_GYRO1').find_enum_entry_by(value=meas_gyro1_val)
return reg, meas_gyro1_enum
@creg_gyro_1_meas_range.setter
def creg_gyro_1_meas_range(self, new_value):
addr = 0x09
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_x(self):
addr = 0x0A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_x,
@creg_gyro_1_trim_x.setter
def creg_gyro_1_trim_x(self, new_value):
addr = 0x0A
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_y(self):
addr = 0x0B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_y,
@creg_gyro_1_trim_y.setter
def creg_gyro_1_trim_y(self, new_value):
addr = 0x0B
self.write_register(addr, new_value)
@property
def creg_gyro_1_trim_z(self):
addr = 0x0C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_1_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_trim_z,
@creg_gyro_1_trim_z.setter
def creg_gyro_1_trim_z(self, new_value):
addr = 0x0C
self.write_register(addr, new_value)
@property
def creg_gyro_2_meas_range(self):
addr = 0x0D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
meas_gyro2_val = (reg.raw_value >> 0) & 0x0003
meas_gyro2_enum = reg.find_field_by(name='MEAS_GYRO2').find_enum_entry_by(value=meas_gyro2_val)
return reg, meas_gyro2_enum
@creg_gyro_2_meas_range.setter
def creg_gyro_2_meas_range(self, new_value):
addr = 0x0D
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_x(self):
addr = 0x0E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_x,
@creg_gyro_2_trim_x.setter
def creg_gyro_2_trim_x(self, new_value):
addr = 0x0E
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_y(self):
addr = 0x0F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_y,
@creg_gyro_2_trim_y.setter
def creg_gyro_2_trim_y(self, new_value):
addr = 0x0F
self.write_register(addr, new_value)
@property
def creg_gyro_2_trim_z(self):
addr = 0x10
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_GYRO_2_TRIM_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_trim_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_trim_z,
@creg_gyro_2_trim_z.setter
def creg_gyro_2_trim_z(self, new_value):
addr = 0x10
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_1(self):
addr = 0x11
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_1,
@creg_mag_1_cal1_1.setter
def creg_mag_1_cal1_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_2(self):
addr = 0x12
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_2,
@creg_mag_1_cal1_2.setter
def creg_mag_1_cal1_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value)
@property
def creg_mag_1_cal1_3(self):
addr = 0x13
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal1_3,
@creg_mag_1_cal1_3.setter
def creg_mag_1_cal1_3(self, new_value):
addr = 0x13
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_1(self):
addr = 0x14
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_1,
@creg_mag_1_cal2_1.setter
def creg_mag_1_cal2_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_2(self):
addr = 0x15
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_2,
@creg_mag_1_cal2_2.setter
def creg_mag_1_cal2_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value)
@property
def creg_mag_1_cal2_3(self):
addr = 0x16
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal2_3,
@creg_mag_1_cal2_3.setter
def creg_mag_1_cal2_3(self, new_value):
addr = 0x16
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_1(self):
addr = 0x17
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_1,
@creg_mag_1_cal3_1.setter
def creg_mag_1_cal3_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_2(self):
addr = 0x18
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_2,
@creg_mag_1_cal3_2.setter
def creg_mag_1_cal3_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value)
@property
def creg_mag_1_cal3_3(self):
addr = 0x19
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_1_cal3_3,
@creg_mag_1_cal3_3.setter
def creg_mag_1_cal3_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_x(self):
addr = 0x1A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_x,
@creg_mag_1_bias_x.setter
def creg_mag_1_bias_x(self, new_value):
addr = 0x1A
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_y(self):
addr = 0x1B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_y,
@creg_mag_1_bias_y.setter
def creg_mag_1_bias_y(self, new_value):
addr = 0x1B
self.write_register(addr, new_value)
@property
def creg_mag_1_bias_z(self):
addr = 0x1C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_1_bias_z,
@creg_mag_1_bias_z.setter
def creg_mag_1_bias_z(self, new_value):
addr = 0x1C
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_1(self):
addr = 0x1D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_1,
@creg_mag_2_cal1_1.setter
def creg_mag_2_cal1_1(self, new_value):
addr = 0x1D
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_2(self):
addr = 0x1E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_2,
@creg_mag_2_cal1_2.setter
def creg_mag_2_cal1_2(self, new_value):
addr = 0x1E
self.write_register(addr, new_value)
@property
def creg_mag_2_cal1_3(self):
addr = 0x1F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal1_3,
@creg_mag_2_cal1_3.setter
def creg_mag_2_cal1_3(self, new_value):
addr = 0x1F
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_1(self):
addr = 0x20
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_1,
@creg_mag_2_cal2_1.setter
def creg_mag_2_cal2_1(self, new_value):
addr = 0x20
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_2(self):
addr = 0x21
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_2,
@creg_mag_2_cal2_2.setter
def creg_mag_2_cal2_2(self, new_value):
addr = 0x21
self.write_register(addr, new_value)
@property
def creg_mag_2_cal2_3(self):
addr = 0x22
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal2_3,
@creg_mag_2_cal2_3.setter
def creg_mag_2_cal2_3(self, new_value):
addr = 0x22
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_1(self):
addr = 0x23
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_1,
@creg_mag_2_cal3_1.setter
def creg_mag_2_cal3_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_2(self):
addr = 0x24
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_2,
@creg_mag_2_cal3_2.setter
def creg_mag_2_cal3_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value)
@property
def creg_mag_2_cal3_3(self):
addr = 0x25
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, mag_2_cal3_3,
@creg_mag_2_cal3_3.setter
def creg_mag_2_cal3_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_x(self):
addr = 0x26
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_x, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_x,
@creg_mag_2_bias_x.setter
def creg_mag_2_bias_x(self, new_value):
addr = 0x26
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_y(self):
addr = 0x27
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_y, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_y,
@creg_mag_2_bias_y.setter
def creg_mag_2_bias_y(self, new_value):
addr = 0x27
self.write_register(addr, new_value)
@property
def creg_mag_2_bias_z(self):
addr = 0x28
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_MAG_2_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_bias_z, = struct.unpack('>f', payload[0:4])
return reg, mag_2_bias_z,
@creg_mag_2_bias_z.setter
def creg_mag_2_bias_z(self, new_value):
addr = 0x28
self.write_register(addr, new_value)
@property
def creg_accel_1_meas_range(self):
addr = 0x29
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_MEAS_RANGE')
reg.raw_value, = struct.unpack('>I', payload[0:4])
meas_acc1_val = (reg.raw_value >> 0) & 0x0003
meas_acc1_enum = reg.find_field_by(name='MEAS_ACC1').find_enum_entry_by(value=meas_acc1_val)
return reg, meas_acc1_enum
@creg_accel_1_meas_range.setter
def creg_accel_1_meas_range(self, new_value):
addr = 0x29
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_1(self):
addr = 0x2A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_1,
@creg_accel_1_cal1_1.setter
def creg_accel_1_cal1_1(self, new_value):
addr = 0x2A
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_2(self):
addr = 0x2B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_2,
@creg_accel_1_cal1_2.setter
def creg_accel_1_cal1_2(self, new_value):
addr = 0x2B
self.write_register(addr, new_value)
@property
def creg_accel_1_cal1_3(self):
addr = 0x2C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal1_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal1_3,
@creg_accel_1_cal1_3.setter
def creg_accel_1_cal1_3(self, new_value):
addr = 0x2C
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_1(self):
addr = 0x2D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_1,
@creg_accel_1_cal2_1.setter
def creg_accel_1_cal2_1(self, new_value):
addr = 0x2D
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_2(self):
addr = 0x2E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_2,
@creg_accel_1_cal2_2.setter
def creg_accel_1_cal2_2(self, new_value):
addr = 0x2E
self.write_register(addr, new_value)
@property
def creg_accel_1_cal2_3(self):
addr = 0x2F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal2_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal2_3,
@creg_accel_1_cal2_3.setter
def creg_accel_1_cal2_3(self, new_value):
addr = 0x2F
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_1(self):
addr = 0x30
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_1, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_1,
@creg_accel_1_cal3_1.setter
def creg_accel_1_cal3_1(self, new_value):
addr = 0x30
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_2(self):
addr = 0x31
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_2, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_2,
@creg_accel_1_cal3_2.setter
def creg_accel_1_cal3_2(self, new_value):
addr = 0x31
self.write_register(addr, new_value)
@property
def creg_accel_1_cal3_3(self):
addr = 0x32
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_CAL3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_cal3_3, = struct.unpack('>f', payload[0:4])
return reg, accel_1_cal3_3,
@creg_accel_1_cal3_3.setter
def creg_accel_1_cal3_3(self, new_value):
addr = 0x32
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_x(self):
addr = 0x33
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_x,
@creg_accel_1_bias_x.setter
def creg_accel_1_bias_x(self, new_value):
addr = 0x33
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_y(self):
addr = 0x34
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_y,
@creg_accel_1_bias_y.setter
def creg_accel_1_bias_y(self, new_value):
addr = 0x34
self.write_register(addr, new_value)
@property
def creg_accel_1_bias_z(self):
addr = 0x35
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='CREG_ACCEL_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, accel_1_bias_z,
@creg_accel_1_bias_z.setter
def creg_accel_1_bias_z(self, new_value):
addr = 0x35
self.write_register(addr, new_value)
@property
def dreg_health(self):
addr = 0x55
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_HEALTH')
reg.raw_value, = struct.unpack('>I', payload[0:4])
ovf_val = (reg.raw_value >> 8) & 0x0001
ovf_enum = reg.find_field_by(name='OVF').find_enum_entry_by(value=ovf_val)
acc1_n_val = (reg.raw_value >> 7) & 0x0001
acc1_n_enum = reg.find_field_by(name='ACC1_N').find_enum_entry_by(value=acc1_n_val)
mag1_n_val = (reg.raw_value >> 6) & 0x0001
mag1_n_enum = reg.find_field_by(name='MAG1_N').find_enum_entry_by(value=mag1_n_val)
mag2_n_val = (reg.raw_value >> 5) & 0x0001
mag2_n_enum = reg.find_field_by(name='MAG2_N').find_enum_entry_by(value=mag2_n_val)
accel1_val = (reg.raw_value >> 4) & 0x0001
accel1_enum = reg.find_field_by(name='ACCEL1').find_enum_entry_by(value=accel1_val)
gyro1_val = (reg.raw_value >> 3) & 0x0001
gyro1_enum = reg.find_field_by(name='GYRO1').find_enum_entry_by(value=gyro1_val)
gyro2_val = (reg.raw_value >> 2) & 0x0001
gyro2_enum = reg.find_field_by(name='GYRO2').find_enum_entry_by(value=gyro2_val)
mag1_val = (reg.raw_value >> 1) & 0x0001
mag1_enum = reg.find_field_by(name='MAG1').find_enum_entry_by(value=mag1_val)
mag2_val = (reg.raw_value >> 0) & 0x0001
mag2_enum = reg.find_field_by(name='MAG2').find_enum_entry_by(value=mag2_val)
return reg, ovf_enum, acc1_n_enum, mag1_n_enum, mag2_n_enum, accel1_enum, gyro1_enum, gyro2_enum, mag1_enum, mag2_enum
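    # Worked example: a raw DREG_HEALTH value of 0x00000102 has bits 8 and 1
    # set, so OVF (overflow) and MAG1 (magnetometer 1 fault) both decode to 1.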
@property
def dreg_gyro_1_raw_xy(self):
addr = 0x56
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
gyro_1_raw_x, gyro_1_raw_y = struct.unpack('>hh', payload[0:4])
return reg, gyro_1_raw_x, gyro_1_raw_y
@property
def dreg_gyro_1_raw_z(self):
addr = 0x57
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
gyro_1_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, gyro_1_raw_z,
@property
def dreg_gyro_1_raw_time(self):
addr = 0x58
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_raw_time,
@property
def dreg_gyro_2_raw_xy(self):
addr = 0x59
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
gyro_2_raw_x, gyro_2_raw_y = struct.unpack('>hh', payload[0:4])
return reg, gyro_2_raw_x, gyro_2_raw_y
@property
def dreg_gyro_2_raw_z(self):
addr = 0x5A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
gyro_2_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, gyro_2_raw_z,
@property
def dreg_gyro_2_raw_time(self):
addr = 0x5B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_raw_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_raw_time,
@property
def dreg_accel_1_raw_xy(self):
addr = 0x5C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
accel_1_raw_x, accel_1_raw_y = struct.unpack('>hh', payload[0:4])
return reg, accel_1_raw_x, accel_1_raw_y
@property
def dreg_accel_1_raw_z(self):
addr = 0x5D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
accel_1_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, accel_1_raw_z,
@property
def dreg_accel_1_raw_time(self):
addr = 0x5E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, accel_1_raw_time,
@property
def dreg_mag_1_raw_x(self):
addr = 0x5F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_X')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_x, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_x,
@property
def dreg_mag_1_raw_y(self):
addr = 0x60
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Y')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_y, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_y,
@property
def dreg_mag_1_raw_z(self):
addr = 0x61
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_Z')
reg.raw_value, = struct.unpack('>i', payload[0:4])
mag_1_raw_z, = struct.unpack('>i', payload[0:4])
return reg, mag_1_raw_z,
@property
def dreg_mag_1_raw_time(self):
addr = 0x62
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_1_raw_time,
@property
def dreg_mag_2_raw_xy(self):
addr = 0x63
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_XY')
reg.raw_value, = struct.unpack('>I', payload[0:4])
mag_2_raw_x, mag_2_raw_y = struct.unpack('>hh', payload[0:4])
return reg, mag_2_raw_x, mag_2_raw_y
@property
def dreg_mag_2_raw_z(self):
addr = 0x64
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_Z')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
mag_2_raw_z, = struct.unpack('>hxx', payload[0:4])
return reg, mag_2_raw_z,
@property
def dreg_mag_2_raw_time(self):
addr = 0x65
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_RAW_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_raw_time, = struct.unpack('>f', payload[0:4])
return reg, mag_2_raw_time,
@property
def dreg_temperature(self):
addr = 0x66
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature, = struct.unpack('>f', payload[0:4])
return reg, temperature,
@property
def dreg_temperature_time(self):
addr = 0x67
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_TEMPERATURE_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
temperature_time, = struct.unpack('>f', payload[0:4])
return reg, temperature_time,
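    # Registers 0x68 onward expose processed (calibrated) sensor data as one
    # big-endian float32 per axis, plus a *_PROC_TIME stamp per sensor. The
    # physical units are not encoded here; presumably they follow the device
    # datasheet (e.g. rates in deg/s or rad/s) -- an assumption, not something
    # this file defines.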
@property
def dreg_gyro_1_proc_x(self):
addr = 0x68
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_x,
@property
def dreg_gyro_1_proc_y(self):
addr = 0x69
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_y,
@property
def dreg_gyro_1_proc_z(self):
addr = 0x6A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_z,
@property
def dreg_gyro_1_proc_time(self):
addr = 0x6B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_proc_time,
@property
def dreg_gyro_2_proc_x(self):
addr = 0x6C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_x,
@property
def dreg_gyro_2_proc_y(self):
addr = 0x6D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_y,
@property
def dreg_gyro_2_proc_z(self):
addr = 0x6E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_z,
@property
def dreg_gyro_2_proc_time(self):
addr = 0x6F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_proc_time, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_proc_time,
@property
def dreg_accel_1_proc_x(self):
addr = 0x70
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_x,
@property
def dreg_accel_1_proc_y(self):
addr = 0x71
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_y,
@property
def dreg_accel_1_proc_z(self):
addr = 0x72
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_z,
@property
def dreg_accel_1_proc_time(self):
addr = 0x73
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_ACCEL_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
accel_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, accel_1_proc_time,
@property
def dreg_mag_1_proc_x(self):
addr = 0x74
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_x, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_x,
@property
def dreg_mag_1_proc_y(self):
addr = 0x75
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_y, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_y,
@property
def dreg_mag_1_proc_z(self):
addr = 0x76
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_z, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_z,
@property
def dreg_mag_1_norm(self):
addr = 0x77
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_NORM')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_norm, = struct.unpack('>f', payload[0:4])
return reg, mag_1_norm,
@property
def dreg_mag_1_proc_time(self):
addr = 0x78
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_1_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_1_proc_time, = struct.unpack('>f', payload[0:4])
return reg, mag_1_proc_time,
@property
def dreg_mag_2_proc_x(self):
addr = 0x79
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_x, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_x,
@property
def dreg_mag_2_proc_y(self):
addr = 0x7A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_y, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_y,
@property
def dreg_mag_2_proc_z(self):
addr = 0x7B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_z, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_z,
@property
def dreg_mag_2_norm(self):
addr = 0x7C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_NORM')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_norm, = struct.unpack('>f', payload[0:4])
return reg, mag_2_norm,
@property
def dreg_mag_2_proc_time(self):
addr = 0x7D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_MAG_2_PROC_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
mag_2_proc_time, = struct.unpack('>f', payload[0:4])
return reg, mag_2_proc_time,
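    # Attitude outputs: quaternion components (a, b, c, d) and Euler angles
    # (phi, theta, psi) plus their rates are fixed-point int16 values, packed
    # two per register where the names are paired. Converting them to floating
    # point requires the scale factor from the device datasheet; that factor
    # is an external assumption and is not applied here.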
@property
def dreg_quat_ab(self):
addr = 0x7E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_AB')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_a, quat_b = struct.unpack('>hh', payload[0:4])
return reg, quat_a, quat_b
@property
def dreg_quat_cd(self):
addr = 0x7F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_CD')
reg.raw_value, = struct.unpack('>I', payload[0:4])
quat_c, quat_d = struct.unpack('>hh', payload[0:4])
return reg, quat_c, quat_d
@property
def dreg_quat_time(self):
addr = 0x80
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_QUAT_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
quat_time, = struct.unpack('>f', payload[0:4])
return reg, quat_time,
@property
def dreg_euler_phi_theta(self):
addr = 0x81
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi, theta = struct.unpack('>hh', payload[0:4])
return reg, phi, theta
@property
def dreg_euler_psi(self):
addr = 0x82
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi, = struct.unpack('>hxx', payload[0:4])
return reg, psi,
@property
def dreg_euler_phi_theta_dot(self):
addr = 0x83
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PHI_THETA_DOT')
reg.raw_value, = struct.unpack('>I', payload[0:4])
phi_dot, theta_dot = struct.unpack('>hh', payload[0:4])
return reg, phi_dot, theta_dot
@property
def dreg_euler_psi_dot(self):
addr = 0x84
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_PSI_DOT')
reg.raw_value, = struct.unpack('>hxx', payload[0:4])
psi_dot, = struct.unpack('>hxx', payload[0:4])
return reg, psi_dot,
@property
def dreg_euler_time(self):
addr = 0x85
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_EULER_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
euler_time, = struct.unpack('>f', payload[0:4])
return reg, euler_time,
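    # Position and velocity estimates are float32 values in a North/East/Up
    # frame, each group with its own *_TIME stamp. Units are presumably meters
    # and m/s per the datasheet (an assumption; nothing in this file fixes
    # them).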
@property
def dreg_position_north(self):
addr = 0x86
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_north, = struct.unpack('>f', payload[0:4])
return reg, position_north,
@property
def dreg_position_east(self):
addr = 0x87
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_east, = struct.unpack('>f', payload[0:4])
return reg, position_east,
@property
def dreg_position_up(self):
addr = 0x88
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_up, = struct.unpack('>f', payload[0:4])
return reg, position_up,
@property
def dreg_position_time(self):
addr = 0x89
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_POSITION_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
position_time, = struct.unpack('>f', payload[0:4])
return reg, position_time,
@property
def dreg_velocity_north(self):
addr = 0x8A
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_NORTH')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_north, = struct.unpack('>f', payload[0:4])
return reg, velocity_north,
@property
def dreg_velocity_east(self):
addr = 0x8B
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_EAST')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_east, = struct.unpack('>f', payload[0:4])
return reg, velocity_east,
@property
def dreg_velocity_up(self):
addr = 0x8C
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_UP')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_up, = struct.unpack('>f', payload[0:4])
return reg, velocity_up,
@property
def dreg_velocity_time(self):
addr = 0x8D
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_VELOCITY_TIME')
reg.raw_value, = struct.unpack('>f', payload[0:4])
velocity_time, = struct.unpack('>f', payload[0:4])
return reg, velocity_time,
@property
def dreg_gyro_1_bias_x(self):
addr = 0x8E
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_x,
@property
def dreg_gyro_1_bias_y(self):
addr = 0x8F
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_y,
@property
def dreg_gyro_1_bias_z(self):
addr = 0x90
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_1_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_1_bias_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_1_bias_z,
@property
def dreg_gyro_2_bias_x(self):
addr = 0x91
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_x, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_x,
@property
def dreg_gyro_2_bias_y(self):
addr = 0x92
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_y, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_y,
@property
def dreg_gyro_2_bias_z(self):
addr = 0x93
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='DREG_GYRO_2_BIAS_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
gyro_2_bias_z, = struct.unpack('>f', payload[0:4])
return reg, gyro_2_bias_z,
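    # Firmware build info. Note the convention change: get_fw_build_id decodes
    # the payload as a 4-character ASCII string and returns only that string,
    # not the usual (reg, value) tuple used by the data registers above.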
@property
def get_fw_build_id(self):
addr = 0xAA
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_ID')
reg.raw_value, = struct.unpack('>I', payload[0:4])
fw_build_id = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return fw_build_id
@property
def get_fw_build_version(self):
addr = 0xAB
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='GET_FW_BUILD_VERSION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
version_major, version_minor, build_id = struct.unpack('>BBH', payload[0:4])
return reg, version_major, version_minor, build_id
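    # The registers below are write-only commands: reading raises RuntimeError,
    # and writing a value triggers the corresponding action on the device.
    # A minimal usage sketch (hypothetical -- the value semantics are defined
    # by the device protocol, not by this file):
    #     device.zero_gyros = 1        # request gyro zeroing
    #     device.flash_commit = 1      # persist settings to flash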
@property
def flash_commit(self):
raise RuntimeError('flash_commit has no getter! The register flash_commit is write-only!')
@flash_commit.setter
def flash_commit(self, new_value):
addr = 0xAC
self.write_register(addr, new_value)
@property
def reset_to_factory(self):
raise RuntimeError('reset_to_factory has no getter! The register reset_to_factory is write-only!')
@reset_to_factory.setter
def reset_to_factory(self, new_value):
addr = 0xAD
self.write_register(addr, new_value)
@property
def zero_gyros(self):
raise RuntimeError('zero_gyros has no getter! The register zero_gyros is write-only!')
@zero_gyros.setter
def zero_gyros(self, new_value):
addr = 0xAE
self.write_register(addr, new_value)
@property
def set_home_position(self):
raise RuntimeError('set_home_position has no getter! The register set_home_position is write-only!')
@set_home_position.setter
def set_home_position(self, new_value):
addr = 0xB0
self.write_register(addr, new_value)
@property
def set_mag_reference(self):
raise RuntimeError('set_mag_reference has no getter! The register set_mag_reference is write-only!')
@set_mag_reference.setter
def set_mag_reference(self, new_value):
addr = 0xB1
self.write_register(addr, new_value)
@property
def calibrate_accelerometers(self):
raise RuntimeError('calibrate_accelerometers has no getter! The register calibrate_accelerometers is write-only!')
@calibrate_accelerometers.setter
def calibrate_accelerometers(self, new_value):
addr = 0xB2
self.write_register(addr, new_value)
@property
def reset_fusion(self):
raise RuntimeError('reset_fusion has no getter! The register reset_fusion is write-only!')
@reset_fusion.setter
def reset_fusion(self, new_value):
addr = 0xB3
self.write_register(addr, new_value)
@property
def enable_zupt(self):
raise RuntimeError('enable_zupt has no getter! The register enable_zupt is write-only!')
@enable_zupt.setter
def enable_zupt(self, new_value):
addr = 0xB4
self.write_register(addr, new_value)
@property
def euler_mode(self):
raise RuntimeError('euler_mode has no getter! The register euler_mode is write-only!')
@euler_mode.setter
def euler_mode(self, new_value):
addr = 0xB5
self.write_register(addr, new_value)
@property
def quaternion_mode(self):
raise RuntimeError('quaternion_mode has no getter! The register quaternion_mode is write-only!')
@quaternion_mode.setter
def quaternion_mode(self, new_value):
addr = 0xB6
self.write_register(addr, new_value)
@property
def enable_rt_calibration(self):
raise RuntimeError('enable_rt_calibration has no getter! The register enable_rt_calibration is write-only!')
@enable_rt_calibration.setter
def enable_rt_calibration(self, new_value):
addr = 0xB7
self.write_register(addr, new_value)
@property
def en_mag_anomaly_detection(self):
raise RuntimeError('en_mag_anomaly_detection has no getter! The register en_mag_anomaly_detection is write-only!')
@en_mag_anomaly_detection.setter
def en_mag_anomaly_detection(self, new_value):
addr = 0xB8
self.write_register(addr, new_value)
@property
def run_self_tests(self):
raise RuntimeError('run_self_tests has no getter! The register run_self_tests is write-only!')
@run_self_tests.setter
def run_self_tests(self, new_value):
addr = 0xB9
self.write_register(addr, new_value)
@property
def enable_external_event(self):
raise RuntimeError('enable_external_event has no getter! The register enable_external_event is write-only!')
@enable_external_event.setter
def enable_external_event(self, new_value):
addr = 0xBA
self.write_register(addr, new_value)
@property
def enable_gnns_fusion(self):
raise RuntimeError('enable_gnns_fusion has no getter! The register enable_gnns_fusion is write-only!')
@enable_gnns_fusion.setter
def enable_gnns_fusion(self, new_value):
addr = 0xBB
self.write_register(addr, new_value)
@property
def enable_usr_euler_output(self):
raise RuntimeError('enable_usr_euler_output has no getter! The register enable_usr_euler_output is write-only!')
@enable_usr_euler_output.setter
def enable_usr_euler_output(self, new_value):
addr = 0xBC
self.write_register(addr, new_value)
@property
def enable_dead_reckoning(self):
raise RuntimeError('enable_dead_reckoning has no getter! The register enable_dead_reckoning is write-only!')
@enable_dead_reckoning.setter
def enable_dead_reckoning(self, new_value):
addr = 0xBD
self.write_register(addr, new_value)
@property
def enable_heave_sway_surge(self):
raise RuntimeError('enable_heave_sway_surge has no getter! The register enable_heave_sway_surge is write-only!')
@enable_heave_sway_surge.setter
def enable_heave_sway_surge(self, new_value):
addr = 0xBE
self.write_register(addr, new_value)
@property
def enable_ukf(self):
raise RuntimeError('enable_ukf has no getter! The register enable_ukf is write-only!')
@enable_ukf.setter
def enable_ukf(self, new_value):
addr = 0xBF
self.write_register(addr, new_value)
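    # Board identity registers (0xFD-0xFF). protocol_version, like
    # get_fw_build_id above, decodes its payload as a 4-character ASCII string
    # and returns only the string rather than a (reg, value) tuple.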
@property
def board_unique_id_1(self):
addr = 0xFD
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_1')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_1_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_1_bits,
@property
def board_unique_id_2(self):
addr = 0xFE
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='BOARD_UNIQUE_ID_2')
reg.raw_value, = struct.unpack('>I', payload[0:4])
board_unique_id_2_bits, = struct.unpack('>I', payload[0:4])
return reg, board_unique_id_2_bits,
@property
def protocol_version(self):
addr = 0xFF
ok, payload = self.read_register(addr)
if ok:
reg = self.svd_parser.find_register_by(name='PROTOCOL_VERSION')
reg.raw_value, = struct.unpack('>I', payload[0:4])
protocol_version_str = struct.unpack('>4s', payload[0:4])[0].decode('utf-8')
return protocol_version_str
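    # Hidden registers: these live in a separate address space (addresses
    # restart at 0x00) and are accessed with hidden=True on both reads and
    # writes, resolved via find_hidden_register_by(). Unlike the command
    # registers above, each one is a read/write float32 tuning parameter
    # (measurement variances, low-pass filter time constants, calibration
    # coefficients).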
@property
def hidden_gyro_1_variance(self):
addr = 0x00
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_variance,
@hidden_gyro_1_variance.setter
def hidden_gyro_1_variance(self, new_value):
addr = 0x00
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_variance(self):
addr = 0x01
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_variance,
@hidden_gyro_2_variance.setter
def hidden_gyro_2_variance(self, new_value):
addr = 0x01
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_variance(self):
addr = 0x02
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_variance,
@hidden_accel_1_variance.setter
def hidden_accel_1_variance(self, new_value):
addr = 0x02
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_variance(self):
addr = 0x03
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_variance,
@hidden_mag_1_variance.setter
def hidden_mag_1_variance(self, new_value):
addr = 0x03
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_variance(self):
addr = 0x04
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_variance,
@hidden_mag_2_variance.setter
def hidden_mag_2_variance(self, new_value):
addr = 0x04
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_course_variance(self):
addr = 0x05
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_COURSE_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_course_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_course_variance,
@hidden_gps_course_variance.setter
def hidden_gps_course_variance(self, new_value):
addr = 0x05
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_position_variance(self):
addr = 0x06
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_POSITION_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_position_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_position_variance,
@hidden_gps_position_variance.setter
def hidden_gps_position_variance(self, new_value):
addr = 0x06
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gps_velocity_variance(self):
addr = 0x07
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GPS_VELOCITY_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gps_velocity_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_gps_velocity_variance,
@hidden_gps_velocity_variance.setter
def hidden_gps_velocity_variance(self, new_value):
addr = 0x07
self.write_register(addr, new_value, hidden=True)
@property
def hidden_static_press_variance(self):
addr = 0x08
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_STATIC_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_static_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_static_press_variance,
@hidden_static_press_variance.setter
def hidden_static_press_variance(self, new_value):
addr = 0x08
self.write_register(addr, new_value, hidden=True)
@property
def hidden_diff_press_variance(self):
addr = 0x09
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_DIFF_PRESS_VARIANCE')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_diff_press_variance, = struct.unpack('>f', payload[0:4])
return reg, hidden_diff_press_variance,
@hidden_diff_press_variance.setter
def hidden_diff_press_variance(self, new_value):
addr = 0x09
self.write_register(addr, new_value, hidden=True)
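    # HIDDEN_Q_* registers: judging purely by the names, these are presumably
    # process-noise (Q) terms for the onboard state estimator. That reading is
    # an assumption based on Kalman-filter convention; this file only defines
    # them as plain float32 read/write registers.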
@property
def hidden_q_uvw(self):
addr = 0x0A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_UVW')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_uvw, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_uvw,
@hidden_q_uvw.setter
def hidden_q_uvw(self, new_value):
addr = 0x0A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_quaternion(self):
addr = 0x0B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_QUATERNION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_quaternion, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_quaternion,
@hidden_q_quaternion.setter
def hidden_q_quaternion(self, new_value):
addr = 0x0B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_gps_position(self):
addr = 0x0C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_GPS_POSITION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_gps_position, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_gps_position,
@hidden_q_gps_position.setter
def hidden_q_gps_position(self, new_value):
addr = 0x0C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_bias(self):
addr = 0x0D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_BIAS')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_bias, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_bias,
@hidden_q_bias.setter
def hidden_q_bias(self, new_value):
addr = 0x0D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_q_euler_angles(self):
addr = 0x0E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_Q_EULER_ANGLES')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_q_euler_angles, = struct.unpack('>f', payload[0:4])
return reg, hidden_q_euler_angles,
@hidden_q_euler_angles.setter
def hidden_q_euler_angles(self, new_value):
addr = 0x0E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_low_vg_accel_noise_factor(self):
addr = 0x0F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LOW_VG_ACCEL_NOISE_FACTOR')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_low_vg_accel_noise_factor, = struct.unpack('>f', payload[0:4])
return reg, hidden_low_vg_accel_noise_factor,
@hidden_low_vg_accel_noise_factor.setter
def hidden_low_vg_accel_noise_factor(self, new_value):
addr = 0x0F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_groundspeed(self):
addr = 0x10
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GROUNDSPEED')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_groundspeed, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_groundspeed,
@hidden_lpf_tau_groundspeed.setter
def hidden_lpf_tau_groundspeed(self, new_value):
addr = 0x10
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_1(self):
addr = 0x11
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_1,
@hidden_lpf_tau_gyro_1.setter
def hidden_lpf_tau_gyro_1(self, new_value):
addr = 0x11
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_gyro_2(self):
addr = 0x12
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_GYRO_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_gyro_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_gyro_2,
@hidden_lpf_tau_gyro_2.setter
def hidden_lpf_tau_gyro_2(self, new_value):
addr = 0x12
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_accel_1(self):
addr = 0x13
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_ACCEL_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_accel_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_accel_1,
@hidden_lpf_tau_accel_1.setter
def hidden_lpf_tau_accel_1(self, new_value):
addr = 0x13
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_1(self):
addr = 0x14
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_1,
@hidden_lpf_tau_mag_1.setter
def hidden_lpf_tau_mag_1(self, new_value):
addr = 0x14
self.write_register(addr, new_value, hidden=True)
@property
def hidden_lpf_tau_mag_2(self):
addr = 0x15
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_LPF_TAU_MAG_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_lpf_tau_mag_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_lpf_tau_mag_2,
@hidden_lpf_tau_mag_2.setter
def hidden_lpf_tau_mag_2(self, new_value):
addr = 0x15
self.write_register(addr, new_value, hidden=True)
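    # HIDDEN_C_GYRO_*_{BIAS,SCALE}_{X,Y,Z}_POW_0..POW_3: the naming suggests
    # coefficients of a cubic polynomial used for bias/scale compensation,
    # presumably as a function of temperature (an assumption):
    #     bias_x(T) ~= c0 + c1*T + c2*T**2 + c3*T**3
    # where c0..c3 are the POW_0..POW_3 registers. The device applies the
    # polynomial internally; this file only stores the coefficients.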
@property
def hidden_c_gyro_1_bias_x_pow_0(self):
addr = 0x16
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_0,
@hidden_c_gyro_1_bias_x_pow_0.setter
def hidden_c_gyro_1_bias_x_pow_0(self, new_value):
addr = 0x16
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_1(self):
addr = 0x17
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_1,
@hidden_c_gyro_1_bias_x_pow_1.setter
def hidden_c_gyro_1_bias_x_pow_1(self, new_value):
addr = 0x17
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_2(self):
addr = 0x18
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_2,
@hidden_c_gyro_1_bias_x_pow_2.setter
def hidden_c_gyro_1_bias_x_pow_2(self, new_value):
addr = 0x18
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_x_pow_3(self):
addr = 0x19
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_x_pow_3,
@hidden_c_gyro_1_bias_x_pow_3.setter
def hidden_c_gyro_1_bias_x_pow_3(self, new_value):
addr = 0x19
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_0(self):
addr = 0x1A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_0,
@hidden_c_gyro_1_bias_y_pow_0.setter
def hidden_c_gyro_1_bias_y_pow_0(self, new_value):
addr = 0x1A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_1(self):
addr = 0x1B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_1,
@hidden_c_gyro_1_bias_y_pow_1.setter
def hidden_c_gyro_1_bias_y_pow_1(self, new_value):
addr = 0x1B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_2(self):
addr = 0x1C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_2,
@hidden_c_gyro_1_bias_y_pow_2.setter
def hidden_c_gyro_1_bias_y_pow_2(self, new_value):
addr = 0x1C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_y_pow_3(self):
addr = 0x1D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_y_pow_3,
@hidden_c_gyro_1_bias_y_pow_3.setter
def hidden_c_gyro_1_bias_y_pow_3(self, new_value):
addr = 0x1D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_0(self):
addr = 0x1E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_0,
@hidden_c_gyro_1_bias_z_pow_0.setter
def hidden_c_gyro_1_bias_z_pow_0(self, new_value):
addr = 0x1E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_1(self):
addr = 0x1F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_1,
@hidden_c_gyro_1_bias_z_pow_1.setter
def hidden_c_gyro_1_bias_z_pow_1(self, new_value):
addr = 0x1F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_2(self):
addr = 0x20
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_2,
@hidden_c_gyro_1_bias_z_pow_2.setter
def hidden_c_gyro_1_bias_z_pow_2(self, new_value):
addr = 0x20
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_bias_z_pow_3(self):
addr = 0x21
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_bias_z_pow_3,
@hidden_c_gyro_1_bias_z_pow_3.setter
def hidden_c_gyro_1_bias_z_pow_3(self, new_value):
addr = 0x21
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_0(self):
addr = 0x22
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_0,
@hidden_c_gyro_1_scale_x_pow_0.setter
def hidden_c_gyro_1_scale_x_pow_0(self, new_value):
addr = 0x22
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_1(self):
addr = 0x23
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_1,
@hidden_c_gyro_1_scale_x_pow_1.setter
def hidden_c_gyro_1_scale_x_pow_1(self, new_value):
addr = 0x23
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_2(self):
addr = 0x24
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_2,
@hidden_c_gyro_1_scale_x_pow_2.setter
def hidden_c_gyro_1_scale_x_pow_2(self, new_value):
addr = 0x24
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_x_pow_3(self):
addr = 0x25
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_x_pow_3,
@hidden_c_gyro_1_scale_x_pow_3.setter
def hidden_c_gyro_1_scale_x_pow_3(self, new_value):
addr = 0x25
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_0(self):
addr = 0x26
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_0,
@hidden_c_gyro_1_scale_y_pow_0.setter
def hidden_c_gyro_1_scale_y_pow_0(self, new_value):
addr = 0x26
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_1(self):
addr = 0x27
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_1,
@hidden_c_gyro_1_scale_y_pow_1.setter
def hidden_c_gyro_1_scale_y_pow_1(self, new_value):
addr = 0x27
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_2(self):
addr = 0x28
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_2,
@hidden_c_gyro_1_scale_y_pow_2.setter
def hidden_c_gyro_1_scale_y_pow_2(self, new_value):
addr = 0x28
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_y_pow_3(self):
addr = 0x29
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_y_pow_3,
@hidden_c_gyro_1_scale_y_pow_3.setter
def hidden_c_gyro_1_scale_y_pow_3(self, new_value):
addr = 0x29
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_0(self):
addr = 0x2A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_0,
@hidden_c_gyro_1_scale_z_pow_0.setter
def hidden_c_gyro_1_scale_z_pow_0(self, new_value):
addr = 0x2A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_1(self):
addr = 0x2B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_1,
@hidden_c_gyro_1_scale_z_pow_1.setter
def hidden_c_gyro_1_scale_z_pow_1(self, new_value):
addr = 0x2B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_2(self):
addr = 0x2C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_2,
@hidden_c_gyro_1_scale_z_pow_2.setter
def hidden_c_gyro_1_scale_z_pow_2(self, new_value):
addr = 0x2C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_1_scale_z_pow_3(self):
addr = 0x2D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_1_scale_z_pow_3,
@hidden_c_gyro_1_scale_z_pow_3.setter
def hidden_c_gyro_1_scale_z_pow_3(self, new_value):
addr = 0x2D
self.write_register(addr, new_value, hidden=True)
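    # HIDDEN_GYRO_1_ALIGNMENT{r}_{c}: the row/column naming suggests the nine
    # entries of a 3x3 sensor-alignment (rotation) matrix for gyro 1. That
    # interpretation is an assumption from the names; each entry is simply a
    # float32 register here.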
@property
def hidden_gyro_1_alignment1_1(self):
addr = 0x2E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_1,
@hidden_gyro_1_alignment1_1.setter
def hidden_gyro_1_alignment1_1(self, new_value):
addr = 0x2E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_2(self):
addr = 0x2F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_2,
@hidden_gyro_1_alignment1_2.setter
def hidden_gyro_1_alignment1_2(self, new_value):
addr = 0x2F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment1_3(self):
addr = 0x30
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment1_3,
@hidden_gyro_1_alignment1_3.setter
def hidden_gyro_1_alignment1_3(self, new_value):
addr = 0x30
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_1(self):
addr = 0x31
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_1,
@hidden_gyro_1_alignment2_1.setter
def hidden_gyro_1_alignment2_1(self, new_value):
addr = 0x31
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_2(self):
addr = 0x32
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_2,
@hidden_gyro_1_alignment2_2.setter
def hidden_gyro_1_alignment2_2(self, new_value):
addr = 0x32
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment2_3(self):
addr = 0x33
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment2_3,
@hidden_gyro_1_alignment2_3.setter
def hidden_gyro_1_alignment2_3(self, new_value):
addr = 0x33
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_1(self):
addr = 0x34
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_1,
@hidden_gyro_1_alignment3_1.setter
def hidden_gyro_1_alignment3_1(self, new_value):
addr = 0x34
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_2(self):
addr = 0x35
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_2,
@hidden_gyro_1_alignment3_2.setter
def hidden_gyro_1_alignment3_2(self, new_value):
addr = 0x35
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_alignment3_3(self):
addr = 0x36
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_alignment3_3,
@hidden_gyro_1_alignment3_3.setter
def hidden_gyro_1_alignment3_3(self, new_value):
addr = 0x36
self.write_register(addr, new_value, hidden=True)
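    # Hidden registers 0x37-0x4E mirror the gyro 1 calibration set (bias and
    # scale polynomial coefficients) for the second gyro.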
@property
def hidden_c_gyro_2_bias_x_pow_0(self):
addr = 0x37
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_0,
@hidden_c_gyro_2_bias_x_pow_0.setter
def hidden_c_gyro_2_bias_x_pow_0(self, new_value):
addr = 0x37
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_1(self):
addr = 0x38
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_1,
@hidden_c_gyro_2_bias_x_pow_1.setter
def hidden_c_gyro_2_bias_x_pow_1(self, new_value):
addr = 0x38
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_2(self):
addr = 0x39
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_2,
@hidden_c_gyro_2_bias_x_pow_2.setter
def hidden_c_gyro_2_bias_x_pow_2(self, new_value):
addr = 0x39
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_x_pow_3(self):
addr = 0x3A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_x_pow_3,
@hidden_c_gyro_2_bias_x_pow_3.setter
def hidden_c_gyro_2_bias_x_pow_3(self, new_value):
addr = 0x3A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_0(self):
addr = 0x3B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_0,
@hidden_c_gyro_2_bias_y_pow_0.setter
def hidden_c_gyro_2_bias_y_pow_0(self, new_value):
addr = 0x3B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_1(self):
addr = 0x3C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_1,
@hidden_c_gyro_2_bias_y_pow_1.setter
def hidden_c_gyro_2_bias_y_pow_1(self, new_value):
addr = 0x3C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_2(self):
addr = 0x3D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_2,
@hidden_c_gyro_2_bias_y_pow_2.setter
def hidden_c_gyro_2_bias_y_pow_2(self, new_value):
addr = 0x3D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_y_pow_3(self):
addr = 0x3E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_y_pow_3,
@hidden_c_gyro_2_bias_y_pow_3.setter
def hidden_c_gyro_2_bias_y_pow_3(self, new_value):
addr = 0x3E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_0(self):
addr = 0x3F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_0,
@hidden_c_gyro_2_bias_z_pow_0.setter
def hidden_c_gyro_2_bias_z_pow_0(self, new_value):
addr = 0x3F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_1(self):
addr = 0x40
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_1,
@hidden_c_gyro_2_bias_z_pow_1.setter
def hidden_c_gyro_2_bias_z_pow_1(self, new_value):
addr = 0x40
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_2(self):
addr = 0x41
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_2,
@hidden_c_gyro_2_bias_z_pow_2.setter
def hidden_c_gyro_2_bias_z_pow_2(self, new_value):
addr = 0x41
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_bias_z_pow_3(self):
addr = 0x42
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_bias_z_pow_3,
@hidden_c_gyro_2_bias_z_pow_3.setter
def hidden_c_gyro_2_bias_z_pow_3(self, new_value):
addr = 0x42
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_0(self):
addr = 0x43
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_0,
@hidden_c_gyro_2_scale_x_pow_0.setter
def hidden_c_gyro_2_scale_x_pow_0(self, new_value):
addr = 0x43
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_1(self):
addr = 0x44
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_1,
@hidden_c_gyro_2_scale_x_pow_1.setter
def hidden_c_gyro_2_scale_x_pow_1(self, new_value):
addr = 0x44
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_2(self):
addr = 0x45
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_2,
@hidden_c_gyro_2_scale_x_pow_2.setter
def hidden_c_gyro_2_scale_x_pow_2(self, new_value):
addr = 0x45
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_x_pow_3(self):
addr = 0x46
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_x_pow_3,
@hidden_c_gyro_2_scale_x_pow_3.setter
def hidden_c_gyro_2_scale_x_pow_3(self, new_value):
addr = 0x46
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_0(self):
addr = 0x47
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_0,
@hidden_c_gyro_2_scale_y_pow_0.setter
def hidden_c_gyro_2_scale_y_pow_0(self, new_value):
addr = 0x47
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_1(self):
addr = 0x48
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_1,
@hidden_c_gyro_2_scale_y_pow_1.setter
def hidden_c_gyro_2_scale_y_pow_1(self, new_value):
addr = 0x48
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_2(self):
addr = 0x49
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_2,
@hidden_c_gyro_2_scale_y_pow_2.setter
def hidden_c_gyro_2_scale_y_pow_2(self, new_value):
addr = 0x49
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_y_pow_3(self):
addr = 0x4A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_y_pow_3,
@hidden_c_gyro_2_scale_y_pow_3.setter
def hidden_c_gyro_2_scale_y_pow_3(self, new_value):
addr = 0x4A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_0(self):
addr = 0x4B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_0,
@hidden_c_gyro_2_scale_z_pow_0.setter
def hidden_c_gyro_2_scale_z_pow_0(self, new_value):
addr = 0x4B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_1(self):
addr = 0x4C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_1,
@hidden_c_gyro_2_scale_z_pow_1.setter
def hidden_c_gyro_2_scale_z_pow_1(self, new_value):
addr = 0x4C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_2(self):
addr = 0x4D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_2,
@hidden_c_gyro_2_scale_z_pow_2.setter
def hidden_c_gyro_2_scale_z_pow_2(self, new_value):
addr = 0x4D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_gyro_2_scale_z_pow_3(self):
addr = 0x4E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_GYRO_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_gyro_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_gyro_2_scale_z_pow_3,
@hidden_c_gyro_2_scale_z_pow_3.setter
def hidden_c_gyro_2_scale_z_pow_3(self, new_value):
addr = 0x4E
self.write_register(addr, new_value, hidden=True)
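    # --- HIDDEN_GYRO_2_ALIGNMENT1_1..3_3 (regs 0x4F..0x57): the row_column
    # naming indicates a 3x3 alignment matrix for gyro 2, one float per
    # register. ---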
@property
def hidden_gyro_2_alignment1_1(self):
addr = 0x4F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_1,
@hidden_gyro_2_alignment1_1.setter
def hidden_gyro_2_alignment1_1(self, new_value):
addr = 0x4F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_2(self):
addr = 0x50
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_2,
@hidden_gyro_2_alignment1_2.setter
def hidden_gyro_2_alignment1_2(self, new_value):
addr = 0x50
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment1_3(self):
addr = 0x51
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment1_3,
@hidden_gyro_2_alignment1_3.setter
def hidden_gyro_2_alignment1_3(self, new_value):
addr = 0x51
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_1(self):
addr = 0x52
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_1,
@hidden_gyro_2_alignment2_1.setter
def hidden_gyro_2_alignment2_1(self, new_value):
addr = 0x52
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_2(self):
addr = 0x53
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_2,
@hidden_gyro_2_alignment2_2.setter
def hidden_gyro_2_alignment2_2(self, new_value):
addr = 0x53
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment2_3(self):
addr = 0x54
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment2_3,
@hidden_gyro_2_alignment2_3.setter
def hidden_gyro_2_alignment2_3(self, new_value):
addr = 0x54
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_1(self):
addr = 0x55
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_1,
@hidden_gyro_2_alignment3_1.setter
def hidden_gyro_2_alignment3_1(self, new_value):
addr = 0x55
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_2(self):
addr = 0x56
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_2,
@hidden_gyro_2_alignment3_2.setter
def hidden_gyro_2_alignment3_2(self, new_value):
addr = 0x56
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_2_alignment3_3(self):
addr = 0x57
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_alignment3_3,
@hidden_gyro_2_alignment3_3.setter
def hidden_gyro_2_alignment3_3(self, new_value):
addr = 0x57
self.write_register(addr, new_value, hidden=True)
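    # --- HIDDEN_C_ACCEL_1_BIAS_* (regs 0x58..0x63): accelerometer 1 bias
    # coefficients, pow_0..pow_3 per axis. ---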
@property
def hidden_c_accel_1_bias_x_pow_0(self):
addr = 0x58
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_0,
@hidden_c_accel_1_bias_x_pow_0.setter
def hidden_c_accel_1_bias_x_pow_0(self, new_value):
addr = 0x58
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_1(self):
addr = 0x59
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_1,
@hidden_c_accel_1_bias_x_pow_1.setter
def hidden_c_accel_1_bias_x_pow_1(self, new_value):
addr = 0x59
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_2(self):
addr = 0x5A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_2,
@hidden_c_accel_1_bias_x_pow_2.setter
def hidden_c_accel_1_bias_x_pow_2(self, new_value):
addr = 0x5A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_x_pow_3(self):
addr = 0x5B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_x_pow_3,
@hidden_c_accel_1_bias_x_pow_3.setter
def hidden_c_accel_1_bias_x_pow_3(self, new_value):
addr = 0x5B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_0(self):
addr = 0x5C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_0,
@hidden_c_accel_1_bias_y_pow_0.setter
def hidden_c_accel_1_bias_y_pow_0(self, new_value):
addr = 0x5C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_1(self):
addr = 0x5D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_1,
@hidden_c_accel_1_bias_y_pow_1.setter
def hidden_c_accel_1_bias_y_pow_1(self, new_value):
addr = 0x5D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_2(self):
addr = 0x5E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_2,
@hidden_c_accel_1_bias_y_pow_2.setter
def hidden_c_accel_1_bias_y_pow_2(self, new_value):
addr = 0x5E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_y_pow_3(self):
addr = 0x5F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_y_pow_3,
@hidden_c_accel_1_bias_y_pow_3.setter
def hidden_c_accel_1_bias_y_pow_3(self, new_value):
addr = 0x5F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_0(self):
addr = 0x60
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_0,
@hidden_c_accel_1_bias_z_pow_0.setter
def hidden_c_accel_1_bias_z_pow_0(self, new_value):
addr = 0x60
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_1(self):
addr = 0x61
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_1,
@hidden_c_accel_1_bias_z_pow_1.setter
def hidden_c_accel_1_bias_z_pow_1(self, new_value):
addr = 0x61
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_2(self):
addr = 0x62
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_2,
@hidden_c_accel_1_bias_z_pow_2.setter
def hidden_c_accel_1_bias_z_pow_2(self, new_value):
addr = 0x62
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_bias_z_pow_3(self):
addr = 0x63
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_bias_z_pow_3,
@hidden_c_accel_1_bias_z_pow_3.setter
def hidden_c_accel_1_bias_z_pow_3(self, new_value):
addr = 0x63
self.write_register(addr, new_value, hidden=True)
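    # --- HIDDEN_C_ACCEL_1_SCALE_* (regs 0x64..0x6F): accelerometer 1 scale
    # coefficients, again four polynomial terms per axis (an inference from
    # the _pow_N naming). ---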
@property
def hidden_c_accel_1_scale_x_pow_0(self):
addr = 0x64
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_0,
@hidden_c_accel_1_scale_x_pow_0.setter
def hidden_c_accel_1_scale_x_pow_0(self, new_value):
addr = 0x64
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_1(self):
addr = 0x65
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_1,
@hidden_c_accel_1_scale_x_pow_1.setter
def hidden_c_accel_1_scale_x_pow_1(self, new_value):
addr = 0x65
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_2(self):
addr = 0x66
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_2,
@hidden_c_accel_1_scale_x_pow_2.setter
def hidden_c_accel_1_scale_x_pow_2(self, new_value):
addr = 0x66
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_x_pow_3(self):
addr = 0x67
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_x_pow_3,
@hidden_c_accel_1_scale_x_pow_3.setter
def hidden_c_accel_1_scale_x_pow_3(self, new_value):
addr = 0x67
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_0(self):
addr = 0x68
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_0,
@hidden_c_accel_1_scale_y_pow_0.setter
def hidden_c_accel_1_scale_y_pow_0(self, new_value):
addr = 0x68
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_1(self):
addr = 0x69
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_1,
@hidden_c_accel_1_scale_y_pow_1.setter
def hidden_c_accel_1_scale_y_pow_1(self, new_value):
addr = 0x69
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_2(self):
addr = 0x6A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_2,
@hidden_c_accel_1_scale_y_pow_2.setter
def hidden_c_accel_1_scale_y_pow_2(self, new_value):
addr = 0x6A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_y_pow_3(self):
addr = 0x6B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_y_pow_3,
@hidden_c_accel_1_scale_y_pow_3.setter
def hidden_c_accel_1_scale_y_pow_3(self, new_value):
addr = 0x6B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_0(self):
addr = 0x6C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_0,
@hidden_c_accel_1_scale_z_pow_0.setter
def hidden_c_accel_1_scale_z_pow_0(self, new_value):
addr = 0x6C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_1(self):
addr = 0x6D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_1,
@hidden_c_accel_1_scale_z_pow_1.setter
def hidden_c_accel_1_scale_z_pow_1(self, new_value):
addr = 0x6D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_2(self):
addr = 0x6E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_2,
@hidden_c_accel_1_scale_z_pow_2.setter
def hidden_c_accel_1_scale_z_pow_2(self, new_value):
addr = 0x6E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_accel_1_scale_z_pow_3(self):
addr = 0x6F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_ACCEL_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_accel_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_accel_1_scale_z_pow_3,
@hidden_c_accel_1_scale_z_pow_3.setter
def hidden_c_accel_1_scale_z_pow_3(self, new_value):
addr = 0x6F
self.write_register(addr, new_value, hidden=True)
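    # --- HIDDEN_ACCEL_1_ALIGNMENT1_1..3_3 (regs 0x70..0x78): 3x3 alignment
    # matrix for accelerometer 1. ---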
@property
def hidden_accel_1_alignment1_1(self):
addr = 0x70
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_1,
@hidden_accel_1_alignment1_1.setter
def hidden_accel_1_alignment1_1(self, new_value):
addr = 0x70
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_2(self):
addr = 0x71
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_2,
@hidden_accel_1_alignment1_2.setter
def hidden_accel_1_alignment1_2(self, new_value):
addr = 0x71
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment1_3(self):
addr = 0x72
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment1_3,
@hidden_accel_1_alignment1_3.setter
def hidden_accel_1_alignment1_3(self, new_value):
addr = 0x72
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_1(self):
addr = 0x73
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_1,
@hidden_accel_1_alignment2_1.setter
def hidden_accel_1_alignment2_1(self, new_value):
addr = 0x73
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_2(self):
addr = 0x74
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_2,
@hidden_accel_1_alignment2_2.setter
def hidden_accel_1_alignment2_2(self, new_value):
addr = 0x74
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment2_3(self):
addr = 0x75
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment2_3,
@hidden_accel_1_alignment2_3.setter
def hidden_accel_1_alignment2_3(self, new_value):
addr = 0x75
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_1(self):
addr = 0x76
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_1,
@hidden_accel_1_alignment3_1.setter
def hidden_accel_1_alignment3_1(self, new_value):
addr = 0x76
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_2(self):
addr = 0x77
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_2,
@hidden_accel_1_alignment3_2.setter
def hidden_accel_1_alignment3_2(self, new_value):
addr = 0x77
self.write_register(addr, new_value, hidden=True)
@property
def hidden_accel_1_alignment3_3(self):
addr = 0x78
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_alignment3_3,
@hidden_accel_1_alignment3_3.setter
def hidden_accel_1_alignment3_3(self, new_value):
addr = 0x78
self.write_register(addr, new_value, hidden=True)
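    # --- HIDDEN_C_MAG_1_BIAS_* (regs 0x79..0x84): magnetometer 1 bias
    # coefficients, pow_0..pow_3 per axis. ---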
@property
def hidden_c_mag_1_bias_x_pow_0(self):
addr = 0x79
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_0,
@hidden_c_mag_1_bias_x_pow_0.setter
def hidden_c_mag_1_bias_x_pow_0(self, new_value):
addr = 0x79
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_1(self):
addr = 0x7A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_1,
@hidden_c_mag_1_bias_x_pow_1.setter
def hidden_c_mag_1_bias_x_pow_1(self, new_value):
addr = 0x7A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_2(self):
addr = 0x7B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_2,
@hidden_c_mag_1_bias_x_pow_2.setter
def hidden_c_mag_1_bias_x_pow_2(self, new_value):
addr = 0x7B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_x_pow_3(self):
addr = 0x7C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_x_pow_3,
@hidden_c_mag_1_bias_x_pow_3.setter
def hidden_c_mag_1_bias_x_pow_3(self, new_value):
addr = 0x7C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_0(self):
addr = 0x7D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_0,
@hidden_c_mag_1_bias_y_pow_0.setter
def hidden_c_mag_1_bias_y_pow_0(self, new_value):
addr = 0x7D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_1(self):
addr = 0x7E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_1,
@hidden_c_mag_1_bias_y_pow_1.setter
def hidden_c_mag_1_bias_y_pow_1(self, new_value):
addr = 0x7E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_2(self):
addr = 0x7F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_2,
@hidden_c_mag_1_bias_y_pow_2.setter
def hidden_c_mag_1_bias_y_pow_2(self, new_value):
addr = 0x7F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_y_pow_3(self):
addr = 0x80
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_y_pow_3,
@hidden_c_mag_1_bias_y_pow_3.setter
def hidden_c_mag_1_bias_y_pow_3(self, new_value):
addr = 0x80
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_0(self):
addr = 0x81
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_0,
@hidden_c_mag_1_bias_z_pow_0.setter
def hidden_c_mag_1_bias_z_pow_0(self, new_value):
addr = 0x81
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_1(self):
addr = 0x82
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_1,
@hidden_c_mag_1_bias_z_pow_1.setter
def hidden_c_mag_1_bias_z_pow_1(self, new_value):
addr = 0x82
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_2(self):
addr = 0x83
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_2,
@hidden_c_mag_1_bias_z_pow_2.setter
def hidden_c_mag_1_bias_z_pow_2(self, new_value):
addr = 0x83
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_bias_z_pow_3(self):
addr = 0x84
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_bias_z_pow_3,
@hidden_c_mag_1_bias_z_pow_3.setter
def hidden_c_mag_1_bias_z_pow_3(self, new_value):
addr = 0x84
self.write_register(addr, new_value, hidden=True)
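    # --- HIDDEN_C_MAG_1_SCALE_* (regs 0x85..0x90): magnetometer 1 scale
    # coefficients per axis; the polynomial reading is inferred from the
    # naming. ---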
@property
def hidden_c_mag_1_scale_x_pow_0(self):
addr = 0x85
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_0,
@hidden_c_mag_1_scale_x_pow_0.setter
def hidden_c_mag_1_scale_x_pow_0(self, new_value):
addr = 0x85
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_1(self):
addr = 0x86
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_1,
@hidden_c_mag_1_scale_x_pow_1.setter
def hidden_c_mag_1_scale_x_pow_1(self, new_value):
addr = 0x86
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_2(self):
addr = 0x87
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_2,
@hidden_c_mag_1_scale_x_pow_2.setter
def hidden_c_mag_1_scale_x_pow_2(self, new_value):
addr = 0x87
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_x_pow_3(self):
addr = 0x88
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_x_pow_3,
@hidden_c_mag_1_scale_x_pow_3.setter
def hidden_c_mag_1_scale_x_pow_3(self, new_value):
addr = 0x88
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_0(self):
addr = 0x89
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_0,
@hidden_c_mag_1_scale_y_pow_0.setter
def hidden_c_mag_1_scale_y_pow_0(self, new_value):
addr = 0x89
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_1(self):
addr = 0x8A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_1,
@hidden_c_mag_1_scale_y_pow_1.setter
def hidden_c_mag_1_scale_y_pow_1(self, new_value):
addr = 0x8A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_2(self):
addr = 0x8B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_2,
@hidden_c_mag_1_scale_y_pow_2.setter
def hidden_c_mag_1_scale_y_pow_2(self, new_value):
addr = 0x8B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_y_pow_3(self):
addr = 0x8C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_y_pow_3,
@hidden_c_mag_1_scale_y_pow_3.setter
def hidden_c_mag_1_scale_y_pow_3(self, new_value):
addr = 0x8C
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_0(self):
addr = 0x8D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_0,
@hidden_c_mag_1_scale_z_pow_0.setter
def hidden_c_mag_1_scale_z_pow_0(self, new_value):
addr = 0x8D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_1(self):
addr = 0x8E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_1,
@hidden_c_mag_1_scale_z_pow_1.setter
def hidden_c_mag_1_scale_z_pow_1(self, new_value):
addr = 0x8E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_2(self):
addr = 0x8F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_2,
@hidden_c_mag_1_scale_z_pow_2.setter
def hidden_c_mag_1_scale_z_pow_2(self, new_value):
addr = 0x8F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_1_scale_z_pow_3(self):
addr = 0x90
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_1_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_1_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_1_scale_z_pow_3,
@hidden_c_mag_1_scale_z_pow_3.setter
def hidden_c_mag_1_scale_z_pow_3(self, new_value):
addr = 0x90
self.write_register(addr, new_value, hidden=True)
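    # --- HIDDEN_MAG_1_ALIGNMENT1_1..3_3 (regs 0x91..0x99): 3x3 alignment
    # matrix for magnetometer 1. ---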
@property
def hidden_mag_1_alignment1_1(self):
addr = 0x91
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_1,
@hidden_mag_1_alignment1_1.setter
def hidden_mag_1_alignment1_1(self, new_value):
addr = 0x91
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_2(self):
addr = 0x92
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_2,
@hidden_mag_1_alignment1_2.setter
def hidden_mag_1_alignment1_2(self, new_value):
addr = 0x92
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment1_3(self):
addr = 0x93
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment1_3,
@hidden_mag_1_alignment1_3.setter
def hidden_mag_1_alignment1_3(self, new_value):
addr = 0x93
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_1(self):
addr = 0x94
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_1,
@hidden_mag_1_alignment2_1.setter
def hidden_mag_1_alignment2_1(self, new_value):
addr = 0x94
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_2(self):
addr = 0x95
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_2,
@hidden_mag_1_alignment2_2.setter
def hidden_mag_1_alignment2_2(self, new_value):
addr = 0x95
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment2_3(self):
addr = 0x96
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment2_3,
@hidden_mag_1_alignment2_3.setter
def hidden_mag_1_alignment2_3(self, new_value):
addr = 0x96
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_1(self):
addr = 0x97
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_1,
@hidden_mag_1_alignment3_1.setter
def hidden_mag_1_alignment3_1(self, new_value):
addr = 0x97
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_2(self):
addr = 0x98
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_2,
@hidden_mag_1_alignment3_2.setter
def hidden_mag_1_alignment3_2(self, new_value):
addr = 0x98
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_alignment3_3(self):
addr = 0x99
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_alignment3_3,
@hidden_mag_1_alignment3_3.setter
def hidden_mag_1_alignment3_3(self, new_value):
addr = 0x99
self.write_register(addr, new_value, hidden=True)
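    # --- HIDDEN_MAG_1_REFERENCE_X/Y/Z (regs 0x9A..0x9C): reference field
    # vector for magnetometer 1, one float per axis. ---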
@property
def hidden_mag_1_reference_x(self):
addr = 0x9A
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_x,
@hidden_mag_1_reference_x.setter
def hidden_mag_1_reference_x(self, new_value):
addr = 0x9A
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_y(self):
addr = 0x9B
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_y,
@hidden_mag_1_reference_y.setter
def hidden_mag_1_reference_y(self, new_value):
addr = 0x9B
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_1_reference_z(self):
addr = 0x9C
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_reference_z,
@hidden_mag_1_reference_z.setter
def hidden_mag_1_reference_z(self, new_value):
addr = 0x9C
self.write_register(addr, new_value, hidden=True)
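    # --- Magnetometer 2 block (regs 0x9D onward): mirrors the magnetometer 1
    # layout above: bias (0x9D..0xA8), scale (0xA9..0xB4), alignment
    # (0xB5..0xBD), then the reference vector starting at 0xBE. ---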
@property
def hidden_c_mag_2_bias_x_pow_0(self):
addr = 0x9D
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_0,
@hidden_c_mag_2_bias_x_pow_0.setter
def hidden_c_mag_2_bias_x_pow_0(self, new_value):
addr = 0x9D
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_1(self):
addr = 0x9E
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_1,
@hidden_c_mag_2_bias_x_pow_1.setter
def hidden_c_mag_2_bias_x_pow_1(self, new_value):
addr = 0x9E
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_2(self):
addr = 0x9F
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_2,
@hidden_c_mag_2_bias_x_pow_2.setter
def hidden_c_mag_2_bias_x_pow_2(self, new_value):
addr = 0x9F
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_x_pow_3(self):
addr = 0xA0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_x_pow_3,
@hidden_c_mag_2_bias_x_pow_3.setter
def hidden_c_mag_2_bias_x_pow_3(self, new_value):
addr = 0xA0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_0(self):
addr = 0xA1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_0,
@hidden_c_mag_2_bias_y_pow_0.setter
def hidden_c_mag_2_bias_y_pow_0(self, new_value):
addr = 0xA1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_1(self):
addr = 0xA2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_1,
@hidden_c_mag_2_bias_y_pow_1.setter
def hidden_c_mag_2_bias_y_pow_1(self, new_value):
addr = 0xA2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_2(self):
addr = 0xA3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_2,
@hidden_c_mag_2_bias_y_pow_2.setter
def hidden_c_mag_2_bias_y_pow_2(self, new_value):
addr = 0xA3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_y_pow_3(self):
addr = 0xA4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_y_pow_3,
@hidden_c_mag_2_bias_y_pow_3.setter
def hidden_c_mag_2_bias_y_pow_3(self, new_value):
addr = 0xA4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_0(self):
addr = 0xA5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_0,
@hidden_c_mag_2_bias_z_pow_0.setter
def hidden_c_mag_2_bias_z_pow_0(self, new_value):
addr = 0xA5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_1(self):
addr = 0xA6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_1,
@hidden_c_mag_2_bias_z_pow_1.setter
def hidden_c_mag_2_bias_z_pow_1(self, new_value):
addr = 0xA6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_2(self):
addr = 0xA7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_2,
@hidden_c_mag_2_bias_z_pow_2.setter
def hidden_c_mag_2_bias_z_pow_2(self, new_value):
addr = 0xA7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_bias_z_pow_3(self):
addr = 0xA8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_BIAS_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_bias_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_bias_z_pow_3,
@hidden_c_mag_2_bias_z_pow_3.setter
def hidden_c_mag_2_bias_z_pow_3(self, new_value):
addr = 0xA8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_0(self):
addr = 0xA9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_0,
@hidden_c_mag_2_scale_x_pow_0.setter
def hidden_c_mag_2_scale_x_pow_0(self, new_value):
addr = 0xA9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_1(self):
addr = 0xAA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_1,
@hidden_c_mag_2_scale_x_pow_1.setter
def hidden_c_mag_2_scale_x_pow_1(self, new_value):
addr = 0xAA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_2(self):
addr = 0xAB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_2,
@hidden_c_mag_2_scale_x_pow_2.setter
def hidden_c_mag_2_scale_x_pow_2(self, new_value):
addr = 0xAB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_x_pow_3(self):
addr = 0xAC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_X_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_x_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_x_pow_3,
@hidden_c_mag_2_scale_x_pow_3.setter
def hidden_c_mag_2_scale_x_pow_3(self, new_value):
addr = 0xAC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_0(self):
addr = 0xAD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_0,
@hidden_c_mag_2_scale_y_pow_0.setter
def hidden_c_mag_2_scale_y_pow_0(self, new_value):
addr = 0xAD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_1(self):
addr = 0xAE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_1,
@hidden_c_mag_2_scale_y_pow_1.setter
def hidden_c_mag_2_scale_y_pow_1(self, new_value):
addr = 0xAE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_2(self):
addr = 0xAF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_2,
@hidden_c_mag_2_scale_y_pow_2.setter
def hidden_c_mag_2_scale_y_pow_2(self, new_value):
addr = 0xAF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_y_pow_3(self):
addr = 0xB0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Y_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_y_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_y_pow_3,
@hidden_c_mag_2_scale_y_pow_3.setter
def hidden_c_mag_2_scale_y_pow_3(self, new_value):
addr = 0xB0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_0(self):
addr = 0xB1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_0')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_0, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_0,
@hidden_c_mag_2_scale_z_pow_0.setter
def hidden_c_mag_2_scale_z_pow_0(self, new_value):
addr = 0xB1
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_1(self):
addr = 0xB2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_1,
@hidden_c_mag_2_scale_z_pow_1.setter
def hidden_c_mag_2_scale_z_pow_1(self, new_value):
addr = 0xB2
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_2(self):
addr = 0xB3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_2,
@hidden_c_mag_2_scale_z_pow_2.setter
def hidden_c_mag_2_scale_z_pow_2(self, new_value):
addr = 0xB3
self.write_register(addr, new_value, hidden=True)
@property
def hidden_c_mag_2_scale_z_pow_3(self):
addr = 0xB4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_C_MAG_2_SCALE_Z_POW_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_c_mag_2_scale_z_pow_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_c_mag_2_scale_z_pow_3,
@hidden_c_mag_2_scale_z_pow_3.setter
def hidden_c_mag_2_scale_z_pow_3(self, new_value):
addr = 0xB4
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_1(self):
addr = 0xB5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_1,
@hidden_mag_2_alignment1_1.setter
def hidden_mag_2_alignment1_1(self, new_value):
addr = 0xB5
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_2(self):
addr = 0xB6
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_2,
@hidden_mag_2_alignment1_2.setter
def hidden_mag_2_alignment1_2(self, new_value):
addr = 0xB6
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment1_3(self):
addr = 0xB7
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT1_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment1_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment1_3,
@hidden_mag_2_alignment1_3.setter
def hidden_mag_2_alignment1_3(self, new_value):
addr = 0xB7
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_1(self):
addr = 0xB8
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_1,
@hidden_mag_2_alignment2_1.setter
def hidden_mag_2_alignment2_1(self, new_value):
addr = 0xB8
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_2(self):
addr = 0xB9
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_2,
@hidden_mag_2_alignment2_2.setter
def hidden_mag_2_alignment2_2(self, new_value):
addr = 0xB9
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment2_3(self):
addr = 0xBA
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT2_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment2_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment2_3,
@hidden_mag_2_alignment2_3.setter
def hidden_mag_2_alignment2_3(self, new_value):
addr = 0xBA
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_1(self):
addr = 0xBB
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_1')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_1, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_1,
@hidden_mag_2_alignment3_1.setter
def hidden_mag_2_alignment3_1(self, new_value):
addr = 0xBB
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_2(self):
addr = 0xBC
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_2')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_2, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_2,
@hidden_mag_2_alignment3_2.setter
def hidden_mag_2_alignment3_2(self, new_value):
addr = 0xBC
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_alignment3_3(self):
addr = 0xBD
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_ALIGNMENT3_3')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_alignment3_3, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_alignment3_3,
@hidden_mag_2_alignment3_3.setter
def hidden_mag_2_alignment3_3(self, new_value):
addr = 0xBD
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_x(self):
addr = 0xBE
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_X')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_x, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_x,
@hidden_mag_2_reference_x.setter
def hidden_mag_2_reference_x(self, new_value):
addr = 0xBE
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_y(self):
addr = 0xBF
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Y')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_y, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_y,
@hidden_mag_2_reference_y.setter
def hidden_mag_2_reference_y(self, new_value):
addr = 0xBF
self.write_register(addr, new_value, hidden=True)
@property
def hidden_mag_2_reference_z(self):
addr = 0xC0
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_REFERENCE_Z')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_reference_z, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_reference_z,
@hidden_mag_2_reference_z.setter
def hidden_mag_2_reference_z(self, new_value):
addr = 0xC0
self.write_register(addr, new_value, hidden=True)
@property
def hidden_gyro_1_conversion(self):
addr = 0xC1
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_1_conversion,
@property
def hidden_gyro_2_conversion(self):
addr = 0xC2
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_GYRO_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_gyro_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_gyro_2_conversion,
@property
def hidden_accel_1_conversion(self):
addr = 0xC3
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_ACCEL_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_accel_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_accel_1_conversion,
@property
def hidden_mag_1_conversion(self):
addr = 0xC4
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_1_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_1_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_1_conversion,
@property
def hidden_mag_2_conversion(self):
addr = 0xC5
ok, payload = self.read_register(addr, hidden=True)
if ok:
reg = self.svd_parser.find_hidden_register_by(name='HIDDEN_MAG_2_CONVERSION')
reg.raw_value, = struct.unpack('>f', payload[0:4])
hidden_mag_2_conversion, = struct.unpack('>f', payload[0:4])
return reg, hidden_mag_2_conversion,
if __name__ == '__main__':
pass
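# A minimal stand-alone sketch of the decode/encode round trip every hidden
# register property above performs: each getter unpacks a big-endian float32
# from the first four payload bytes, and each setter hands the value back to
# write_register. The transport itself is hardware-specific and not shown.
import struct

payload = struct.pack('>f', 1.25)                    # what a setter would send
decoded_value, = struct.unpack('>f', payload[0:4])   # exactly what each getter does
assert decoded_value == 1.25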
| true
| true
|
f7093fe62cd2fc34b1c1b4274b909f0daecbb510
| 418
|
py
|
Python
|
day-3/binary_converter.py
|
MarkGhebrial/advent-of-code-2021
|
eb38b152f41a61e4d92578fe1187636f22e82d6c
|
[
"MIT"
] | null | null | null |
day-3/binary_converter.py
|
MarkGhebrial/advent-of-code-2021
|
eb38b152f41a61e4d92578fe1187636f22e82d6c
|
[
"MIT"
] | null | null | null |
day-3/binary_converter.py
|
MarkGhebrial/advent-of-code-2021
|
eb38b152f41a61e4d92578fe1187636f22e82d6c
|
[
"MIT"
] | null | null | null |
def binaryToInt (string: str, oneChar = "1", zeroChar = "0"):
out = 0
for i in range(len(string)):
        currentDigit = None
        if string[len(string) - 1 - i] == oneChar:
            currentDigit = 1
        elif string[len(string) - 1 - i] == zeroChar:
            currentDigit = 0
        else:
            raise ValueError("unexpected character: " + string[len(string) - 1 - i])
        out += (2**i) * currentDigit
return(out)
if __name__ == "__main__":
print(binaryToInt("1011"))
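    # For plain '0'/'1' strings the built-in int(s, 2) gives the same result; the
    # helper's oneChar/zeroChar parameters can be emulated with str.translate.
    # The '#'/'.' alphabet below is a made-up illustration, not puzzle input.
    assert int("1011", 2) == binaryToInt("1011") == 11
    assert binaryToInt("#.##", oneChar="#", zeroChar=".") == int("#.##".translate(str.maketrans("#.", "10")), 2) == 11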
| 29.857143
| 61
| 0.538278
|
| true
| true
|
f70940369734952cf51e11dce622dd3ae73eaef4
| 4,192
|
py
|
Python
|
tests/test_poetry.py
|
radek-sprta/poetry
|
c57e1ddfd50da4e4ec60c3c27152811e07bdba2a
|
[
"MIT"
] | null | null | null |
tests/test_poetry.py
|
radek-sprta/poetry
|
c57e1ddfd50da4e4ec60c3c27152811e07bdba2a
|
[
"MIT"
] | null | null | null |
tests/test_poetry.py
|
radek-sprta/poetry
|
c57e1ddfd50da4e4ec60c3c27152811e07bdba2a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from poetry.poetry import Poetry
from poetry.utils._compat import Path
from poetry.utils.toml_file import TomlFile
fixtures_dir = Path(__file__).parent / "fixtures"
def test_poetry():
poetry = Poetry.create(str(fixtures_dir / "sample_project"))
package = poetry.package
assert package.name == "my-package"
assert package.version.text == "1.2.3"
assert package.description == "Some description."
assert package.authors == ["Sébastien Eustace <sebastien@eustace.io>"]
assert package.license.id == "MIT"
assert (
package.readme.relative_to(fixtures_dir).as_posix()
== "sample_project/README.rst"
)
assert package.homepage == "https://poetry.eustace.io"
assert package.repository_url == "https://github.com/sdispater/poetry"
assert package.keywords == ["packaging", "dependency", "poetry"]
assert package.python_versions == "~2.7 || ^3.6"
assert str(package.python_constraint) == ">=2.7,<2.8 || >=3.6,<4.0"
dependencies = {}
for dep in package.requires:
dependencies[dep.name] = dep
cleo = dependencies["cleo"]
assert cleo.pretty_constraint == "^0.6"
assert not cleo.is_optional()
pendulum = dependencies["pendulum"]
assert pendulum.pretty_constraint == "branch 2.0"
assert pendulum.is_vcs()
assert pendulum.vcs == "git"
assert pendulum.branch == "2.0"
assert pendulum.source == "https://github.com/sdispater/pendulum.git"
assert pendulum.allows_prereleases()
requests = dependencies["requests"]
assert requests.pretty_constraint == "^2.18"
assert not requests.is_vcs()
assert not requests.allows_prereleases()
assert requests.is_optional()
assert requests.extras == ["security"]
pathlib2 = dependencies["pathlib2"]
assert pathlib2.pretty_constraint == "^2.2"
assert pathlib2.python_versions == "~2.7"
assert not pathlib2.is_optional()
demo = dependencies["demo"]
assert demo.is_file()
assert not demo.is_vcs()
assert demo.name == "demo"
assert demo.pretty_constraint == "0.1.0"
demo = dependencies["my-package"]
assert not demo.is_file()
assert demo.is_directory()
assert not demo.is_vcs()
assert demo.name == "my-package"
assert demo.pretty_constraint == "0.1.2"
assert demo.package.requires[0].name == "pendulum"
assert demo.package.requires[1].name == "cachy"
assert demo.package.requires[1].extras == ["msgpack"]
simple_project = dependencies["simple-project"]
assert not simple_project.is_file()
assert simple_project.is_directory()
assert not simple_project.is_vcs()
assert simple_project.name == "simple-project"
assert simple_project.pretty_constraint == "1.2.3"
assert simple_project.package.requires == []
assert "db" in package.extras
classifiers = package.classifiers
assert classifiers == [
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
assert package.all_classifiers == [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
def test_poetry_with_packages_and_includes():
poetry = Poetry.create(
str(fixtures_dir.parent / "masonry" / "builders" / "fixtures" / "with-include")
)
package = poetry.package
assert package.packages == [
{"include": "extra_dir/**/*.py"},
{"include": "my_module.py"},
{"include": "package_with_include"},
]
assert package.include == ["extra_dir/vcs_excluded.txt", "notes.txt"]
def test_check():
complete = TomlFile(fixtures_dir / "complete.toml")
content = complete.read(raw=True)["tool"]["poetry"]
assert Poetry.check(content)
| 32.75
| 87
| 0.666508
|
| true
| true
|
f7094221dcff97c2f472e146693d17b871a4455a
| 5,032
|
py
|
Python
|
SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/barcode_ccs.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 5
|
2022-02-20T07:10:02.000Z
|
2022-03-18T17:47:53.000Z
|
SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/barcode_ccs.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/barcode_ccs.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | null | null | null |
#! python
"""
Modified version of barcode report for use on CCS inputs
"""
from pprint import pformat
import functools
import logging
import json
import os.path as op
import sys
from pbcommand.models import DataStore, FileTypes
from pbcommand.models.report import PlotGroup
from pbcommand.cli import pbparser_runner
from pbcommand.utils import setup_log
from pbcore.io import ConsensusReadSet
from pbreports.report import barcode as barcode_report
from pbreports.report.barcode import (read_inputs, get_barcode_info_parallel,
save_demuxed_dataset_reports)
from pbreports.io.barcode import get_unbarcoded_reads_info
from pbreports.io.specs import load_spec
from pbreports.plot.helper import to_plotgroup
from pbreports.plot.tools import plot_read_lengths_with_cdf
log = logging.getLogger(__name__)
__version__ = "0.2.1"
class Constants(barcode_report.Constants):
TOOL_ID = "pbreports.tasks.barcode_ccs"
TOOL_NAME = "barcode_ccs"
DRIVER_EXE = "python -m pbreports.report.barcode_ccs --resolved-tool-contract"
VERSION = __version__
DOC = __doc__
FILE_TYPE_READS_IN = FileTypes.DS_CCS
SHOW_COLUMNS = [
barcode_report.Constants.C_BIOSAMPLE,
barcode_report.Constants.C_IDX,
barcode_report.Constants.C_BARCODE,
barcode_report.Constants.C_NREADS,
barcode_report.Constants.C_NBASES,
barcode_report.Constants.C_BCQUAL,
barcode_report.Constants.C_RANK
]
SHOW_ATTRIBUTES = [
barcode_report.Constants.A_NBARCODES,
barcode_report.Constants.A_NREADS_BARCODED,
barcode_report.Constants.A_NREADS_UNBARCODED,
barcode_report.Constants.A_MEAN_READS,
barcode_report.Constants.A_MAX_READS,
barcode_report.Constants.A_MIN_READS,
barcode_report.Constants.A_MEAN_RL
]
make_report = functools.partial(barcode_report._make_report_impl,
Constants.SHOW_ATTRIBUTES,
Constants.SHOW_COLUMNS,
Constants.LABEL_NONE)
def run_to_report(ds_bc_file,
barcodes_file,
reads_in_file,
base_dir=None,
datastore_json=None,
nproc=1,
test_mode=False,
min_bq_filter=Constants.MIN_BQ_FILTER):
spec = load_spec("barcode")
inputs = read_inputs(ds_bc_file, barcodes_file, reads_in_file)
read_info, barcoded_zmws, biosamples, subrpt_files, bc_dataset_uuids = get_barcode_info_parallel(
inputs.ds_files,
barcodes_file,
nproc=nproc,
subrpt_output_dir=op.join(base_dir, "sub_reports"),
isoseq_mode=False)
read_info.extend(list(get_unbarcoded_reads_info(
inputs.reads_in, barcoded_zmws)))
if datastore_json is not None:
save_demuxed_dataset_reports(
subrpt_files, base_dir, datastore_json)
else:
barcode_report.write_empty_datastore(datastore_json)
rpt = make_report(biosamples=biosamples,
read_info=read_info,
bc_dataset_uuids=bc_dataset_uuids,
dataset_uuids=inputs.dataset_uuids,
base_dir=base_dir,
use_spec=spec,
test_mode=test_mode,
min_bq_filter=min_bq_filter)
return spec.apply_view(rpt)
def args_runner(args):
log.info("Starting {f} version {v} report generation".format(
f=__file__, v=__version__))
report = run_to_report(args.ds_bc, args.barcodes, args.reads_in,
base_dir=op.dirname(args.report_json),
datastore_json=args.dataset_reports,
nproc=args.nproc,
test_mode=args.test_mode,
min_bq_filter=args.min_bq_filter)
log.info(pformat(report.to_dict()))
report.write_json(args.report_json)
report.tables[0].to_csv(args.report_csv)
return 0
def resolved_tool_contract_runner(rtc):
log.info("Starting {f} version {v} report generation".format(
f=__file__, v=__version__))
report = run_to_report(
ds_bc_file=rtc.task.input_files[0],
barcodes_file=rtc.task.input_files[2],
reads_in_file=rtc.task.input_files[1],
base_dir=op.dirname(rtc.task.output_files[0]),
datastore_json=rtc.task.output_files[2],
nproc=rtc.task.nproc)
log.debug(pformat(report.to_dict()))
report.write_json(rtc.task.output_files[0])
report.tables[0].to_csv(rtc.task.output_files[1])
return 0
def _get_parser():
return barcode_report.get_parser(Constants)
def main(argv=sys.argv):
return pbparser_runner(
argv=argv[1:],
parser=_get_parser(),
args_runner_func=args_runner,
contract_runner_func=resolved_tool_contract_runner,
alog=log,
setup_log_func=setup_log)
if __name__ == "__main__":
sys.exit(main(sys.argv))
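# A sketch of driving run_to_report directly, outside the pbparser_runner CLI.
# Every path below is a hypothetical placeholder (the XML names merely echo
# the dataset FileTypes the tool declares), so the block is wrapped in a
# function rather than executed at import time.
def _example_run():  # illustrative only; not part of the original tool
    report = run_to_report(
        ds_bc_file="demuxed.consensusreadset.xml",   # barcoded CCS dataset (placeholder)
        barcodes_file="barcodes.barcodeset.xml",     # barcode set (placeholder)
        reads_in_file="input.consensusreadset.xml",  # pre-demux CCS input (placeholder)
        base_dir="out",
        datastore_json="out/datastore.json",
        nproc=4)
    report.write_json("out/report.json")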
| 34
| 101
| 0.67031
|
| true
| true
|
f7094232e1c00bad93777f3c61dca4a3fa60846e
| 3,130
|
py
|
Python
|
custom-recipes/amazon-comprehend-nlp-language-detection/recipe.py
|
dataiku/dss-plugin-amazon-comprehend
|
95ce7bc560209f849a55849f8846bd6dbc0ec6f5
|
[
"Apache-2.0"
] | 2
|
2020-05-15T16:29:47.000Z
|
2021-12-10T18:47:31.000Z
|
custom-recipes/amazon-comprehend-nlp-language-detection/recipe.py
|
dataiku/dss-plugin-amazon-comprehend-nlp
|
95ce7bc560209f849a55849f8846bd6dbc0ec6f5
|
[
"Apache-2.0"
] | 3
|
2020-04-23T18:39:51.000Z
|
2020-10-14T16:45:05.000Z
|
custom-recipes/amazon-comprehend-nlp-language-detection/recipe.py
|
dataiku/dss-plugin-amazon-comprehend
|
95ce7bc560209f849a55849f8846bd6dbc0ec6f5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import List, Dict, AnyStr
from retry import retry
from ratelimit import limits, RateLimitException
import dataiku
from dataiku.customrecipe import get_recipe_config, get_input_names_for_role, get_output_names_for_role
from plugin_io_utils import ErrorHandlingEnum, validate_column_input
from dku_io_utils import set_column_description
from amazon_comprehend_api_client import API_EXCEPTIONS, batch_api_response_parser, get_client
from api_parallelizer import api_parallelizer
from amazon_comprehend_api_formatting import LanguageDetectionAPIFormatter
# ==============================================================================
# SETUP
# ==============================================================================
api_configuration_preset = get_recipe_config().get("api_configuration_preset")
api_quota_rate_limit = api_configuration_preset.get("api_quota_rate_limit")
api_quota_period = api_configuration_preset.get("api_quota_period")
parallel_workers = api_configuration_preset.get("parallel_workers")
batch_size = api_configuration_preset.get("batch_size")
text_column = get_recipe_config().get("text_column")
error_handling = ErrorHandlingEnum[get_recipe_config().get("error_handling")]
input_dataset_name = get_input_names_for_role("input_dataset")[0]
input_dataset = dataiku.Dataset(input_dataset_name)
input_schema = input_dataset.read_schema()
input_columns_names = [col["name"] for col in input_schema]
output_dataset_name = get_output_names_for_role("output_dataset")[0]
output_dataset = dataiku.Dataset(output_dataset_name)
validate_column_input(text_column, input_columns_names)
input_df = input_dataset.get_dataframe()
client = get_client(api_configuration_preset)
column_prefix = "lang_detect_api"
batch_kwargs = {
"api_support_batch": True,
"batch_size": batch_size,
"batch_api_response_parser": batch_api_response_parser,
}
# ==============================================================================
# RUN
# ==============================================================================
@retry((RateLimitException, OSError), delay=api_quota_period, tries=5)
@limits(calls=api_quota_rate_limit, period=api_quota_period)
def call_api_language_detection(batch: List[Dict], text_column: AnyStr) -> List[Dict]:
text_list = [str(r.get(text_column, "")).strip() for r in batch]
responses = client.batch_detect_dominant_language(TextList=text_list)
return responses
df = api_parallelizer(
input_df=input_df,
api_call_function=call_api_language_detection,
api_exceptions=API_EXCEPTIONS,
column_prefix=column_prefix,
text_column=text_column,
parallel_workers=parallel_workers,
error_handling=error_handling,
**batch_kwargs
)
api_formatter = LanguageDetectionAPIFormatter(
input_df=input_df, column_prefix=column_prefix, error_handling=error_handling,
)
output_df = api_formatter.format_df(df)
output_dataset.write_with_schema(output_df)
set_column_description(
input_dataset=input_dataset,
output_dataset=output_dataset,
column_description_dict=api_formatter.column_description_dict,
)
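# Stand-alone sketch of the throttling pattern used above: limits() raises
# RateLimitException once the per-period quota is spent, and retry() catches
# it, sleeps `delay` seconds, and calls again. The quota numbers here are
# illustrative, not the plugin's defaults.
@retry(RateLimitException, delay=1, tries=3)
@limits(calls=2, period=1)
def throttled_ping(i):  # e.g. [throttled_ping(i) for i in range(5)] pauses after call 2
    return i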
| 37.710843
| 103
| 0.741853
|
| true
| true
|
f709424b1033001a56abcfb6869bc07c967d7d91
| 720
|
py
|
Python
|
src/examples_in_my_book/general_problems/strings/find_edit_distance.py
|
lucidrohit/Over-100-Exercises-Python-and-Algorithms
|
62345c7d7c9cc2269f240d134189645fc96c3e80
|
[
"MIT"
] | 2
|
2022-01-07T11:46:32.000Z
|
2022-02-24T08:44:31.000Z
|
src/examples_in_my_book/general_problems/strings/find_edit_distance.py
|
lucidrohit/Over-100-Exercises-Python-and-Algorithms
|
62345c7d7c9cc2269f240d134189645fc96c3e80
|
[
"MIT"
] | null | null | null |
src/examples_in_my_book/general_problems/strings/find_edit_distance.py
|
lucidrohit/Over-100-Exercises-Python-and-Algorithms
|
62345c7d7c9cc2269f240d134189645fc96c3e80
|
[
"MIT"
] | 1
|
2021-10-01T15:35:05.000Z
|
2021-10-01T15:35:05.000Z
|
#!/usr/bin/python3
# mari von steinkirch @2013
# steinkirch at gmail
def find_edit_distance(str1, str2):
''' computes the edit distance between two strings '''
m = len(str1)
n = len(str2)
diff = lambda c1, c2: 0 if c1 == c2 else 1
E = [[0] * (n + 1) for i in range(m + 1)]
for i in range(m + 1):
E[i][0] = i
for j in range(1, n + 1):
E[0][j] = j
for i in range(1, m + 1):
for j in range(1, n + 1):
E[i][j] = min(E[i-1][j] + 1, E[i][j-1] + 1, E[i-1][j-1] + diff(str1[i-1], str2[j-1]))
return E[m][n]
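# A few extra spot checks on standard Levenshtein examples make the recurrence
# concrete; sunday -> saturday costs 3 via two insertions ('a', 't') plus one
# substitution ('n' -> 'r'), matching the test below.
assert find_edit_distance('kitten', 'sitting') == 3  # k->s, e->i, insert 'g'
assert find_edit_distance('abc', 'abc') == 0         # identical strings cost nothing
assert find_edit_distance('', 'abc') == 3            # all insertions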
def test_find_edit_distance():
s = 'sunday'
t = 'saturday'
assert(find_edit_distance(s, t) == 3)
print('Tests passed!')
if __name__ == '__main__':
test_find_edit_distance()
| 20.571429
| 88
| 0.575
|
| true
| true
|
f70942b36a139c80f233905649b19f8e01e0b8a4
| 7,712
|
py
|
Python
|
cats_and_dogs_classification.py
|
janewen134/catsdogs
|
051dc0d4bf695ca2db03df6fc3cf758331df4aaa
|
[
"MIT"
] | null | null | null |
cats_and_dogs_classification.py
|
janewen134/catsdogs
|
051dc0d4bf695ca2db03df6fc3cf758331df4aaa
|
[
"MIT"
] | null | null | null |
cats_and_dogs_classification.py
|
janewen134/catsdogs
|
051dc0d4bf695ca2db03df6fc3cf758331df4aaa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # Cats and Dogs Classification
# Data Loading and Exploring
# In[1]:
import os
base_dir = './cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# cat training pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# dog training pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# cat validation pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# dog validation pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
# In[2]:
# view file names
train_cat_fnames = os.listdir(train_cats_dir)
print(train_cat_fnames[:10])
train_dog_fnames = os.listdir(train_dogs_dir)
train_dog_fnames.sort()
print(train_dog_fnames[:10])
# In[3]:
# preview images to know what the dataset is like
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Parameters for our graph; we'll output images in a 4*4 configuration
nrows = 4
ncols = 4
# Index for iterating over images
pic_index = 0
# Set up matplotlib fig, and size it to fit 4*4 pics
fig = plt.gcf()
fig.set_size_inches(ncols*4, nrows*4)
# 8 images for cats and dogs separately
pic_index += 8
next_cat_pix = [os.path.join(train_cats_dir, fname) for fname in train_cat_fnames[pic_index-8:pic_index]]
next_dog_pix = [os.path.join(train_dogs_dir, fname) for fname in train_dog_fnames[pic_index-8:pic_index]]
for i, img_path in enumerate(next_cat_pix + next_dog_pix):
    # Set up subplot; subplot indices start at 1
sp = plt.subplot(nrows, ncols, i+1)
sp.axis('Off')
img = mpimg.imread(img_path)
plt.imshow(img)
plt.show()
# build a small convnet from scratch to get to 72% accuracy
# In[4]:
from tensorflow.keras import layers
from tensorflow.keras import Model
# Our input feature map is 150*150*3: 150*150 for the image pixels,
# and 3 for the three color channels: R, G and B
img_input = layers.Input(shape=(150,150,3))
# First convolution extracts 16 filters that are 3*3
# Convolution is followed by max-pooling layer with a 2*2 window
x = layers.Conv2D(16,3,activation='relu')(img_input)
x = layers.MaxPooling2D(2)(x)
# Second convolution extracts 32 filters that are 3*3
# Convolution is followed by max-pooling layer with a 2*2 window
x = layers.Conv2D(32,3,activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
# Third convolution extracts 64 filters that are 3*3
# Convolution is followed by max-pooling layer with a 2*2 window
x = layers.Conv2D(64,3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
# fully-connected layers: because we are facing a binary classification problem, we will end our network with a sigmoid activation, so that the output of our network will be a single scalar between 0 and 1.
# In[5]:
# Flatten feature map to a 1-dim tensor so we can add fully connected layers
x = layers.Flatten()(x)
# Generate a fully connected layer with ReLU activation and 512 hidden units
x = layers.Dense(512,activation='relu')(x)
# Create output layer with a single node and sigmoid activation
output = layers.Dense(1, activation='sigmoid')(x)
# Create Model
# input = input feature map
# output = output feature map
# connected layer + sigmoid output layer
model = Model(img_input,output)
# Let's summarize the model architecture
# In[6]:
model.summary()
# In[7]:
# use RMSprop instead of stochastic gradient
from tensorflow.keras.optimizers import RMSprop
model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])
# Data Preprocessing
# In[8]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150,150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary'
)
# Flow validation images in batches of 20 using val_datagen generator
validation_generator = val_datagen.flow_from_directory(
validation_dir,
target_size=(150,150),
batch_size=20,
class_mode='binary'
)
# Training
# Train on 2000 images, for 15 epochs and validate on 1000 images
# In[ ]:
history = model.fit_generator(
train_generator,
steps_per_epoch=100, # 2000 images = batch_size * steps
epochs=15,
validation_data=validation_generator,
validation_steps=50, # 1000 images = batch_size * steps
verbose=1
)
# Visualizing Intermediate Representations
# Visualize how an input gets transformed as it goes through the convnet
# In[ ]:
import numpy as np
import random
from tensorflow.keras.preprocessing.image import img_to_array, load_img
# define a new Model that takes an img as input and will output
# intermediate representations for all layers in the previous model after
# the first
successive_outputs = [layer.output for layer in model.layers[1:]]
visualization_model = Model(img_input, successive_outputs)
# prepare a random input img of a cat or dog from the training set
cat_img_files = [os.path.join(train_cats_dir, f) for f in train_cat_fnames]
dog_img_files = [os.path.join(train_dogs_dir, f) for f in train_dog_fnames]
img_path = random.choice(cat_img_files + dog_img_files)
img = load_img(img_path, target_size=(150, 150)) # this is a PIL img
x = img_to_array(img) # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape)
# Rescale by 1/255
x /= 255
# Let's run our image through our network, thus obtaining all
# intermediate representations for this img.
successive_feature_maps = visualization_model.predict(x)
# These are names of the layers
layer_names = [layer.name for layer in model.layers]
# Now let's display our representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
    if len(feature_map.shape) == 4:
        # Just do this for the conv / maxpool layers, not the fully-connected layers
        n_features = feature_map.shape[-1]  # number of features in the feature map
        size = feature_map.shape[1]  # feature map has shape (1, size, size, n_features)
        # Tile each filter's activation side-by-side in one wide grid
        display_grid = np.zeros((size, size * n_features))
        for i in range(n_features):
            # Post-process each channel to make it visually palatable
            x = feature_map[0, :, :, i]
            x -= x.mean()
            x /= (x.std() + 1e-5)
            x *= 64
            x += 128
            x = np.clip(x, 0, 255).astype('uint8')
            display_grid[:, i * size: (i + 1) * size] = x
        # Display the grid, one row per layer
        scale = 20. / n_features
        plt.figure(figsize=(scale * n_features, scale))
        plt.title(layer_name)
        plt.grid(False)
        plt.imshow(display_grid, aspect='auto', cmap='viridis')
# Evaluating Accuracy and Loss for the Model
# plot the training / validation accuracy and loss as collected during training
# In[ ]:
# Retrieve a list of accuracy results on training and validation data
# sets for each training epoch
acc = history.history['acc']
val_acc = history.history['val_acc']
# Retrieve a list of loss results on training and validation data
# sets for each training epoch
loss = history.history['loss']
val_loss = history.history['val_loss']
# Get number of epochs
epochs = range(len(acc))
# Plot training and validation accuracy per epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
# Plot training and validation loss per epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
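# A follow-on sketch (not part of the original exercise): scoring one image
# with the trained model. The sample is simply the first file in the
# validation cats folder; flow_from_directory assigns labels alphabetically,
# so the sigmoid output reads ~0 for cats and ~1 for dogs.
from tensorflow.keras.preprocessing.image import load_img, img_to_array
sample = load_img(os.path.join(validation_cats_dir, os.listdir(validation_cats_dir)[0]),
                  target_size=(150, 150))
batch = img_to_array(sample).reshape((1, 150, 150, 3)) / 255.0
prob = float(model.predict(batch)[0][0])
print('dog' if prob > 0.5 else 'cat', prob)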
# Clean Up
# In[ ]:
import os, signal
os.kill(os.getpid(), signal.SIGKILL)
| 27.741007
| 206
| 0.741701
|
| true
| true
|
f709437e54330c11d20bceb3045c97b073567f65
| 4,211
|
py
|
Python
|
data_preparation/rebuild_limited_train/utils.py
|
dweekly/libri-light
|
2a181b2482e0e3a539bf4d8b387b1838fdbfacac
|
[
"MIT"
] | 246
|
2019-12-19T21:40:48.000Z
|
2022-03-30T12:40:22.000Z
|
data_preparation/rebuild_limited_train/utils.py
|
dweekly/libri-light
|
2a181b2482e0e3a539bf4d8b387b1838fdbfacac
|
[
"MIT"
] | 11
|
2020-01-17T14:45:05.000Z
|
2021-09-20T21:53:27.000Z
|
data_preparation/rebuild_limited_train/utils.py
|
dweekly/libri-light
|
2a181b2482e0e3a539bf4d8b387b1838fdbfacac
|
[
"MIT"
] | 48
|
2019-12-20T16:34:12.000Z
|
2022-03-26T13:14:35.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pathlib
from collections import namedtuple
import torchaudio
import shutil
Speaker = namedtuple('Speaker', ['id', 'gender', 'subset'])
FileRecord = namedtuple(
'FileRecord', ['fname', 'length', 'speaker', 'book', 'text_file'])
def get_speakers(speaker_path):
all_speakers = []
with open(speaker_path) as f:
for line in f:
if line.startswith(';'):
continue
line = line.split('|')
speaker_id, gender, subset = [x.strip() for x in line[0:3]]
speaker_id = int(speaker_id)
assert subset in ['test-clean', 'train-clean-360', 'train-clean-100',
'test-other', 'dev-clean', 'train-other-500', 'dev-other'], subset
speaker = Speaker(id=speaker_id, gender=gender, subset=subset)
all_speakers.append(speaker)
return all_speakers
def get_filelength(fname):
info = torchaudio.info(fname)[0]
return info.length
def traverse_tree(root, ext='flac'):
fnames = pathlib.Path(root).rglob(f"*.{ext}")
fnames = sorted(list(fnames))
lengths = []
for file in fnames:
file = str(file.resolve())
length = get_filelength(file)
lengths.append(length)
return list(zip(fnames, lengths))
def get_speaker_fname(fname):
stemmed = fname.stem
speaker, book, seq = stemmed.split('-')
return int(speaker), int(book)
def full_records(speakers, fname2length, subset_name=None):
all_records = []
speakers = dict((speaker.id, speaker) for speaker in speakers)
for fname, length in fname2length:
speaker, book = get_speaker_fname(fname)
assert speaker in speakers, f'Unknown speaker! {speaker}'
speaker = speakers[speaker]
if subset_name is not None:
assert subset_name == speaker.subset
# hacky
text_file = fname.parent / f'{speaker.id}-{book}.trans.txt'
frecord = FileRecord(speaker=speaker, length=length,
fname=fname, book=book, text_file=text_file)
all_records.append(frecord)
return all_records
def get_histogram(records, lambda_key, lambda_value):
from collections import defaultdict
key_value = defaultdict(int)
for record in records:
key = lambda_key(record)
value = lambda_value(record)
key_value[key] += value
return key_value
def materialize(records, target_dir, tag=None, move=False):
target_dir = pathlib.Path(target_dir)
to_copy = set()
to_move = set()
for record in records:
# outline:
# target_dir / speaker / book / file
if tag is None:
target_book_dir = target_dir / \
str(record.speaker.id) / str(record.book)
else:
target_book_dir = target_dir / tag / \
str(record.speaker.id) / str(record.book)
target_book_dir.mkdir(exist_ok=True, parents=True)
if not move:
to_copy.add((record.fname, target_book_dir / record.fname.name))
else:
to_move.add((record.fname, target_book_dir / record.fname.name))
to_copy.add((record.text_file, target_book_dir / record.text_file.name))
to_copy = sorted(list(to_copy))
for src, dst in to_copy:
shutil.copy(src, dst)
if len(to_move) > 0:
to_move = sorted(list(to_move))
for src, dst in to_move:
shutil.move(src, dst)
def print_stats(records):
def lambda_speaker(r): return r.speaker.id
def lambda_time(r): return r.length / 16000.0
speaker_time = get_histogram(
records, lambda_key=lambda_speaker, lambda_value=lambda_time)
print(f'Unique speakers: {len(speaker_time)}')
times = speaker_time.values()
min_time, max_time, mean_time, total_time = min(
times), max(times), sum(times) / len(times), sum(times)
min_time, max_time, mean_time, total_time = map(
int, [min_time, max_time, mean_time, total_time])
print(
f'Min/Mean/Max/Total, seconds: {min_time}/{mean_time}/{max_time}/{total_time}')
print(f'n_utterances: {len(records)}')
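# End-to-end sketch of how these helpers compose. The 'LibriSpeech/...' paths
# are hypothetical local copies (SPEAKERS.TXT is the standard ';'-commented,
# '|'-separated metadata file the parser above expects), and the '1h' tag is
# an arbitrary example, so the block is a function rather than run on import.
def _example_pipeline():  # illustrative only
    speakers = get_speakers('LibriSpeech/SPEAKERS.TXT')
    fname2length = traverse_tree('LibriSpeech/train-clean-100')
    records = full_records(speakers, fname2length, subset_name='train-clean-100')
    print_stats(records)
    materialize(records, 'limited_train', tag='1h')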
| 30.078571
| 96
| 0.632391
|
| true
| true
|
f70943c6ac3aca9a74aba66e5048de86b69d40c3
| 201
|
py
|
Python
|
code/global_vars.py
|
btaille/sincere
|
dd1c34916ddcdc5ceb2799d64b17e80cdf1a5b31
|
[
"Apache-2.0"
] | 12
|
2020-10-22T13:04:36.000Z
|
2021-12-30T10:34:28.000Z
|
code/global_vars.py
|
btaille/sincere
|
dd1c34916ddcdc5ceb2799d64b17e80cdf1a5b31
|
[
"Apache-2.0"
] | null | null | null |
code/global_vars.py
|
btaille/sincere
|
dd1c34916ddcdc5ceb2799d64b17e80cdf1a5b31
|
[
"Apache-2.0"
] | 2
|
2020-11-27T12:23:22.000Z
|
2021-11-16T09:17:06.000Z
|
import os
PROJECT_DIR = os.path.abspath(os.pardir)
RUN_DIR = os.path.join(PROJECT_DIR, "runs/")
DATA_DIR = os.path.join(PROJECT_DIR, "data/")
EMBEDDINGS_DIR = os.path.join(PROJECT_DIR, "embeddings/")
| 28.714286
| 57
| 0.746269
|
| true
| true
|
f70943e4a0c52d5d4276cd23f2fe2071c7a54270
| 1,185
|
py
|
Python
|
05-1class-func/tagger.py
|
banjin/FluentPython-example
|
268866ef52da4109072088778953d7ed47132001
|
[
"MIT"
] | 5,651
|
2015-01-06T21:58:46.000Z
|
2022-03-31T13:39:07.000Z
|
05-1class-func/tagger.py
|
banjin/FluentPython-example
|
268866ef52da4109072088778953d7ed47132001
|
[
"MIT"
] | 42
|
2016-12-11T19:17:11.000Z
|
2021-11-23T19:41:16.000Z
|
05-1class-func/tagger.py
|
banjin/FluentPython-example
|
268866ef52da4109072088778953d7ed47132001
|
[
"MIT"
] | 2,394
|
2015-01-18T10:57:38.000Z
|
2022-03-31T11:41:12.000Z
|
"""
# BEGIN TAG_DEMO
>>> tag('br') # <1>
'<br />'
>>> tag('p', 'hello') # <2>
'<p>hello</p>'
>>> print(tag('p', 'hello', 'world'))
<p>hello</p>
<p>world</p>
>>> tag('p', 'hello', id=33) # <3>
'<p id="33">hello</p>'
>>> print(tag('p', 'hello', 'world', cls='sidebar')) # <4>
<p class="sidebar">hello</p>
<p class="sidebar">world</p>
>>> tag(content='testing', name="img") # <5>
'<img content="testing" />'
>>> my_tag = {'name': 'img', 'title': 'Sunset Boulevard',
... 'src': 'sunset.jpg', 'cls': 'framed'}
>>> tag(**my_tag) # <6>
'<img class="framed" src="sunset.jpg" title="Sunset Boulevard" />'
# END TAG_DEMO
"""
# BEGIN TAG_FUNC
def tag(name, *content, cls=None, **attrs):
"""Generate one or more HTML tags"""
if cls is not None:
attrs['class'] = cls
if attrs:
attr_str = ''.join(' %s="%s"' % (attr, value)
for attr, value
in sorted(attrs.items()))
else:
attr_str = ''
if content:
return '\n'.join('<%s%s>%s</%s>' %
(name, attr_str, c, name) for c in content)
else:
return '<%s%s />' % (name, attr_str)
# END TAG_FUNC
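# The sorted(attrs.items()) pass makes attribute order deterministic, which is
# what lets the doctest above assert exact output strings; one extra check of
# that design choice (attribute values here are illustrative):
assert tag('img', src='sunset.jpg', alt='view') == '<img alt="view" src="sunset.jpg" />'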
| 26.333333
| 68
| 0.491139
|
| true
| true
|
f7094645f95735c801cc8b9f916dc205d0f15abb
| 470
|
py
|
Python
|
dp/longestPalindromicSubsequence/Solution.py
|
shahbagdadi/py-algo-n-ds
|
ff689534b771ddb4869b001b20a0e21b4896bb0a
|
[
"MIT"
] | null | null | null |
dp/longestPalindromicSubsequence/Solution.py
|
shahbagdadi/py-algo-n-ds
|
ff689534b771ddb4869b001b20a0e21b4896bb0a
|
[
"MIT"
] | null | null | null |
dp/longestPalindromicSubsequence/Solution.py
|
shahbagdadi/py-algo-n-ds
|
ff689534b771ddb4869b001b20a0e21b4896bb0a
|
[
"MIT"
] | null | null | null |
from functools import lru_cache
class Solution:
def longestPalindromeSubseq(self, s: str) -> int:
@lru_cache(None)
def helper(b,e):
print(b,e)
if b > e : return 0
if b == e : return 1
if s[b] == s[e] :
return helper(b+1,e-1) + 2
return max(helper(b+1,e), helper(b,e-1))
return helper(0,len(s)-1)
s = Solution()
ans = s.longestPalindromeSubseq('bcbbd')
print(ans)
| 26.111111
| 53
| 0.52766
|
from functools import lru_cache
class Solution:
def longestPalindromeSubseq(self, s: str) -> int:
@lru_cache(None)
def helper(b,e):
print(b,e)
if b > e : return 0
if b == e : return 1
if s[b] == s[e] :
return helper(b+1,e-1) + 2
return max(helper(b+1,e), helper(b,e-1))
return helper(0,len(s)-1)
s = Solution()
ans = s.longestPalindromeSubseq('bcbbd')
print(ans)
| true
| true
|
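Aside: the memoized recursion above (minus its debug print) runs in O(n^2) time and space; an equivalent bottom-up interval-DP sketch that avoids recursion and lru_cache entirely:

def longest_palindrome_subseq(s: str) -> int:
    # dp[i][j] = length of the longest palindromic subsequence of s[i..j]
    n = len(s)
    dp = [[0] * n for _ in range(n)]
    for i in range(n - 1, -1, -1):
        dp[i][i] = 1
        for j in range(i + 1, n):
            if s[i] == s[j]:
                dp[i][j] = dp[i + 1][j - 1] + 2
            else:
                dp[i][j] = max(dp[i + 1][j], dp[i][j - 1])
    return dp[0][n - 1] if n else 0

print(longest_palindrome_subseq('bcbbd'))  # 3 ('bcb' or 'bbb')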
f70946ff7a42e2971d93f7c6e7bd44b2b8487ee5
| 3,501
|
py
|
Python
|
run_with_submitit.py
|
haozy008/detr_transformer
|
f2cca52a1ea97a31c9497451714373bb691589e9
|
[
"Apache-2.0"
] | 22
|
2020-09-20T15:08:57.000Z
|
2022-03-27T14:06:09.000Z
|
run_with_submitit.py
|
haozy008/detr_transformer
|
f2cca52a1ea97a31c9497451714373bb691589e9
|
[
"Apache-2.0"
] | 4
|
2020-12-16T15:52:13.000Z
|
2021-08-14T02:40:07.000Z
|
run_with_submitit.py
|
haozy008/detr_transformer
|
f2cca52a1ea97a31c9497451714373bb691589e9
|
[
"Apache-2.0"
] | 7
|
2020-08-24T03:12:55.000Z
|
2022-03-27T14:06:34.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def parse_args():
detection_parser = detection.get_args_parser()
parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=60, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path("/checkpoint/{}/experiments".format(user))
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / "{}_init".format(uuid.uuid4().hex)
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as detection
self._setup_gpu_args()
detection.main(self.args)
def checkpoint(self):
import os
import submitit
from pathlib import Path
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print("Process group: {} tasks, rank: {}".format(job_env.num_tasks,job_env.global_rank))
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
# cluster setup is defined by environment variables
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
)
executor.update_parameters(name="detr")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
| 31.258929
| 102
| 0.677235
|
import argparse
import os
import uuid
from pathlib import Path
import main as detection
import submitit
def parse_args():
detection_parser = detection.get_args_parser()
parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=60, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path("/checkpoint/{}/experiments".format(user))
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / "{}_init".format(uuid.uuid4().hex)
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as detection
self._setup_gpu_args()
detection.main(self.args)
def checkpoint(self):
import os
import submitit
from pathlib import Path
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print("Process group: {} tasks, rank: {}".format(job_env.num_tasks,job_env.global_rank))
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
# cluster setup is defined by environment variables
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
)
executor.update_parameters(name="detr")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
| true
| true
|
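Aside: a minimal sketch of the submitit submission pattern used above, runnable without a SLURM cluster; cluster="debug" (in-process execution) and the log folder name are assumptions for local testing, not part of the script above.

import submitit

def add(a: int, b: int) -> int:
    return a + b

# "debug" runs the job in-process, which is handy for checking the plumbing;
# on a real cluster, drop cluster="debug" and set SLURM parameters instead.
executor = submitit.AutoExecutor(folder="submitit_logs", cluster="debug")
executor.update_parameters(timeout_min=5)
job = executor.submit(add, 2, 3)
print(job.job_id, job.result())  # result() blocks until the job finishes -> 5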
f70948eb04aae6c332dfeb082bf8b190c4d271e1
| 11,461
|
py
|
Python
|
homeassistant/components/zha/entity.py
|
mib1185/core
|
b17d4ac65cde9a27ff6032d70b148792e5eba8df
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/zha/entity.py
|
mib1185/core
|
b17d4ac65cde9a27ff6032d70b148792e5eba8df
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/zha/entity.py
|
mib1185/core
|
b17d4ac65cde9a27ff6032d70b148792e5eba8df
|
[
"Apache-2.0"
] | null | null | null |
"""Entity for Zigbee Home Automation."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
import functools
import logging
from typing import TYPE_CHECKING, Any
from homeassistant.const import ATTR_NAME
from homeassistant.core import CALLBACK_TYPE, Event, callback
from homeassistant.helpers import entity
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
from .core.const import (
ATTR_MANUFACTURER,
ATTR_MODEL,
DATA_ZHA,
DATA_ZHA_BRIDGE_ID,
DOMAIN,
SIGNAL_GROUP_ENTITY_REMOVED,
SIGNAL_GROUP_MEMBERSHIP_CHANGE,
SIGNAL_REMOVE,
)
from .core.helpers import LogMixin
if TYPE_CHECKING:
from .core.channels.base import ZigbeeChannel
from .core.device import ZHADevice
_LOGGER = logging.getLogger(__name__)
ENTITY_SUFFIX = "entity_suffix"
UPDATE_GROUP_FROM_CHILD_DELAY = 0.5
class BaseZhaEntity(LogMixin, entity.Entity):
"""A base class for ZHA entities."""
unique_id_suffix: str | None = None
def __init__(self, unique_id: str, zha_device: ZHADevice, **kwargs: Any) -> None:
"""Init ZHA entity."""
self._name: str = ""
self._force_update: bool = False
self._should_poll: bool = False
self._unique_id: str = unique_id
if self.unique_id_suffix:
self._unique_id += f"-{self.unique_id_suffix}"
self._state: Any = None
self._extra_state_attributes: dict[str, Any] = {}
self._zha_device = zha_device
self._unsubs: list[Callable[[], None]] = []
self.remove_future: asyncio.Future[Any] = asyncio.Future()
@property
def name(self) -> str:
"""Return Entity's default name."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def zha_device(self) -> ZHADevice:
"""Return the zha device this entity is attached to."""
return self._zha_device
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return device specific state attributes."""
return self._extra_state_attributes
@property
def force_update(self) -> bool:
"""Force update this entity."""
return self._force_update
@property
def should_poll(self) -> bool:
"""Poll state from device."""
return self._should_poll
@property
def device_info(self) -> entity.DeviceInfo:
"""Return a device description for device registry."""
zha_device_info = self._zha_device.device_info
ieee = zha_device_info["ieee"]
return entity.DeviceInfo(
connections={(CONNECTION_ZIGBEE, ieee)},
identifiers={(DOMAIN, ieee)},
manufacturer=zha_device_info[ATTR_MANUFACTURER],
model=zha_device_info[ATTR_MODEL],
name=zha_device_info[ATTR_NAME],
via_device=(DOMAIN, self.hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID]),
)
@callback
def async_state_changed(self) -> None:
"""Entity state changed."""
self.async_write_ha_state()
@callback
def async_update_state_attribute(self, key: str, value: Any) -> None:
"""Update a single device state attribute."""
self._extra_state_attributes.update({key: value})
self.async_write_ha_state()
@callback
def async_set_state(self, attr_id: int, attr_name: str, value: Any) -> None:
"""Set the entity state."""
async def async_will_remove_from_hass(self) -> None:
"""Disconnect entity object when removed."""
for unsub in self._unsubs[:]:
unsub()
self._unsubs.remove(unsub)
@callback
def async_accept_signal(
self,
channel: ZigbeeChannel | None,
signal: str,
func: Callable[..., Any],
signal_override=False,
):
"""Accept a signal from a channel."""
unsub = None
if signal_override:
unsub = async_dispatcher_connect(self.hass, signal, func)
else:
assert channel
unsub = async_dispatcher_connect(
self.hass, f"{channel.unique_id}_{signal}", func
)
self._unsubs.append(unsub)
def log(self, level: int, msg: str, *args, **kwargs):
"""Log a message."""
msg = f"%s: {msg}"
args = (self.entity_id,) + args
_LOGGER.log(level, msg, *args, **kwargs)
class ZhaEntity(BaseZhaEntity, RestoreEntity):
"""A base class for non group ZHA entities."""
def __init_subclass__(cls, id_suffix: str | None = None, **kwargs) -> None:
"""Initialize subclass.
        :param id_suffix: suffix to add to the unique_id of the entity. Used when
        multiple entities use the same channel/cluster id.
"""
super().__init_subclass__(**kwargs)
if id_suffix:
cls.unique_id_suffix = id_suffix
def __init__(
self,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs: Any,
) -> None:
"""Init ZHA entity."""
super().__init__(unique_id, zha_device, **kwargs)
ieeetail = "".join([f"{o:02x}" for o in zha_device.ieee[:4]])
ch_names = ", ".join(sorted(ch.name for ch in channels))
self._name: str = f"{zha_device.name} {ieeetail} {ch_names}"
if self.unique_id_suffix:
self._name += f" {self.unique_id_suffix}"
self.cluster_channels: dict[str, ZigbeeChannel] = {}
for channel in channels:
self.cluster_channels[channel.name] = channel
@classmethod
def create_entity(
cls,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs,
) -> ZhaEntity | None:
"""Entity Factory.
Return entity if it is a supported configuration, otherwise return None
"""
return cls(unique_id, zha_device, channels, **kwargs)
@property
def available(self) -> bool:
"""Return entity availability."""
return self._zha_device.available
async def async_added_to_hass(self) -> None:
"""Run when about to be added to hass."""
self.remove_future = asyncio.Future()
self.async_accept_signal(
None,
f"{SIGNAL_REMOVE}_{self.zha_device.ieee}",
functools.partial(self.async_remove, force_remove=True),
signal_override=True,
)
if last_state := await self.async_get_last_state():
self.async_restore_last_state(last_state)
self.async_accept_signal(
None,
f"{self.zha_device.available_signal}_entity",
self.async_state_changed,
signal_override=True,
)
self._zha_device.gateway.register_entity_reference(
self._zha_device.ieee,
self.entity_id,
self._zha_device,
self.cluster_channels,
self.device_info,
self.remove_future,
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect entity object when removed."""
await super().async_will_remove_from_hass()
self.zha_device.gateway.remove_entity_reference(self)
self.remove_future.set_result(True)
@callback
def async_restore_last_state(self, last_state) -> None:
"""Restore previous state."""
async def async_update(self) -> None:
"""Retrieve latest state."""
tasks = [
channel.async_update()
for channel in self.cluster_channels.values()
if hasattr(channel, "async_update")
]
if tasks:
await asyncio.gather(*tasks)
class ZhaGroupEntity(BaseZhaEntity):
"""A base class for ZHA group entities."""
def __init__(
self, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> None:
"""Initialize a light group."""
super().__init__(unique_id, zha_device, **kwargs)
self._available = False
self._group = zha_device.gateway.groups.get(group_id)
self._name = f"{self._group.name}_zha_group_0x{group_id:04x}"
self._group_id: int = group_id
self._entity_ids: list[str] = entity_ids
self._async_unsub_state_changed: CALLBACK_TYPE | None = None
self._handled_group_membership = False
self._change_listener_debouncer: Debouncer | None = None
@property
def available(self) -> bool:
"""Return entity availability."""
return self._available
@classmethod
def create_entity(
cls, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> ZhaGroupEntity | None:
"""Group Entity Factory.
Return entity if it is a supported configuration, otherwise return None
"""
return cls(entity_ids, unique_id, group_id, zha_device, **kwargs)
async def _handle_group_membership_changed(self):
"""Handle group membership changed."""
# Make sure we don't call remove twice as members are removed
if self._handled_group_membership:
return
self._handled_group_membership = True
await self.async_remove(force_remove=True)
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
await super().async_added_to_hass()
await self.async_update()
self.async_accept_signal(
None,
f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{self._group_id:04x}",
self._handle_group_membership_changed,
signal_override=True,
)
if self._change_listener_debouncer is None:
self._change_listener_debouncer = Debouncer(
self.hass,
_LOGGER,
cooldown=UPDATE_GROUP_FROM_CHILD_DELAY,
immediate=False,
function=functools.partial(self.async_update_ha_state, True),
)
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self._entity_ids, self.async_state_changed_listener
)
def send_removed_signal():
async_dispatcher_send(
self.hass, SIGNAL_GROUP_ENTITY_REMOVED, self._group_id
)
self.async_on_remove(send_removed_signal)
@callback
def async_state_changed_listener(self, event: Event):
"""Handle child updates."""
# Delay to ensure that we get updates from all members before updating the group
assert self._change_listener_debouncer
self.hass.create_task(self._change_listener_debouncer.async_call())
async def async_will_remove_from_hass(self) -> None:
"""Handle removal from Home Assistant."""
await super().async_will_remove_from_hass()
if self._async_unsub_state_changed is not None:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self) -> None:
"""Update the state of the group entity."""
| 33.609971
| 88
| 0.639997
|
from __future__ import annotations
import asyncio
from collections.abc import Callable
import functools
import logging
from typing import TYPE_CHECKING, Any
from homeassistant.const import ATTR_NAME
from homeassistant.core import CALLBACK_TYPE, Event, callback
from homeassistant.helpers import entity
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.device_registry import CONNECTION_ZIGBEE
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.restore_state import RestoreEntity
from .core.const import (
ATTR_MANUFACTURER,
ATTR_MODEL,
DATA_ZHA,
DATA_ZHA_BRIDGE_ID,
DOMAIN,
SIGNAL_GROUP_ENTITY_REMOVED,
SIGNAL_GROUP_MEMBERSHIP_CHANGE,
SIGNAL_REMOVE,
)
from .core.helpers import LogMixin
if TYPE_CHECKING:
from .core.channels.base import ZigbeeChannel
from .core.device import ZHADevice
_LOGGER = logging.getLogger(__name__)
ENTITY_SUFFIX = "entity_suffix"
UPDATE_GROUP_FROM_CHILD_DELAY = 0.5
class BaseZhaEntity(LogMixin, entity.Entity):
unique_id_suffix: str | None = None
def __init__(self, unique_id: str, zha_device: ZHADevice, **kwargs: Any) -> None:
self._name: str = ""
self._force_update: bool = False
self._should_poll: bool = False
self._unique_id: str = unique_id
if self.unique_id_suffix:
self._unique_id += f"-{self.unique_id_suffix}"
self._state: Any = None
self._extra_state_attributes: dict[str, Any] = {}
self._zha_device = zha_device
self._unsubs: list[Callable[[], None]] = []
self.remove_future: asyncio.Future[Any] = asyncio.Future()
@property
def name(self) -> str:
return self._name
@property
def unique_id(self) -> str:
return self._unique_id
@property
def zha_device(self) -> ZHADevice:
return self._zha_device
@property
def extra_state_attributes(self) -> dict[str, Any]:
return self._extra_state_attributes
@property
def force_update(self) -> bool:
return self._force_update
@property
def should_poll(self) -> bool:
return self._should_poll
@property
def device_info(self) -> entity.DeviceInfo:
zha_device_info = self._zha_device.device_info
ieee = zha_device_info["ieee"]
return entity.DeviceInfo(
connections={(CONNECTION_ZIGBEE, ieee)},
identifiers={(DOMAIN, ieee)},
manufacturer=zha_device_info[ATTR_MANUFACTURER],
model=zha_device_info[ATTR_MODEL],
name=zha_device_info[ATTR_NAME],
via_device=(DOMAIN, self.hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID]),
)
@callback
def async_state_changed(self) -> None:
self.async_write_ha_state()
@callback
def async_update_state_attribute(self, key: str, value: Any) -> None:
self._extra_state_attributes.update({key: value})
self.async_write_ha_state()
@callback
def async_set_state(self, attr_id: int, attr_name: str, value: Any) -> None:
async def async_will_remove_from_hass(self) -> None:
for unsub in self._unsubs[:]:
unsub()
self._unsubs.remove(unsub)
@callback
def async_accept_signal(
self,
channel: ZigbeeChannel | None,
signal: str,
func: Callable[..., Any],
signal_override=False,
):
unsub = None
if signal_override:
unsub = async_dispatcher_connect(self.hass, signal, func)
else:
assert channel
unsub = async_dispatcher_connect(
self.hass, f"{channel.unique_id}_{signal}", func
)
self._unsubs.append(unsub)
def log(self, level: int, msg: str, *args, **kwargs):
msg = f"%s: {msg}"
args = (self.entity_id,) + args
_LOGGER.log(level, msg, *args, **kwargs)
class ZhaEntity(BaseZhaEntity, RestoreEntity):
def __init_subclass__(cls, id_suffix: str | None = None, **kwargs) -> None:
super().__init_subclass__(**kwargs)
if id_suffix:
cls.unique_id_suffix = id_suffix
def __init__(
self,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs: Any,
) -> None:
super().__init__(unique_id, zha_device, **kwargs)
ieeetail = "".join([f"{o:02x}" for o in zha_device.ieee[:4]])
ch_names = ", ".join(sorted(ch.name for ch in channels))
self._name: str = f"{zha_device.name} {ieeetail} {ch_names}"
if self.unique_id_suffix:
self._name += f" {self.unique_id_suffix}"
self.cluster_channels: dict[str, ZigbeeChannel] = {}
for channel in channels:
self.cluster_channels[channel.name] = channel
@classmethod
def create_entity(
cls,
unique_id: str,
zha_device: ZHADevice,
channels: list[ZigbeeChannel],
**kwargs,
) -> ZhaEntity | None:
return cls(unique_id, zha_device, channels, **kwargs)
@property
def available(self) -> bool:
return self._zha_device.available
async def async_added_to_hass(self) -> None:
self.remove_future = asyncio.Future()
self.async_accept_signal(
None,
f"{SIGNAL_REMOVE}_{self.zha_device.ieee}",
functools.partial(self.async_remove, force_remove=True),
signal_override=True,
)
if last_state := await self.async_get_last_state():
self.async_restore_last_state(last_state)
self.async_accept_signal(
None,
f"{self.zha_device.available_signal}_entity",
self.async_state_changed,
signal_override=True,
)
self._zha_device.gateway.register_entity_reference(
self._zha_device.ieee,
self.entity_id,
self._zha_device,
self.cluster_channels,
self.device_info,
self.remove_future,
)
async def async_will_remove_from_hass(self) -> None:
await super().async_will_remove_from_hass()
self.zha_device.gateway.remove_entity_reference(self)
self.remove_future.set_result(True)
@callback
def async_restore_last_state(self, last_state) -> None:
async def async_update(self) -> None:
tasks = [
channel.async_update()
for channel in self.cluster_channels.values()
if hasattr(channel, "async_update")
]
if tasks:
await asyncio.gather(*tasks)
class ZhaGroupEntity(BaseZhaEntity):
def __init__(
self, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> None:
super().__init__(unique_id, zha_device, **kwargs)
self._available = False
self._group = zha_device.gateway.groups.get(group_id)
self._name = f"{self._group.name}_zha_group_0x{group_id:04x}"
self._group_id: int = group_id
self._entity_ids: list[str] = entity_ids
self._async_unsub_state_changed: CALLBACK_TYPE | None = None
self._handled_group_membership = False
self._change_listener_debouncer: Debouncer | None = None
@property
def available(self) -> bool:
return self._available
@classmethod
def create_entity(
cls, entity_ids: list[str], unique_id: str, group_id: int, zha_device, **kwargs
) -> ZhaGroupEntity | None:
return cls(entity_ids, unique_id, group_id, zha_device, **kwargs)
async def _handle_group_membership_changed(self):
if self._handled_group_membership:
return
self._handled_group_membership = True
await self.async_remove(force_remove=True)
async def async_added_to_hass(self) -> None:
await super().async_added_to_hass()
await self.async_update()
self.async_accept_signal(
None,
f"{SIGNAL_GROUP_MEMBERSHIP_CHANGE}_0x{self._group_id:04x}",
self._handle_group_membership_changed,
signal_override=True,
)
if self._change_listener_debouncer is None:
self._change_listener_debouncer = Debouncer(
self.hass,
_LOGGER,
cooldown=UPDATE_GROUP_FROM_CHILD_DELAY,
immediate=False,
function=functools.partial(self.async_update_ha_state, True),
)
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self._entity_ids, self.async_state_changed_listener
)
def send_removed_signal():
async_dispatcher_send(
self.hass, SIGNAL_GROUP_ENTITY_REMOVED, self._group_id
)
self.async_on_remove(send_removed_signal)
@callback
def async_state_changed_listener(self, event: Event):
# Delay to ensure that we get updates from all members before updating the group
assert self._change_listener_debouncer
self.hass.create_task(self._change_listener_debouncer.async_call())
async def async_will_remove_from_hass(self) -> None:
await super().async_will_remove_from_hass()
if self._async_unsub_state_changed is not None:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self) -> None:
| true
| true
|
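Aside: the id_suffix hook above relies on Python's __init_subclass__ class-keyword mechanism; a minimal standalone sketch of the same pattern (the names here are illustrative, not part of the ZHA code):

class Base:
    unique_id_suffix = None

    def __init_subclass__(cls, id_suffix=None, **kwargs):
        # Class keyword arguments arrive here when a subclass is defined.
        super().__init_subclass__(**kwargs)
        if id_suffix:
            cls.unique_id_suffix = id_suffix

class PowerSensor(Base, id_suffix="power"):
    pass

print(PowerSensor.unique_id_suffix)  # "power"
print(Base.unique_id_suffix)         # None -- the hook only touches the subclass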
f70949c899c61f3c27374377db66bdcead46f9ca
| 1,408
|
py
|
Python
|
exmail/client/api/department.py
|
ni-ning/exmail-sdk
|
8ffefd70c46c4df755e0ab25559aeb0bb4e9b194
|
[
"MIT"
] | 5
|
2019-11-14T03:23:06.000Z
|
2021-12-23T02:24:15.000Z
|
exmail/client/api/department.py
|
ni-ning/exmail-sdk
|
8ffefd70c46c4df755e0ab25559aeb0bb4e9b194
|
[
"MIT"
] | null | null | null |
exmail/client/api/department.py
|
ni-ning/exmail-sdk
|
8ffefd70c46c4df755e0ab25559aeb0bb4e9b194
|
[
"MIT"
] | 1
|
2021-01-11T08:33:00.000Z
|
2021-01-11T08:33:00.000Z
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals
from exmail.client.api.base import EmailBaseAPI
class Department(EmailBaseAPI):
def create(self, department_data):
        '''
        Create a department
        :param department_data: data required to create the department
        :return:
        '''
return self._post(
'/department/create',
data=department_data
)
def update(self, department_data):
        '''
        Update a department
        :param department_data: data required to update the department
        :return:
        '''
return self._post(
'/department/update',
data=department_data
)
def delete(self, _id):
        '''
        Delete a department
        :param _id: department id (note: the root department cannot be deleted, nor can a department that still contains sub-departments or members)
        :return:
        '''
return self._get(
'/department/delete',
{'id': _id}
)
def list(self, _id=1):
"""
获取部门列表
:param _id: 父部门id(如果不传,默认部门为根部门,根部门ID为1)
:return: 部门列表数据。以部门的order字段从小到大排列
"""
return self._get(
'/department/list',
{'id': _id}
)
def search(self, name, fuzzy=False):
        '''
        Search for departments
        :param name: name of the department to search for; must be valid
        :param fuzzy: whether to use fuzzy matching
        :return:
        '''
return self._post(
'/department/search',
data={'name': name, 'fuzzy': int(fuzzy)}
)
| 20.405797
| 56
| 0.497159
|
from __future__ import absolute_import, unicode_literals
from exmail.client.api.base import EmailBaseAPI
class Department(EmailBaseAPI):
def create(self, department_data):
return self._post(
'/department/create',
data=department_data
)
def update(self, department_data):
return self._post(
'/department/update',
data=department_data
)
def delete(self, _id):
return self._get(
'/department/delete',
{'id': _id}
)
def list(self, _id=1):
return self._get(
'/department/list',
{'id': _id}
)
def search(self, name, fuzzy=False):
return self._post(
'/department/search',
data={'name': name, 'fuzzy': int(fuzzy)}
)
| true
| true
|
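Aside: a standalone sketch of what each Department call sends over the wire, with the HTTP layer stubbed out; StubBase below is illustrative only (the real EmailBaseAPI handles auth and transport), and only the method bodies mirror the code above.

class StubBase:
    def _get(self, url, params=None):
        return ('GET', url, params)

    def _post(self, url, data=None):
        return ('POST', url, data)

class Department(StubBase):
    def list(self, _id=1):
        return self._get('/department/list', {'id': _id})

    def search(self, name, fuzzy=False):
        return self._post('/department/search', data={'name': name, 'fuzzy': int(fuzzy)})

print(Department().list())                             # ('GET', '/department/list', {'id': 1})
print(Department().search('Engineering', fuzzy=True))  # fuzzy is serialized as 1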
f7094a287be0868ea1021a5b882a2a214996f97e
| 41,174
|
py
|
Python
|
src/transformers/modeling_albert.py
|
dom-s/transformers
|
66ef8faf6ae805aeb4e71075d4da6eab7be3bc26
|
[
"Apache-2.0"
] | 1
|
2020-03-06T02:41:33.000Z
|
2020-03-06T02:41:33.000Z
|
src/transformers/modeling_albert.py
|
BlueSkyBubble/transformers
|
83446a88d902661fab12bf8c37a1aa2845cdca5f
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/modeling_albert.py
|
BlueSkyBubble/transformers
|
83446a88d902661fab12bf8c37a1aa2845cdca5f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model. """
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.configuration_albert import AlbertConfig
from transformers.modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer
from transformers.modeling_utils import PreTrainedModel
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
logger = logging.getLogger(__name__)
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-pytorch_model.bin",
"albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-pytorch_model.bin",
"albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-pytorch_model.bin",
"albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-pytorch_model.bin",
"albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-pytorch_model.bin",
"albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-pytorch_model.bin",
"albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-pytorch_model.bin",
"albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-pytorch_model.bin",
}
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
print(name)
for name, array in zip(names, arrays):
original_name = name
# If saved from the TF HUB module
name = name.replace("module/", "")
# Renaming and simplifying
name = name.replace("ffn_1", "ffn")
name = name.replace("bert/", "albert/")
name = name.replace("attention_1", "attention")
name = name.replace("transform/", "")
name = name.replace("LayerNorm_1", "full_layer_layer_norm")
name = name.replace("LayerNorm", "attention/LayerNorm")
name = name.replace("transformer/", "")
# The feed forward layer had an 'intermediate' step which has been abstracted away
name = name.replace("intermediate/dense/", "")
name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
# ALBERT attention was split between self and output which have been abstracted away
name = name.replace("/output/", "/")
name = name.replace("/self/", "/")
# The pooler is a linear layer
name = name.replace("pooler/dense", "pooler")
# The classifier was simplified to predictions from cls/predictions
name = name.replace("cls/predictions", "predictions")
name = name.replace("predictions/attention", "predictions")
# Naming was changed to be more explicit
name = name.replace("embeddings/attention", "embeddings")
name = name.replace("inner_group_", "albert_layers/")
name = name.replace("group_", "albert_layer_groups/")
# Classifier
if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
name = "classifier/" + name
# No ALBERT model currently handles the next sentence prediction task
if "seq_relationship" in name:
continue
name = name.split("/")
# Ignore the gradients applied by the LAMB/ADAM optimizers.
if "adam_m" in name or "adam_v" in name or "global_step" in name:
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {} from {}".format(name, original_name))
pointer.data = torch.from_numpy(array)
return model
class AlbertEmbeddings(BertEmbeddings):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
class AlbertAttention(BertSelfAttention):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.num_attention_heads, self.attention_head_size)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
# Update hyper params and store pruned heads
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_ids, attention_mask=None, head_mask=None):
mixed_query_layer = self.query(input_ids)
mixed_key_layer = self.key(input_ids)
mixed_value_layer = self.value(input_ids)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
# Should find a better way to do this
w = (
self.dense.weight.t()
.view(self.num_attention_heads, self.attention_head_size, self.hidden_size)
.to(context_layer.dtype)
)
b = self.dense.bias.to(context_layer.dtype)
projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b
projected_context_layer_dropout = self.dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if self.output_attentions else (layernormed_context_layer,)
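    # Aside (editor's sketch, not part of the original file): the einsum above,
    # "bfnd,ndh->bfh", merges the (heads, head_size) axes and applies the dense
    # weight in one step; it is equivalent to flattening the heads and using a
    # plain matmul. A hypothetical shape check with small dimensions:
    #   ctx = torch.randn(2, 4, 8, 16)               # (batch, seq, heads, head_size)
    #   w = torch.randn(8, 16, 128)                  # (heads, head_size, hidden)
    #   out = torch.einsum("bfnd,ndh->bfh", ctx, w)
    #   ref = ctx.reshape(2, 4, 8 * 16) @ w.reshape(8 * 16, 128)
    #   assert torch.allclose(out, ref, atol=1e-4)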
class AlbertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = AlbertAttention(config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_states, attention_mask=None, head_mask=None):
attention_output = self.attention(hidden_states, attention_mask, head_mask)
ffn_output = self.ffn(attention_output[0])
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
return (hidden_states,) + attention_output[1:] # add attentions if we output them
class AlbertLayerGroup(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(self, hidden_states, attention_mask=None, head_mask=None):
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index])
hidden_states = layer_output[0]
if self.output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if self.output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if self.output_attentions:
outputs = outputs + (layer_attentions,)
return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
class AlbertTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
def forward(self, hidden_states, attention_mask=None, head_mask=None):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_attentions = ()
if self.output_hidden_states:
all_hidden_states = (hidden_states,)
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
)
hidden_states = layer_group_output[0]
if self.output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class AlbertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = AlbertConfig
pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "albert"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
ALBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.AlbertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
config_class = AlbertConfig
pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "albert"
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        ALBERT has a different architecture in that its layers are shared across groups, which in turn have inner groups.
If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there
is a total of 4 different layers.
These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer,
while [2,3] correspond to the two inner groups of the second hidden layer.
        Any layer with an index other than [0,1,2,3] will result in an error.
See base class PreTrainedModel for more information about head pruning
"""
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
                This output is usually *not* a good summary
                of the semantic content of the input; you're often better off averaging or pooling
                the sequence of hidden-states for the whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Example::
from transformers import AlbertModel, AlbertTokenizer
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertModel.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))
outputs = (sequence_output, pooled_output) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs
class AlbertMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.embedding_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
prediction_scores = hidden_states + self.bias
return prediction_scores
@add_start_docstrings(
"Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)
def get_output_embeddings(self):
return self.predictions.decoder
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with
labels in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Example::
from transformers import AlbertTokenizer, AlbertForMaskedLM
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForMaskedLM.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_outputs = outputs[0]
prediction_scores = self.predictions(sequence_outputs)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs
@add_start_docstrings(
"""Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
loss: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
logits ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import AlbertTokenizer, AlbertForSequenceClassification
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForSequenceClassification.from_pretrained('albert-base-v2')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
"""Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
        loss: (`optional`, returned when ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_scores: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
end_scores: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
# The checkpoint albert-base-v2 is not fine-tuned for question answering. Please see the
# examples/run_squad.py example to see how to fine-tune a model to a question answering task.
from transformers import AlbertTokenizer, AlbertForQuestionAnswering
import torch
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_dict = tokenizer.encode_plus(question, text, return_tensors='pt')
start_scores, end_scores = model(**input_dict)
"""
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the label tensors may have gained an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
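# Worked example of the span clamping above (hypothetical sizes): with
# start_logits of shape (batch, 384), ignored_index = 384; a labelled start
# position of 500 lies outside the model inputs, is clamped to 384, and is
# then skipped by CrossEntropyLoss(ignore_index=384), contributing no gradient.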
| 46.107503
| 148
| 0.670253
|
import logging
import math
import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.configuration_albert import AlbertConfig
from transformers.modeling_bert import ACT2FN, BertEmbeddings, BertSelfAttention, prune_linear_layer
from transformers.modeling_utils import PreTrainedModel
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
logger = logging.getLogger(__name__)
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
"albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-pytorch_model.bin",
"albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-pytorch_model.bin",
"albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-pytorch_model.bin",
"albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-pytorch_model.bin",
"albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-pytorch_model.bin",
"albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-pytorch_model.bin",
"albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-pytorch_model.bin",
"albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-pytorch_model.bin",
}
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
print(name)
for name, array in zip(names, arrays):
original_name = name
name = name.replace("module/", "")
name = name.replace("ffn_1", "ffn")
name = name.replace("bert/", "albert/")
name = name.replace("attention_1", "attention")
name = name.replace("transform/", "")
name = name.replace("LayerNorm_1", "full_layer_layer_norm")
name = name.replace("LayerNorm", "attention/LayerNorm")
name = name.replace("transformer/", "")
name = name.replace("intermediate/dense/", "")
name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
name = name.replace("/output/", "/")
name = name.replace("/self/", "/")
name = name.replace("pooler/dense", "pooler")
name = name.replace("cls/predictions", "predictions")
name = name.replace("predictions/attention", "predictions")
name = name.replace("embeddings/attention", "embeddings")
name = name.replace("inner_group_", "albert_layers/")
name = name.replace("group_", "albert_layer_groups/")
if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
name = "classifier/" + name
if "seq_relationship" in name:
continue
name = name.split("/")
if "adam_m" in name or "adam_v" in name or "global_step" in name:
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {} from {}".format(name, original_name))
pointer.data = torch.from_numpy(array)
return model
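# Worked example of the renaming chain above, derived mechanically from the
# replace() calls: a TF variable named
#   bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel
# ends up as
#   albert/encoder/albert_layer_groups/0/albert_layers/0/attention/query/kernel
# and, because the leaf is "kernel", the array is transposed before being
# assigned to the corresponding module's `weight`.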
class AlbertEmbeddings(BertEmbeddings):
def __init__(self, config):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
class AlbertAttention(BertSelfAttention):
def __init__(self, config):
super().__init__(config)
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.num_attention_heads, self.attention_head_size)
        heads = set(heads) - self.pruned_heads
        for head in heads:
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
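    # Worked example for prune_heads above (hypothetical sizes): with
    # num_attention_heads=4 and attention_head_size=2, pruning heads={1} zeroes
    # row 1 of the 4x2 mask, so the kept flat indices are [0, 1, 4, 5, 6, 7];
    # query/key/value keep those six output rows, dense keeps those six input
    # columns, and num_attention_heads drops to 3 (all_head_size 8 -> 6).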
def forward(self, input_ids, attention_mask=None, head_mask=None):
mixed_query_layer = self.query(input_ids)
mixed_key_layer = self.key(input_ids)
mixed_value_layer = self.value(input_ids)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
w = (
self.dense.weight.t()
.view(self.num_attention_heads, self.attention_head_size, self.hidden_size)
.to(context_layer.dtype)
)
b = self.dense.bias.to(context_layer.dtype)
projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b
projected_context_layer_dropout = self.dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if self.output_attentions else (layernormed_context_layer,)
class AlbertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = AlbertAttention(config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_states, attention_mask=None, head_mask=None):
attention_output = self.attention(hidden_states, attention_mask, head_mask)
ffn_output = self.ffn(attention_output[0])
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
return (hidden_states,) + attention_output[1:]
class AlbertLayerGroup(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(self, hidden_states, attention_mask=None, head_mask=None):
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index])
hidden_states = layer_output[0]
if self.output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if self.output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if self.output_attentions:
outputs = outputs + (layer_attentions,)
return outputs
class AlbertTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
def forward(self, hidden_states, attention_mask=None, head_mask=None):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_attentions = ()
if self.output_hidden_states:
all_hidden_states = (hidden_states,)
for i in range(self.config.num_hidden_layers):
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
)
hidden_states = layer_group_output[0]
if self.output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs
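# Worked example of the layer-to-group mapping above: with
# num_hidden_layers=12 and num_hidden_groups=1 (the usual ALBERT setting),
# layers_per_group=12 and group_idx=0 for every i, so the single shared
# AlbertLayerGroup is applied 12 times; with num_hidden_groups=3 instead,
# layers 0-3 use group 0, layers 4-7 group 1, and layers 8-11 group 2.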
class AlbertPreTrainedModel(PreTrainedModel):
config_class = AlbertConfig
pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "albert"
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
ALBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.AlbertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
config_class = AlbertConfig
pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "albert"
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                ) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0]))
outputs = (sequence_output, pooled_output) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs
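# Worked example of the additive attention mask above: attention_mask=[1, 1, 0]
# becomes (1.0 - m) * -10000.0 = [0, 0, -10000], which is added to the raw
# attention scores so the padded position receives ~zero weight after softmax.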
class AlbertMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.embedding_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
prediction_scores = hidden_states + self.bias
return prediction_scores
@add_start_docstrings(
"Albert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
self._tie_or_clone_weights(self.predictions.decoder, self.albert.embeddings.word_embeddings)
def get_output_embeddings(self):
return self.predictions.decoder
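    # Note on tie_weights above: _tie_or_clone_weights shares the
    # word-embedding weight with the decoder, so (outside TorchScript, where
    # the weight is cloned instead) model.predictions.decoder.weight is
    # model.albert.embeddings.word_embeddings.weight holds after construction.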
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
):
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_outputs = outputs[0]
prediction_scores = self.predictions(sequence_outputs)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs
@add_start_docstrings(
"""Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
"""Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the label tensors may have gained an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
| true
| true
|
f7094bfea756491c4f21b1c24a374dfa1e15b117
| 1,134
|
py
|
Python
|
logreader.py
|
trim-uec/notify
|
c763d87d8654ce46366e9d5bf4d30191fc0db5bf
|
[
"MIT"
] | null | null | null |
logreader.py
|
trim-uec/notify
|
c763d87d8654ce46366e9d5bf4d30191fc0db5bf
|
[
"MIT"
] | null | null | null |
logreader.py
|
trim-uec/notify
|
c763d87d8654ce46366e9d5bf4d30191fc0db5bf
|
[
"MIT"
] | null | null | null |
import datetime as dt
import os
# Log-file diff checker.
# Takes a filename and returns the diff on every update() call.
# Keeps the time at which the last update was run.
# Does not measure the total time an update takes; that is left to the layer above.
# Assumes the watched file grows only by having lines appended to it.
class LogReader:
# filename, last_update, tail_ix
def __init__(self, filename):
self.filename = filename
self.last_update = dt.datetime.now()
        assert os.path.isfile(filename), 'The specified file does not exist'
with open(self.filename, 'r') as f:
lines = f.readlines()
        self.tail_ix = len(lines) # number of lines at the tail (1-based) = read from this line on the next update
        # TODO it would be better to be able to assert the file's existence (exists via import os?)
        # TODO is this the right way to log? wouldn't using a View class be better?
        # self.__printlog('Initialized successfully. Watching ' + self.filename + '.')
    # Sync: updates last_update, fetches the diff, and updates tail_ix. Returns the diff (a list of strings), or an empty list if there is none.
def added_lines(self):
self.last_update = dt.datetime.now()
with open(self.filename, 'r') as f:
all_lines = f.readlines()
added_lines = all_lines[self.tail_ix:]
self.tail_ix = len(all_lines)
return added_lines
# def __printlog(self, mes):
# print('['+str(dt.datetime.now())[:-7]+']', mes)
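# A minimal usage sketch, assuming a file named 'app.log' exists and is being
# appended to by another process ('app.log' is a hypothetical name):
if __name__ == '__main__':
    import time
    reader = LogReader('app.log')
    for _ in range(3):
        time.sleep(1.0)  # poll once per second
        for line in reader.added_lines():
            print(line.rstrip())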
| 31.5
| 71
| 0.65873
|
import datetime as dt
import os
class LogReader:
def __init__(self, filename):
self.filename = filename
self.last_update = dt.datetime.now()
        assert os.path.isfile(filename), 'The specified file does not exist'
with open(self.filename, 'r') as f:
lines = f.readlines()
self.tail_ix = len(lines)
def added_lines(self):
self.last_update = dt.datetime.now()
with open(self.filename, 'r') as f:
all_lines = f.readlines()
added_lines = all_lines[self.tail_ix:]
self.tail_ix = len(all_lines)
return added_lines
| true
| true
|
f7094c34c8b66e96e77d0858e274b7b1eede1bf8
| 594
|
py
|
Python
|
demo/blog/migrations/0002_auto_20160420_2154.py
|
andrewebdev/django-ostinato
|
2c435dea23319be6e9011e7381afca2b4092b5a2
|
[
"MIT"
] | 5
|
2015-01-28T09:56:48.000Z
|
2020-05-22T21:07:30.000Z
|
demo/blog/migrations/0002_auto_20160420_2154.py
|
andrewebdev/django-ostinato
|
2c435dea23319be6e9011e7381afca2b4092b5a2
|
[
"MIT"
] | 18
|
2015-02-03T15:37:22.000Z
|
2020-06-05T16:41:15.000Z
|
demo/blog/migrations/0002_auto_20160420_2154.py
|
andrewebdev/django-ostinato
|
2c435dea23319be6e9011e7381afca2b4092b5a2
|
[
"MIT"
] | 2
|
2015-02-23T19:34:59.000Z
|
2017-01-22T02:10:12.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-20 21:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='state',
field=models.CharField(choices=[(b'review', b'Review'), (b'archived', b'Archived'), (b'private', b'Private'), (b'published', b'Published')], default=b'private', max_length=20, verbose_name='State'),
),
]
| 28.285714
| 210
| 0.619529
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='state',
field=models.CharField(choices=[(b'review', b'Review'), (b'archived', b'Archived'), (b'private', b'Private'), (b'published', b'Published')], default=b'private', max_length=20, verbose_name='State'),
),
]
| true
| true
|
f7094d7d2b07f9b1f3da84df7d2308db1dff55eb
| 1,474
|
py
|
Python
|
benchmark/experimental_vectors.py
|
guyang3532/text
|
e2fc987ff6a002018040cffac5e0d61c3d0b06c6
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/experimental_vectors.py
|
guyang3532/text
|
e2fc987ff6a002018040cffac5e0d61c3d0b06c6
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/experimental_vectors.py
|
guyang3532/text
|
e2fc987ff6a002018040cffac5e0d61c3d0b06c6
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import torch
from torchtext.experimental.datasets import AG_NEWS
from torchtext.experimental.vectors import FastText as FastTextExperimental
from torchtext.vocab import FastText
def benchmark_experimental_vectors():
def _run_benchmark_lookup(tokens, vector):
t0 = time.monotonic()
for token in tokens:
vector[token]
print("Lookup time:", time.monotonic() - t0)
train, = AG_NEWS(data_select='train')
vocab = train.get_vocab()
tokens = []
for (label, text) in train:
for id in text.tolist():
tokens.append(vocab.itos[id])
# existing FastText construction
print("Existing FastText - Not Jit Mode")
t0 = time.monotonic()
fast_text = FastText()
print("Construction time:", time.monotonic() - t0)
_run_benchmark_lookup(tokens, fast_text)
# experimental FastText construction
print("FastText Experimental")
t0 = time.monotonic()
fast_text_experimental = FastTextExperimental(validate_file=False)
print("Construction time:", time.monotonic() - t0)
# not jit lookup
print("FastText Experimental - Not Jit Mode")
_run_benchmark_lookup(tokens, fast_text_experimental)
# jit lookup
print("FastText Experimental - Jit Mode")
jit_fast_text_experimental = torch.jit.script(fast_text_experimental)
_run_benchmark_lookup(tokens, jit_fast_text_experimental)
if __name__ == "__main__":
benchmark_experimental_vectors()
| 30.708333
| 75
| 0.713026
|
import time
import torch
from torchtext.experimental.datasets import AG_NEWS
from torchtext.experimental.vectors import FastText as FastTextExperimental
from torchtext.vocab import FastText
def benchmark_experimental_vectors():
def _run_benchmark_lookup(tokens, vector):
t0 = time.monotonic()
for token in tokens:
vector[token]
print("Lookup time:", time.monotonic() - t0)
train, = AG_NEWS(data_select='train')
vocab = train.get_vocab()
tokens = []
for (label, text) in train:
for id in text.tolist():
tokens.append(vocab.itos[id])
print("Existing FastText - Not Jit Mode")
t0 = time.monotonic()
fast_text = FastText()
print("Construction time:", time.monotonic() - t0)
_run_benchmark_lookup(tokens, fast_text)
print("FastText Experimental")
t0 = time.monotonic()
fast_text_experimental = FastTextExperimental(validate_file=False)
print("Construction time:", time.monotonic() - t0)
print("FastText Experimental - Not Jit Mode")
_run_benchmark_lookup(tokens, fast_text_experimental)
print("FastText Experimental - Jit Mode")
jit_fast_text_experimental = torch.jit.script(fast_text_experimental)
_run_benchmark_lookup(tokens, jit_fast_text_experimental)
if __name__ == "__main__":
benchmark_experimental_vectors()
| true
| true
|
f7094f7293ea2a022771f9ba3ca7252c82e92c07
| 6,012
|
py
|
Python
|
tacker/db/migration/cli.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | 1
|
2020-10-13T05:20:38.000Z
|
2020-10-13T05:20:38.000Z
|
tacker/db/migration/cli.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
tacker/db/migration/cli.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
from tacker.db.migration.models import head # noqa
from tacker.db.migration import purge_tables
HEAD_FILENAME = 'HEAD'
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine')),
]
CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_db_opts, 'database')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
validate_head_file(config)
def do_upgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision
if CONF.command.delta:
revision = '+%s' % str(CONF.command.delta)
else:
revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
update_head_file(config)
def validate_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
if (os.path.isfile(head_path) and
open(head_path).read().strip() == script.get_current_head()):
return
else:
alembic_util.err(_('HEAD file does not match migration timeline head'))
def update_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
with open(head_path, 'w+') as f:
f.write(script.get_current_head())
def purge_deleted(config, cmd):
"""Remove database records that have been previously soft deleted."""
purge_tables.purge_deleted(config.tacker_config,
CONF.command.resource,
CONF.command.age,
CONF.command.granularity)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
parser = subparsers.add_parser('upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
parser = subparsers.add_parser('purge_deleted')
parser.set_defaults(func=purge_deleted)
# positional parameter
parser.add_argument(
'resource',
choices=['all', 'events', 'vnf', 'vnfd', 'vims'],
help=_('Resource name for which deleted entries are to be purged.'))
# optional parameter, can be skipped. default='90'
parser.add_argument('-a', '--age', nargs='?', default='90',
                        help=_('How long to preserve deleted data, '
'defaults to 90'))
# optional parameter, can be skipped. default='days'
parser.add_argument(
'-g', '--granularity', default='days',
choices=['days', 'hours', 'minutes', 'seconds'],
help=_('Granularity to use for age argument, defaults to days.'))
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'tacker.db.migration:alembic_migrations')
# attach the Tacker conf to the Alembic conf
config.tacker_config = CONF
CONF()
# TODO(gongysh) enable logging
CONF.command.func(config, CONF.command.name)
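# Hedged usage sketch: assuming this module is wired up as the usual
# `tacker-db-manage` console script (an assumption about the packaging),
# typical invocations would look like:
#   tacker-db-manage --config-file /etc/tacker/tacker.conf upgrade head
#   tacker-db-manage revision -m "add new table" --autogenerate
#   tacker-db-manage purge_deleted events --age 30 -g days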
| 33.775281
| 79
| 0.66018
|
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import script as alembic_script
from alembic import util as alembic_util
from oslo_config import cfg
from tacker.db.migration.models import head
from tacker.db.migration import purge_tables
HEAD_FILENAME = 'HEAD'
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
secret=True,
help=_('URL to database')),
cfg.StrOpt('engine',
default='',
help=_('Database engine')),
]
CONF = cfg.ConfigOpts()
CONF.register_cli_opts(_db_opts, 'database')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
validate_head_file(config)
def do_upgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision
if CONF.command.delta:
revision = '+%s' % str(CONF.command.delta)
else:
revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
update_head_file(config)
def validate_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
if (os.path.isfile(head_path) and
open(head_path).read().strip() == script.get_current_head()):
return
else:
alembic_util.err(_('HEAD file does not match migration timeline head'))
def update_head_file(config):
script = alembic_script.ScriptDirectory.from_config(config)
if len(script.get_heads()) > 1:
alembic_util.err(_('Timeline branches unable to generate timeline'))
head_path = os.path.join(script.versions, HEAD_FILENAME)
with open(head_path, 'w+') as f:
f.write(script.get_current_head())
def purge_deleted(config, cmd):
purge_tables.purge_deleted(config.tacker_config,
CONF.command.resource,
CONF.command.age,
CONF.command.granularity)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
parser = subparsers.add_parser('upgrade')
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
parser = subparsers.add_parser('purge_deleted')
parser.set_defaults(func=purge_deleted)
parser.add_argument(
'resource',
choices=['all', 'events', 'vnf', 'vnfd', 'vims'],
help=_('Resource name for which deleted entries are to be purged.'))
parser.add_argument('-a', '--age', nargs='?', default='90',
                        help=_('How long to preserve deleted data, '
'defaults to 90'))
parser.add_argument(
'-g', '--granularity', default='days',
choices=['days', 'hours', 'minutes', 'seconds'],
help=_('Granularity to use for age argument, defaults to days.'))
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini')
)
config.set_main_option('script_location',
'tacker.db.migration:alembic_migrations')
config.tacker_config = CONF
CONF()
CONF.command.func(config, CONF.command.name)
| true
| true
|
f7095169f139557a45e67599ce006c31ba8e3471
| 157
|
py
|
Python
|
python/pip_package/__init__.py
|
kanishkg/lab
|
a9a3b5c38ad160ffd2e77a3af3e13c6e66eed457
|
[
"CC-BY-4.0"
] | 16
|
2019-02-27T22:37:25.000Z
|
2021-11-08T12:39:33.000Z
|
python/pip_package/__init__.py
|
kanishkg/lab
|
a9a3b5c38ad160ffd2e77a3af3e13c6e66eed457
|
[
"CC-BY-4.0"
] | null | null | null |
python/pip_package/__init__.py
|
kanishkg/lab
|
a9a3b5c38ad160ffd2e77a3af3e13c6e66eed457
|
[
"CC-BY-4.0"
] | 7
|
2019-05-28T06:26:26.000Z
|
2021-11-27T16:33:55.000Z
|
"""Loads deepmind_lab.so."""
import imp
import pkg_resources
imp.load_dynamic(__name__, pkg_resources.resource_filename(
__name__, 'deepmind_lab.so'))
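# `imp` is deprecated (and removed in Python 3.12); a rough, untested sketch
# of the same load on newer interpreters, using importlib, would be:
# from importlib.machinery import ExtensionFileLoader
# ExtensionFileLoader(__name__, pkg_resources.resource_filename(
#     __name__, 'deepmind_lab.so')).load_module(__name__)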
| 19.625
| 59
| 0.77707
|
import imp
import pkg_resources
imp.load_dynamic(__name__, pkg_resources.resource_filename(
__name__, 'deepmind_lab.so'))
| true
| true
|
f70951dfc2d5ffb763356ce62e348b225daba092
| 468
|
py
|
Python
|
sigmapiweb/apps/PartyListV2/migrations/0003_partyguest_hasprepartyaccess.py
|
Jacobvs/sigmapi-web
|
ca8d5a5294385fe5f4634c483a1278df904e2f85
|
[
"MIT"
] | null | null | null |
sigmapiweb/apps/PartyListV2/migrations/0003_partyguest_hasprepartyaccess.py
|
Jacobvs/sigmapi-web
|
ca8d5a5294385fe5f4634c483a1278df904e2f85
|
[
"MIT"
] | null | null | null |
sigmapiweb/apps/PartyListV2/migrations/0003_partyguest_hasprepartyaccess.py
|
Jacobvs/sigmapi-web
|
ca8d5a5294385fe5f4634c483a1278df904e2f85
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-05 02:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("PartyListV2", "0002_restrictedguest"),
]
operations = [
migrations.AddField(
model_name="partyguest",
name="hasPrepartyAccess",
field=models.BooleanField(default=False),
),
]
| 22.285714
| 53
| 0.630342
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("PartyListV2", "0002_restrictedguest"),
]
operations = [
migrations.AddField(
model_name="partyguest",
name="hasPrepartyAccess",
field=models.BooleanField(default=False),
),
]
| true
| true
|
f70953a63bbc0820445c238e63d7232ef154338a
| 3,503
|
py
|
Python
|
python/phonenumbers/data/region_HR.py
|
Eyepea/python-phonenumbers
|
0336e191fda80a21ed5c19d5e029ad8c70f620ee
|
[
"Apache-2.0"
] | 2
|
2019-03-30T02:12:54.000Z
|
2021-03-08T18:59:40.000Z
|
python/phonenumbers/data/region_HR.py
|
Eyepea/python-phonenumbers
|
0336e191fda80a21ed5c19d5e029ad8c70f620ee
|
[
"Apache-2.0"
] | null | null | null |
python/phonenumbers/data/region_HR.py
|
Eyepea/python-phonenumbers
|
0336e191fda80a21ed5c19d5e029ad8c70f620ee
|
[
"Apache-2.0"
] | 1
|
2018-11-10T03:47:34.000Z
|
2018-11-10T03:47:34.000Z
|
"""Auto-generated file, do not edit by hand. HR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HR = PhoneMetadata(id='HR', country_code=385, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-7]\\d{5,8}|[89]\\d{6,11}', possible_number_pattern='\\d{6,12}'),
fixed_line=PhoneNumberDesc(national_number_pattern='1\\d{7}|(?:2[0-3]|3[1-5]|4[02-47-9]|5[1-3])\\d{6}', possible_number_pattern='\\d{6,8}', example_number='12345678'),
mobile=PhoneNumberDesc(national_number_pattern='9[1257-9]\\d{6,10}', possible_number_pattern='\\d{8,12}', example_number='912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{4,7}', possible_number_pattern='\\d{7,10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='6(?:[09]\\d{7}|[145]\\d{4,7})', possible_number_pattern='\\d{6,9}', example_number='611234'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='7[45]\\d{4,7}', possible_number_pattern='\\d{6,9}', example_number='741234567'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='62\\d{6,7}', possible_number_pattern='\\d{8,9}', example_number='62123456'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(1)(\\d{4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(6[09])(\\d{4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[09]'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(62)(\\d{3})(\\d{3,4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['62'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='([2-5]\\d)(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['[2-5]'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3})(\\d{3,4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{4})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3,4})(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3 \\4', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2,3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[145]|7'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3,4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[145]|7'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(80[01])(\\d{2})(\\d{2,3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(80[01])(\\d{3,4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule=u'0\\1')],
mobile_number_portable_region=True)
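# A short usage sketch against this metadata, assuming the public
# `phonenumbers` package API; the national formatting follows the
# (1)(\d{4})(\d{3}) rule above with national prefix '0':
if __name__ == '__main__':
    import phonenumbers
    num = phonenumbers.parse('+38512345678')  # HR fixed line, country code 385
    print(phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.NATIONAL))  # e.g. '01 2345 678'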
| 113
| 171
| 0.68684
|
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HR = PhoneMetadata(id='HR', country_code=385, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-7]\\d{5,8}|[89]\\d{6,11}', possible_number_pattern='\\d{6,12}'),
fixed_line=PhoneNumberDesc(national_number_pattern='1\\d{7}|(?:2[0-3]|3[1-5]|4[02-47-9]|5[1-3])\\d{6}', possible_number_pattern='\\d{6,8}', example_number='12345678'),
mobile=PhoneNumberDesc(national_number_pattern='9[1257-9]\\d{6,10}', possible_number_pattern='\\d{8,12}', example_number='912345678'),
toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{4,7}', possible_number_pattern='\\d{7,10}', example_number='8001234567'),
premium_rate=PhoneNumberDesc(national_number_pattern='6(?:[09]\\d{7}|[145]\\d{4,7})', possible_number_pattern='\\d{6,9}', example_number='611234'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='7[45]\\d{4,7}', possible_number_pattern='\\d{6,9}', example_number='741234567'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='62\\d{6,7}', possible_number_pattern='\\d{8,9}', example_number='62123456'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(1)(\\d{4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(6[09])(\\d{4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[09]'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(62)(\\d{3})(\\d{3,4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['62'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='([2-5]\\d)(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['[2-5]'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3})(\\d{3,4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{4})(\\d{4})', format=u'\\1 \\2 \\3', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(9\\d)(\\d{3,4})(\\d{3})(\\d{3})', format=u'\\1 \\2 \\3 \\4', leading_digits_pattern=['9'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2,3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[145]|7'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3,4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['6[145]|7'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(80[01])(\\d{2})(\\d{2,3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule=u'0\\1'),
NumberFormat(pattern='(80[01])(\\d{3,4})(\\d{3})', format=u'\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule=u'0\\1')],
mobile_number_portable_region=True)
| true
| true
|
f7095508ece920b6fcd73d161319d0dad95e7721
| 3,451
|
py
|
Python
|
misago/misago/legal/admin/views.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | 2
|
2021-03-06T21:06:13.000Z
|
2021-03-09T15:05:12.000Z
|
misago/misago/legal/admin/views.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
misago/misago/legal/admin/views.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
from django.contrib import messages
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ...admin.views import generic
from ..models import Agreement
from .forms import AgreementForm, FilterAgreementsForm
from .utils import disable_agreement, set_agreement_as_active
class AgreementAdmin(generic.AdminBaseMixin):
root_link = "misago:admin:settings:agreements:index"
model = Agreement
form_class = AgreementForm
templates_dir = "misago/admin/agreements"
message_404 = _("Requested agreement does not exist.")
def handle_form(self, form, request, target):
form.save()
if self.message_submit:
messages.success(
request, self.message_submit % {"title": target.get_final_title()}
)
class AgreementsList(AgreementAdmin, generic.ListView):
items_per_page = 30
ordering = [("-id", _("From newest")), ("id", _("From oldest"))]
filter_form = FilterAgreementsForm
selection_label = _("With agreements: 0")
empty_selection_label = _("Select agreements")
mass_actions = [
{
"action": "delete",
"name": _("Delete agreements"),
"confirmation": _("Are you sure you want to delete those agreements?"),
}
]
def get_queryset(self):
qs = super().get_queryset()
return qs.select_related()
def action_delete(self, request, items):
items.delete()
Agreement.objects.invalidate_cache()
messages.success(request, _("Selected agreements have been deleted."))
class NewAgreement(AgreementAdmin, generic.ModelFormView):
message_submit = _('New agreement "%(title)s" has been saved.')
def handle_form(self, form, request, target):
super().handle_form(form, request, target)
form.instance.set_created_by(request.user)
form.instance.save()
Agreement.objects.invalidate_cache()
class EditAgreement(AgreementAdmin, generic.ModelFormView):
message_submit = _('Agreement "%(title)s" has been edited.')
def handle_form(self, form, request, target):
super().handle_form(form, request, target)
form.instance.last_modified_on = timezone.now()
form.instance.set_last_modified_by(request.user)
form.instance.save()
Agreement.objects.invalidate_cache()
class DeleteAgreement(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
target.delete()
Agreement.objects.invalidate_cache()
message = _('Agreement "%(title)s" has been deleted.')
messages.success(request, message % {"title": target.get_final_title()})
class SetAgreementAsActive(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
set_agreement_as_active(target, commit=True)
message = _('Agreement "%(title)s" has been set as active for type "%(type)s".')
targets_names = {
"title": target.get_final_title(),
"type": target.get_type_display(),
}
messages.success(request, message % targets_names)
class DisableAgreement(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
disable_agreement(target, commit=True)
message = _('Agreement "%(title)s" has been disabled.') % {
"title": target.get_final_title()
}
messages.success(request, message)
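# A minimal sketch of the message pattern used by handle_form above: a template
# with a %(title)s placeholder is interpolated with the agreement's final title.
# Plain-Python stand-in; the real strings go through Django's gettext_lazy.
message_submit = 'New agreement "%(title)s" has been saved.'
print(message_submit % {"title": "Privacy policy"})  # -> New agreement "Privacy policy" has been saved.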
| 33.833333
| 88
| 0.676615
|
from django.contrib import messages
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ...admin.views import generic
from ..models import Agreement
from .forms import AgreementForm, FilterAgreementsForm
from .utils import disable_agreement, set_agreement_as_active
class AgreementAdmin(generic.AdminBaseMixin):
root_link = "misago:admin:settings:agreements:index"
model = Agreement
form_class = AgreementForm
templates_dir = "misago/admin/agreements"
message_404 = _("Requested agreement does not exist.")
def handle_form(self, form, request, target):
form.save()
if self.message_submit:
messages.success(
request, self.message_submit % {"title": target.get_final_title()}
)
class AgreementsList(AgreementAdmin, generic.ListView):
items_per_page = 30
ordering = [("-id", _("From newest")), ("id", _("From oldest"))]
filter_form = FilterAgreementsForm
selection_label = _("With agreements: 0")
empty_selection_label = _("Select agreements")
mass_actions = [
{
"action": "delete",
"name": _("Delete agreements"),
"confirmation": _("Are you sure you want to delete those agreements?"),
}
]
def get_queryset(self):
qs = super().get_queryset()
return qs.select_related()
def action_delete(self, request, items):
items.delete()
Agreement.objects.invalidate_cache()
messages.success(request, _("Selected agreements have been deleted."))
class NewAgreement(AgreementAdmin, generic.ModelFormView):
message_submit = _('New agreement "%(title)s" has been saved.')
def handle_form(self, form, request, target):
super().handle_form(form, request, target)
form.instance.set_created_by(request.user)
form.instance.save()
Agreement.objects.invalidate_cache()
class EditAgreement(AgreementAdmin, generic.ModelFormView):
message_submit = _('Agreement "%(title)s" has been edited.')
def handle_form(self, form, request, target):
super().handle_form(form, request, target)
form.instance.last_modified_on = timezone.now()
form.instance.set_last_modified_by(request.user)
form.instance.save()
Agreement.objects.invalidate_cache()
class DeleteAgreement(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
target.delete()
Agreement.objects.invalidate_cache()
message = _('Agreement "%(title)s" has been deleted.')
messages.success(request, message % {"title": target.get_final_title()})
class SetAgreementAsActive(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
set_agreement_as_active(target, commit=True)
message = _('Agreement "%(title)s" has been set as active for type "%(type)s".')
targets_names = {
"title": target.get_final_title(),
"type": target.get_type_display(),
}
messages.success(request, message % targets_names)
class DisableAgreement(AgreementAdmin, generic.ButtonView):
def button_action(self, request, target):
disable_agreement(target, commit=True)
message = _('Agreement "%(title)s" has been disabled.') % {
"title": target.get_final_title()
}
messages.success(request, message)
| true
| true
|
f70955228f7c65d8e4a1e24999a745383cb9b48b
| 397
|
py
|
Python
|
apiwrapper/wsgi.py
|
drinkingjava/coingeckoapiwrapper
|
e95fcc3a80504f60aada4875f896ed5195ab351d
|
[
"MIT"
] | null | null | null |
apiwrapper/wsgi.py
|
drinkingjava/coingeckoapiwrapper
|
e95fcc3a80504f60aada4875f896ed5195ab351d
|
[
"MIT"
] | null | null | null |
apiwrapper/wsgi.py
|
drinkingjava/coingeckoapiwrapper
|
e95fcc3a80504f60aada4875f896ed5195ab351d
|
[
"MIT"
] | null | null | null |
"""
WSGI config for apiwrapper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiwrapper.settings')
application = get_wsgi_application()
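# A minimal local smoke test of the WSGI callable above, using only the standard
# library. Assumes the apiwrapper package (and its Django settings) is importable
# from the current working directory.
from wsgiref.simple_server import make_server
from apiwrapper.wsgi import application

make_server("127.0.0.1", 8000, application).serve_forever()  # serves http://127.0.0.1:8000/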
| 23.352941
| 78
| 0.788413
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiwrapper.settings')
application = get_wsgi_application()
| true
| true
|
f709555143c36af3622b8322e5e065810a378a8a
| 132
|
py
|
Python
|
profiles_api/urls.py
|
karthiksar15/UdemyPython1
|
d8b96935629f22df76c41ff6cd7a91b3c99b2b19
|
[
"MIT"
] | null | null | null |
profiles_api/urls.py
|
karthiksar15/UdemyPython1
|
d8b96935629f22df76c41ff6cd7a91b3c99b2b19
|
[
"MIT"
] | null | null | null |
profiles_api/urls.py
|
karthiksar15/UdemyPython1
|
d8b96935629f22df76c41ff6cd7a91b3c99b2b19
|
[
"MIT"
] | null | null | null |
from django.urls import path
from profiles_api import views
urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
]
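# A hypothetical HelloApiView matching the route above -- it is not part of this
# file; this is a standard Django REST Framework APIView sketch for illustration.
from rest_framework.views import APIView
from rest_framework.response import Response

class HelloApiView(APIView):
    """Returns a greeting for GET /hello-view/."""
    def get(self, request, format=None):
        return Response({'message': 'Hello!'})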
| 16.5
| 52
| 0.75
|
from django.urls import path
from profiles_api import views
urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
]
| true
| true
|
f709560f12383812ac7749a0819306cdeb93570b
| 2,529
|
py
|
Python
|
sdks/python/apache_beam/io/external/generate_sequence.py
|
eyal0/beam
|
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
|
[
"Apache-2.0"
] | 35
|
2016-09-22T22:53:14.000Z
|
2020-02-13T15:12:21.000Z
|
sdks/python/apache_beam/io/external/generate_sequence.py
|
eyal0/beam
|
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
|
[
"Apache-2.0"
] | 80
|
2020-01-16T09:55:09.000Z
|
2020-10-03T13:43:07.000Z
|
sdks/python/apache_beam/io/external/generate_sequence.py
|
eyal0/beam
|
9c6922976cc2a5c6a2ef836c1986ff769cda99a5
|
[
"Apache-2.0"
] | 88
|
2016-11-27T02:16:11.000Z
|
2020-02-28T05:10:26.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
class GenerateSequence(ExternalTransform):
"""
An external PTransform which provides a bounded or unbounded stream of
integers.
Note: To use this transform, you need to start the Java expansion service.
Please refer to the portability documentation on how to do that. The
expansion service address has to be provided when instantiating this
transform. During pipeline translation this transform will be replaced by
the Java SDK's GenerateSequence.
If you start Flink's job server, the expansion service will be started on
port 8097. This is also the configured default for this transform. For a
different address, please set the expansion_service parameter.
For more information see:
- https://beam.apache.org/documentation/runners/flink/
- https://beam.apache.org/roadmap/portability/
Note: Runners need to support translating Read operations in order to use
this source. At the moment only the Flink Runner supports this.
Experimental; no backwards compatibility guarantees.
"""
URN = 'beam:external:java:generate_sequence:v1'
def __init__(
self,
start,
stop=None,
elements_per_period=None,
max_read_time=None,
expansion_service=None):
super(GenerateSequence, self).__init__(
self.URN,
ImplicitSchemaPayloadBuilder({
'start': start,
'stop': stop,
'elements_per_period': elements_per_period,
'max_read_time': max_read_time,
}),
expansion_service)
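# A minimal usage sketch, assuming a portability-aware runner (e.g. Flink, per
# the docstring) and a Java expansion service already running on localhost:8097,
# the default mentioned above.
import apache_beam as beam
from apache_beam.io.external.generate_sequence import GenerateSequence

with beam.Pipeline() as p:
    numbers = p | GenerateSequence(start=0, stop=10, expansion_service='localhost:8097')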
| 37.191176
| 78
| 0.740214
|
from __future__ import absolute_import
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
class GenerateSequence(ExternalTransform):
URN = 'beam:external:java:generate_sequence:v1'
def __init__(
self,
start,
stop=None,
elements_per_period=None,
max_read_time=None,
expansion_service=None):
super(GenerateSequence, self).__init__(
self.URN,
ImplicitSchemaPayloadBuilder({
'start': start,
'stop': stop,
'elements_per_period': elements_per_period,
'max_read_time': max_read_time,
}),
expansion_service)
| true
| true
|
f709562b3b66244cdea52b42ea35831e94e2b9a0
| 7,079
|
py
|
Python
|
aiml50/source/setup_pipeline.py
|
nswitanek/ignite-learning-paths-training-aiml
|
018968f413f6c1aa11230c802f785fdaea54e480
|
[
"CC-BY-4.0",
"MIT"
] | 203
|
2019-10-07T10:44:09.000Z
|
2021-11-08T09:21:17.000Z
|
aiml50/source/setup_pipeline.py
|
nswitanek/ignite-learning-paths-training-aiml
|
018968f413f6c1aa11230c802f785fdaea54e480
|
[
"CC-BY-4.0",
"MIT"
] | 53
|
2019-10-08T15:15:04.000Z
|
2020-11-23T16:29:39.000Z
|
aiml50/source/setup_pipeline.py
|
nswitanek/ignite-learning-paths-training-aiml
|
018968f413f6c1aa11230c802f785fdaea54e480
|
[
"CC-BY-4.0",
"MIT"
] | 210
|
2019-10-04T14:41:49.000Z
|
2021-11-04T23:05:22.000Z
|
import azureml
from azureml.core import VERSION
from azureml.core import Workspace, Experiment, Datastore, Environment
from azureml.core.runconfig import RunConfiguration
from azureml.data.datapath import DataPath, DataPathComputeBinding
from azureml.data.data_reference import DataReference
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
from azureml.pipeline.steps import PythonScriptStep, EstimatorStep
from azureml.train.estimator import Estimator
import sys, getopt, os
## Get arguments
def printhelp():
print ('Arguments:')
print (' -d Data Store name')
print (' -p Data Store Path')
print (' -c Compute Target name')
print (' -v Universal Package version (for deployment and inferencing code)')
print (' -s Azure Subscription id')
print (' -a Storage Account name')
print (' -k Storage Account key')
print (' -r Resource Group name')
print (' -w Machine Learning workspace name')
datastorename=''
datastorepath=''
computetarget=''
packageversion=''
workspace_name=''
subscription_id=''
resource_group=''
storage_account=''
storage_account_key=''
try:
print('Arguments: ', sys.argv[1:])
opts, args = getopt.getopt(sys.argv[1:],"d:p:c:v:s:a:k:r:w:")
except getopt.GetoptError:
    printhelp()
    sys.exit(2)
for opt, arg in opts:
if opt == '-h':
        printhelp()
        sys.exit()
elif opt == '-d':
datastorename = arg
elif opt == '-p':
datastorepath = arg
elif opt == '-c':
computetarget = arg
elif opt == '-v':
packageversion = arg
elif opt == '-s':
subscription_id = arg
elif opt == '-a':
storage_account = arg
elif opt == '-k':
storage_account_key = arg
elif opt == '-r':
resource_group = arg
elif opt == '-w':
workspace_name = arg
print("Azure ML SDK Version: ", VERSION)
#### Connect to our workspace ####
##################################
# workspace
ws = Workspace.get( name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group)
# data
ds = Datastore.register_azure_blob_container(workspace=ws,
datastore_name=datastorename,
container_name='seer-container',
account_name=storage_account,
account_key=storage_account_key,
create_if_not_exists=True)
datastore = ws.datastores[datastorename]
# compute target
try:
cpu_cluster = ComputeTarget(workspace=ws, name=computetarget)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
min_nodes=1,
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, computetarget, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
compute = ws.compute_targets[computetarget]
#### Define Pipeline! ####
##########################
# The following will be created and then run:
# 1. Pipeline Parameters
# 2. Data Process Step
# 3. Training Step
# 4. Model Registration Step
# 5. Pipeline registration
# 6. Submit the pipeline for execution
## Pipeline Parameters ##
# We need to tell the Pipeline what it needs to learn to see!
datapath = DataPath(datastore=datastore, path_on_datastore=datastorepath)
data_path_pipeline_param = (PipelineParameter(name="data",
default_value=datapath),
DataPathComputeBinding(mode='mount'))
# Configuration for data prep and training steps #
dataprepEnvironment = Environment.from_pip_requirements('dataprepenv', 'requirements-dataprepandtraining.txt')
dataprepRunConfig = RunConfiguration()
dataprepRunConfig.environment = dataprepEnvironment
## Data Process Step ##
# parse.py file parses the images in our data source #
seer_tfrecords = PipelineData(
"tfrecords_set",
datastore=datastore,
is_directory=True
)
prepStep = PythonScriptStep(
'parse.py',
source_directory='.',
name='Data Preparation',
compute_target=compute,
arguments=["--source_path", data_path_pipeline_param, "--target_path", seer_tfrecords],
runconfig=dataprepRunConfig,
inputs=[data_path_pipeline_param],
outputs=[seer_tfrecords],
allow_reuse=True # Allow reuse of the data prep step
)
## Training Step ##
# train.py does the training based on the processed data #
seer_training = PipelineData(
"train",
datastore=datastore,
is_directory=True
)
train = Estimator(source_directory='.',
compute_target=compute,
entry_script='train.py',
use_gpu=True,
pip_requirements_file='requirements-dataprepandtraining.txt')
trainStep = EstimatorStep(
name='Model Training',
estimator=train,
estimator_entry_script_arguments=["--source_path", seer_tfrecords,
"--target_path", seer_training,
"--epochs", 5, # Consider transfer learning. See line 111 in train.py file.
"--batch", 10,
"--lr", 0.001],
inputs=[seer_tfrecords],
outputs=[seer_training],
compute_target=compute
)
## Register Model Step ##
# Once training is complete, register.py registers the model with AML #
# Configuration for registration step #
registerEnvironment = Environment.from_pip_requirements('registerenv', 'requirements-registration.txt')
registerRunConfig = RunConfiguration()
registerRunConfig.environment = registerEnvironment
seer_model = PipelineData(
"model",
datastore=datastore,
is_directory=True
)
registerStep = PythonScriptStep(
'register.py',
source_directory='.',
name='Model Registration',
arguments=["--source_path", seer_training,
"--target_path", seer_model,
"--universal_package_version", packageversion],
inputs=[seer_training],
outputs=[seer_model],
compute_target=compute,
runconfig=registerRunConfig
)
## Create and publish the Pipeline ##
# We now define and publish the pipeline #
pipeline = Pipeline(workspace=ws, steps=[prepStep, trainStep, registerStep])
published_pipeline = pipeline.publish(
name="Seer Pipeline",
description="Transfer learned image classifier. Uses folders as labels.")
## Submit the pipeline to be run ##
# Finally, we submit the pipeline for execution #
pipeline_run = Experiment(ws, 'seer',).submit(published_pipeline, tags={'universalPackageVersion': packageversion})
print('Run created with ID: ', pipeline_run.id)
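# A hypothetical invocation of this script; every value below is a placeholder,
# matching the getopt spec "d:p:c:v:s:a:k:r:w:" parsed above.
#
#   python setup_pipeline.py -d seerdata -p raw/images -c gpu-cluster \
#       -v 1.0.0 -s <subscription-id> -a <storage-account> -k <storage-key> \
#       -r my-resource-group -w my-aml-workspace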
| 32.925581
| 115
| 0.647973
|
import azureml
from azureml.core import VERSION
from azureml.core import Workspace, Experiment, Datastore, Environment
from azureml.core.runconfig import RunConfiguration
from azureml.data.datapath import DataPath, DataPathComputeBinding
from azureml.data.data_reference import DataReference
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
from azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter
from azureml.pipeline.steps import PythonScriptStep, EstimatorStep
from azureml.train.estimator import Estimator
import sys, getopt, os
def printhelp():
print ('Arguments:')
print (' -d Data Store name')
print (' -p Data Store Path')
print (' -c Compute Target name')
print (' -v Universal Package version (for deployment and inferencing code)')
print (' -s Azure Subscription id')
print (' -a Storage Account name')
print (' -k Storage Account key')
print (' -r Resource Group name')
print (' -w Machine Learning workspace name')
datastorename=''
datastorepath=''
computetarget=''
packageversion=''
workspace_name=''
subscription_id=''
resource_group=''
storage_account=''
storage_account_key=''
try:
print('Arguments: ', sys.argv[1:])
opts, args = getopt.getopt(sys.argv[1:],"d:p:c:v:s:a:k:r:w:")
except getopt.GetoptError:
    printhelp()
    sys.exit(2)
for opt, arg in opts:
if opt == '-h':
        printhelp()
        sys.exit()
elif opt == '-d':
datastorename = arg
elif opt == '-p':
datastorepath = arg
elif opt == '-c':
computetarget = arg
elif opt == '-v':
packageversion = arg
elif opt == '-s':
subscription_id = arg
elif opt == '-a':
storage_account = arg
elif opt == '-k':
storage_account_key = arg
elif opt == '-r':
resource_group = arg
elif opt == '-w':
workspace_name = arg
print("Azure ML SDK Version: ", VERSION)
ws = Workspace.get( name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group)
ds = Datastore.register_azure_blob_container(workspace=ws,
datastore_name=datastorename,
container_name='seer-container',
account_name=storage_account,
account_key=storage_account_key,
create_if_not_exists=True)
datastore = ws.datastores[datastorename]
try:
cpu_cluster = ComputeTarget(workspace=ws, name=computetarget)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',
min_nodes=1,
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, computetarget, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
compute = ws.compute_targets[computetarget]
datapath = DataPath(datastore=datastore, path_on_datastore=datastorepath)
data_path_pipeline_param = (PipelineParameter(name="data",
default_value=datapath),
DataPathComputeBinding(mode='mount'))
dataprepEnvironment = Environment.from_pip_requirements('dataprepenv', 'requirements-dataprepandtraining.txt')
dataprepRunConfig = RunConfiguration()
dataprepRunConfig.environment = dataprepEnvironment
seer_tfrecords = PipelineData(
"tfrecords_set",
datastore=datastore,
is_directory=True
)
prepStep = PythonScriptStep(
'parse.py',
source_directory='.',
name='Data Preparation',
compute_target=compute,
arguments=["--source_path", data_path_pipeline_param, "--target_path", seer_tfrecords],
runconfig=dataprepRunConfig,
inputs=[data_path_pipeline_param],
outputs=[seer_tfrecords],
allow_reuse=True )
seer_training = PipelineData(
"train",
datastore=datastore,
is_directory=True
)
train = Estimator(source_directory='.',
compute_target=compute,
entry_script='train.py',
use_gpu=True,
pip_requirements_file='requirements-dataprepandtraining.txt')
trainStep = EstimatorStep(
name='Model Training',
estimator=train,
estimator_entry_script_arguments=["--source_path", seer_tfrecords,
"--target_path", seer_training,
"--epochs", 5, "--batch", 10,
"--lr", 0.001],
inputs=[seer_tfrecords],
outputs=[seer_training],
compute_target=compute
)
registerEnvironment = Environment.from_pip_requirements('registerenv', 'requirements-registration.txt')
registerRunConfig = RunConfiguration()
registerRunConfig.environment = registerEnvironment
seer_model = PipelineData(
"model",
datastore=datastore,
is_directory=True
)
registerStep = PythonScriptStep(
'register.py',
source_directory='.',
name='Model Registration',
arguments=["--source_path", seer_training,
"--target_path", seer_model,
"--universal_package_version", packageversion],
inputs=[seer_training],
outputs=[seer_model],
compute_target=compute,
runconfig=registerRunConfig
)
pipeline = Pipeline(workspace=ws, steps=[prepStep, trainStep, registerStep])
published_pipeline = pipeline.publish(
name="Seer Pipeline",
description="Transfer learned image classifier. Uses folders as labels.")
pipeline_run = Experiment(ws, 'seer',).submit(published_pipeline, tags={'universalPackageVersion': packageversion})
print('Run created with ID: ', pipeline_run.id)
| true
| true
|
f709570944e58ff82295df04816197313a44f4f1
| 1,286
|
py
|
Python
|
cnns/nnlib/robustness/pni/code/models/nomarlization_layer.py
|
anonymous-user-commits/perturb-net
|
66fc7c4a1234fa34b92bcc85751f0a6e23d80a23
|
[
"MIT"
] | 1
|
2018-03-25T13:19:46.000Z
|
2018-03-25T13:19:46.000Z
|
cnns/nnlib/robustness/pni/code/models/nomarlization_layer.py
|
anonymous-user-commits/perturb-net
|
66fc7c4a1234fa34b92bcc85751f0a6e23d80a23
|
[
"MIT"
] | null | null | null |
cnns/nnlib/robustness/pni/code/models/nomarlization_layer.py
|
anonymous-user-commits/perturb-net
|
66fc7c4a1234fa34b92bcc85751f0a6e23d80a23
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class Normalize_layer(nn.Module):
def __init__(self, mean, std):
super(Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),
requires_grad=False)
def forward(self, input):
return input.sub(self.mean).div(self.std)
class noise_Normalize_layer(nn.Module):
def __init__(self, mean, std, input_noise=False):
super(noise_Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.input_noise = input_noise
self.alpha_i = nn.Parameter(torch.Tensor([0.25]), requires_grad=True)
def forward(self, input):
output = input.sub(self.mean).div(self.std)
input_std = output.std().item()
input_noise = output.clone().normal_(0, input_std)
return output + input_noise * self.alpha_i * self.input_noise
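# A minimal usage sketch: prepend Normalize_layer to a network so raw [0, 1]
# inputs are normalized inside the model. The CIFAR-10-style mean/std values
# below are illustrative assumptions, not values taken from this repository.
import torch
import torch.nn as nn

norm = Normalize_layer(mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616])
model = nn.Sequential(norm, nn.Conv2d(3, 16, kernel_size=3))
out = model(torch.rand(1, 3, 32, 32))  # the (3, 1, 1) mean/std broadcast over (N, C, H, W)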
| 33.842105
| 78
| 0.618196
|
import torch
import torch.nn as nn
class Normalize_layer(nn.Module):
def __init__(self, mean, std):
super(Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),
requires_grad=False)
def forward(self, input):
return input.sub(self.mean).div(self.std)
class noise_Normalize_layer(nn.Module):
def __init__(self, mean, std, input_noise=False):
super(noise_Normalize_layer, self).__init__()
self.mean = nn.Parameter(torch.Tensor(mean).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.std = nn.Parameter(torch.Tensor(std).unsqueeze(1).unsqueeze(1),
requires_grad=False)
self.input_noise = input_noise
self.alpha_i = nn.Parameter(torch.Tensor([0.25]), requires_grad=True)
def forward(self, input):
output = input.sub(self.mean).div(self.std)
input_std = output.std().item()
input_noise = output.clone().normal_(0, input_std)
return output + input_noise * self.alpha_i * self.input_noise
| true
| true
|
f709582419a8e95e38133c411de04161dcccc9b3
| 19,859
|
py
|
Python
|
modeling/backbones/resnet.py
|
yihui-he2020/epipolar-transformers
|
6824f4345b2998500fbacd0f4e30f67f8e3da7b8
|
[
"MIT"
] | 360
|
2020-03-30T07:15:45.000Z
|
2022-03-04T14:08:04.000Z
|
modeling/backbones/resnet.py
|
yihui-he2020/epipolar-transformers
|
6824f4345b2998500fbacd0f4e30f67f8e3da7b8
|
[
"MIT"
] | 30
|
2020-05-12T11:12:20.000Z
|
2021-12-31T05:49:10.000Z
|
modeling/backbones/resnet.py
|
yihui-he2020/epipolar-transformers
|
6824f4345b2998500fbacd0f4e30f67f8e3da7b8
|
[
"MIT"
] | 38
|
2020-05-12T05:33:46.000Z
|
2022-01-25T22:27:45.000Z
|
import logging
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from modeling.layers.epipolar import Epipolar
from modeling import registry
from core import cfg
from .basic_batch import find_tensor_peak_batch
from utils.logger import setup_logger
from utils.model_serialization import load_state_dict
# logger = logging.getLogger(__name__)
logger = setup_logger("resnet", cfg.FOLDER_NAME)
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = norm_layer(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = norm_layer(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.out_channels = 512 * block.expansion
#self.fc = nn.Linear(self.out_channels, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):
if norm_layer is None:
norm_layer = nn.BatchNorm2d
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
#x = self.fc(x)
return x
@registry.BACKBONES.register('R-18')
def resnet18(cfg, **kwargs):
"""Constructs a ResNet-18 model.
Args:
        cfg: config node; ImageNet weights are loaded when cfg.BACKBONE.PRETRAINED is set
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
return model
@registry.BACKBONES.register('R-34')
def resnet34(cfg, **kwargs):
"""Constructs a ResNet-34 model.
Args:
        cfg: config node; ImageNet weights are loaded when cfg.BACKBONE.PRETRAINED is set
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
return model
@registry.BACKBONES.register('R-50')
def resnet50(cfg, **kwargs):
"""Constructs a ResNet-50 model.
Args:
        cfg: config node; ImageNet weights are loaded when cfg.BACKBONE.PRETRAINED is set
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
@registry.BACKBONES.register('R-101')
def resnet101(cfg, **kwargs):
"""Constructs a ResNet-101 model.
Args:
        cfg: config node; ImageNet weights are loaded when cfg.BACKBONE.PRETRAINED is set
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)
return model
@registry.BACKBONES.register('R-152')
def resnet152(cfg, **kwargs):
"""Constructs a ResNet-152 model.
Args:
        cfg: config node; ImageNet weights are loaded when cfg.BACKBONE.PRETRAINED is set
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)
return model
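# A quick smoke test of the plain ResNet backbone defined above (kept as
# comments so the module stays import-safe); the pooled feature dimension is
# 512 * block.expansion, i.e. 2048 for Bottleneck stacks.
#
#   model = ResNet(Bottleneck, [3, 4, 6, 3])    # ResNet-50 topology
#   feat = model(torch.randn(1, 3, 224, 224))   # -> shape (1, 2048)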
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Written by Chunyu Wang (chnuwa@microsoft.com), modified by Yihui He
# ------------------------------------------------------------------------------
class PoseResNet(nn.Module):
def __init__(self, block, layers, cfg, **kwargs):
if cfg.BACKBONE.BN_MOMENTUM < 0:
self.BN_MOMENTUM = None
else:
self.BN_MOMENTUM = cfg.BACKBONE.BN_MOMENTUM
DECONV_WITH_BIAS = False
NUM_DECONV_LAYERS = 3
NUM_DECONV_FILTERS = [256, 256, 256]
NUM_DECONV_KERNELS = [4, 4, 4]
FINAL_CONV_KERNEL = 1 #cfg.POSE_RESNET.FINAL_CONV_KERNEL
self.inplanes = 64
self.deconv_with_bias = DECONV_WITH_BIAS
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=self.BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layers = self._make_deconv_layer(
NUM_DECONV_LAYERS,
NUM_DECONV_FILTERS,
NUM_DECONV_KERNELS,
)
self.final_layer = nn.Conv2d(
in_channels=NUM_DECONV_FILTERS[-1],
out_channels=cfg.KEYPOINT.NUM_PTS,
kernel_size=FINAL_CONV_KERNEL,
stride=1,
padding=1 if FINAL_CONV_KERNEL == 3 else 0
)
if 'epipolarpose' in cfg.BACKBONE.BODY:
if cfg.EPIPOLAR.MERGE == 'both':
self.epipolar_sampler1 = Epipolar()
self.epipolar_sampler = Epipolar()
else:
self.epipolar_sampler = None
self.epipolar_sampler1 = None
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=self.BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
        else:
            raise ValueError('unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers differs from len(num_deconv_filters)'
assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers differs from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=self.BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x, other_inputs=[None, None, None, None, None, None, None]):
batch_size = x.shape[0]
other_features, other_KRT, other_heatmaps, KRT, camera, other_camera, other_img = other_inputs
        features, heatmaps, batch_locs, batch_scos, corr_poss, depths = [], [], [], [], [], []
        sample_locs = None  # defined up front so the return below is valid for any EPIPOLAR.MERGE setting
# 3 x 256 x 256
x = self.conv1(x)
# 128 x 128
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
# 256 x 64 x 64
def getOtherFeat(feat, sampler=None):
# skip feature aggregation for last layer
corr_pos = None
depth = None
if other_features is None:
# normal hourglass
return feat, None, None, None
if 'epipolarpose' in cfg.BACKBONE.BODY:
ret, corr_pos, depth, sample_locs = \
sampler(feat, other_features, KRT, other_KRT, \
camera=camera, other_camera=other_camera)
return ret + feat, corr_pos, depth, sample_locs
if cfg.EPIPOLAR.MERGE == 'early':
feature = x
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)
depths.append(depth)
corr_poss.append(corr_pos)
elif cfg.EPIPOLAR.MERGE == 'both':
feature = x
x, _, _, _ = getOtherFeat(feature, sampler=self.epipolar_sampler)
x = self.layer2(x)
# 512 x 32 × 32
x = self.layer3(x)
# 1024 x 16 × 16
x = self.layer4(x)
# 2048 x 8 x 8
feature = self.deconv_layers(x)
#256 x 64 x 64
if cfg.EPIPOLAR.MERGE == 'late':
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)
depths.append(depth)
corr_poss.append(corr_pos)
elif cfg.EPIPOLAR.MERGE == 'both':
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler1)
depths.append(depth)
corr_poss.append(corr_pos)
else:
x = feature
#20 x 64 x 64
heatmaps.append(self.final_layer(x))
# The location of the current batch
for ibatch in range(batch_size):
batch_location, batch_score = find_tensor_peak_batch(heatmaps[-1][ibatch],
cfg.KEYPOINT.SIGMA,
cfg.BACKBONE.DOWNSAMPLE)
batch_locs.append(batch_location)
batch_scos.append(batch_score)
batch_locs, batch_scos = torch.stack(batch_locs), torch.stack(batch_scos)
if other_features is None:
corr_poss, depths = None, None
else:
corr_poss = corr_poss[-1]
depths = depths[-1]
return feature, heatmaps, batch_locs, batch_scos, corr_poss, depths, sample_locs, None
def init_weights(self, pretrained=None):
if pretrained is not None:
if isinstance(pretrained, str) and os.path.isfile(pretrained):
logger.info('=> loading pretrained model {}'.format(pretrained))
pretrained_state_dict = torch.load(pretrained)
else:
logger.info('=> loading pretrained model from web')
pretrained_state_dict = pretrained
logger.info('=> init deconv weights from normal distribution')
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init final conv weights from normal distribution')
            for name, m in self.final_layer.named_modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
#load_state_dict(self, pretrained_state_dict, prefix='resnet.')
#load_state_dict(self, pretrained_state_dict, prefix='backbone.')
load_state_dict(self, pretrained_state_dict, strict=False, ignored_layers=['final_layer.bias', 'final_layer.weight'], prefix=cfg.WEIGHTS_PREFIX, prefix_replace=cfg.WEIGHTS_PREFIX_REPLACE)
#self.load_state_dict(pretrained_state_dict, strict=False)
else:
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.normal_(m.weight, std=0.001)
# nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
resnet_spec = {'18': (BasicBlock, [2, 2, 2, 2]),
'34': (BasicBlock, [3, 4, 6, 3]),
'50': (Bottleneck, [3, 4, 6, 3]),
'101': (Bottleneck, [3, 4, 23, 3]),
'152': (Bottleneck, [3, 8, 36, 3])}
@registry.BACKBONES.register('poseR-18')
@registry.BACKBONES.register('poseR-34')
@registry.BACKBONES.register('poseR-50')
@registry.BACKBONES.register('poseR-101')
@registry.BACKBONES.register('poseR-152')
@registry.BACKBONES.register('epipolarposeR-18')
@registry.BACKBONES.register('epipolarposeR-34')
@registry.BACKBONES.register('epipolarposeR-50')
@registry.BACKBONES.register('epipolarposeR-101')
@registry.BACKBONES.register('epipolarposeR-152')
def get_pose_net(cfg, **kwargs):
num_layers = cfg.BACKBONE.BODY.split('-')[-1]
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, cfg, **kwargs)
if cfg.BACKBONE.PRETRAINED:
# model.init_weights(cfg.NETWORK.PRETRAINED)
if cfg.BACKBONE.PRETRAINED_WEIGHTS:
model.init_weights(cfg.BACKBONE.PRETRAINED_WEIGHTS)
else:
model.init_weights(model_zoo.load_url(model_urls['resnet'+num_layers]))
return model
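# Illustration of the spec lookup performed by get_pose_net above: the numeric
# suffix of cfg.BACKBONE.BODY (e.g. 'epipolarposeR-50') indexes resnet_spec.
#
#   block_class, layers = resnet_spec['50']    # -> (Bottleneck, [3, 4, 6, 3])
#   model = PoseResNet(block_class, layers, cfg)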
| 38.190385
| 199
| 0.599376
|
import logging
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from modeling.layers.epipolar import Epipolar
from modeling import registry
from core import cfg
from .basic_batch import find_tensor_peak_batch
from utils.logger import setup_logger
from utils.model_serialization import load_state_dict
logger = setup_logger("resnet", cfg.FOLDER_NAME)
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = norm_layer(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = norm_layer(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.out_channels = 512 * block.expansion
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):
if norm_layer is None:
norm_layer = nn.BatchNorm2d
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
@registry.BACKBONES.register('R-18')
def resnet18(cfg, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
return model
@registry.BACKBONES.register('R-34')
def resnet34(cfg, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
return model
@registry.BACKBONES.register('R-50')
def resnet50(cfg, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
@registry.BACKBONES.register('R-101')
def resnet101(cfg, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)
return model
@registry.BACKBONES.register('R-152')
def resnet152(cfg, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if cfg.BACKBONE.PRETRAINED:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)
return model
class PoseResNet(nn.Module):
def __init__(self, block, layers, cfg, **kwargs):
if cfg.BACKBONE.BN_MOMENTUM < 0:
self.BN_MOMENTUM = None
else:
self.BN_MOMENTUM = cfg.BACKBONE.BN_MOMENTUM
DECONV_WITH_BIAS = False
NUM_DECONV_LAYERS = 3
NUM_DECONV_FILTERS = [256, 256, 256]
NUM_DECONV_KERNELS = [4, 4, 4]
        FINAL_CONV_KERNEL = 1
        self.inplanes = 64
self.deconv_with_bias = DECONV_WITH_BIAS
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=self.BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.deconv_layers = self._make_deconv_layer(
NUM_DECONV_LAYERS,
NUM_DECONV_FILTERS,
NUM_DECONV_KERNELS,
)
self.final_layer = nn.Conv2d(
in_channels=NUM_DECONV_FILTERS[-1],
out_channels=cfg.KEYPOINT.NUM_PTS,
kernel_size=FINAL_CONV_KERNEL,
stride=1,
padding=1 if FINAL_CONV_KERNEL == 3 else 0
)
if 'epipolarpose' in cfg.BACKBONE.BODY:
if cfg.EPIPOLAR.MERGE == 'both':
self.epipolar_sampler1 = Epipolar()
self.epipolar_sampler = Epipolar()
else:
self.epipolar_sampler = None
self.epipolar_sampler1 = None
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=self.BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
        else:
            raise ValueError('unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers differs from len(num_deconv_filters)'
assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers differs from len(num_deconv_kernels)'
layers = []
for i in range(num_layers):
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels[i], i)
planes = num_filters[i]
layers.append(
nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias))
layers.append(nn.BatchNorm2d(planes, momentum=self.BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x, other_inputs=[None, None, None, None, None, None, None]):
batch_size = x.shape[0]
other_features, other_KRT, other_heatmaps, KRT, camera, other_camera, other_img = other_inputs
        features, heatmaps, batch_locs, batch_scos, corr_poss, depths = [], [], [], [], [], []
        sample_locs = None  # defined up front so the return below is valid for any EPIPOLAR.MERGE setting
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
def getOtherFeat(feat, sampler=None):
corr_pos = None
depth = None
if other_features is None:
return feat, None, None, None
if 'epipolarpose' in cfg.BACKBONE.BODY:
ret, corr_pos, depth, sample_locs = \
sampler(feat, other_features, KRT, other_KRT, \
camera=camera, other_camera=other_camera)
return ret + feat, corr_pos, depth, sample_locs
if cfg.EPIPOLAR.MERGE == 'early':
feature = x
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)
depths.append(depth)
corr_poss.append(corr_pos)
elif cfg.EPIPOLAR.MERGE == 'both':
feature = x
x, _, _, _ = getOtherFeat(feature, sampler=self.epipolar_sampler)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
feature = self.deconv_layers(x)
if cfg.EPIPOLAR.MERGE == 'late':
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler)
depths.append(depth)
corr_poss.append(corr_pos)
elif cfg.EPIPOLAR.MERGE == 'both':
x, corr_pos, depth, sample_locs = getOtherFeat(feature, sampler=self.epipolar_sampler1)
depths.append(depth)
corr_poss.append(corr_pos)
else:
x = feature
heatmaps.append(self.final_layer(x))
for ibatch in range(batch_size):
batch_location, batch_score = find_tensor_peak_batch(heatmaps[-1][ibatch],
cfg.KEYPOINT.SIGMA,
cfg.BACKBONE.DOWNSAMPLE)
batch_locs.append(batch_location)
batch_scos.append(batch_score)
batch_locs, batch_scos = torch.stack(batch_locs), torch.stack(batch_scos)
if other_features is None:
corr_poss, depths = None, None
else:
corr_poss = corr_poss[-1]
depths = depths[-1]
return feature, heatmaps, batch_locs, batch_scos, corr_poss, depths, sample_locs, None
def init_weights(self, pretrained=None):
if pretrained is not None:
if isinstance(pretrained, str) and os.path.isfile(pretrained):
logger.info('=> loading pretrained model {}'.format(pretrained))
pretrained_state_dict = torch.load(pretrained)
else:
logger.info('=> loading pretrained model from web')
pretrained_state_dict = pretrained
logger.info('=> init deconv weights from normal distribution')
for name, m in self.deconv_layers.named_modules():
if isinstance(m, nn.ConvTranspose2d):
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init final conv weights from normal distribution')
            for name, m in self.final_layer.named_modules():
if isinstance(m, nn.Conv2d):
logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
load_state_dict(self, pretrained_state_dict, strict=False, ignored_layers=['final_layer.bias', 'final_layer.weight'], prefix=cfg.WEIGHTS_PREFIX, prefix_replace=cfg.WEIGHTS_PREFIX_REPLACE)
else:
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
resnet_spec = {'18': (BasicBlock, [2, 2, 2, 2]),
'34': (BasicBlock, [3, 4, 6, 3]),
'50': (Bottleneck, [3, 4, 6, 3]),
'101': (Bottleneck, [3, 4, 23, 3]),
'152': (Bottleneck, [3, 8, 36, 3])}
@registry.BACKBONES.register('poseR-18')
@registry.BACKBONES.register('poseR-34')
@registry.BACKBONES.register('poseR-50')
@registry.BACKBONES.register('poseR-101')
@registry.BACKBONES.register('poseR-152')
@registry.BACKBONES.register('epipolarposeR-18')
@registry.BACKBONES.register('epipolarposeR-34')
@registry.BACKBONES.register('epipolarposeR-50')
@registry.BACKBONES.register('epipolarposeR-101')
@registry.BACKBONES.register('epipolarposeR-152')
def get_pose_net(cfg, **kwargs):
num_layers = cfg.BACKBONE.BODY.split('-')[-1]
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, cfg, **kwargs)
if cfg.BACKBONE.PRETRAINED:
if cfg.BACKBONE.PRETRAINED_WEIGHTS:
model.init_weights(cfg.BACKBONE.PRETRAINED_WEIGHTS)
else:
model.init_weights(model_zoo.load_url(model_urls['resnet'+num_layers]))
return model
| true
| true
|
f7095a4b8f156351c6d348572d6e6f9a2c1cc9f1
| 1,173
|
py
|
Python
|
st2client/st2client/models/keyvalue.py
|
kkkanil/st2
|
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
|
[
"Apache-2.0"
] | null | null | null |
st2client/st2client/models/keyvalue.py
|
kkkanil/st2
|
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
|
[
"Apache-2.0"
] | 15
|
2021-02-11T22:58:54.000Z
|
2021-08-06T18:03:47.000Z
|
st2client/st2client/models/keyvalue.py
|
kkkanil/st2
|
07cd195d7a6e177a37dd019e5c9ab8329259d0fa
|
[
"Apache-2.0"
] | 1
|
2021-07-10T15:02:29.000Z
|
2021-07-10T15:02:29.000Z
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from st2client.models import core
LOG = logging.getLogger(__name__)
class KeyValuePair(core.Resource):
_alias = 'Key'
_display_name = 'Key Value Pair'
_plural = 'Keys'
_plural_display_name = 'Key Value Pairs'
_repr_attributes = ['name', 'value']
# Note: This is a temporary hack until we refactor client and make it support non id PKs
def get_id(self):
return self.name
def set_id(self, value):
pass
id = property(get_id, set_id)
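# A minimal sketch of the non-id primary key hack above: `id` transparently
# proxies `name`, and assignment is deliberately a no-op. Constructing the
# resource from keyword arguments is an assumption about core.Resource.
kvp = KeyValuePair(name='lang', value='python')
assert kvp.id == 'lang'
kvp.id = 'something-else'   # setter intentionally does nothing
assert kvp.id == 'lang'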
| 28.609756
| 92
| 0.729753
|
from __future__ import absolute_import
import logging
from st2client.models import core
LOG = logging.getLogger(__name__)
class KeyValuePair(core.Resource):
_alias = 'Key'
_display_name = 'Key Value Pair'
_plural = 'Keys'
_plural_display_name = 'Key Value Pairs'
_repr_attributes = ['name', 'value']
def get_id(self):
return self.name
def set_id(self, value):
pass
id = property(get_id, set_id)
| true
| true
|
f7095a719021448b51dcc63041132d4d9b8b1f68
| 10,043
|
py
|
Python
|
didipack/latex_table.py
|
AntoineDidisheim/didipack
|
9c9266bf248cae79e6ffddd98b7e573108abaa57
|
[
"MIT"
] | null | null | null |
didipack/latex_table.py
|
AntoineDidisheim/didipack
|
9c9266bf248cae79e6ffddd98b7e573108abaa57
|
[
"MIT"
] | null | null | null |
didipack/latex_table.py
|
AntoineDidisheim/didipack
|
9c9266bf248cae79e6ffddd98b7e573108abaa57
|
[
"MIT"
] | 1
|
2021-02-08T09:26:04.000Z
|
2021-02-08T09:26:04.000Z
|
import pandas as pd
import numpy as np
import statsmodels.api as sm
from enum import Enum
class ParValue(Enum):
TSTAT = 1
PVALUE = 2
STD = 3
class OneReg:
def __init__(self, reg, show_list=[], hide_list=[], blocks=[], bottom_blocks=[]):
self.reg = reg
if show_list == []:
self.show_list = []
for s in self.reg.params.keys():
if s not in hide_list:
self.show_list.append(s)
else:
self.show_list = show_list
self.blocks = blocks
self.bottom_block = bottom_blocks
def create_columns(self):
# first add the parameters of the reg
d = pd.Series(dtype=object)
for k in self.show_list:
if k in self.reg.params.keys():
v = self.reg.pvalues[k]
p = f'{np.round(self.reg.params[k], TableReg.round):,}'
for tr in TableReg.sign_tr:
if v <= tr:
p += '*'
                # replace v with the t-stat or the std. err., depending on the ParValue setting
if TableReg.par_value == ParValue.TSTAT:
v = self.reg.tvalues[k]
if TableReg.par_value == ParValue.STD:
v = self.reg.bse[k]
v = r'(' + f'{np.round(v, TableReg.round):,}' + r')'
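                # pad p and v with invisible '*'-wide \phantom{} blocks so the
                # coefficient row and the (t-stat/std) row align on the decimal point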
v_l = [len(x) for x in v.split('.')]
p_l = [len(x) for x in p.split('.')]
t = abs(v_l[0] - p_l[0])
t = r'\phantom{' + '*' * t + '}'
if v_l[0] > p_l[0]:
p = t + p
if v_l[0] < p_l[0]:
v = t + v
t = abs(v_l[1] - p_l[1])
t = r'\phantom{' + '*' * t + '}'
if v_l[1] > p_l[1]:
p = p+t
if v_l[1] < p_l[1]:
v = v+t
d[k] = p
t = pd.Series(dtype=object)
t[''] =v
d = d.append(t)
else:
t = pd.Series(dtype=object)
t[k] = TableReg.missing_symbol
t[''] = TableReg.missing_symbol
d = d.append(t)
        # now add the "blocks", i.e. fixed effects and other indicator rows
for block in self.blocks:
t = pd.Series(dtype=object)
t[TableReg.group_key] = ''
for k in block.keys():
t[k] = block[k]
d = d.append(t)
        # finally, additional info (R² and n. obs by default; anything can be added through bottom blocks)
if TableReg.show_obs | TableReg.show_r2 | (len(self.bottom_block)>0):
t = pd.Series(dtype=object)
t[TableReg.group_key] = ''
t['Observations'] = f'{int(self.reg.nobs):,}'
if hasattr(self.reg,'rsquared_adj'):
t[r'$R^2$'] = np.round(self.reg.rsquared_adj,TableReg.round_r2)
else:
t[r'Pseudo $R^2$'] = np.round(self.reg.prsquared,TableReg.round_r2)
first_block = True
for block in self.bottom_block:
if first_block:
first_block = False
else:
t[TableReg.group_key] = ''
t = pd.Series(dtype=object)
for k in block.keys():
t[k] = block[k]
d = d.append(t)
return d
class TableReg:
missing_symbol = ' '
par_value = ParValue.STD
round = 4
round_r2 = 4
sign_tr = [0.1, 0.05, 0.01]
show_obs = True
show_r2 = True
variable_skip = r'\smallskip'
group_key = 'asgeg'
group_skip = r'\medskip'
equal_lines = False
def __init__(self, **option):
self.reg_list = []
self.hide_list = []
self.order = []
self.df = None
self.final_show_list = []
self.show_only_list = []
self.col_groups = []
self.rename_dict = {}
if 'hide_list' in option:
assert type(option['hide_list']) == list, "The overall hide list has to be a list"
self.hide_list = option['hide_list']
if 'show_only_list' in option:
assert type(option['show_only_list']) == list, "The show only list has to be a list"
self.show_only_list = option['show_only_list']
if 'order' in option:
assert type(option['order']) == list, "The order has to be a list"
self.order = option['order']
if 'col_groups' in option:
self.set_col_groups(option['col_groups'])
if 'rename_dict' in option:
self.set_rename_dict(option['rename_dict'])
def set_rename_dict(self, rename_dict):
assert type(rename_dict) == dict, "The rename dict must be a dictionary"
self.rename_dict = rename_dict
def set_col_groups(self, groups):
assert type(groups) == list, "The col order has to be a list of list"
for group in groups:
            assert type(group) == list, "Each col group must be a list ['name of group', first column in the group (int), last col in group (int)]"
self.col_groups = groups
def add_reg(self, reg, show_list=[], hide_list=[], blocks=[],bottom_blocks=[]):
hide_list = hide_list + self.hide_list
self.reg_list.append(OneReg(reg, show_list, hide_list, blocks, bottom_blocks))
def update_show_list(self):
if len(self.show_only_list) == 0:
show_list = []
for oneReg in self.reg_list:
show_list = list(set(show_list + oneReg.show_list))
show_list = list(np.sort(show_list))
show_list = self.order + [x for x in show_list if x not in self.order]
else:
show_list = self.show_only_list
col = []
for oneReg in self.reg_list:
oneReg.show_list = show_list
col.append(oneReg.create_columns())
        self.df = pd.concat(col, axis=1)
self.df.columns = [r'\parboxc{c}{0.6cm}{('+str(int(i+1))+')}' for i in range(self.df.shape[1])]
self.df = self.df.rename(index=self.rename_dict)
self.final_show_list = show_list
self.final_show_list = pd.Series(self.final_show_list).replace(self.rename_dict).values.tolist()
self.tex=''
def create_tex(self):
self.update_show_list()
        # write the tex, modifying it to include the name templates
tex = self.df.to_latex(escape=False)
cols = tex.split('\\begin{tabular}{')[1].split('}')[0]
rep = list(cols.replace('l','c'))
rep[0] = 'l'
tex = tex.replace(cols,''.join(rep))
if len(self.col_groups)>0:
# adding "group col names"
s = '\n '
s_line = '\n '
for g in self.col_groups:
s += '& \multicolumn{'+str(1+g[2]-g[1])+'}{c}{\parboxc{c}{0.6cm}{'+g[0]+'}}'
s_line += r'\cmidrule(lr){'+str(g[1]+1)+'-'+str(g[2]+1)+'}'
s += r' \\'+'\n'
s_line += '\n'
ts = tex.split(r'\toprule')
tex = ts[0]+r'\toprule' + s +s_line+ ts[1]
ts = tex.split(r'\midrule')
tex = ts[0]+r'\midrule' + ts[1]
        # add the skip between variables
        # first extract the maximum length of an index label (for padding)
L = 0
for x in self.df.index:
L = max(L,len(x))
L+=1
for i in range(1,len(self.final_show_list)):
a = self.final_show_list[i]
a += ' '*(L-len(a))+'&'
ts = tex.split(a)
temp = ts[0][:-4] + TableReg.variable_skip + ts[0][-4:]
tex=temp+a+ts[1]
# processing the group skip
t = None
for item in tex.split("\n"):
if TableReg.group_key in item:
t = item
        # replace the group-key marker row with the configured group skip
if t is not None:
self.tex = tex.replace(t, TableReg.group_skip + r'\\')
else:
self.tex = tex
def save_tex(self, save_dir):
self.create_tex()
tex = self.tex
if TableReg.equal_lines:
tex=tex.replace(r'\toprule',r'\hline')
tex=tex.replace(r'\midrule',r'\hline')
tex=tex.replace(r'\bottomrule',r'\hline')
with open(save_dir,'w') as txt:
txt.write(tex)
@staticmethod
def create_panel_of_tables(table_list, name_list, save_dir):
numbers = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split()
title_list = []
for i in range(len(table_list)):
table_list[i].create_tex()
title_list.append('Panel '+numbers[i]+': '+name_list[i])
tex = table_list[0].tex
temp = r' \multicolumn{6}{c}{\parboxc{c}{0.7cm}{'+title_list[i]+r'}} \\'
ts = tex.split(r'\toprule')
tex = ts[0]+r'\toprule' +temp+r'\hline'+ts[1]
tex = tex.replace(r'\bottomrule','')
tex = tex.replace(r'\end{tabular}',r'asf')
tex = tex.replace('\\\\\n\nasf','\\bigskip \\\\ \n')
for i in range(1,len(table_list)):
t_tex = table_list[i].tex
temp = r' \multicolumn{6}{c}{\parboxc{c}{0.6cm}{' + title_list[i] + r'}} \\'
ts = t_tex.split(r'\toprule')
t_tex = ts[0] + r'\hline' + temp + r'\hline' + ts[1]
t = None
for item in t_tex.split("\n"):
if r'\begin{tabular}' in item:
t = item
t_tex = t_tex.replace(t,'')
if i+1 < len(table_list):
t_tex = t_tex.replace(r'\bottomrule','')
t_tex = t_tex.replace(r'\end{tabular}', r'asf')
t_tex = t_tex.replace('\\\\\n\nasf', '\\bigskip \\\\ \n')
tex +=t_tex
with open(save_dir,'w') as txt:
txt.write(tex)
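# A minimal usage sketch of TableReg, assuming standard statsmodels OLS results;
# the data and output path are made up, and an older pandas is assumed because
# the class relies on the deprecated pandas.Series.append.
import numpy as _np
import pandas as _pd
import statsmodels.api as _sm

_df = _pd.DataFrame(_np.random.randn(100, 3), columns=['y', 'x1', 'x2'])
_r1 = _sm.OLS(_df['y'], _sm.add_constant(_df[['x1']])).fit()
_r2 = _sm.OLS(_df['y'], _sm.add_constant(_df[['x1', 'x2']])).fit()

_table = TableReg(hide_list=['const'])
_table.add_reg(_r1, blocks=[{'Year FE': 'Yes'}])
_table.add_reg(_r2, blocks=[{'Year FE': 'Yes'}])
_table.save_tex('regressions.tex')  # hypothetical output path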
| 34.393836
| 148
| 0.502738
|
import pandas as pd
import numpy as np
import statsmodels.api as sm
from enum import Enum
class ParValue(Enum):
TSTAT = 1
PVALUE = 2
STD = 3
class OneReg:
def __init__(self, reg, show_list=[], hide_list=[], blocks=[], bottom_blocks=[]):
self.reg = reg
if show_list == []:
self.show_list = []
for s in self.reg.params.keys():
if s not in hide_list:
self.show_list.append(s)
else:
self.show_list = show_list
self.blocks = blocks
self.bottom_block = bottom_blocks
def create_columns(self):
d = pd.Series(dtype=object)
for k in self.show_list:
if k in self.reg.params.keys():
v = self.reg.pvalues[k]
p = f'{np.round(self.reg.params[k], TableReg.round):,}'
for tr in TableReg.sign_tr:
if v <= tr:
p += '*'
if TableReg.par_value == ParValue.TSTAT:
v = self.reg.tvalues[k]
if TableReg.par_value == ParValue.STD:
v = self.reg.bse[k]
v = r'(' + f'{np.round(v, TableReg.round):,}' + r')'
v_l = [len(x) for x in v.split('.')]
p_l = [len(x) for x in p.split('.')]
t = abs(v_l[0] - p_l[0])
t = r'\phantom{' + '*' * t + '}'
if v_l[0] > p_l[0]:
p = t + p
if v_l[0] < p_l[0]:
v = t + v
t = abs(v_l[1] - p_l[1])
t = r'\phantom{' + '*' * t + '}'
if v_l[1] > p_l[1]:
p = p+t
if v_l[1] < p_l[1]:
v = v+t
d[k] = p
t = pd.Series(dtype=object)
t[''] =v
d = d.append(t)
else:
t = pd.Series(dtype=object)
t[k] = TableReg.missing_symbol
t[''] = TableReg.missing_symbol
d = d.append(t)
for block in self.blocks:
t = pd.Series(dtype=object)
t[TableReg.group_key] = ''
for k in block.keys():
t[k] = block[k]
d = d.append(t)
if TableReg.show_obs | TableReg.show_r2 | (len(self.bottom_block)>0):
t = pd.Series(dtype=object)
t[TableReg.group_key] = ''
t['Observations'] = f'{int(self.reg.nobs):,}'
if hasattr(self.reg,'rsquared_adj'):
t[r'$R^2$'] = np.round(self.reg.rsquared_adj,TableReg.round_r2)
else:
t[r'Pseudo $R^2$'] = np.round(self.reg.prsquared,TableReg.round_r2)
first_block = True
for block in self.bottom_block:
if first_block:
first_block = False
else:
t[TableReg.group_key] = ''
t = pd.Series(dtype=object)
for k in block.keys():
t[k] = block[k]
d = d.append(t)
return d
class TableReg:
missing_symbol = ' '
par_value = ParValue.STD
round = 4
round_r2 = 4
sign_tr = [0.1, 0.05, 0.01]
show_obs = True
show_r2 = True
variable_skip = r'\smallskip'
group_key = 'asgeg'
group_skip = r'\medskip'
equal_lines = False
def __init__(self, **option):
self.reg_list = []
self.hide_list = []
self.order = []
self.df = None
self.final_show_list = []
self.show_only_list = []
self.col_groups = []
self.rename_dict = {}
if 'hide_list' in option:
assert type(option['hide_list']) == list, "The overall hide list has to be a list"
self.hide_list = option['hide_list']
if 'show_only_list' in option:
assert type(option['show_only_list']) == list, "The show only list has to be a list"
self.show_only_list = option['show_only_list']
if 'order' in option:
assert type(option['order']) == list, "The order has to be a list"
self.order = option['order']
if 'col_groups' in option:
self.set_col_groups(option['col_groups'])
if 'rename_dict' in option:
self.set_rename_dict(option['rename_dict'])
def set_rename_dict(self, rename_dict):
assert type(rename_dict) == dict, "The rename dict must be a dictionary"
self.rename_dict = rename_dict
def set_col_groups(self, groups):
assert type(groups) == list, "The col order has to be a list of list"
for group in groups:
            assert type(group) == list, "Each col group must be a list ['name of group', first column in the group (int), last col in group (int)]"
self.col_groups = groups
def add_reg(self, reg, show_list=[], hide_list=[], blocks=[],bottom_blocks=[]):
hide_list = hide_list + self.hide_list
self.reg_list.append(OneReg(reg, show_list, hide_list, blocks, bottom_blocks))
def update_show_list(self):
if len(self.show_only_list) == 0:
show_list = []
for oneReg in self.reg_list:
show_list = list(set(show_list + oneReg.show_list))
show_list = list(np.sort(show_list))
show_list = self.order + [x for x in show_list if x not in self.order]
else:
show_list = self.show_only_list
col = []
for oneReg in self.reg_list:
oneReg.show_list = show_list
col.append(oneReg.create_columns())
        self.df = pd.concat(col, axis=1)
self.df.columns = [r'\parboxc{c}{0.6cm}{('+str(int(i+1))+')}' for i in range(self.df.shape[1])]
self.df = self.df.rename(index=self.rename_dict)
self.final_show_list = show_list
self.final_show_list = pd.Series(self.final_show_list).replace(self.rename_dict).values.tolist()
self.tex=''
def create_tex(self):
self.update_show_list()
tex = self.df.to_latex(escape=False)
cols = tex.split('\\begin{tabular}{')[1].split('}')[0]
rep = list(cols.replace('l','c'))
rep[0] = 'l'
tex = tex.replace(cols,''.join(rep))
if len(self.col_groups)>0:
s = '\n '
s_line = '\n '
for g in self.col_groups:
s += '& \multicolumn{'+str(1+g[2]-g[1])+'}{c}{\parboxc{c}{0.6cm}{'+g[0]+'}}'
s_line += r'\cmidrule(lr){'+str(g[1]+1)+'-'+str(g[2]+1)+'}'
s += r' \\'+'\n'
s_line += '\n'
ts = tex.split(r'\toprule')
tex = ts[0]+r'\toprule' + s +s_line+ ts[1]
ts = tex.split(r'\midrule')
tex = ts[0]+r'\midrule' + ts[1]
L = 0
for x in self.df.index:
L = max(L,len(x))
L+=1
for i in range(1,len(self.final_show_list)):
a = self.final_show_list[i]
a += ' '*(L-len(a))+'&'
ts = tex.split(a)
temp = ts[0][:-4] + TableReg.variable_skip + ts[0][-4:]
tex=temp+a+ts[1]
t = None
for item in tex.split("\n"):
if TableReg.group_key in item:
t = item
if t is not None:
self.tex = tex.replace(t, TableReg.group_skip + r'\\')
else:
self.tex = tex
def save_tex(self, save_dir):
self.create_tex()
tex = self.tex
if TableReg.equal_lines:
tex=tex.replace(r'\toprule',r'\hline')
tex=tex.replace(r'\midrule',r'\hline')
tex=tex.replace(r'\bottomrule',r'\hline')
with open(save_dir,'w') as txt:
txt.write(tex)
@staticmethod
def create_panel_of_tables(table_list, name_list, save_dir):
numbers = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'.split()
title_list = []
for i in range(len(table_list)):
table_list[i].create_tex()
title_list.append('Panel '+numbers[i]+': '+name_list[i])
tex = table_list[0].tex
temp = r' \multicolumn{6}{c}{\parboxc{c}{0.7cm}{'+title_list[i]+r'}} \\'
ts = tex.split(r'\toprule')
tex = ts[0]+r'\toprule' +temp+r'\hline'+ts[1]
tex = tex.replace(r'\bottomrule','')
tex = tex.replace(r'\end{tabular}',r'asf')
tex = tex.replace('\\\\\n\nasf','\\bigskip \\\\ \n')
for i in range(1,len(table_list)):
t_tex = table_list[i].tex
temp = r' \multicolumn{6}{c}{\parboxc{c}{0.6cm}{' + title_list[i] + r'}} \\'
ts = t_tex.split(r'\toprule')
t_tex = ts[0] + r'\hline' + temp + r'\hline' + ts[1]
t = None
for item in t_tex.split("\n"):
if r'\begin{tabular}' in item:
t = item
t_tex = t_tex.replace(t,'')
if i+1 < len(table_list):
t_tex = t_tex.replace(r'\bottomrule','')
t_tex = t_tex.replace(r'\end{tabular}', r'asf')
t_tex = t_tex.replace('\\\\\n\nasf', '\\bigskip \\\\ \n')
tex +=t_tex
with open(save_dir,'w') as txt:
txt.write(tex)
| true
| true
|
f7095a92645eb428a3155a581060c078485a7603
| 13,318
|
py
|
Python
|
src/stim/stabilizers/pauli_string_pybind_test.py
|
noajshu/Stim
|
503de420b1e56e90d7f44337ead1065a2ae26740
|
[
"Apache-2.0"
] | 99
|
2021-03-03T19:03:25.000Z
|
2022-03-22T11:39:08.000Z
|
src/stim/stabilizers/pauli_string_pybind_test.py
|
noajshu/Stim
|
503de420b1e56e90d7f44337ead1065a2ae26740
|
[
"Apache-2.0"
] | 95
|
2021-03-04T00:11:30.000Z
|
2022-03-30T08:53:44.000Z
|
src/stim/stabilizers/pauli_string_pybind_test.py
|
noajshu/Stim
|
503de420b1e56e90d7f44337ead1065a2ae26740
|
[
"Apache-2.0"
] | 20
|
2021-03-09T14:10:13.000Z
|
2022-03-15T04:40:12.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stim
import pytest
def test_identity():
p = stim.PauliString(3)
assert len(p) == 3
assert p[0] == p[1] == p[2] == 0
assert p.sign == +1
def test_from_str():
p = stim.PauliString("-_XYZ_ZYX")
assert len(p) == 8
assert p[0] == 0
assert p[1] == 1
assert p[2] == 2
assert p[3] == 3
assert p[4] == 0
assert p[5] == 3
assert p[6] == 2
assert p[7] == 1
assert p.sign == -1
p = stim.PauliString("")
assert len(p) == 0
assert p.sign == +1
p = stim.PauliString("X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("+X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("+iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("-iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == -1j
def test_equality():
assert not (stim.PauliString(4) == None)
assert not (stim.PauliString(4) == "other object")
assert not (stim.PauliString(4) == object())
assert stim.PauliString(4) != None
assert stim.PauliString(4) != "other object"
assert stim.PauliString(4) != object()
assert stim.PauliString(4) == stim.PauliString(4)
assert stim.PauliString(3) != stim.PauliString(4)
assert not (stim.PauliString(4) != stim.PauliString(4))
assert not (stim.PauliString(3) == stim.PauliString(4))
assert stim.PauliString("+X") == stim.PauliString("+X")
assert stim.PauliString("+X") != stim.PauliString("-X")
assert stim.PauliString("+X") != stim.PauliString("+Y")
assert stim.PauliString("+X") != stim.PauliString("-Y")
assert stim.PauliString("+X") != stim.PauliString("+iX")
assert stim.PauliString("+X") != stim.PauliString("-iX")
assert stim.PauliString("__") != stim.PauliString("_X")
assert stim.PauliString("__") != stim.PauliString("X_")
assert stim.PauliString("__") != stim.PauliString("XX")
assert stim.PauliString("__") == stim.PauliString("__")
def test_random():
p1 = stim.PauliString.random(100)
p2 = stim.PauliString.random(100)
assert p1 != p2
seen_signs = {stim.PauliString.random(1).sign for _ in range(200)}
assert seen_signs == {1, -1}
seen_signs = {stim.PauliString.random(1, allow_imaginary=True).sign for _ in range(200)}
assert seen_signs == {1, -1, 1j, -1j}
def test_str():
assert str(stim.PauliString(3)) == "+___"
assert str(stim.PauliString("XYZ")) == "+XYZ"
assert str(stim.PauliString("-XYZ")) == "-XYZ"
assert str(stim.PauliString("iXYZ")) == "+iXYZ"
assert str(stim.PauliString("-iXYZ")) == "-iXYZ"
def test_repr():
assert repr(stim.PauliString(3)) == 'stim.PauliString("+___")'
assert repr(stim.PauliString("-XYZ")) == 'stim.PauliString("-XYZ")'
vs = [
stim.PauliString(""),
stim.PauliString("ZXYZZ"),
stim.PauliString("-XYZ"),
stim.PauliString("I"),
stim.PauliString("iIXYZ"),
stim.PauliString("-iIXYZ"),
]
for v in vs:
r = repr(v)
assert eval(r, {'stim': stim}) == v
def test_commutes():
def c(a: str, b: str) -> bool:
return stim.PauliString(a).commutes(stim.PauliString(b))
assert c("", "")
assert c("X", "_")
assert c("X", "X")
assert not c("X", "Y")
assert not c("X", "Z")
assert c("XXXX", "YYYY")
assert c("XXXX", "YYYZ")
assert not c("XXXX", "XXXZ")
assert not c("XXXX", "___Z")
assert not c("XXXX", "Z___")
assert c("XXXX", "Z_Z_")
def test_product():
assert stim.PauliString("") * stim.PauliString("") == stim.PauliString("")
assert stim.PauliString("i") * stim.PauliString("i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-i") == stim.PauliString("+")
assert stim.PauliString("-i") * stim.PauliString("-i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-") == stim.PauliString("-i")
x = stim.PauliString("X")
y = stim.PauliString("Y")
z = stim.PauliString("Z")
assert x == +1 * x == x * +1 == +x
assert x * -1 == -x == -1 * x
assert (-x)[0] == 1
assert (-x).sign == -1
assert -(-x) == x
assert stim.PauliString(10) * stim.PauliString(11) == stim.PauliString(11)
assert x * z == stim.PauliString("-iY")
assert x * x == stim.PauliString(1)
assert x * y == stim.PauliString("iZ")
assert y * x == stim.PauliString("-iZ")
assert x * y == 1j * z
assert y * x == z * -1j
assert x.extended_product(y) == (1, 1j * z)
assert y.extended_product(x) == (1, -1j * z)
assert x.extended_product(x) == (1, stim.PauliString(1))
xx = stim.PauliString("+XX")
yy = stim.PauliString("+YY")
zz = stim.PauliString("+ZZ")
assert xx * zz == -yy
assert xx.extended_product(zz) == (1, -yy)
def test_inplace_product():
p = stim.PauliString("X")
alias = p
p *= 1j
assert alias == stim.PauliString("iX")
assert alias is p
p *= 1j
assert alias == stim.PauliString("-X")
p *= 1j
assert alias == stim.PauliString("-iX")
p *= 1j
assert alias == stim.PauliString("+X")
p *= stim.PauliString("Z")
assert alias == stim.PauliString("-iY")
p *= -1j
assert alias == stim.PauliString("-Y")
p *= -1j
assert alias == stim.PauliString("iY")
p *= -1j
assert alias == stim.PauliString("+Y")
p *= -1j
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-iY")
assert alias is p
def test_imaginary_phase():
p = stim.PauliString("IXYZ")
ip = stim.PauliString("iIXYZ")
assert 1j * p == p * 1j == ip == -stim.PauliString("-iIXYZ")
assert p.sign == 1
assert (-p).sign == -1
assert ip.sign == 1j
assert (-ip).sign == -1j
assert stim.PauliString("X") * stim.PauliString("Y") == 1j * stim.PauliString("Z")
assert stim.PauliString("Y") * stim.PauliString("X") == -1j * stim.PauliString("Z")
def test_get_set_sign():
p = stim.PauliString(2)
assert p.sign == +1
p.sign = -1
assert str(p) == "-__"
assert p.sign == -1
p.sign = +1
assert str(p) == "+__"
assert p.sign == +1
with pytest.raises(ValueError, match="new_sign"):
p.sign = 5
p.sign = 1j
assert str(p) == "+i__"
assert p.sign == 1j
p.sign = -1j
assert str(p) == "-i__"
assert p.sign == -1j
def test_get_set_item():
p = stim.PauliString(5)
assert list(p) == [0, 0, 0, 0, 0]
assert p[0] == 0
p[0] = 1
assert p[0] == 1
p[0] = 'Y'
assert p[0] == 2
p[0] = 'Z'
assert p[0] == 3
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 't'
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 10
assert p[1] == 0
p[1] = 2
assert p[1] == 2
def test_get_slice():
p = stim.PauliString("XXXX__YYYY__ZZZZX")
assert p[:7] == stim.PauliString("XXXX__Y")
assert p[:-3] == stim.PauliString("XXXX__YYYY__ZZ")
assert p[::2] == stim.PauliString("XX_YY_ZZX")
assert p[::-1] == stim.PauliString("XZZZZ__YYYY__XXXX")
assert p[-3:3] == stim.PauliString("")
assert p[-6:-1] == stim.PauliString("_ZZZZ")
assert p[3:5:-1] == stim.PauliString("")
assert p[5:3:-1] == stim.PauliString("__")
assert p[4:2:-1] == stim.PauliString("_X")
assert p[2:0:-1] == stim.PauliString("XX")
def test_copy():
p = stim.PauliString(3)
p2 = p.copy()
assert p == p2
assert p is not p2
p = stim.PauliString("-i_XYZ")
p2 = p.copy()
assert p == p2
assert p is not p2
def test_hash():
# stim.PauliString is mutable. It must not also be value-hashable.
# Defining __hash__ requires defining a FrozenPauliString variant instead.
with pytest.raises(TypeError, match="unhashable"):
_ = hash(stim.PauliString(1))
def test_add():
ps = stim.PauliString
assert ps(0) + ps(0) == ps(0)
assert ps(3) + ps(1000) == ps(1003)
assert ps(1000) + ps(3) == ps(1003)
assert ps("_XYZ") + ps("_ZZZ_") == ps("_XYZ_ZZZ_")
p = ps("_XYZ")
p += p
assert p == ps("_XYZ_XYZ")
for k in range(1, 8):
p += p
assert p == ps("_XYZ_XYZ" * 2**k)
p = ps("_XXX")
p += ps("Y")
assert p == ps("_XXXY")
p = ps("")
alias = p
p += ps("X")
assert alias is p
assert alias == ps("X")
p += p
assert alias is p
assert alias == ps("XX")
def test_mul_different_sizes():
ps = stim.PauliString
assert ps("") * ps("X" * 1000) == ps("X" * 1000)
assert ps("X" * 1000) * ps("") == ps("X" * 1000)
assert ps("Z" * 1000) * ps("") == ps("Z" * 1000)
p = ps("Z")
alias = p
p *= ps("ZZZ")
assert p == ps("_ZZ")
p *= ps("Z")
assert p == ps("ZZZ")
assert alias is p
def test_div():
assert stim.PauliString("+XYZ") / +1 == stim.PauliString("+XYZ")
assert stim.PauliString("+XYZ") / -1 == stim.PauliString("-XYZ")
assert stim.PauliString("+XYZ") / 1j == stim.PauliString("-iXYZ")
assert stim.PauliString("+XYZ") / -1j == stim.PauliString("iXYZ")
assert stim.PauliString("iXYZ") / 1j == stim.PauliString("XYZ")
p = stim.PauliString("__")
alias = p
assert p / -1 == stim.PauliString("-__")
assert alias == stim.PauliString("__")
p /= -1
assert alias == stim.PauliString("-__")
p /= 1j
assert alias == stim.PauliString("i__")
p /= 1j
assert alias == stim.PauliString("__")
p /= -1j
assert alias == stim.PauliString("i__")
p /= 1
assert alias == stim.PauliString("i__")
def test_mul_repeat():
ps = stim.PauliString
assert ps("") * 100 == ps("")
assert ps("X") * 100 == ps("X" * 100)
assert ps("XYZ_") * 1000 == ps("XYZ_" * 1000)
assert ps("XYZ_") * 1 == ps("XYZ_")
assert ps("XYZ_") * 0 == ps("")
assert 100 * ps("") == ps("")
assert 100 * ps("X") == ps("X" * 100)
assert 1000 * ps("XYZ_") == ps("XYZ_" * 1000)
assert 1 * ps("XYZ_") == ps("XYZ_")
assert 0 * ps("XYZ_") == ps("")
assert ps("i") * 0 == ps("+")
assert ps("i") * 1 == ps("i")
assert ps("i") * 2 == ps("-")
assert ps("i") * 3 == ps("-i")
assert ps("i") * 4 == ps("+")
assert ps("i") * 5 == ps("i")
assert ps("-i") * 0 == ps("+")
assert ps("-i") * 1 == ps("-i")
assert ps("-i") * 2 == ps("-")
assert ps("-i") * 3 == ps("i")
assert ps("-i") * 4 == ps("+")
assert ps("-i") * 5 == ps("-i")
assert ps("-") * 0 == ps("+")
assert ps("-") * 1 == ps("-")
assert ps("-") * 2 == ps("+")
assert ps("-") * 3 == ps("-")
assert ps("-") * 4 == ps("+")
assert ps("-") * 5 == ps("-")
p = ps("XYZ")
alias = p
p *= 1000
assert p == ps("XYZ" * 1000)
assert alias is p
def test_init_list():
assert stim.PauliString([]) == stim.PauliString(0)
assert stim.PauliString([0, 1, 2, 3]) == stim.PauliString("_XYZ")
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([-1])
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([4])
with pytest.raises(TypeError):
_ = stim.PauliString([2**500])
def test_init_copy():
p = stim.PauliString("_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
p = stim.PauliString("-i_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
def test_commutes_different_lengths():
x1000 = stim.PauliString("X" * 1000)
z1000 = stim.PauliString("Z" * 1000)
x1 = stim.PauliString("X")
z1 = stim.PauliString("Z")
assert x1.commutes(x1000)
assert x1000.commutes(x1)
assert z1.commutes(z1000)
assert z1000.commutes(z1)
assert not z1.commutes(x1000)
assert not x1000.commutes(z1)
assert not x1.commutes(z1000)
assert not z1000.commutes(x1)
def test_pickle():
import pickle
t = stim.PauliString.random(4)
a = pickle.dumps(t)
assert pickle.loads(a) == t
t = stim.PauliString("i_XYZ")
a = pickle.dumps(t)
assert pickle.loads(a) == t
| 28.037895
| 92
| 0.570431
|
import stim
import pytest
def test_identity():
p = stim.PauliString(3)
assert len(p) == 3
assert p[0] == p[1] == p[2] == 0
assert p.sign == +1
def test_from_str():
p = stim.PauliString("-_XYZ_ZYX")
assert len(p) == 8
assert p[0] == 0
assert p[1] == 1
assert p[2] == 2
assert p[3] == 3
assert p[4] == 0
assert p[5] == 3
assert p[6] == 2
assert p[7] == 1
assert p.sign == -1
p = stim.PauliString("")
assert len(p) == 0
assert p.sign == +1
p = stim.PauliString("X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("+X")
assert len(p) == 1
assert p[0] == 1
assert p.sign == +1
p = stim.PauliString("iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("+iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == 1j
p = stim.PauliString("-iX")
assert len(p) == 1
assert p[0] == 1
assert p.sign == -1j
def test_equality():
assert not (stim.PauliString(4) == None)
assert not (stim.PauliString(4) == "other object")
assert not (stim.PauliString(4) == object())
assert stim.PauliString(4) != None
assert stim.PauliString(4) != "other object"
assert stim.PauliString(4) != object()
assert stim.PauliString(4) == stim.PauliString(4)
assert stim.PauliString(3) != stim.PauliString(4)
assert not (stim.PauliString(4) != stim.PauliString(4))
assert not (stim.PauliString(3) == stim.PauliString(4))
assert stim.PauliString("+X") == stim.PauliString("+X")
assert stim.PauliString("+X") != stim.PauliString("-X")
assert stim.PauliString("+X") != stim.PauliString("+Y")
assert stim.PauliString("+X") != stim.PauliString("-Y")
assert stim.PauliString("+X") != stim.PauliString("+iX")
assert stim.PauliString("+X") != stim.PauliString("-iX")
assert stim.PauliString("__") != stim.PauliString("_X")
assert stim.PauliString("__") != stim.PauliString("X_")
assert stim.PauliString("__") != stim.PauliString("XX")
assert stim.PauliString("__") == stim.PauliString("__")
def test_random():
p1 = stim.PauliString.random(100)
p2 = stim.PauliString.random(100)
assert p1 != p2
seen_signs = {stim.PauliString.random(1).sign for _ in range(200)}
assert seen_signs == {1, -1}
seen_signs = {stim.PauliString.random(1, allow_imaginary=True).sign for _ in range(200)}
assert seen_signs == {1, -1, 1j, -1j}
def test_str():
assert str(stim.PauliString(3)) == "+___"
assert str(stim.PauliString("XYZ")) == "+XYZ"
assert str(stim.PauliString("-XYZ")) == "-XYZ"
assert str(stim.PauliString("iXYZ")) == "+iXYZ"
assert str(stim.PauliString("-iXYZ")) == "-iXYZ"
def test_repr():
assert repr(stim.PauliString(3)) == 'stim.PauliString("+___")'
assert repr(stim.PauliString("-XYZ")) == 'stim.PauliString("-XYZ")'
vs = [
stim.PauliString(""),
stim.PauliString("ZXYZZ"),
stim.PauliString("-XYZ"),
stim.PauliString("I"),
stim.PauliString("iIXYZ"),
stim.PauliString("-iIXYZ"),
]
for v in vs:
r = repr(v)
assert eval(r, {'stim': stim}) == v
def test_commutes():
def c(a: str, b: str) -> bool:
return stim.PauliString(a).commutes(stim.PauliString(b))
assert c("", "")
assert c("X", "_")
assert c("X", "X")
assert not c("X", "Y")
assert not c("X", "Z")
assert c("XXXX", "YYYY")
assert c("XXXX", "YYYZ")
assert not c("XXXX", "XXXZ")
assert not c("XXXX", "___Z")
assert not c("XXXX", "Z___")
assert c("XXXX", "Z_Z_")
def test_product():
assert stim.PauliString("") * stim.PauliString("") == stim.PauliString("")
assert stim.PauliString("i") * stim.PauliString("i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-i") == stim.PauliString("+")
assert stim.PauliString("-i") * stim.PauliString("-i") == stim.PauliString("-")
assert stim.PauliString("i") * stim.PauliString("-") == stim.PauliString("-i")
x = stim.PauliString("X")
y = stim.PauliString("Y")
z = stim.PauliString("Z")
assert x == +1 * x == x * +1 == +x
assert x * -1 == -x == -1 * x
assert (-x)[0] == 1
assert (-x).sign == -1
assert -(-x) == x
assert stim.PauliString(10) * stim.PauliString(11) == stim.PauliString(11)
assert x * z == stim.PauliString("-iY")
assert x * x == stim.PauliString(1)
assert x * y == stim.PauliString("iZ")
assert y * x == stim.PauliString("-iZ")
assert x * y == 1j * z
assert y * x == z * -1j
assert x.extended_product(y) == (1, 1j * z)
assert y.extended_product(x) == (1, -1j * z)
assert x.extended_product(x) == (1, stim.PauliString(1))
xx = stim.PauliString("+XX")
yy = stim.PauliString("+YY")
zz = stim.PauliString("+ZZ")
assert xx * zz == -yy
assert xx.extended_product(zz) == (1, -yy)
def test_inplace_product():
p = stim.PauliString("X")
alias = p
p *= 1j
assert alias == stim.PauliString("iX")
assert alias is p
p *= 1j
assert alias == stim.PauliString("-X")
p *= 1j
assert alias == stim.PauliString("-iX")
p *= 1j
assert alias == stim.PauliString("+X")
p *= stim.PauliString("Z")
assert alias == stim.PauliString("-iY")
p *= -1j
assert alias == stim.PauliString("-Y")
p *= -1j
assert alias == stim.PauliString("iY")
p *= -1j
assert alias == stim.PauliString("+Y")
p *= -1j
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("i_")
assert alias == stim.PauliString("-iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("iY")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("+Y")
p *= stim.PauliString("-i_")
assert alias == stim.PauliString("-iY")
assert alias is p
def test_imaginary_phase():
p = stim.PauliString("IXYZ")
ip = stim.PauliString("iIXYZ")
assert 1j * p == p * 1j == ip == -stim.PauliString("-iIXYZ")
assert p.sign == 1
assert (-p).sign == -1
assert ip.sign == 1j
assert (-ip).sign == -1j
assert stim.PauliString("X") * stim.PauliString("Y") == 1j * stim.PauliString("Z")
assert stim.PauliString("Y") * stim.PauliString("X") == -1j * stim.PauliString("Z")
def test_get_set_sign():
p = stim.PauliString(2)
assert p.sign == +1
p.sign = -1
assert str(p) == "-__"
assert p.sign == -1
p.sign = +1
assert str(p) == "+__"
assert p.sign == +1
with pytest.raises(ValueError, match="new_sign"):
p.sign = 5
p.sign = 1j
assert str(p) == "+i__"
assert p.sign == 1j
p.sign = -1j
assert str(p) == "-i__"
assert p.sign == -1j
def test_get_set_item():
p = stim.PauliString(5)
assert list(p) == [0, 0, 0, 0, 0]
assert p[0] == 0
p[0] = 1
assert p[0] == 1
p[0] = 'Y'
assert p[0] == 2
p[0] = 'Z'
assert p[0] == 3
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 't'
with pytest.raises(IndexError, match="new_pauli"):
p[0] = 10
assert p[1] == 0
p[1] = 2
assert p[1] == 2
def test_get_slice():
p = stim.PauliString("XXXX__YYYY__ZZZZX")
assert p[:7] == stim.PauliString("XXXX__Y")
assert p[:-3] == stim.PauliString("XXXX__YYYY__ZZ")
assert p[::2] == stim.PauliString("XX_YY_ZZX")
assert p[::-1] == stim.PauliString("XZZZZ__YYYY__XXXX")
assert p[-3:3] == stim.PauliString("")
assert p[-6:-1] == stim.PauliString("_ZZZZ")
assert p[3:5:-1] == stim.PauliString("")
assert p[5:3:-1] == stim.PauliString("__")
assert p[4:2:-1] == stim.PauliString("_X")
assert p[2:0:-1] == stim.PauliString("XX")
def test_copy():
p = stim.PauliString(3)
p2 = p.copy()
assert p == p2
assert p is not p2
p = stim.PauliString("-i_XYZ")
p2 = p.copy()
assert p == p2
assert p is not p2
def test_hash():
with pytest.raises(TypeError, match="unhashable"):
_ = hash(stim.PauliString(1))
def test_add():
ps = stim.PauliString
assert ps(0) + ps(0) == ps(0)
assert ps(3) + ps(1000) == ps(1003)
assert ps(1000) + ps(3) == ps(1003)
assert ps("_XYZ") + ps("_ZZZ_") == ps("_XYZ_ZZZ_")
p = ps("_XYZ")
p += p
assert p == ps("_XYZ_XYZ")
for k in range(1, 8):
p += p
assert p == ps("_XYZ_XYZ" * 2**k)
p = ps("_XXX")
p += ps("Y")
assert p == ps("_XXXY")
p = ps("")
alias = p
p += ps("X")
assert alias is p
assert alias == ps("X")
p += p
assert alias is p
assert alias == ps("XX")
def test_mul_different_sizes():
ps = stim.PauliString
assert ps("") * ps("X" * 1000) == ps("X" * 1000)
assert ps("X" * 1000) * ps("") == ps("X" * 1000)
assert ps("Z" * 1000) * ps("") == ps("Z" * 1000)
p = ps("Z")
alias = p
p *= ps("ZZZ")
assert p == ps("_ZZ")
p *= ps("Z")
assert p == ps("ZZZ")
assert alias is p
def test_div():
assert stim.PauliString("+XYZ") / +1 == stim.PauliString("+XYZ")
assert stim.PauliString("+XYZ") / -1 == stim.PauliString("-XYZ")
assert stim.PauliString("+XYZ") / 1j == stim.PauliString("-iXYZ")
assert stim.PauliString("+XYZ") / -1j == stim.PauliString("iXYZ")
assert stim.PauliString("iXYZ") / 1j == stim.PauliString("XYZ")
p = stim.PauliString("__")
alias = p
assert p / -1 == stim.PauliString("-__")
assert alias == stim.PauliString("__")
p /= -1
assert alias == stim.PauliString("-__")
p /= 1j
assert alias == stim.PauliString("i__")
p /= 1j
assert alias == stim.PauliString("__")
p /= -1j
assert alias == stim.PauliString("i__")
p /= 1
assert alias == stim.PauliString("i__")
def test_mul_repeat():
ps = stim.PauliString
assert ps("") * 100 == ps("")
assert ps("X") * 100 == ps("X" * 100)
assert ps("XYZ_") * 1000 == ps("XYZ_" * 1000)
assert ps("XYZ_") * 1 == ps("XYZ_")
assert ps("XYZ_") * 0 == ps("")
assert 100 * ps("") == ps("")
assert 100 * ps("X") == ps("X" * 100)
assert 1000 * ps("XYZ_") == ps("XYZ_" * 1000)
assert 1 * ps("XYZ_") == ps("XYZ_")
assert 0 * ps("XYZ_") == ps("")
assert ps("i") * 0 == ps("+")
assert ps("i") * 1 == ps("i")
assert ps("i") * 2 == ps("-")
assert ps("i") * 3 == ps("-i")
assert ps("i") * 4 == ps("+")
assert ps("i") * 5 == ps("i")
assert ps("-i") * 0 == ps("+")
assert ps("-i") * 1 == ps("-i")
assert ps("-i") * 2 == ps("-")
assert ps("-i") * 3 == ps("i")
assert ps("-i") * 4 == ps("+")
assert ps("-i") * 5 == ps("-i")
assert ps("-") * 0 == ps("+")
assert ps("-") * 1 == ps("-")
assert ps("-") * 2 == ps("+")
assert ps("-") * 3 == ps("-")
assert ps("-") * 4 == ps("+")
assert ps("-") * 5 == ps("-")
p = ps("XYZ")
alias = p
p *= 1000
assert p == ps("XYZ" * 1000)
assert alias is p
def test_init_list():
assert stim.PauliString([]) == stim.PauliString(0)
assert stim.PauliString([0, 1, 2, 3]) == stim.PauliString("_XYZ")
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([-1])
with pytest.raises(ValueError, match="pauli"):
_ = stim.PauliString([4])
with pytest.raises(TypeError):
_ = stim.PauliString([2**500])
def test_init_copy():
p = stim.PauliString("_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
p = stim.PauliString("-i_XYZ")
p2 = stim.PauliString(p)
assert p is not p2
assert p == p2
def test_commutes_different_lengths():
x1000 = stim.PauliString("X" * 1000)
z1000 = stim.PauliString("Z" * 1000)
x1 = stim.PauliString("X")
z1 = stim.PauliString("Z")
assert x1.commutes(x1000)
assert x1000.commutes(x1)
assert z1.commutes(z1000)
assert z1000.commutes(z1)
assert not z1.commutes(x1000)
assert not x1000.commutes(z1)
assert not x1.commutes(z1000)
assert not z1000.commutes(x1)
def test_pickle():
import pickle
t = stim.PauliString.random(4)
a = pickle.dumps(t)
assert pickle.loads(a) == t
t = stim.PauliString("i_XYZ")
a = pickle.dumps(t)
assert pickle.loads(a) == t
| true
| true
|
f7095b8e4b08b57406f35839c522b08a768a6216
| 1,113
|
py
|
Python
|
src/sumo_controller_node.py
|
ps-thunderatz/sumo_controller
|
635219bbb9841ed83a391be3142fb87efd22a461
|
[
"MIT"
] | null | null | null |
src/sumo_controller_node.py
|
ps-thunderatz/sumo_controller
|
635219bbb9841ed83a391be3142fb87efd22a461
|
[
"MIT"
] | 7
|
2021-08-28T18:54:58.000Z
|
2021-09-06T03:01:06.000Z
|
src/sumo_controller_node.py
|
ps-thunderatz/sumo_controller
|
635219bbb9841ed83a391be3142fb87efd22a461
|
[
"MIT"
] | 1
|
2021-09-05T18:59:05.000Z
|
2021-09-05T18:59:05.000Z
|
#!/usr/bin/env python3
"""Node para controlar um robô de sumô
File
-------
sumo_controller/src/sumo_controller_node.py
Authors
-------
ThundeRatz Team <comp@thunderatz.org>
"""
import rospy
from std_msgs.msg import Float64
CONTROL_RATE = 60 # Hz
def main():
""" Lógica principal do node de controle
"""
rospy.init_node("sumo_controller", disable_signals=True, anonymous=True)
rospy.loginfo(f"Node de controle iniciado {rospy.get_time()}")
rate = rospy.Rate(CONTROL_RATE)
    # Initialize the sensors and motors here
while not rospy.is_shutdown():
        # Write your sumo control code here
rate.sleep()
if __name__ == "__main__":
try:
main()
except (rospy.ROSInterruptException, KeyboardInterrupt):
pass
finally:
        # Fix the topic names!!!
left_motor_pub = rospy.Publisher("topico/do/motor/esquerdo", Float64, queue_size=1)
right_motor_pub = rospy.Publisher("topico/do/motor/direito", Float64, queue_size=1)
left_motor_pub.publish(Float64(0))
right_motor_pub.publish(Float64(0))
| 24.195652
| 91
| 0.680144
|
import rospy
from std_msgs.msg import Float64
CONTROL_RATE = 60
def main():
rospy.init_node("sumo_controller", disable_signals=True, anonymous=True)
rospy.loginfo(f"Node de controle iniciado {rospy.get_time()}")
rate = rospy.Rate(CONTROL_RATE)
while not rospy.is_shutdown():
rate.sleep()
if __name__ == "__main__":
try:
main()
except (rospy.ROSInterruptException, KeyboardInterrupt):
pass
finally:
left_motor_pub = rospy.Publisher("topico/do/motor/esquerdo", Float64, queue_size=1)
right_motor_pub = rospy.Publisher("topico/do/motor/direito", Float64, queue_size=1)
left_motor_pub.publish(Float64(0))
right_motor_pub.publish(Float64(0))
| true
| true
|
f7095bec43a635319555dc50a6ab8fae0442f341
| 9,403
|
py
|
Python
|
tests/tools/commands/test_venv.py
|
Eric-Arellano/pex
|
49955249ec3a8dfa8da5fd7da6a06b9666ae73c7
|
[
"Apache-2.0"
] | null | null | null |
tests/tools/commands/test_venv.py
|
Eric-Arellano/pex
|
49955249ec3a8dfa8da5fd7da6a06b9666ae73c7
|
[
"Apache-2.0"
] | null | null | null |
tests/tools/commands/test_venv.py
|
Eric-Arellano/pex
|
49955249ec3a8dfa8da5fd7da6a06b9666ae73c7
|
[
"Apache-2.0"
] | 1
|
2021-03-08T21:42:57.000Z
|
2021-03-08T21:42:57.000Z
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import os
import subprocess
import tempfile
from subprocess import CalledProcessError
from textwrap import dedent
import pytest
from pex.common import temporary_dir, touch
from pex.executor import Executor
from pex.testing import run_pex_command
from pex.tools.commands.virtualenv import Virtualenv
from pex.typing import TYPE_CHECKING
from pex.util import named_temporary_file
if TYPE_CHECKING:
from typing import Callable, Tuple, Any, Dict, Optional, Iterable
CreatePexVenv = Callable[[Tuple[str, ...]], Virtualenv]
FABRIC_VERSION = "2.5.0"
@pytest.fixture(scope="module")
def pex():
# type: () -> str
with temporary_dir() as tmpdir:
pex_path = os.path.join(tmpdir, "fabric.pex")
src_dir = os.path.join(tmpdir, "src")
touch(os.path.join(src_dir, "user/__init__.py"))
touch(os.path.join(src_dir, "user/package/__init__.py"))
# N.B.: --unzip just speeds up runs 2+ of the pex file and is otherwise not relevant to
# these tests.
run_pex_command(
args=[
"fabric=={}".format(FABRIC_VERSION),
"-c",
"fab",
"--sources-directory",
src_dir,
"-o",
pex_path,
"--unzip",
"--include-tools",
]
)
yield os.path.realpath(pex_path)
def make_env(**kwargs):
# type: (**Any) -> Dict[str, str]
env = os.environ.copy()
env.update((k, str(v)) for k, v in kwargs.items())
return env
@pytest.fixture
def create_pex_venv(pex):
# type: (str) -> CreatePexVenv
with temporary_dir() as tmpdir:
venv_dir = os.path.join(tmpdir, "venv")
def _create_pex_venv(*options):
# type: (*str) -> Virtualenv
subprocess.check_call(
args=[pex, "venv", venv_dir] + list(options or ()), env=make_env(PEX_TOOLS="1")
)
return Virtualenv(venv_dir)
yield _create_pex_venv
def test_force(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv("--pip")
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
venv.interpreter.execute(args=["-c", "import colors"])
with pytest.raises(CalledProcessError):
create_pex_venv()
venv_force = create_pex_venv("--force")
    # Unlike the prior venv, the re-created venv should have no ansicolors installed.
with pytest.raises(Executor.NonZeroExit):
venv_force.interpreter.execute(args=["-c", "import colors"])
# The re-created venv should have no pip installed either.
with pytest.raises(Executor.NonZeroExit):
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
def execute_venv_pex_interpreter(
venv, # type: Virtualenv
code=None, # type: Optional[str]
extra_args=(), # type: Iterable[str]
**extra_env # type: Any
):
# type: (...) -> Tuple[int, str, str]
process = subprocess.Popen(
args=[venv.join_path("pex")] + list(extra_args),
env=make_env(PEX_INTERPRETER=True, **extra_env),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdout, stderr = process.communicate(input=None if code is None else code.encode())
return process.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
def expected_file_path(
venv, # type: Virtualenv
package, # type: str
):
# type: (...) -> str
return os.path.realpath(
os.path.join(
venv.site_packages_dir,
os.path.sep.join(package.split(".")),
"__init__.{ext}".format(ext="pyc" if venv.interpreter.version[0] == 2 else "py"),
)
)
def parse_fabric_version_output(output):
# type: (str) -> Dict[str, str]
return dict(line.split(" ", 1) for line in output.splitlines())
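# For example (format taken from the `fab -V` sample shown in the comment below):
#   parse_fabric_version_output("Fabric 2.5.0\nParamiko 2.7.2\nInvoke 1.4.1")
#   -> {"Fabric": "2.5.0", "Paramiko": "2.7.2", "Invoke": "1.4.1"}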
def test_venv_pex(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
venv_pex = venv.join_path("pex")
fabric_output = subprocess.check_output(args=[venv_pex, "-V"])
# N.B.: `fab -V` output looks like so:
# $ fab -V
# Fabric 2.5.0
# Paramiko 2.7.2
# Invoke 1.4.1
versions = parse_fabric_version_output(fabric_output.decode("utf-8"))
assert FABRIC_VERSION == versions["Fabric"]
invoke_version = "Invoke {}".format(versions["Invoke"])
invoke_script_output = subprocess.check_output(
args=[venv_pex, "-V"], env=make_env(PEX_SCRIPT="invoke")
)
assert invoke_version == invoke_script_output.decode("utf-8").strip()
invoke_entry_point_output = subprocess.check_output(
args=[venv_pex, "-V"],
env=make_env(PEX_MODULE="invoke.main:program.run"),
)
assert invoke_version == invoke_entry_point_output.decode("utf-8").strip()
pex_extra_sys_path = ["/dev/null", "Bob"]
returncode, _, stderr = execute_venv_pex_interpreter(
venv,
code=dedent(
"""\
from __future__ import print_function
import os
import sys
def assert_equal(test_num, expected, actual):
if expected == actual:
return
print(
"[{{}}] Expected {{}} but got {{}}".format(test_num, expected, actual),
file=sys.stderr,
)
sys.exit(test_num)
assert_equal(1, {pex_extra_sys_path!r}, sys.path[-2:])
import fabric
assert_equal(2, {fabric!r}, os.path.realpath(fabric.__file__))
import user.package
assert_equal(3, {user_package!r}, os.path.realpath(user.package.__file__))
""".format(
pex_extra_sys_path=pex_extra_sys_path,
fabric=expected_file_path(venv, "fabric"),
user_package=expected_file_path(venv, "user.package"),
)
),
PEX_EXTRA_SYS_PATH=os.pathsep.join(pex_extra_sys_path),
)
assert 0 == returncode, stderr
def test_binary_path(create_pex_venv):
# type: (CreatePexVenv) -> None
code = dedent(
"""\
import errno
import subprocess
import sys
# PEXed code should be able to find all (console) scripts on the $PATH when the venv is
# created with --bin-path set, and the scripts should all run with the venv interpreter in
# order to find their code.
def try_invoke(*args):
try:
subprocess.check_call(list(args))
return 0
except OSError as e:
if e.errno == errno.ENOENT:
# This is what we expect when scripts are not set up on PATH via --bin-path.
return 1
return 2
exit_code = try_invoke("fab", "-V")
exit_code += 10 * try_invoke("inv", "-V")
exit_code += 100 * try_invoke("invoke", "-V")
sys.exit(exit_code)
"""
)
venv = create_pex_venv()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=code, PATH=tempfile.gettempdir()
)
assert 111 == returncode, stdout + stderr
venv_bin_path = create_pex_venv("-f", "--bin-path", "prepend")
returncode, _, _ = execute_venv_pex_interpreter(
venv_bin_path, code=code, PATH=tempfile.gettempdir()
)
assert 0 == returncode
def test_venv_pex_interpreter_special_modes(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
# special mode execute module: -m module
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-m"])
assert 2 == returncode, stderr
assert "" == stdout
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-m", "fabric", "--version"]
)
assert 0 == returncode, stderr
versions = parse_fabric_version_output(stdout)
assert FABRIC_VERSION == versions["Fabric"]
# special mode execute code string: -c <str>
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-c"])
assert 2 == returncode, stderr
assert "" == stdout
fabric_file_code = "import fabric, os; print(os.path.realpath(fabric.__file__))"
expected_fabric_file_path = expected_file_path(venv, "fabric")
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-c", fabric_file_code]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute stdin: -
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=["-"]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute python file: <py file name>
with named_temporary_file(prefix="code", suffix=".py", mode="w") as fp:
fp.write(fabric_file_code)
fp.close()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=[fp.name]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
| 32.536332
| 98
| 0.622461
|
from __future__ import absolute_import
import os
import subprocess
import tempfile
from subprocess import CalledProcessError
from textwrap import dedent
import pytest
from pex.common import temporary_dir, touch
from pex.executor import Executor
from pex.testing import run_pex_command
from pex.tools.commands.virtualenv import Virtualenv
from pex.typing import TYPE_CHECKING
from pex.util import named_temporary_file
if TYPE_CHECKING:
from typing import Callable, Tuple, Any, Dict, Optional, Iterable
CreatePexVenv = Callable[[Tuple[str, ...]], Virtualenv]
FABRIC_VERSION = "2.5.0"
@pytest.fixture(scope="module")
def pex():
with temporary_dir() as tmpdir:
pex_path = os.path.join(tmpdir, "fabric.pex")
src_dir = os.path.join(tmpdir, "src")
touch(os.path.join(src_dir, "user/__init__.py"))
touch(os.path.join(src_dir, "user/package/__init__.py"))
run_pex_command(
args=[
"fabric=={}".format(FABRIC_VERSION),
"-c",
"fab",
"--sources-directory",
src_dir,
"-o",
pex_path,
"--unzip",
"--include-tools",
]
)
yield os.path.realpath(pex_path)
def make_env(**kwargs):
env = os.environ.copy()
env.update((k, str(v)) for k, v in kwargs.items())
return env
@pytest.fixture
def create_pex_venv(pex):
with temporary_dir() as tmpdir:
venv_dir = os.path.join(tmpdir, "venv")
def _create_pex_venv(*options):
subprocess.check_call(
args=[pex, "venv", venv_dir] + list(options or ()), env=make_env(PEX_TOOLS="1")
)
return Virtualenv(venv_dir)
yield _create_pex_venv
def test_force(create_pex_venv):
venv = create_pex_venv("--pip")
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
venv.interpreter.execute(args=["-c", "import colors"])
with pytest.raises(CalledProcessError):
create_pex_venv()
venv_force = create_pex_venv("--force")
with pytest.raises(Executor.NonZeroExit):
venv_force.interpreter.execute(args=["-c", "import colors"])
with pytest.raises(Executor.NonZeroExit):
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
def execute_venv_pex_interpreter(
    venv, code=None, extra_args=(), **extra_env
):
process = subprocess.Popen(
args=[venv.join_path("pex")] + list(extra_args),
env=make_env(PEX_INTERPRETER=True, **extra_env),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdout, stderr = process.communicate(input=None if code is None else code.encode())
return process.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
def expected_file_path(
    venv, package
):
return os.path.realpath(
os.path.join(
venv.site_packages_dir,
os.path.sep.join(package.split(".")),
"__init__.{ext}".format(ext="pyc" if venv.interpreter.version[0] == 2 else "py"),
)
)
def parse_fabric_version_output(output):
return dict(line.split(" ", 1) for line in output.splitlines())
def test_venv_pex(create_pex_venv):
venv = create_pex_venv()
venv_pex = venv.join_path("pex")
fabric_output = subprocess.check_output(args=[venv_pex, "-V"])
versions = parse_fabric_version_output(fabric_output.decode("utf-8"))
assert FABRIC_VERSION == versions["Fabric"]
invoke_version = "Invoke {}".format(versions["Invoke"])
invoke_script_output = subprocess.check_output(
args=[venv_pex, "-V"], env=make_env(PEX_SCRIPT="invoke")
)
assert invoke_version == invoke_script_output.decode("utf-8").strip()
invoke_entry_point_output = subprocess.check_output(
args=[venv_pex, "-V"],
env=make_env(PEX_MODULE="invoke.main:program.run"),
)
assert invoke_version == invoke_entry_point_output.decode("utf-8").strip()
pex_extra_sys_path = ["/dev/null", "Bob"]
returncode, _, stderr = execute_venv_pex_interpreter(
venv,
code=dedent(
"""\
from __future__ import print_function
import os
import sys
def assert_equal(test_num, expected, actual):
if expected == actual:
return
print(
"[{{}}] Expected {{}} but got {{}}".format(test_num, expected, actual),
file=sys.stderr,
)
sys.exit(test_num)
assert_equal(1, {pex_extra_sys_path!r}, sys.path[-2:])
import fabric
assert_equal(2, {fabric!r}, os.path.realpath(fabric.__file__))
import user.package
assert_equal(3, {user_package!r}, os.path.realpath(user.package.__file__))
""".format(
pex_extra_sys_path=pex_extra_sys_path,
fabric=expected_file_path(venv, "fabric"),
user_package=expected_file_path(venv, "user.package"),
)
),
PEX_EXTRA_SYS_PATH=os.pathsep.join(pex_extra_sys_path),
)
assert 0 == returncode, stderr
def test_binary_path(create_pex_venv):
code = dedent(
"""\
import errno
import subprocess
import sys
# PEXed code should be able to find all (console) scripts on the $PATH when the venv is
# created with --bin-path set, and the scripts should all run with the venv interpreter in
# order to find their code.
def try_invoke(*args):
try:
subprocess.check_call(list(args))
return 0
except OSError as e:
if e.errno == errno.ENOENT:
# This is what we expect when scripts are not set up on PATH via --bin-path.
return 1
return 2
exit_code = try_invoke("fab", "-V")
exit_code += 10 * try_invoke("inv", "-V")
exit_code += 100 * try_invoke("invoke", "-V")
sys.exit(exit_code)
"""
)
venv = create_pex_venv()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=code, PATH=tempfile.gettempdir()
)
assert 111 == returncode, stdout + stderr
venv_bin_path = create_pex_venv("-f", "--bin-path", "prepend")
returncode, _, _ = execute_venv_pex_interpreter(
venv_bin_path, code=code, PATH=tempfile.gettempdir()
)
assert 0 == returncode
def test_venv_pex_interpreter_special_modes(create_pex_venv):
venv = create_pex_venv()
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-m"])
assert 2 == returncode, stderr
assert "" == stdout
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-m", "fabric", "--version"]
)
assert 0 == returncode, stderr
versions = parse_fabric_version_output(stdout)
assert FABRIC_VERSION == versions["Fabric"]
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-c"])
assert 2 == returncode, stderr
assert "" == stdout
fabric_file_code = "import fabric, os; print(os.path.realpath(fabric.__file__))"
expected_fabric_file_path = expected_file_path(venv, "fabric")
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-c", fabric_file_code]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=["-"]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
with named_temporary_file(prefix="code", suffix=".py", mode="w") as fp:
fp.write(fabric_file_code)
fp.close()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=[fp.name]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
| true
| true
|
f7095d1db7c3f81dc0dfa863c114fdc12fc2c216
| 2,964
|
py
|
Python
|
test_scripts/main_cvxpy_simple.py
|
forgi86/pyMPC
|
291db149554767a035fcb01df3fed7a6b3fe60e4
|
[
"MIT"
] | 84
|
2019-05-28T09:27:37.000Z
|
2022-03-31T08:38:23.000Z
|
test_scripts/main_cvxpy_simple.py
|
passion4energy/pyMPC
|
4b004ba707dab49cd36d96a3575b8593c870a904
|
[
"MIT"
] | 2
|
2020-04-17T00:03:27.000Z
|
2021-01-30T11:35:58.000Z
|
test_scripts/main_cvxpy_simple.py
|
passion4energy/pyMPC
|
4b004ba707dab49cd36d96a3575b8593c870a904
|
[
"MIT"
] | 20
|
2019-10-13T13:50:16.000Z
|
2022-03-31T08:38:25.000Z
|
from cvxpy import Variable, Parameter, Minimize, Problem, OSQP, quad_form
import numpy as np
import scipy as sp
import scipy.sparse as sparse
import time
if __name__ == "__main__":
    # Discrete-time model of a point mass (double integrator) of mass M
Ts = 0.2
M = 2.0
Ad = sparse.csc_matrix([
[1.0, Ts],
[0, 1.0]
])
Bd = sparse.csc_matrix([
[0.0],
[Ts/M]])
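    # i.e. point-mass dynamics discretized with step Ts:
    #   p_{k+1} = p_k + Ts * v_k
    #   v_{k+1} = v_k + (Ts / M) * u_k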
    [nx, nu] = Bd.shape  # number of states and number of inputs
# Constraints
uref = 0
    uinit = 0  # initial input at time instant -1 (only seeds uminus1_val below)
umin = np.array([-1000.0]) - uref
umax = np.array([1000.0]) - uref
xmin = np.array([-100.0, -100.0])
xmax = np.array([100.0, 100.0])
# Objective function
Q = sparse.diags([0.2, 0.3])
QN = sparse.diags([0.4, 0.5]) # final cost
R = 0.1*sparse.eye(1)
# Initial and reference states
x0 = np.array([0.1, 0.2]) # initial state
# Reference input and states
pref = 7.0
vref = 0
xref = np.array([pref, vref]) # reference state
# Prediction horizon
Np = 20
# Define problem
u = Variable((nu, Np))
x = Variable((nx, Np + 1))
x_init = Parameter(nx)
objective = 0
constraints = [x[:,0] == x_init]
for k in range(Np):
objective += quad_form(x[:, k] - xref, Q) + quad_form(u[:, k], R)
constraints += [x[:, k+1] == Ad*x[:, k] + Bd*u[:, k]]
constraints += [xmin <= x[:, k], x[:, k] <= xmax]
constraints += [umin <= u[:, k], u[:, k] <= umax]
objective += quad_form(x[:, Np] - xref, QN)
prob = Problem(Minimize(objective), constraints)
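    # The loop above assembles the standard finite-horizon MPC program
    # (symbols as defined in this script):
    #   minimize    sum_{k=0}^{Np-1} [ (x_k - xref)' Q (x_k - xref) + u_k' R u_k ]
    #               + (x_Np - xref)' QN (x_Np - xref)
    #   subject to  x_{k+1} = Ad x_k + Bd u_k,   x_0 = x_init,
    #               xmin <= x_k <= xmax,   umin <= u_k <= umax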
    # Simulate in closed loop
len_sim = 15 # simulation length (s)
    nsim = int(len_sim/Ts)  # simulation length (timesteps)
xsim = np.zeros((nsim,nx))
usim = np.zeros((nsim,nu))
tsim = np.arange(0,nsim)*Ts
uminus1_val = uinit # initial previous measured input is the input at time instant -1.
time_start = time.time()
for i in range(nsim):
x_init.value = x0
#uminus1.value = uminus1_val
prob.solve(solver=OSQP, warm_start=True)
uMPC = u[:,0].value
usim[i,:] = uMPC
x0 = Ad.dot(x0) + Bd.dot(uMPC)
xsim[i,:] = x0
uminus1_val = uMPC # or a measurement if the input is affected by noise
time_sim = time.time() - time_start
import matplotlib.pyplot as plt
fig,axes = plt.subplots(3,1, figsize=(10,10))
axes[0].plot(tsim, xsim[:,0], "k", label='p')
axes[0].plot(tsim, xref[0]*np.ones(np.shape(tsim)), "r--", label="pref")
axes[0].set_title("Position (m)")
axes[1].plot(tsim, xsim[:,1], label="v")
axes[1].plot(tsim, xref[1]*np.ones(np.shape(tsim)), "r--", label="vref")
axes[1].set_title("Velocity (m/s)")
axes[2].plot(tsim, usim[:,0], label="u")
axes[2].plot(tsim, uref*np.ones(np.shape(tsim)), "r--", label="uref")
axes[2].set_title("Force (N)")
for ax in axes:
ax.grid(True)
ax.legend()
| 28.5
| 90
| 0.567476
|
from cvxpy import Variable, Parameter, Minimize, Problem, OSQP, quad_form
import numpy as np
import scipy as sp
import scipy.sparse as sparse
import time
if __name__ == "__main__":
Ts = 0.2
M = 2.0
Ad = sparse.csc_matrix([
[1.0, Ts],
[0, 1.0]
])
Bd = sparse.csc_matrix([
[0.0],
[Ts/M]])
[nx, nu] = Bd.shape
uref = 0
uinit = 0
umin = np.array([-1000.0]) - uref
umax = np.array([1000.0]) - uref
xmin = np.array([-100.0, -100.0])
xmax = np.array([100.0, 100.0])
Q = sparse.diags([0.2, 0.3])
QN = sparse.diags([0.4, 0.5])
R = 0.1*sparse.eye(1)
x0 = np.array([0.1, 0.2])
pref = 7.0
vref = 0
xref = np.array([pref, vref])
Np = 20
u = Variable((nu, Np))
x = Variable((nx, Np + 1))
x_init = Parameter(nx)
objective = 0
constraints = [x[:,0] == x_init]
for k in range(Np):
objective += quad_form(x[:, k] - xref, Q) + quad_form(u[:, k], R)
constraints += [x[:, k+1] == Ad*x[:, k] + Bd*u[:, k]]
constraints += [xmin <= x[:, k], x[:, k] <= xmax]
constraints += [umin <= u[:, k], u[:, k] <= umax]
objective += quad_form(x[:, Np] - xref, QN)
prob = Problem(Minimize(objective), constraints)
len_sim = 15
nsim = int(len_sim/Ts)
xsim = np.zeros((nsim,nx))
usim = np.zeros((nsim,nu))
tsim = np.arange(0,nsim)*Ts
uminus1_val = uinit
time_start = time.time()
for i in range(nsim):
x_init.value = x0
prob.solve(solver=OSQP, warm_start=True)
uMPC = u[:,0].value
usim[i,:] = uMPC
x0 = Ad.dot(x0) + Bd.dot(uMPC)
xsim[i,:] = x0
uminus1_val = uMPC
time_sim = time.time() - time_start
import matplotlib.pyplot as plt
fig,axes = plt.subplots(3,1, figsize=(10,10))
axes[0].plot(tsim, xsim[:,0], "k", label='p')
axes[0].plot(tsim, xref[0]*np.ones(np.shape(tsim)), "r--", label="pref")
axes[0].set_title("Position (m)")
axes[1].plot(tsim, xsim[:,1], label="v")
axes[1].plot(tsim, xref[1]*np.ones(np.shape(tsim)), "r--", label="vref")
axes[1].set_title("Velocity (m/s)")
axes[2].plot(tsim, usim[:,0], label="u")
axes[2].plot(tsim, uref*np.ones(np.shape(tsim)), "r--", label="uref")
axes[2].set_title("Force (N)")
for ax in axes:
ax.grid(True)
ax.legend()
| true
| true
|
f7095f4389a092c609358f7f82ea89c7b96159a9
| 11,890
|
py
|
Python
|
src/ppscore/calculation.py
|
SuryaThiru/ppscore
|
59df800e32d4ef5fda4be2bdf4b3235db2a39fee
|
[
"MIT"
] | null | null | null |
src/ppscore/calculation.py
|
SuryaThiru/ppscore
|
59df800e32d4ef5fda4be2bdf4b3235db2a39fee
|
[
"MIT"
] | null | null | null |
src/ppscore/calculation.py
|
SuryaThiru/ppscore
|
59df800e32d4ef5fda4be2bdf4b3235db2a39fee
|
[
"MIT"
] | null | null | null |
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_absolute_error, f1_score
import pandas as pd
from pandas.api.types import (
is_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_string_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
)
# with CV_ITERATIONS = 4, patterns can only be detected when the same
# observation occurs at least 4 times; raising the limit raises the minimum
# number of observations accordingly. This matters because below this limit
# sklearn throws an error, which is caught and leads to a score of 0
CV_ITERATIONS = 4
RANDOM_SEED = 587136
# if a numeric column has less than 15 unique values, it is inferred as categoric
# thus, the ppscore will use a classification
# this has important implications on the ppscore
# eg if you have 4 equal categories encoded 0, 1, 2, 3 and treat it as a regression
# then the baseline is 1 (median) which is okayish and a predictor will have a harder time
# to beat the baseline, thus the ppscore will be considerably lower
# if the column is encoded as category, then the baseline will be to always predict 0
# this baseline will be way easier to beat and thus result in a higher ppscore
NUMERIC_AS_CATEGORIC_BREAKPOINT = 15
def _calculate_model_cv_score_(df, target, feature, metric, model, **kwargs):
"Calculates the mean model score based on cross-validation"
# Sources about the used methods:
# https://scikit-learn.org/stable/modules/tree.html
# https://scikit-learn.org/stable/modules/cross_validation.html
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html
# shuffle the rows - this is important for crossvalidation
# because the crossvalidation just takes the first n lines
# if there is a strong pattern in the rows eg 0,0,0,0,1,1,1,1
# then this will lead to problems because the first cv sees mostly 0 and the later 1
# this approach might be wrong for timeseries because it might leak information
df = df.sample(frac=1, random_state=RANDOM_SEED, replace=False)
# preprocess target
if df[target].dtype == object:
le = preprocessing.LabelEncoder()
df[target] = le.fit_transform(df[target])
target_series = df[target]
else:
target_series = df[target]
# preprocess feature
if df[feature].dtype == object:
one_hot_encoder = preprocessing.OneHotEncoder()
sparse_matrix = one_hot_encoder.fit_transform(df[feature].values.reshape(-1, 1))
feature_df = sparse_matrix
else:
# reshaping needed because there is only 1 feature
feature_df = df[feature].values.reshape(-1, 1)
# Crossvalidation is stratifiedKFold for classification, KFold for regression
# CV on one core (n_job=1; default) has shown to be fastest
scores = cross_val_score(
model, feature_df, target_series, cv=CV_ITERATIONS, scoring=metric
)
return scores.mean()
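# note: with scoring="neg_mean_absolute_error" the returned mean score is
# negative; callers take abs() before normalizing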
def _normalized_mae_score(model_mae, naive_mae):
"Normalizes the model MAE score, given the baseline score"
# Value range of MAE is [0, infinity); 0 is best
# (model_mae=10, naive_mae=5)  -> 0 because the model is worse than naive
# (model_mae=10, naive_mae=20) -> 0.5
# (model_mae=5,  naive_mae=20) -> 0.75 = 1 - (model_mae / naive_mae)
if model_mae > naive_mae:
return 0
else:
return 1 - (model_mae / naive_mae)
def _mae_normalizer(df, y, model_score):
"In case of MAE, calculates the baseline score for y and derives the PPS."
df["naive"] = df[y].median()
baseline_score = mean_absolute_error(df[y], df["naive"]) # true, pred
ppscore = _normalized_mae_score(abs(model_score), baseline_score)
return ppscore, baseline_score
def _normalized_f1_score(model_f1, baseline_f1):
"Normalizes the model F1 score, given the baseline score"
# F1 ranges from 0 to 1; 1 is best
# (model_f1=0.5,  baseline_f1=0.7) -> 0 because the model is worse than naive
# (model_f1=0.75, baseline_f1=0.5) -> 0.5
if model_f1 < baseline_f1:
return 0
else:
scale_range = 1.0 - baseline_f1 # eg 0.3
f1_diff = model_f1 - baseline_f1 # eg 0.1
return f1_diff / scale_range # 0.1/0.3 = 0.33
def _f1_normalizer(df, y, model_score):
"In case of F1, calculates the baseline score for y and derives the PPS."
df["naive"] = df[y].value_counts().index[0]
baseline_score = f1_score(df[y], df["naive"], average="weighted")
ppscore = _normalized_f1_score(model_score, baseline_score)
return ppscore, baseline_score
TASKS = {
"regression": {
"metric_name": "mean absolute error",
"metric_key": "neg_mean_absolute_error",
"model": tree.DecisionTreeRegressor(),
"score_normalizer": _mae_normalizer,
},
"classification": {
"metric_name": "weighted F1",
"metric_key": "f1_weighted",
"model": tree.DecisionTreeClassifier(),
"score_normalizer": _f1_normalizer,
},
"predict_itself": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_constant": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_id": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
}
def _infer_task(df, x, y):
"Returns str with the name of the inferred task based on the columns x and y"
if x == y:
return "predict_itself"
category_count = df[y].value_counts().count()
if category_count == 1:
return "predict_constant"
if category_count == 2:
return "classification"
if category_count == len(df[y]) and (
is_string_dtype(df[y]) or is_categorical_dtype(df[y])
):
return "predict_id"
if category_count <= NUMERIC_AS_CATEGORIC_BREAKPOINT and is_numeric_dtype(df[y]):
return "classification"
if is_bool_dtype(df[y]) or is_string_dtype(df[y]) or is_categorical_dtype(df[y]):
return "classification"
if is_datetime64_any_dtype(df[y]) or is_timedelta64_dtype(df[y]):
raise Exception(
f"The target column {y} has the dtype {df[y].dtype} which is not supported. A possible solution might be to convert {y} to a string column"
)
# this check needs to be after is_bool_dtype because bool is considered numeric by pandas
if is_numeric_dtype(df[y]):
return "regression"
raise Exception(
f"Could not infer a valid task based on the target {y}. The dtype {df[y].dtype} is not yet supported"
) # pragma: no cover
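# illustrative outcomes of the rules above (examples assumed): a boolean target
# -> "classification"; a float target with more than 15 unique values
# -> "regression"; a string column with one value per row -> "predict_id"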
def _feature_is_id(df, x):
"Returns Boolean if the feature column x is an ID"
if not (is_string_dtype(df[x]) or is_categorical_dtype(df[x])):
return False
category_count = df[x].value_counts().count()
return category_count == len(df[x])
def _maybe_sample(df, sample):
"""
Maybe samples the rows of the given df to have at most ``sample`` rows
If sample is ``None`` or falsy, there will be no sampling.
If the df has fewer rows than the sample, there will be no sampling.
Parameters
----------
df : pandas.DataFrame
Dataframe that might be sampled
sample : int or ``None``
Number of rows to be sampled
Returns
-------
pandas.DataFrame
DataFrame after potential sampling
"""
if sample and len(df) > sample:
# this is a problem if x or y have more than sample=5000 categories
# TODO: dont sample when the problem occurs and show warning
df = df.sample(sample, random_state=RANDOM_SEED, replace=False)
return df
def score(df, x, y, task=None, sample=5000):
"""
Calculate the Predictive Power Score (PPS) for "x predicts y"
The score always ranges from 0 to 1 and is data-type agnostic.
A score of 0 means that the column x cannot predict the column y better than a naive baseline model.
A score of 1 means that the column x can perfectly predict the column y given the model.
A score between 0 and 1 states the ratio of how much potential predictive power the model achieved compared to the baseline model.
Parameters
----------
df : pandas.DataFrame
Dataframe that contains the columns x and y
x : str
Name of the column x which acts as the feature
y : str
Name of the column y which acts as the target
task : str, default ``None``
Name of the prediction task, e.g. ``classification`` or ``regression``
If the task is not specified, it is inferred based on the y column
The task determines which model and evaluation score is used for the PPS
sample : int or ``None``
Number of rows for sampling. The sampling decreases the calculation time of the PPS.
If ``None`` there will be no sampling.
Returns
-------
Dict
A dict that contains multiple fields about the resulting PPS.
The dict enables introspection into the calculations that have been performed under the hood
"""
if x == y:
task_name = "predict_itself"
else:
# TODO: log.warning when values have been dropped
df = df[[x, y]].dropna()
if len(df) == 0:
raise Exception("After dropping missing values, there are no valid rows left")
df = _maybe_sample(df, sample)
if task is None:
task_name = _infer_task(df, x, y)
else:
task_name = task
task = TASKS[task_name]
if task_name in ["predict_constant", "predict_itself"]:
model_score = 1
ppscore = 1
baseline_score = 1
elif task_name == "predict_id": # target is id
model_score = 0
ppscore = 0
baseline_score = 0
elif _feature_is_id(df, x):
model_score = 0
ppscore = 0
baseline_score = 0
else:
model_score = _calculate_model_cv_score_(
df, target=y, feature=x, metric=task["metric_key"], model=task["model"]
)
ppscore, baseline_score = task["score_normalizer"](df, y, model_score)
return {
"x": x,
"y": y,
"task": task_name,
"ppscore": ppscore,
"metric": task["metric_name"],
"baseline_score": baseline_score,
"model_score": abs(model_score), # sklearn returns negative mae
"model": task["model"],
}
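# example usage (illustrative data, not from the original module):
#   df = pd.DataFrame({"x": range(100), "y": [v % 2 for v in range(100)]})
#   score(df, "x", "y")["task"]  # -> "classification" (y has 2 categories)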
# def predictors(df, y, task=None, sorted=True):
# pass
def matrix(df, output="df", **kwargs):
"""
Calculate the Predictive Power Score (PPS) matrix for all columns in the dataframe
Parameters
----------
df : pandas.DataFrame
The dataframe that contains the data
output: str - potential values: "df", "dict"
Control the type of the output. Either return a df or a dict with all the PPS dicts arranged by the target column
kwargs:
Other key-word arguments that shall be forwarded to the pps.score method
Returns
-------
pandas.DataFrame or Dict
Either returns a df or a dict with all the PPS dicts arranged by the target column. This can be influenced by the output argument
"""
data = {}
columns = list(df.columns)
for target in columns:
scores = []
for feature in columns:
# single_score = score(df, x=feature, y=target)["ppscore"]
try:
single_score = score(df, x=feature, y=target, **kwargs)["ppscore"]
except Exception:
# TODO: log error
single_score = 0
scores.append(single_score)
data[target] = scores
if output == "df":
matrix = pd.DataFrame.from_dict(data, orient="index")
matrix.columns = columns
return matrix
else: # output == "dict"
return data
| 34.970588
| 306
| 0.655341
|
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_absolute_error, f1_score
import pandas as pd
from pandas.api.types import (
is_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_string_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
)
CV_ITERATIONS = 4
RANDOM_SEED = 587136
NUMERIC_AS_CATEGORIC_BREAKPOINT = 15
def _calculate_model_cv_score_(df, target, feature, metric, model, **kwargs):
df = df.sample(frac=1, random_state=RANDOM_SEED, replace=False)
if df[target].dtype == object:
le = preprocessing.LabelEncoder()
df[target] = le.fit_transform(df[target])
target_series = df[target]
else:
target_series = df[target]
if df[feature].dtype == object:
one_hot_encoder = preprocessing.OneHotEncoder()
sparse_matrix = one_hot_encoder.fit_transform(df[feature].values.reshape(-1, 1))
feature_df = sparse_matrix
else:
feature_df = df[feature].values.reshape(-1, 1)
scores = cross_val_score(
model, feature_df, target_series, cv=CV_ITERATIONS, scoring=metric
)
return scores.mean()
def _normalized_mae_score(model_mae, naive_mae):
if model_mae > naive_mae:
return 0
else:
return 1 - (model_mae / naive_mae)
def _mae_normalizer(df, y, model_score):
df["naive"] = df[y].median()
baseline_score = mean_absolute_error(df[y], df["naive"])
ppscore = _normalized_mae_score(abs(model_score), baseline_score)
return ppscore, baseline_score
def _normalized_f1_score(model_f1, baseline_f1):
if model_f1 < baseline_f1:
return 0
else:
scale_range = 1.0 - baseline_f1
f1_diff = model_f1 - baseline_f1
return f1_diff / scale_range
def _f1_normalizer(df, y, model_score):
df["naive"] = df[y].value_counts().index[0]
baseline_score = f1_score(df[y], df["naive"], average="weighted")
ppscore = _normalized_f1_score(model_score, baseline_score)
return ppscore, baseline_score
TASKS = {
"regression": {
"metric_name": "mean absolute error",
"metric_key": "neg_mean_absolute_error",
"model": tree.DecisionTreeRegressor(),
"score_normalizer": _mae_normalizer,
},
"classification": {
"metric_name": "weighted F1",
"metric_key": "f1_weighted",
"model": tree.DecisionTreeClassifier(),
"score_normalizer": _f1_normalizer,
},
"predict_itself": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_constant": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_id": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
}
def _infer_task(df, x, y):
if x == y:
return "predict_itself"
category_count = df[y].value_counts().count()
if category_count == 1:
return "predict_constant"
if category_count == 2:
return "classification"
if category_count == len(df[y]) and (
is_string_dtype(df[y]) or is_categorical_dtype(df[y])
):
return "predict_id"
if category_count <= NUMERIC_AS_CATEGORIC_BREAKPOINT and is_numeric_dtype(df[y]):
return "classification"
if is_bool_dtype(df[y]) or is_string_dtype(df[y]) or is_categorical_dtype(df[y]):
return "classification"
if is_datetime64_any_dtype(df[y]) or is_timedelta64_dtype(df[y]):
raise Exception(
f"The target column {y} has the dtype {df[y].dtype} which is not supported. A possible solution might be to convert {y} to a string column"
)
if is_numeric_dtype(df[y]):
return "regression"
raise Exception(
f"Could not infer a valid task based on the target {y}. The dtype {df[y].dtype} is not yet supported"
)
def _feature_is_id(df, x):
if not (is_string_dtype(df[x]) or is_categorical_dtype(df[x])):
return False
category_count = df[x].value_counts().count()
return category_count == len(df[x])
def _maybe_sample(df, sample):
if sample and len(df) > sample:
df = df.sample(sample, random_state=RANDOM_SEED, replace=False)
return df
def score(df, x, y, task=None, sample=5000):
if x == y:
task_name = "predict_itself"
else:
df = df[[x, y]].dropna()
if len(df) == 0:
raise Exception("After dropping missing values, there are no valid rows left")
df = _maybe_sample(df, sample)
if task is None:
task_name = _infer_task(df, x, y)
else:
task_name = task
task = TASKS[task_name]
if task_name in ["predict_constant", "predict_itself"]:
model_score = 1
ppscore = 1
baseline_score = 1
elif task_name == "predict_id": model_score = 0
ppscore = 0
baseline_score = 0
elif _feature_is_id(df, x):
model_score = 0
ppscore = 0
baseline_score = 0
else:
model_score = _calculate_model_cv_score_(
df, target=y, feature=x, metric=task["metric_key"], model=task["model"]
)
ppscore, baseline_score = task["score_normalizer"](df, y, model_score)
return {
"x": x,
"y": y,
"task": task_name,
"ppscore": ppscore,
"metric": task["metric_name"],
"baseline_score": baseline_score,
"model_score": abs(model_score), "model": task["model"],
}
def matrix(df, output="df", **kwargs):
data = {}
columns = list(df.columns)
for target in columns:
scores = []
for feature in columns:
try:
single_score = score(df, x=feature, y=target, **kwargs)["ppscore"]
except Exception:
single_score = 0
scores.append(single_score)
data[target] = scores
if output == "df":
matrix = pd.DataFrame.from_dict(data, orient="index")
matrix.columns = columns
return matrix
else:
return data
| true
| true
|
f7095f7c93a14717410ae3c0c3514e781f186551
| 832
|
py
|
Python
|
direct/cli/utils.py
|
NKI-AI/direct
|
7c9d59345b68ea70bc20a5cea2c895c6059f8e46
|
[
"Apache-2.0"
] | 57
|
2021-12-21T23:11:46.000Z
|
2022-03-26T23:25:36.000Z
|
direct/cli/utils.py
|
NKI-AI/direct
|
7c9d59345b68ea70bc20a5cea2c895c6059f8e46
|
[
"Apache-2.0"
] | 23
|
2021-12-22T15:02:20.000Z
|
2022-03-29T21:12:37.000Z
|
direct/cli/utils.py
|
NKI-AI/direct
|
7c9d59345b68ea70bc20a5cea2c895c6059f8e46
|
[
"Apache-2.0"
] | 5
|
2021-12-22T17:15:28.000Z
|
2022-03-03T02:44:12.000Z
|
# coding=utf-8
# Copyright (c) DIRECT Contributors
import argparse
import pathlib
import sys
from direct.types import FileOrUrl, PathOrString
from direct.utils.io import check_is_valid_url
def is_file(path):
path = pathlib.Path(path)
if path.is_file():
return path
raise argparse.ArgumentTypeError(f"{path} is not a valid file.")
def file_or_url(path: PathOrString) -> FileOrUrl:
if check_is_valid_url(path):
return FileOrUrl(path)
path = pathlib.Path(path)
if path.is_file():
return FileOrUrl(path)
raise argparse.ArgumentTypeError(f"{path} is not a valid file or url.")
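# e.g. file_or_url("https://example.com/model.ckpt") returns the URL unchanged,
# while file_or_url("config.yaml") is returned only if the file exists on disk
# (illustrative inputs)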
def check_train_val(key, name):
if key is not None and len(key) != 2:
sys.exit(f"--{name} has to be of the form `train_folder, validation_folder` if a validation folder is set.")
| 25.212121
| 116
| 0.704327
|
import argparse
import pathlib
import sys
from direct.types import FileOrUrl, PathOrString
from direct.utils.io import check_is_valid_url
def is_file(path):
path = pathlib.Path(path)
if path.is_file():
return path
raise argparse.ArgumentTypeError(f"{path} is not a valid file.")
def file_or_url(path: PathOrString) -> FileOrUrl:
if check_is_valid_url(path):
return FileOrUrl(path)
path = pathlib.Path(path)
if path.is_file():
return FileOrUrl(path)
raise argparse.ArgumentTypeError(f"{path} is not a valid file or url.")
def check_train_val(key, name):
if key is not None and len(key) != 2:
sys.exit(f"--{name} has to be of the form `train_folder, validation_folder` if a validation folder is set.")
| true
| true
|
f7095fdd9792bebf1514154b53900ed899f43e29
| 303
|
py
|
Python
|
Eshop/urls.py
|
deepchatterjeevns/Green-veggies-ECommerce-App
|
142e8f9cb6766b99c0f8212ca77dc9bec41a9a83
|
[
"MIT"
] | null | null | null |
Eshop/urls.py
|
deepchatterjeevns/Green-veggies-ECommerce-App
|
142e8f9cb6766b99c0f8212ca77dc9bec41a9a83
|
[
"MIT"
] | null | null | null |
Eshop/urls.py
|
deepchatterjeevns/Green-veggies-ECommerce-App
|
142e8f9cb6766b99c0f8212ca77dc9bec41a9a83
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('store.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 30.3
| 65
| 0.759076
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('store.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true
| true
|
f70960b00381015a48deaf6009d765c36d4c15c3
| 10,878
|
py
|
Python
|
register.py
|
joakimlindblad/py_alpha_amd_release
|
6a95286753c48e9f0c882d650158b15b58bcdd46
|
[
"MIT"
] | null | null | null |
register.py
|
joakimlindblad/py_alpha_amd_release
|
6a95286753c48e9f0c882d650158b15b58bcdd46
|
[
"MIT"
] | null | null | null |
register.py
|
joakimlindblad/py_alpha_amd_release
|
6a95286753c48e9f0c882d650158b15b58bcdd46
|
[
"MIT"
] | null | null | null |
#
# Py-Alpha-AMD Registration Framework
# Author: Johan Ofverstedt
# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information
#
# Copyright 2019 Johan Ofverstedt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#
# Registration framework
#
# Import Numpy/Scipy
import numpy as np
import scipy as sp
import scipy.misc
# Import transforms
from transforms import CompositeTransform
from transforms import AffineTransform
from transforms import Rigid2DTransform
from transforms import Rotate2DTransform
from transforms import TranslationTransform
from transforms import ScalingTransform
# Import distances
from distances import QuantizedImage
from distances import alpha_amd
from distances import symmetric_amd_distance
# Import optimizers
from optimizers import GradientDescentOptimizer
# Import generators and filters
import generators
import filters
# Import misc
import math
import sys
import time
import cProfile, pstats
class Register:
def __init__(self, dim):
self.dim = dim
self.sampling_fraction = 1.0
self.step_lengths = np.array([[0.1, 1.0]])
self.iterations = 1500
self.alpha_levels = 7
self.gradient_magnitude_threshold = 0.00001
self.ref_im = None
self.flo_im = None
self.ref_mask = None
self.flo_mask = None
self.ref_weights = None
self.flo_weights = None
# Transforms
self.initial_transforms = []
self.transforms_param_scaling = []
self.output_transforms = []
self.values = []
self.value_history = []
# Resolution pyramid levels
self.pyramid_factors = []
self.pyramid_sigmas = []
self.distances = []
# Reporting/Output
self.report_func = None
self.report_freq = 25
def add_initial_transform(self, transform, param_scaling=None):
if param_scaling is None:
param_scaling = np.ones((transform.get_param_count(),))
self.initial_transforms.append(transform)
self.transforms_param_scaling.append(param_scaling)
def add_initial_transforms(self, transforms, param_scaling=None):
for i, t in enumerate(transforms):
if param_scaling is None:
pscaling = np.ones((t.get_param_count(),))
else:
pscaling = param_scaling[i]
self.add_initial_transform(t, pscaling)
def clear_transforms(self):
self.initial_transforms = []
self.output_transforms = []
self.transforms_param_scaling = []
self.values = []
self.value_history = []
def get_output(self, index):
return self.output_transforms[index], self.values[index]
def get_value_history(self, index, level):
return self.value_history[index][level]
def add_pyramid_level(self, factor, sigma):
self.pyramid_factors.append(factor)
self.pyramid_sigmas.append(sigma)
def add_pyramid_levels(self, factors, sigmas):
for i in range(len(factors)):
self.add_pyramid_level(factors[i], sigmas[i])
def get_pyramid_level_count(self):
return len(self.pyramid_factors)
def set_sampling_fraction(self, sampling_fraction):
self.sampling_fraction = sampling_fraction
def set_iterations(self, iterations):
self.iterations = iterations
def set_alpha_levels(self, alpha_levels):
self.alpha_levels = alpha_levels
def set_step_lengths(self, step_lengths):
self.step_lengths = np.array(step_lengths)
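# shape convention (as consumed by run() below): a 1-d array [start, end]
# applies to every pyramid level, while a 2-d array of shape (levels, 2)
# gives each level its own (start, end) pair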
def set_reference_image(self, image, spacing = None):
self.ref_im = image
if spacing is None:
self.ref_spacing = np.ones(image.ndim)
else:
self.ref_spacing = spacing
def set_floating_image(self, image, spacing = None):
self.flo_im = image
if spacing is None:
self.flo_spacing = np.ones(image.ndim)
else:
self.flo_spacing = spacing
def set_reference_mask(self, mask):
self.ref_mask = mask
def set_floating_mask(self, mask):
self.flo_mask = mask
def set_reference_weights(self, weights):
self.ref_weights = weights
def set_floating_weights(self, weights):
self.flo_weights = weights
def set_gradient_magnitude_threshold(self, t):
self.gradient_magnitude_threshold = t
def set_report_freq(self, freq):
self.report_freq = freq
def set_report_func(self, func):
self.report_func = func
def initialize(self, pyramid_images_output_path=None):
if len(self.pyramid_factors) == 0:
self.add_pyramid_level(1, 0.0)
if len(self.initial_transforms) == 0:
self.add_initial_transform(AffineTransform(self.dim))
### Preprocessing
pyramid_levels = len(self.pyramid_factors)
for i in range(pyramid_levels):
factor = self.pyramid_factors[i]
ref_resampled = filters.downsample(filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]), factor)
flo_resampled = filters.downsample(filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]), factor)
ref_mask_resampled = filters.downsample(self.ref_mask, factor)
flo_mask_resampled = filters.downsample(self.flo_mask, factor)
ref_resampled = filters.normalize(ref_resampled, 0.0, ref_mask_resampled)
flo_resampled = filters.normalize(flo_resampled, 0.0, flo_mask_resampled)
if pyramid_images_output_path is not None and ref_resampled.ndim == 2:
scipy.misc.imsave('%sref_resampled_%d.png' % (pyramid_images_output_path, i+1), ref_resampled)
scipy.misc.imsave('%sflo_resampled_%d.png' % (pyramid_images_output_path, i+1), flo_resampled)
if self.ref_weights is None:
ref_weights = np.zeros(ref_resampled.shape)
ref_weights[ref_mask_resampled] = 1.0
else:
ref_weights = filters.downsample(self.ref_weights, factor)
if self.flo_weights is None:
flo_weights = np.zeros(flo_resampled.shape)
flo_weights[flo_mask_resampled] = 1.0
else:
flo_weights = filters.downsample(self.flo_weights, factor)
ref_diag = np.sqrt(np.square(np.array(ref_resampled.shape)*self.ref_spacing).sum())
flo_diag = np.sqrt(np.square(np.array(flo_resampled.shape)*self.flo_spacing).sum())
q_ref = QuantizedImage(ref_resampled, self.alpha_levels, ref_weights, self.ref_spacing*factor, remove_zero_weight_pnts = True)
q_flo = QuantizedImage(flo_resampled, self.alpha_levels, flo_weights, self.flo_spacing*factor, remove_zero_weight_pnts = True)
tf_ref = alpha_amd.AlphaAMD(q_ref, self.alpha_levels, ref_diag, self.ref_spacing*factor, ref_mask_resampled, ref_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)
tf_flo = alpha_amd.AlphaAMD(q_flo, self.alpha_levels, flo_diag, self.flo_spacing*factor, flo_mask_resampled, flo_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)
symmetric_measure = True
squared_measure = False
sym_dist = symmetric_amd_distance.SymmetricAMDDistance(symmetric_measure=symmetric_measure, squared_measure=squared_measure)
sym_dist.set_ref_image_source(q_ref)
sym_dist.set_ref_image_target(tf_ref)
sym_dist.set_flo_image_source(q_flo)
sym_dist.set_flo_image_target(tf_flo)
sym_dist.set_sampling_fraction(self.sampling_fraction)
sym_dist.initialize()
self.distances.append(sym_dist)
def run(self):
pyramid_level_count = len(self.pyramid_factors)
transform_count = len(self.initial_transforms)
for t_it in range(transform_count):
init_transform = self.initial_transforms[t_it]
param_scaling = self.transforms_param_scaling[t_it]
self.value_history.append([])
for lvl_it in range(pyramid_level_count):
opt = GradientDescentOptimizer(self.distances[lvl_it], init_transform.copy())
if self.step_lengths.ndim == 1:
opt.set_step_length(self.step_lengths[0], self.step_lengths[1])
else:
opt.set_step_length(self.step_lengths[lvl_it, 0], self.step_lengths[lvl_it, 1])
opt.set_scalings(param_scaling)
opt.set_gradient_magnitude_threshold(self.gradient_magnitude_threshold)
opt.set_report_freq(self.report_freq)
if type(self.report_func) is list or type(self.report_func) is tuple:
opt.set_report_callback(self.report_func[t_it])
else:
opt.set_report_callback(self.report_func)
if isinstance(self.iterations, int):
itercount = self.iterations
else:
assert(len(self.iterations) == pyramid_level_count)
itercount = self.iterations[lvl_it]
opt.optimize(itercount)
if lvl_it + 1 == pyramid_level_count:
self.output_transforms.append(opt.get_transform())
self.values.append(opt.get_value())
self.initial_transforms[t_it] = opt.get_transform()
else:
init_transform = opt.get_transform()
self.value_history[-1].append(opt.get_value_history())
| 38.574468
| 206
| 0.668505
|
import numpy as np
import scipy as sp
import scipy.misc
from transforms import CompositeTransform
from transforms import AffineTransform
from transforms import Rigid2DTransform
from transforms import Rotate2DTransform
from transforms import TranslationTransform
from transforms import ScalingTransform
from distances import QuantizedImage
from distances import alpha_amd
from distances import symmetric_amd_distance
from optimizers import GradientDescentOptimizer
import generators
import filters
import math
import sys
import time
import cProfile, pstats
class Register:
def __init__(self, dim):
self.dim = dim
self.sampling_fraction = 1.0
self.step_lengths = np.array([[0.1, 1.0]])
self.iterations = 1500
self.alpha_levels = 7
self.gradient_magnitude_threshold = 0.00001
self.ref_im = None
self.flo_im = None
self.ref_mask = None
self.flo_mask = None
self.ref_weights = None
self.flo_weights = None
self.initial_transforms = []
self.transforms_param_scaling = []
self.output_transforms = []
self.values = []
self.value_history = []
self.pyramid_factors = []
self.pyramid_sigmas = []
self.distances = []
self.report_func = None
self.report_freq = 25
def add_initial_transform(self, transform, param_scaling=None):
if param_scaling is None:
param_scaling = np.ones((transform.get_param_count(),))
self.initial_transforms.append(transform)
self.transforms_param_scaling.append(param_scaling)
def add_initial_transforms(self, transforms, param_scaling=None):
for i, t in enumerate(transforms):
if param_scaling is None:
pscaling = np.ones((t.get_param_count(),))
else:
pscaling = param_scaling[i]
self.add_initial_transform(t, pscaling)
def clear_transforms(self):
self.initial_transforms = []
self.output_transforms = []
self.transforms_param_scaling = []
self.values = []
self.value_history = []
def get_output(self, index):
return self.output_transforms[index], self.values[index]
def get_value_history(self, index, level):
return self.value_history[index][level]
def add_pyramid_level(self, factor, sigma):
self.pyramid_factors.append(factor)
self.pyramid_sigmas.append(sigma)
def add_pyramid_levels(self, factors, sigmas):
for i in range(len(factors)):
self.add_pyramid_level(factors[i], sigmas[i])
def get_pyramid_level_count(self):
return len(self.pyramid_factors)
def set_sampling_fraction(self, sampling_fraction):
self.sampling_fraction = sampling_fraction
def set_iterations(self, iterations):
self.iterations = iterations
def set_alpha_levels(self, alpha_levels):
self.alpha_levels = alpha_levels
def set_step_lengths(self, step_lengths):
self.step_lengths = np.array(step_lengths)
def set_reference_image(self, image, spacing = None):
self.ref_im = image
if spacing is None:
self.ref_spacing = np.ones(image.ndim)
else:
self.ref_spacing = spacing
def set_floating_image(self, image, spacing = None):
self.flo_im = image
if spacing is None:
self.flo_spacing = np.ones(image.ndim)
else:
self.flo_spacing = spacing
def set_reference_mask(self, mask):
self.ref_mask = mask
def set_floating_mask(self, mask):
self.flo_mask = mask
def set_reference_weights(self, weights):
self.ref_weights = weights
def set_floating_weights(self, weights):
self.flo_weights = weights
def set_gradient_magnitude_threshold(self, t):
self.gradient_magnitude_threshold = t
def set_report_freq(self, freq):
self.report_freq = freq
def set_report_func(self, func):
self.report_func = func
def initialize(self, pyramid_images_output_path=None):
if len(self.pyramid_factors) == 0:
self.add_pyramid_level(1, 0.0)
if len(self.initial_transforms) == 0:
self.add_initial_transform(AffineTransform(self.dim))
pyramid_levels = len(self.pyramid_factors)
for i in range(pyramid_levels):
factor = self.pyramid_factors[i]
ref_resampled = filters.downsample(filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]), factor)
flo_resampled = filters.downsample(filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]), factor)
ref_mask_resampled = filters.downsample(self.ref_mask, factor)
flo_mask_resampled = filters.downsample(self.flo_mask, factor)
ref_resampled = filters.normalize(ref_resampled, 0.0, ref_mask_resampled)
flo_resampled = filters.normalize(flo_resampled, 0.0, flo_mask_resampled)
if pyramid_images_output_path is not None and ref_resampled.ndim == 2:
scipy.misc.imsave('%sref_resampled_%d.png' % (pyramid_images_output_path, i+1), ref_resampled)
scipy.misc.imsave('%sflo_resampled_%d.png' % (pyramid_images_output_path, i+1), flo_resampled)
if self.ref_weights is None:
ref_weights = np.zeros(ref_resampled.shape)
ref_weights[ref_mask_resampled] = 1.0
else:
ref_weights = filters.downsample(self.ref_weights, factor)
if self.flo_weights is None:
flo_weights = np.zeros(flo_resampled.shape)
flo_weights[flo_mask_resampled] = 1.0
else:
flo_weights = filters.downsample(self.flo_weights, factor)
ref_diag = np.sqrt(np.square(np.array(ref_resampled.shape)*self.ref_spacing).sum())
flo_diag = np.sqrt(np.square(np.array(flo_resampled.shape)*self.flo_spacing).sum())
q_ref = QuantizedImage(ref_resampled, self.alpha_levels, ref_weights, self.ref_spacing*factor, remove_zero_weight_pnts = True)
q_flo = QuantizedImage(flo_resampled, self.alpha_levels, flo_weights, self.flo_spacing*factor, remove_zero_weight_pnts = True)
tf_ref = alpha_amd.AlphaAMD(q_ref, self.alpha_levels, ref_diag, self.ref_spacing*factor, ref_mask_resampled, ref_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)
tf_flo = alpha_amd.AlphaAMD(q_flo, self.alpha_levels, flo_diag, self.flo_spacing*factor, flo_mask_resampled, flo_mask_resampled, interpolator_mode='linear', dt_fun = None, mask_out_edges = True)
symmetric_measure = True
squared_measure = False
sym_dist = symmetric_amd_distance.SymmetricAMDDistance(symmetric_measure=symmetric_measure, squared_measure=squared_measure)
sym_dist.set_ref_image_source(q_ref)
sym_dist.set_ref_image_target(tf_ref)
sym_dist.set_flo_image_source(q_flo)
sym_dist.set_flo_image_target(tf_flo)
sym_dist.set_sampling_fraction(self.sampling_fraction)
sym_dist.initialize()
self.distances.append(sym_dist)
def run(self):
pyramid_level_count = len(self.pyramid_factors)
transform_count = len(self.initial_transforms)
for t_it in range(transform_count):
init_transform = self.initial_transforms[t_it]
param_scaling = self.transforms_param_scaling[t_it]
self.value_history.append([])
for lvl_it in range(pyramid_level_count):
opt = GradientDescentOptimizer(self.distances[lvl_it], init_transform.copy())
if self.step_lengths.ndim == 1:
opt.set_step_length(self.step_lengths[0], self.step_lengths[1])
else:
opt.set_step_length(self.step_lengths[lvl_it, 0], self.step_lengths[lvl_it, 1])
opt.set_scalings(param_scaling)
opt.set_gradient_magnitude_threshold(self.gradient_magnitude_threshold)
opt.set_report_freq(self.report_freq)
if type(self.report_func) is list or type(self.report_func) is tuple:
opt.set_report_callback(self.report_func[t_it])
else:
opt.set_report_callback(self.report_func)
if isinstance(self.iterations, int):
itercount = self.iterations
else:
assert(len(self.iterations) == pyramid_level_count)
itercount = self.iterations[lvl_it]
opt.optimize(itercount)
if lvl_it + 1 == pyramid_level_count:
self.output_transforms.append(opt.get_transform())
self.values.append(opt.get_value())
self.initial_transforms[t_it] = opt.get_transform()
else:
init_transform = opt.get_transform()
self.value_history[-1].append(opt.get_value_history())
| true
| true
|
f70964bda74141f4f7530508848019e326ef048c
| 99
|
py
|
Python
|
fedml_api/data_preprocessing/stackoverflow_lr/__init__.py
|
GabriJP/FedML
|
9ccf52baddc065bf9cf3ca8ae2eeaadbc338e988
|
[
"Apache-2.0"
] | null | null | null |
fedml_api/data_preprocessing/stackoverflow_lr/__init__.py
|
GabriJP/FedML
|
9ccf52baddc065bf9cf3ca8ae2eeaadbc338e988
|
[
"Apache-2.0"
] | null | null | null |
fedml_api/data_preprocessing/stackoverflow_lr/__init__.py
|
GabriJP/FedML
|
9ccf52baddc065bf9cf3ca8ae2eeaadbc338e988
|
[
"Apache-2.0"
] | null | null | null |
from .data_loader import StackOverflowLRDataLoader
__all__ = [
'StackOverflowLRDataLoader',
]
| 16.5
| 50
| 0.787879
|
from .data_loader import StackOverflowLRDataLoader
__all__ = [
'StackOverflowLRDataLoader',
]
| true
| true
|
f70964db97d98144a8ce1f0e75a35480642ce6b9
| 1,273
|
py
|
Python
|
discretisedfield/ovf2vtk.py
|
minrk/discretisedfield
|
251584f8d976a7fafdff5402d16327489407c4dd
|
[
"BSD-3-Clause"
] | null | null | null |
discretisedfield/ovf2vtk.py
|
minrk/discretisedfield
|
251584f8d976a7fafdff5402d16327489407c4dd
|
[
"BSD-3-Clause"
] | null | null | null |
discretisedfield/ovf2vtk.py
|
minrk/discretisedfield
|
251584f8d976a7fafdff5402d16327489407c4dd
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import discretisedfield as df
def convert_files(input_files, output_files):
for input_file, output_file in zip(input_files, output_files):
field = df.Field.fromfile(input_file)
field.write(output_file)
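# e.g. convert_files(["m0.ovf"], ["m0.vtk"]) converts a single file
# (illustrative filenames)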
def main():
parser = argparse.ArgumentParser(
prog='ovf2vtk',
description='ovf2vtk - ovf to VTK format conversion'
)
parser.add_argument('--infile', type=argparse.FileType('r'),
help='One or more input files', nargs='+',
required=True)
parser.add_argument('--outfile', type=argparse.FileType('w'), nargs='+',
help='One or more output files, optional')
args = parser.parse_args()
if args.outfile:
if len(args.infile) == len(args.outfile):
input_files = [f.name for f in args.infile]
output_files = [f.name for f in args.outfile]
else:
print('\nError: The number of input and output '
'files does not match.')
return 0
else:
input_files = [f.name for f in args.infile]
output_files = [f'{f.split(".")[0]}.vtk' for f in input_files]
convert_files(input_files, output_files)
if __name__ == "__main__":
main()
| 31.825
| 76
| 0.600157
|
import argparse
import discretisedfield as df
def convert_files(input_files, output_files):
for input_file, output_file in zip(input_files, output_files):
field = df.Field.fromfile(input_file)
field.write(output_file)
def main():
parser = argparse.ArgumentParser(
prog='ovf2vtk',
description='ovf2vtk - ovf to VTK format conversion'
)
parser.add_argument('--infile', type=argparse.FileType('r'),
help='One or more input files', nargs='+',
required=True)
parser.add_argument('--outfile', type=argparse.FileType('w'), nargs='+',
help='One or more output files, optional')
args = parser.parse_args()
if args.outfile:
if len(args.infile) == len(args.outfile):
input_files = [f.name for f in args.infile]
output_files = [f.name for f in args.outfile]
else:
print('\nError: The number of input and output '
'files does not match.')
return 0
else:
input_files = [f.name for f in args.infile]
output_files = [f'{f.split(".")[0]}.vtk' for f in input_files]
convert_files(input_files, output_files)
if __name__ == "__main__":
main()
| true
| true
|
f70965108055debeb1faafd0a789bccc53cc61ee
| 3,586
|
py
|
Python
|
wallstreet_cli/main.py
|
kuuurt/wallstreet_cli
|
ce4f48b23c77a2955842685d9688fee6f45ba64f
|
[
"MIT"
] | null | null | null |
wallstreet_cli/main.py
|
kuuurt/wallstreet_cli
|
ce4f48b23c77a2955842685d9688fee6f45ba64f
|
[
"MIT"
] | null | null | null |
wallstreet_cli/main.py
|
kuuurt/wallstreet_cli
|
ce4f48b23c77a2955842685d9688fee6f45ba64f
|
[
"MIT"
] | null | null | null |
import os
import argparse
# import json
from wallstreet import Stock
from wallstreet_cli import xetra
from forex_python.converter import CurrencyRates
LOCAL_DB_PATH = os.path.join(os.path.dirname(__file__), "data", "db.txt")
def _currency_conversion(source_v: float, source_currency: str, target_currency: str):
"""Convert source currency to target currency
Args:
source_v (float): amount in the source currency
source_currency (str): designation of the source currency
target_currency (str): designation of the target currency
"""
c = CurrencyRates()
return c.convert(source_currency, target_currency, source_v)
def _get_stock_price(stock_name: str):
try:
return xetra.pipeline([stock_name])
except IndexError:
print("Ticker not found!")
return None
# TODO (easy): handle other exceptions. Try using "APPL" as argument for --stock;
# an unknown error occurs
def _get_all_fav_stock_prices(show_command):
xetra.pipeline(_get_fav_tickers(), show_command)
# for stock in _get_fav_tickers():
# xetra.get_stock_from_dataset(stock, csv_list)
# show_stock(stock)
def _find_ticker(company_name):
"""give the company_name, finds the ticker name"""
# TODO (harder) having a function to search for tickers by just giving a company name
# probably need to make an api request to some search engine
pass
def show_stock(stock_name: str):
"""show stock price of certain stock
Args:
stock_name (str): [description]
"""
# TODO (easy): take currency as arguement and show stock prices in different currencies
price_in_usd = _get_stock_price(stock_name)
if not price_in_usd:
return
price_in_eur = _currency_conversion(price_in_usd, "USD", "EUR")
print(f"{stock_name}: {round(price_in_eur, 2)} EUR")
def _append_fav_ticker(l_of_tickers: list, db_path: str=LOCAL_DB_PATH):
"""append a list of tickers to a json file
Args:
l_of_tickers (list): list of tickers to add to favorites
db_path (str, optional): path to store the fav file. Defaults to LOCAL_DB_PATH.
"""
# create the folder if not yet initialized
if not os.path.exists(db_path):
os.makedirs(os.path.dirname(db_path), exist_ok=True)
# read the existing favorites from the local file
# update the list
l_of_tickers = l_of_tickers + _get_fav_tickers()
file = open(db_path, "w")
file.write("{}".format(l_of_tickers))
file.close()
def _get_fav_tickers(db_path: str=LOCAL_DB_PATH):
"""read from the local json file, get all fav tickers
Returns a list of strings
"""
# return list of tickers from file
if not os.path.exists(db_path):
return []
file = open(db_path, "r")
content = file.read()
file.close()
output = content.strip("][").replace("'", "").split(", ")
return output
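# round-trip example (illustrative): _append_fav_ticker(["AAPL"]) writes
# "['AAPL']" to db.txt, and a later _get_fav_tickers() parses it back to ["AAPL"]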
def main():
## TODO clean tmp files
parser = argparse.ArgumentParser(description="cli for wallstreet")
parser.add_argument("--stock", help="show stock price of ticker")
parser.add_argument("--currency", default="EUR", help="currency")
parser.add_argument("-s", default=False, action="store_true")
parser.add_argument("--add_fav", default=None, help="show stock price of ticker")
parser.add_argument("--show_fav", default=False, action="store_true")
args = parser.parse_args()
if args.stock:
show_stock(args.stock)
elif args.show_fav:
_get_all_fav_stock_prices(args.s)
elif args.add_fav:
_append_fav_ticker(args.add_fav.split(","))
if __name__ == "__main__":
main()
| 32.017857
| 91
| 0.687395
|
import os
import argparse
from wallstreet import Stock
from wallstreet_cli import xetra
from forex_python.converter import CurrencyRates
LOCAL_DB_PATH = os.path.join(os.path.dirname(__file__), "data", "db.txt")
def _currency_conversion(source_v: float, source_currency: str, target_currency: str):
c = CurrencyRates()
return c.convert(source_currency, target_currency, source_v)
def _get_stock_price(stock_name: str):
try:
return xetra.pipeline([stock_name])
except IndexError:
print("Ticker not found!")
return None
def _get_all_fav_stock_prices(show_command):
xetra.pipeline(_get_fav_tickers(), show_command)
def _find_ticker(company_name):
pass
def show_stock(stock_name: str):
price_in_usd = _get_stock_price(stock_name)
if not price_in_usd:
return
price_in_eur = _currency_conversion(price_in_usd, "USD", "EUR")
print(f"{stock_name}: {round(price_in_eur, 2)} EUR")
def _append_fav_ticker(l_of_tickers: list, db_path: str=LOCAL_DB_PATH):
if not os.path.exists(db_path):
os.makedirs(os.path.dirname(db_path), exist_ok=True)
l_of_tickers = l_of_tickers + _get_fav_tickers()
file = open(db_path, "w")
file.write("{}".format(l_of_tickers))
file.close()
def _get_fav_tickers(db_path: str=LOCAL_DB_PATH):
if not os.path.exists(db_path):
return []
file = open(db_path, "r")
content = file.read()
file.close()
output = content.strip("][").replace("'", "").split(", ")
return output
def main():
## TODO clean tmp files
parser = argparse.ArgumentParser(description="cli for wallstreet")
parser.add_argument("--stock", help="show stock price of ticker")
parser.add_argument("--currency", default="EUR", help="currency")
parser.add_argument("-s", default=False, action="store_true")
parser.add_argument("--add_fav", default=None, help="show stock price of ticker")
parser.add_argument("--show_fav", default=False, action="store_true")
args = parser.parse_args()
if args.stock:
show_stock(args.stock)
elif args.show_fav:
_get_all_fav_stock_prices(args.s)
elif args.add_fav:
_append_fav_ticker(args.add_fav.split(","))
if __name__ == "__main__":
main()
| true
| true
|
f7096668b0d05bfcb253d440a0e8ab2c6d1bcb9f
| 15,440
|
py
|
Python
|
torch_ecg/models/cnn/multi_scopic.py
|
DeepPSP/torch_ecg
|
6db5ffb063d0e8fb4ce97029a0d184a658f43a37
|
[
"MIT"
] | 9
|
2021-06-26T03:00:55.000Z
|
2022-03-03T13:43:00.000Z
|
torch_ecg/models/cnn/multi_scopic.py
|
DeepPSP/torch_ecg
|
6db5ffb063d0e8fb4ce97029a0d184a658f43a37
|
[
"MIT"
] | 1
|
2021-10-01T09:29:30.000Z
|
2021-10-02T03:41:55.000Z
|
torch_ecg/models/cnn/multi_scopic.py
|
DeepPSP/torch_ecg
|
6db5ffb063d0e8fb4ce97029a0d184a658f43a37
|
[
"MIT"
] | 2
|
2021-05-25T14:52:03.000Z
|
2021-12-29T15:09:08.000Z
|
"""
The core part of the SOTA model of CPSC2019,
branched, and has different scope (in terms of dilation) in each branch
"""
from copy import deepcopy
from itertools import repeat
from collections import OrderedDict
from typing import Union, Optional, Sequence, NoReturn
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import torch
from torch import nn
from torch import Tensor
from ...cfg import CFG, DEFAULTS
from ...utils.utils_nn import compute_module_size, SizeMixin
from ...utils.misc import dict_to_str
from ...models._nets import (
Conv_Bn_Activation,
DownSample,
NonLocalBlock, SEBlock, GlobalContextBlock,
)
if DEFAULTS.torch_dtype == torch.float64:
torch.set_default_tensor_type(torch.DoubleTensor)
__all__ = [
"MultiScopicCNN",
"MultiScopicBasicBlock",
"MultiScopicBranch",
]
class MultiScopicBasicBlock(SizeMixin, nn.Sequential):
""" finished, checked,
basic building block of the CNN part of the SOTA model
from CPSC2019 challenge (entry 0416)
(conv -> activation) * N --> bn --> down_sample
"""
__DEBUG__ = False
__name__ = "MultiScopicBasicBlock"
def __init__(self,
in_channels:int,
scopes:Sequence[int],
num_filters:Union[int,Sequence[int]],
filter_lengths:Union[int,Sequence[int]],
subsample_length:int,
groups:int=1,
**config) -> NoReturn:
""" finished, checked,
Parameters
----------
in_channels: int,
number of channels in the input
scopes: sequence of int,
scopes of the convolutional layers, via `dilation`
num_filters: int or sequence of int,
number of filters of the convolutional layer(s)
filter_lengths: int or sequence of int,
filter length(s) (kernel size(s)) of the convolutional layer(s)
subsample_length: int,
subsample length (ratio) at the last layer of the block
"""
super().__init__()
self.__in_channels = in_channels
self.__scopes = scopes
self.__num_convs = len(self.__scopes)
if isinstance(num_filters, int):
self.__out_channels = list(repeat(num_filters, self.__num_convs))
else:
self.__out_channels = num_filters
assert len(self.__out_channels) == self.__num_convs, \
f"`scopes` indicates {self.__num_convs} convolutional layers, while `num_filters` indicates {len(self.__out_channels)}"
if isinstance(filter_lengths, int):
self.__filter_lengths = list(repeat(filter_lengths, self.__num_convs))
else:
self.__filter_lengths = filter_lengths
assert len(self.__filter_lengths) == self.__num_convs, \
f"`scopes` indicates {self.__num_convs} convolutional layers, while `filter_lengths` indicates {len(self.__filter_lengths)}"
self.__subsample_length = subsample_length
self.__groups = groups
self.config = CFG(deepcopy(config))
conv_in_channels = self.__in_channels
for idx in range(self.__num_convs):
self.add_module(
f"ca_{idx}",
Conv_Bn_Activation(
in_channels=conv_in_channels,
out_channels=self.__out_channels[idx],
kernel_size=self.__filter_lengths[idx],
stride=1,
dilation=self.__scopes[idx],
groups=self.__groups,
batch_norm=self.config.batch_norm,
# kw_bn=self.config.kw_bn,
activation=self.config.activation,
kw_activation=self.config.kw_activation,
kernel_initializer=self.config.kernel_initializer,
kw_initializer=self.config.kw_initializer,
bias=self.config.bias,
)
)
conv_in_channels = self.__out_channels[idx]
self.add_module(
"bn",
nn.BatchNorm1d(self.__out_channels[-1])
)
self.add_module(
"down",
DownSample(
down_scale=self.__subsample_length,
in_channels=self.__out_channels[-1],
groups=self.__groups,
# padding=
batch_norm=False,
mode=self.config.subsample_mode,
)
)
if self.config.dropout > 0:
self.add_module(
"dropout",
nn.Dropout(self.config.dropout, inplace=False)
)
def forward(self, input:Tensor) -> Tensor:
""" finished, checked,
Parameters
----------
input: Tensor,
of shape (batch_size, n_channels, seq_len)
Returns
-------
output: Tensor,
of shape (batch_size, n_channels, seq_len)
"""
output = super().forward(input)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
""" finished, checked,
Parameters
----------
seq_len: int,
length of the 1d sequence
batch_size: int, optional,
the batch size, can be None
Returns
-------
output_shape: sequence,
the output shape of this block, given `seq_len` and `batch_size`
"""
_seq_len = seq_len
for idx, module in enumerate(self):
if idx == self.__num_convs: # bn layer
continue
elif self.config.dropout > 0 and idx == len(self)-1: # dropout layer
continue
output_shape = module.compute_output_shape(_seq_len, batch_size)
_, _, _seq_len = output_shape
return output_shape
class MultiScopicBranch(SizeMixin, nn.Sequential):
""" finished, checked,
branch path of the CNN part of the SOTA model
from CPSC2019 challenge (entry 0416)
"""
__DEBUG__ = False
__name__ = "MultiScopicBranch"
def __init__(self,
in_channels:int,
scopes:Sequence[Sequence[int]],
num_filters:Union[Sequence[int],Sequence[Sequence[int]]],
filter_lengths:Union[Sequence[int],Sequence[Sequence[int]]],
subsample_lengths:Union[int,Sequence[int]],
groups:int=1,
**config) -> NoReturn:
""" finished, checked,
Parameters
----------
in_channels: int,
number of features (channels) of the input
scopes: sequence of sequences of int,
scopes (in terms of `dilation`) for the convolutional layers,
each sequence of int is for one branch
num_filters: sequence of int, or sequence of sequences of int,
number of filters for the convolutional layers,
if is sequence of int,
then convolutional layers in one branch will have the same number of filters
filter_lengths: sequence of int, or sequence of sequences of int,
filter length (kernel size) of the convolutional layers,
if is sequence of int,
then convolutional layers in one branch will have the same filter length
subsample_lengths: int, or sequence of int,
subsample length (stride) of the convolutional layers,
if is sequence of int,
then convolutional layers in one branch will have the same subsample length
groups: int, default 1,
connection pattern (of channels) of the inputs and outputs
config: dict,
other hyper-parameters, including
dropout, activation choices, weight initializer, etc.
"""
super().__init__()
self.__in_channels = in_channels
self.__scopes = scopes
self.__num_blocks = len(self.__scopes)
self.__num_filters = num_filters
assert len(self.__num_filters) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `num_filters` indicates {len(self.__num_filters)}"
self.__filter_lengths = filter_lengths
assert len(self.__filter_lengths) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `filter_lengths` indicates {len(self.__filter_lengths)}"
if isinstance(subsample_lengths, int):
self.__subsample_lengths = list(repeat(subsample_lengths, self.__num_blocks))
else:
self.__subsample_lengths = subsample_lengths
assert len(self.__subsample_lengths) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `subsample_lengths` indicates {len(self.__subsample_lengths)}"
self.__groups = groups
self.config = CFG(deepcopy(config))
block_in_channels = self.__in_channels
for idx in range(self.__num_blocks):
self.add_module(
f"block_{idx}",
MultiScopicBasicBlock(
in_channels=block_in_channels,
scopes=self.__scopes[idx],
num_filters=self.__num_filters[idx],
filter_lengths=self.__filter_lengths[idx],
subsample_length=self.__subsample_lengths[idx],
groups=self.__groups,
dropout=self.config.dropouts[idx],
**(self.config.block)
)
)
block_in_channels = self.__num_filters[idx]
def forward(self, input:Tensor) -> Tensor:
""" finished, checked,
Parameters
----------
input: Tensor,
of shape (batch_size, n_channels, seq_len)
Returns
-------
output: Tensor,
of shape (batch_size, n_channels, seq_len)
"""
output = super().forward(input)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
""" finished, checked,
Parameters
----------
seq_len: int,
length of the 1d sequence
batch_size: int, optional,
the batch size, can be None
Returns
-------
output_shape: sequence,
the output shape of this block, given `seq_len` and `batch_size`
"""
_seq_len = seq_len
for idx, module in enumerate(self):
output_shape = module.compute_output_shape(_seq_len, batch_size)
_, _, _seq_len = output_shape
return output_shape
class MultiScopicCNN(SizeMixin, nn.Module):
""" finished, checked,
CNN part of the SOTA model from CPSC2019 challenge (entry 0416)
"""
__DEBUG__ = False
__name__ = "MultiScopicCNN"
def __init__(self, in_channels:int, **config) -> NoReturn:
""" finished, checked,
Parameters
----------
in_channels: int,
number of channels in the input
config: dict,
other hyper-parameters of the Module, ref. corresponding config file
key word arguments that have to be set:
scopes: sequence of sequences of sequences of int,
scopes (in terms of dilation) of each convolution
num_filters: sequence of sequences (of int or of sequences of int),
number of filters of the convolutional layers,
with granularity to each block of each branch,
or to each convolution of each block of each branch
filter_lengths: sequence of sequences (of int or of sequences of int),
filter length(s) (kernel size(s)) of the convolutions,
with granularity to each block of each branch,
or to each convolution of each block of each branch
subsample_lengths: sequence of int or sequence of sequences of int,
subsampling length(s) (ratio(s)) of all blocks,
with granularity to each branch or to each block of each branch,
each subsamples after the last convolution of each block
dropouts: sequence of int or sequence of sequences of int,
dropout rates of all blocks,
with granularity to each branch or to each block of each branch,
each dropouts at the last of each block
groups: int,
connection pattern (of channels) of the inputs and outputs
block: dict,
other parameters that can be set for the building blocks
for a full list of configurable parameters, ref. corr. config file
"""
super().__init__()
self.__in_channels = in_channels
self.config = CFG(deepcopy(config))
self.__scopes = self.config.scopes
self.__num_branches = len(self.__scopes)
if self.__DEBUG__:
print(f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}")
self.branches = nn.ModuleDict()
for idx in range(self.__num_branches):
self.branches[f"branch_{idx}"] = \
MultiScopicBranch(
in_channels=self.__in_channels,
scopes=self.__scopes[idx],
num_filters=self.config.num_filters[idx],
filter_lengths=self.config.filter_lengths[idx],
subsample_lengths=self.config.subsample_lengths[idx],
groups=self.config.groups,
dropouts=self.config.dropouts[idx],
block=self.config.block, # a dict
)
def forward(self, input:Tensor) -> Tensor:
""" finished, checked,
Parameters
----------
input: Tensor,
of shape (batch_size, n_channels, seq_len)
Returns
-------
output: Tensor,
            of shape (batch_size, total_channels, reduced_seq_len), where total_channels
            is the sum of the branches' output channels (concatenated along dim 1)
"""
branch_out = OrderedDict()
for idx in range(self.__num_branches):
key = f"branch_{idx}"
branch_out[key] = self.branches[key].forward(input)
output = torch.cat(
[branch_out[f"branch_{idx}"] for idx in range(self.__num_branches)],
dim=1, # along channels
)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
""" finished, checked,
Parameters
----------
seq_len: int,
length of the 1d sequence
batch_size: int, optional,
the batch size, can be None
Returns
-------
output_shape: sequence,
the output shape of this block, given `seq_len` and `batch_size`
"""
out_channels = 0
for idx in range(self.__num_branches):
key = f"branch_{idx}"
_, _branch_oc, _seq_len = \
self.branches[key].compute_output_shape(seq_len, batch_size)
out_channels += _branch_oc
output_shape = (batch_size, out_channels, _seq_len)
return output_shape
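A quick instantiation sketch may make the nested granularity of these config keys concrete. Only the key names below are taken from the code above; every numeric value and every block-option value is an illustrative assumption (the published CPSC2019 settings live in the corresponding config file):

import torch

cnn = MultiScopicCNN(
    in_channels=12,
    scopes=[[[1, 1, 1]], [[2, 4, 8]]],   # 2 branches, 1 block each, 3 convs per block (dilations)
    num_filters=[[16], [16]],            # one int per block of each branch
    filter_lengths=[[3], [3]],           # one kernel size per block of each branch
    subsample_lengths=[2, 2],            # one stride per branch (int -> repeated for each block)
    dropouts=[[0.1], [0.1]],             # one dropout rate per block of each branch
    groups=1,
    block=dict(                          # key names mirror the attributes read in the code above;
        batch_norm=True,                 # the values here are guesses, ref. the config file
        activation="relu",
        kw_activation={"inplace": True},
        kernel_initializer="he_normal",
        kw_initializer={},
        bias=True,
        subsample_mode="max",
    ),
)
x = torch.randn(2, 12, 1000)             # (batch_size, n_channels, seq_len)
y = cnn(x)                               # expected shape (2, 32, 500): 16 + 16 channels, stride 2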
| 38.217822
| 147
| 0.592034
|
from copy import deepcopy
from itertools import repeat
from collections import OrderedDict
from typing import Union, Optional, Sequence, NoReturn
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import torch
from torch import nn
from torch import Tensor
from ...cfg import CFG, DEFAULTS
from ...utils.utils_nn import compute_module_size, SizeMixin
from ...utils.misc import dict_to_str
from ...models._nets import (
Conv_Bn_Activation,
DownSample,
NonLocalBlock, SEBlock, GlobalContextBlock,
)
if DEFAULTS.torch_dtype == torch.float64:
torch.set_default_tensor_type(torch.DoubleTensor)
__all__ = [
"MultiScopicCNN",
"MultiScopicBasicBlock",
"MultiScopicBranch",
]
class MultiScopicBasicBlock(SizeMixin, nn.Sequential):
__DEBUG__ = False
__name__ = "MultiScopicBasicBlock"
def __init__(self,
in_channels:int,
scopes:Sequence[int],
num_filters:Union[int,Sequence[int]],
filter_lengths:Union[int,Sequence[int]],
subsample_length:int,
groups:int=1,
**config) -> NoReturn:
super().__init__()
self.__in_channels = in_channels
self.__scopes = scopes
self.__num_convs = len(self.__scopes)
if isinstance(num_filters, int):
self.__out_channels = list(repeat(num_filters, self.__num_convs))
else:
self.__out_channels = num_filters
assert len(self.__out_channels) == self.__num_convs, \
f"`scopes` indicates {self.__num_convs} convolutional layers, while `num_filters` indicates {len(self.__out_channels)}"
if isinstance(filter_lengths, int):
self.__filter_lengths = list(repeat(filter_lengths, self.__num_convs))
else:
self.__filter_lengths = filter_lengths
assert len(self.__filter_lengths) == self.__num_convs, \
f"`scopes` indicates {self.__num_convs} convolutional layers, while `filter_lengths` indicates {len(self.__filter_lengths)}"
self.__subsample_length = subsample_length
self.__groups = groups
self.config = CFG(deepcopy(config))
conv_in_channels = self.__in_channels
for idx in range(self.__num_convs):
self.add_module(
f"ca_{idx}",
Conv_Bn_Activation(
in_channels=conv_in_channels,
out_channels=self.__out_channels[idx],
kernel_size=self.__filter_lengths[idx],
stride=1,
dilation=self.__scopes[idx],
groups=self.__groups,
batch_norm=self.config.batch_norm,
activation=self.config.activation,
kw_activation=self.config.kw_activation,
kernel_initializer=self.config.kernel_initializer,
kw_initializer=self.config.kw_initializer,
bias=self.config.bias,
)
)
conv_in_channels = self.__out_channels[idx]
self.add_module(
"bn",
nn.BatchNorm1d(self.__out_channels[-1])
)
self.add_module(
"down",
DownSample(
down_scale=self.__subsample_length,
in_channels=self.__out_channels[-1],
groups=self.__groups,
batch_norm=False,
mode=self.config.subsample_mode,
)
)
if self.config.dropout > 0:
self.add_module(
"dropout",
nn.Dropout(self.config.dropout, inplace=False)
)
def forward(self, input:Tensor) -> Tensor:
output = super().forward(input)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
_seq_len = seq_len
for idx, module in enumerate(self):
if idx == self.__num_convs: continue
elif self.config.dropout > 0 and idx == len(self)-1: continue
output_shape = module.compute_output_shape(_seq_len, batch_size)
_, _, _seq_len = output_shape
return output_shape
class MultiScopicBranch(SizeMixin, nn.Sequential):
__DEBUG__ = False
__name__ = "MultiScopicBranch"
def __init__(self,
in_channels:int,
scopes:Sequence[Sequence[int]],
num_filters:Union[Sequence[int],Sequence[Sequence[int]]],
filter_lengths:Union[Sequence[int],Sequence[Sequence[int]]],
subsample_lengths:Union[int,Sequence[int]],
groups:int=1,
**config) -> NoReturn:
super().__init__()
self.__in_channels = in_channels
self.__scopes = scopes
self.__num_blocks = len(self.__scopes)
self.__num_filters = num_filters
assert len(self.__num_filters) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `num_filters` indicates {len(self.__num_filters)}"
self.__filter_lengths = filter_lengths
assert len(self.__filter_lengths) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `filter_lengths` indicates {len(self.__filter_lengths)}"
if isinstance(subsample_lengths, int):
self.__subsample_lengths = list(repeat(subsample_lengths, self.__num_blocks))
else:
            self.__subsample_lengths = subsample_lengths
assert len(self.__subsample_lengths) == self.__num_blocks, \
f"`scopes` indicates {self.__num_blocks} `MultiScopicBasicBlock`s, while `subsample_lengths` indicates {len(self.__subsample_lengths)}"
self.__groups = groups
self.config = CFG(deepcopy(config))
block_in_channels = self.__in_channels
for idx in range(self.__num_blocks):
self.add_module(
f"block_{idx}",
MultiScopicBasicBlock(
in_channels=block_in_channels,
scopes=self.__scopes[idx],
num_filters=self.__num_filters[idx],
filter_lengths=self.__filter_lengths[idx],
subsample_length=self.__subsample_lengths[idx],
groups=self.__groups,
dropout=self.config.dropouts[idx],
**(self.config.block)
)
)
block_in_channels = self.__num_filters[idx]
def forward(self, input:Tensor) -> Tensor:
output = super().forward(input)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
_seq_len = seq_len
for idx, module in enumerate(self):
output_shape = module.compute_output_shape(_seq_len, batch_size)
_, _, _seq_len = output_shape
return output_shape
class MultiScopicCNN(SizeMixin, nn.Module):
__DEBUG__ = False
__name__ = "MultiScopicCNN"
def __init__(self, in_channels:int, **config) -> NoReturn:
super().__init__()
self.__in_channels = in_channels
self.config = CFG(deepcopy(config))
self.__scopes = self.config.scopes
self.__num_branches = len(self.__scopes)
if self.__DEBUG__:
print(f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}")
self.branches = nn.ModuleDict()
for idx in range(self.__num_branches):
self.branches[f"branch_{idx}"] = \
MultiScopicBranch(
in_channels=self.__in_channels,
scopes=self.__scopes[idx],
num_filters=self.config.num_filters[idx],
filter_lengths=self.config.filter_lengths[idx],
subsample_lengths=self.config.subsample_lengths[idx],
groups=self.config.groups,
dropouts=self.config.dropouts[idx],
                    block=self.config.block)
def forward(self, input:Tensor) -> Tensor:
branch_out = OrderedDict()
for idx in range(self.__num_branches):
key = f"branch_{idx}"
branch_out[key] = self.branches[key].forward(input)
output = torch.cat(
[branch_out[f"branch_{idx}"] for idx in range(self.__num_branches)],
            dim=1)
return output
def compute_output_shape(self, seq_len:Optional[int]=None, batch_size:Optional[int]=None) -> Sequence[Union[int, None]]:
out_channels = 0
for idx in range(self.__num_branches):
key = f"branch_{idx}"
_, _branch_oc, _seq_len = \
self.branches[key].compute_output_shape(seq_len, batch_size)
out_channels += _branch_oc
output_shape = (batch_size, out_channels, _seq_len)
return output_shape
| true
| true
|
f70966e3c9f9dccb779b1817d27df03b62c5153e
| 5,073
|
py
|
Python
|
tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py
|
My-Technical-Architect/tensorflow
|
35cf4653e6fe15953e2e565afc5a0fd2ab4d5290
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py
|
My-Technical-Architect/tensorflow
|
35cf4653e6fe15953e2e565afc5a0fd2ab4d5290
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py
|
My-Technical-Architect/tensorflow
|
35cf4653e6fe15953e2e565afc5a0fd2ab4d5290
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import tensor_util
dists = tf.contrib.distributions
class DistributionTest(tf.test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
dists.Normal,
dists.Bernoulli,
dists.Beta,
dists.Chi2,
dists.Exponential,
dists.Gamma,
dists.InverseGamma,
dists.Laplace,
dists.StudentT,
dists.Uniform]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.test_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, tf.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape, tf.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
tf.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.test_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = dists.Normal(mu=1., sigma=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = dists.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.test_session():
normal = dists.Normal(mu=1., sigma=2., validate_args=True)
      normal_copy = normal.copy(validate_args=False)
      base_params = normal.parameters.copy()
      copy_params = normal_copy.parameters.copy()
self.assertNotEqual(base_params.pop("validate_args"),
copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.test_session():
mu = 1.
sigma = 2.
normal = dists.Normal(mu, sigma,
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch))
normal = dists.Normal([mu], [sigma],
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([mu], [sigma],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = tf.placeholder(dtype=tf.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),
lambda: tf.shape(x)))
x = tf.placeholder(dtype=tf.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None),
lambda: tf.shape(x)))
# Test case 3.
x = tf.placeholder(dtype=tf.int32)
is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
if __name__ == '__main__':
tf.test.main()
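For readers following the three codepaths exercised above, here is a hedged sketch of what such a helper plausibly does; this is an illustration only, not TensorFlow's actual implementation of `_is_scalar_helper`:

def is_scalar_helper_sketch(static_shape_fn, dynamic_shape_fn):
  """Prefer the statically known rank; fall back to a runtime check."""
  static_shape = static_shape_fn()
  if static_shape.ndims is not None:
    # Cases 1 and 2: rank known at graph-construction time -> plain Python bool.
    return static_shape.ndims == 0
  # Case 3: rank unknown statically -> decide at run time.
  # tf.shape(x) is a 1-D tensor whose length equals the rank of x.
  return tf.equal(tf.size(dynamic_shape_fn()), 0)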
| 40.91129
| 80
| 0.650897
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import tensor_util
dists = tf.contrib.distributions
class DistributionTest(tf.test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
dists.Normal,
dists.Bernoulli,
dists.Beta,
dists.Chi2,
dists.Exponential,
dists.Gamma,
dists.InverseGamma,
dists.Laplace,
dists.StudentT,
dists.Uniform]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.test_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, tf.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape, tf.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
tf.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.test_session():
normal = dists.Normal(mu=1., sigma=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = dists.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.test_session():
normal = dists.Normal(mu=1., sigma=2., validate_args=True)
      normal_copy = normal.copy(validate_args=False)
      base_params = normal.parameters.copy()
      copy_params = normal_copy.parameters.copy()
self.assertNotEqual(base_params.pop("validate_args"),
copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.test_session():
mu = 1.
sigma = 2.
normal = dists.Normal(mu, sigma,
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch))
normal = dists.Normal([mu], [sigma],
validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([mu], [sigma],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch))
mvn = dists.MultivariateNormalDiag([[mu]], [[sigma]],
validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch))
x = tf.placeholder(dtype=tf.int32, shape=[])
self.assertTrue(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertTrue(normal._is_scalar_helper(lambda: tf.TensorShape(None),
lambda: tf.shape(x)))
x = tf.placeholder(dtype=tf.int32, shape=[1])
self.assertFalse(normal._is_scalar_helper(x.get_shape, lambda: None))
self.assertFalse(normal._is_scalar_helper(lambda: tf.TensorShape(None),
lambda: tf.shape(x)))
x = tf.placeholder(dtype=tf.int32)
is_scalar = normal._is_scalar_helper(x.get_shape, lambda: tf.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
if __name__ == '__main__':
tf.test.main()
| true
| true
|
f7096801f14d2d26bd55f170d586739f77960dfb
| 618
|
py
|
Python
|
chat/migrations/0001_initial.py
|
bionikspoon/django_channels_chat
|
b756bb6a8a361ef190ea70120f70c7b420559d74
|
[
"MIT"
] | null | null | null |
chat/migrations/0001_initial.py
|
bionikspoon/django_channels_chat
|
b756bb6a8a361ef190ea70120f70c7b420559d74
|
[
"MIT"
] | null | null | null |
chat/migrations/0001_initial.py
|
bionikspoon/django_channels_chat
|
b756bb6a8a361ef190ea70120f70c7b420559d74
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-25 23:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ChatMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room', models.CharField(max_length=64)),
('message', models.CharField(max_length=1024)),
],
),
]
| 24.72
| 114
| 0.590615
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ChatMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room', models.CharField(max_length=64)),
('message', models.CharField(max_length=1024)),
],
),
]
| true
| true
|
f70969aa61bc9bce48678cb70237665201eecd01
| 2,621
|
py
|
Python
|
homeassistant/components/bbox/device_tracker.py
|
zalke/home-assistant
|
a31e49c857722c0723dc5297cd83cbce0f8716f6
|
[
"Apache-2.0"
] | 4
|
2019-07-03T22:36:57.000Z
|
2019-08-10T15:33:25.000Z
|
homeassistant/components/bbox/device_tracker.py
|
zalke/home-assistant
|
a31e49c857722c0723dc5297cd83cbce0f8716f6
|
[
"Apache-2.0"
] | 7
|
2019-08-23T05:26:02.000Z
|
2022-03-11T23:57:18.000Z
|
homeassistant/components/bbox/device_tracker.py
|
zalke/home-assistant
|
a31e49c857722c0723dc5297cd83cbce0f8716f6
|
[
"Apache-2.0"
] | 3
|
2019-04-28T16:35:45.000Z
|
2020-05-28T15:21:59.000Z
|
"""Support for French FAI Bouygues Bbox routers."""
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = '192.168.1.254'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
})
def get_scanner(hass, config):
"""Validate the configuration and return a Bbox scanner."""
scanner = BboxDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple('Device', ['mac', 'name', 'ip', 'last_update'])
class BboxDeviceScanner(DeviceScanner):
"""This class scans for devices connected to the bbox."""
def __init__(self, config):
"""Get host from config."""
from typing import List # noqa: pylint: disable=unused-import
self.host = config[CONF_HOST]
"""Initialize the scanner."""
self.last_results = [] # type: List[Device]
self.success_init = self._update_info()
_LOGGER.info("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [result.name for result in self.last_results if
result.mac == device]
if filter_named:
return filter_named[0]
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Check the Bbox for devices.
Returns boolean if scanning successful.
"""
_LOGGER.info("Scanning...")
import pybbox
box = pybbox.Bbox(ip=self.host)
result = box.get_all_connected_devices()
now = dt_util.now()
last_results = []
for device in result:
if device['active'] != 1:
continue
last_results.append(
Device(device['macaddress'], device['hostname'],
device['ipaddress'], now))
self.last_results = last_results
_LOGGER.info("Scan successful")
return True
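The @Throttle decorator above is what enforces MIN_TIME_BETWEEN_SCANS. As a rough illustration of that pattern (a sketch, not Home Assistant's actual Throttle implementation), a minimal version could look like this: the wrapped method simply returns None when called again within the interval.

import functools
from datetime import datetime, timedelta

def throttle(interval: timedelta):
    """Rate-limit a function: calls within `interval` are skipped."""
    def decorator(func):
        last_call = [None]  # mutable cell holding the last execution time
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            now = datetime.now()
            if last_call[0] is not None and now - last_call[0] < interval:
                return None  # throttled: too soon since the last run
            last_call[0] = now
            return func(*args, **kwargs)
        return wrapper
    return decorator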
| 28.48913
| 75
| 0.660053
|
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = '192.168.1.254'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
})
def get_scanner(hass, config):
scanner = BboxDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple('Device', ['mac', 'name', 'ip', 'last_update'])
class BboxDeviceScanner(DeviceScanner):
def __init__(self, config):
from typing import List
self.host = config[CONF_HOST]
self.last_results = []
self.success_init = self._update_info()
_LOGGER.info("Scanner initialized")
def scan_devices(self):
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
filter_named = [result.name for result in self.last_results if
result.mac == device]
if filter_named:
return filter_named[0]
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
_LOGGER.info("Scanning...")
import pybbox
box = pybbox.Bbox(ip=self.host)
result = box.get_all_connected_devices()
now = dt_util.now()
last_results = []
for device in result:
if device['active'] != 1:
continue
last_results.append(
Device(device['macaddress'], device['hostname'],
device['ipaddress'], now))
self.last_results = last_results
_LOGGER.info("Scan successful")
return True
| true
| true
|
f7096a6d4471f4bf32467c0baac5db6cb57c25b5
| 1,353
|
py
|
Python
|
data_structures/sorting_algos/mergesort/test_mergesort.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
data_structures/sorting_algos/mergesort/test_mergesort.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
data_structures/sorting_algos/mergesort/test_mergesort.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
import pytest
from mergesort import mergesort, merge
def test_empty_list_returns_empty_list():
"""Test mergesort on empty list returns same."""
empty = []
assert mergesort(empty) == []
def test_list_with_one_value():
"""Test mergesort on empty list returns same."""
lst = [8]
assert mergesort(lst) == [8]
def test_list_with_two_values():
"""Test mergesort on empty list returns same."""
lst = [8, 3]
assert mergesort(lst) == [3, 8]
def test_list_with_odd_number_of_values():
"""Test odd number of values returns ordered list."""
lst = [8, 3, 7, 9, 5]
assert mergesort(lst) == [3, 5, 7, 8, 9]
def test_list_with_unbalanced_halves():
"""Test list heavy weighted on one half returns ordered list."""
lst = [2, 4, 3, 8, 1, 9, 10, 13]
assert mergesort(lst) == [1, 2, 3, 4, 8, 9, 10, 13]
def test_merge_merges_two_pairs():
"""Test merge function separate of mergesort."""
L = [1, 3, 5]
R = [2, 4, 6]
assert merge(L, R) == [1, 2, 3, 4, 5, 6]
def test_merge_merges_uneven_lists():
    """Test merge on two sorted lists of unequal length."""
L = [1, 3, 5]
R = [2, 4]
assert merge(L, R) == [1, 2, 3, 4, 5]
def test_merge_on_unbalanced_lists():
"""Test list heavy weighted on one half returns ordered list."""
L = [2, 3, 4, 8]
R = [1, 9, 10, 13]
assert merge(L, R) == [1, 2, 3, 4, 8, 9, 10, 13]
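The module under test is not shown here; for reference, a minimal implementation consistent with all of the assertions above could look like the following (a sketch, not necessarily the repository's own mergesort.py):

def merge(L, R):
    """Merge two already-sorted lists into one sorted list."""
    out = []
    i = j = 0
    while i < len(L) and j < len(R):
        if L[i] <= R[j]:
            out.append(L[i])
            i += 1
        else:
            out.append(R[j])
            j += 1
    out.extend(L[i:])   # at most one of these two extends is non-empty
    out.extend(R[j:])
    return out

def mergesort(lst):
    """Sort a list by recursively splitting it and merging the halves."""
    if len(lst) <= 1:
        return lst
    mid = len(lst) // 2
    return merge(mergesort(lst[:mid]), mergesort(lst[mid:]))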
| 23.736842
| 68
| 0.601626
|
import pytest
from mergesort import mergesort, merge
def test_empty_list_returns_empty_list():
empty = []
assert mergesort(empty) == []
def test_list_with_one_value():
lst = [8]
assert mergesort(lst) == [8]
def test_list_with_two_values():
lst = [8, 3]
assert mergesort(lst) == [3, 8]
def test_list_with_odd_number_of_values():
lst = [8, 3, 7, 9, 5]
assert mergesort(lst) == [3, 5, 7, 8, 9]
def test_list_with_unbalanced_halves():
lst = [2, 4, 3, 8, 1, 9, 10, 13]
assert mergesort(lst) == [1, 2, 3, 4, 8, 9, 10, 13]
def test_merge_merges_two_pairs():
L = [1, 3, 5]
R = [2, 4, 6]
assert merge(L, R) == [1, 2, 3, 4, 5, 6]
def test_merge_merges_uneven_lists():
L = [1, 3, 5]
R = [2, 4]
assert merge(L, R) == [1, 2, 3, 4, 5]
def test_merge_on_unbalanced_lists():
L = [2, 3, 4, 8]
R = [1, 9, 10, 13]
assert merge(L, R) == [1, 2, 3, 4, 8, 9, 10, 13]
| true
| true
|
f7096bdce2180e86cf1d57c069d0bf48eacda9db
| 2,648
|
py
|
Python
|
model/encoder.py
|
project-delphi/ACS-QG
|
03aa5b79030b5ba4c09a99363a58454743876592
|
[
"MIT"
] | 63
|
2020-02-11T06:40:17.000Z
|
2022-03-03T08:07:16.000Z
|
model/encoder.py
|
project-delphi/ACS-QG
|
03aa5b79030b5ba4c09a99363a58454743876592
|
[
"MIT"
] | 18
|
2020-02-11T06:30:50.000Z
|
2022-03-24T08:39:49.000Z
|
model/encoder.py
|
project-delphi/ACS-QG
|
03aa5b79030b5ba4c09a99363a58454743876592
|
[
"MIT"
] | 17
|
2020-02-24T14:29:16.000Z
|
2022-01-12T06:42:13.000Z
|
"""
Implement input sentence encoder.
"""
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .config import *
from common.constants import DEVICE
from util.tensor_utils import to_sorted_tensor, to_original_tensor
class Encoder(nn.Module):
"""
Transform embeddings to encoding representations.
"""
def __init__(self, config, input_size, dropout=0.1):
"""
Initialize a GRU encoder.
:param config: configuration, includes total enc size, is bi-direction, etc.
:param input_size: input dimension.
        :param dropout: dropout rate for the GRU (note: currently unused; the GRU is built with config.dropout)
"""
super(Encoder, self).__init__()
self.config = config
self.layers = config.layers
self.num_directions = 2 if config.brnn else 1
assert config.enc_rnn_size % self.num_directions == 0
self.hidden_size = config.enc_rnn_size // self.num_directions
self.rnn = nn.GRU(
input_size, self.hidden_size,
num_layers=config.layers, dropout=config.dropout,
bidirectional=config.brnn, batch_first=True)
def forward(self, input_emb, lengths, hidden=None):
"""
Given input embeddings and input seq lengths, calculate encoding representations.
:param input_emb: embedding of a batch.
Input shape - [seq_len, batch_size, hidden_dim]
:param lengths: lengths of each sample.
:param hidden: hidden of previous layer. Default None.
        :return: encoding of a batch.
            Output shape - [max_unpadded_seq_len_in_batch, batch_size, hidden_dim * num_directions]
TODO: revise code to make input and output shape be [batch, length, dim]
"""
# input_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]
# sorted_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]
sorted_input_emb, sorted_lengths, sorted_idx = to_sorted_tensor(
input_emb, lengths, sort_dim=1, device=DEVICE)
emb = pack(sorted_input_emb, sorted_lengths, batch_first=False)
self.rnn.flatten_parameters()
outputs, hidden_t = self.rnn(emb, hidden)
        # hidden_t shape: [num_layers * num_directions, batch_size, hidden_dim]  [2, 32, 256]
        # outputs shape: [unpadded_seq_len, batch_size, hidden_dim * num_directions]  [79, 32, 512]
# !!! NOTICE: it will unpack to max_unpadded_length.
outputs = unpack(outputs, batch_first=False)[0]
outputs = to_original_tensor(
outputs, sorted_idx, sort_dim=1, device=DEVICE)
return hidden_t, outputs
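The sort -> pack -> run -> unpack -> unsort dance above exists because pack_padded_sequence traditionally requires lengths in descending order. Below is a self-contained sketch of the same pattern using plain torch utilities (to_sorted_tensor / to_original_tensor are this repo's helpers and are not reimplemented here):

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

emb = torch.randn(7, 3, 8)                          # [seq_len, batch_size, dim]
lengths = torch.tensor([5, 7, 3])
sorted_len, idx = lengths.sort(descending=True)     # packing wants sorted lengths
packed = pack_padded_sequence(emb[:, idx, :], sorted_len, batch_first=False)
rnn = torch.nn.GRU(input_size=8, hidden_size=4)
out, hidden = rnn(packed)
out, _ = pad_packed_sequence(out, batch_first=False)  # unpacks to the max true length (7)
inv = idx.argsort()                                 # inverse permutation
out = out[:, inv, :]                                # restore the original batch order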
| 44.133333
| 96
| 0.673338
|
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .config import *
from common.constants import DEVICE
from util.tensor_utils import to_sorted_tensor, to_original_tensor
class Encoder(nn.Module):
def __init__(self, config, input_size, dropout=0.1):
super(Encoder, self).__init__()
self.config = config
self.layers = config.layers
self.num_directions = 2 if config.brnn else 1
assert config.enc_rnn_size % self.num_directions == 0
self.hidden_size = config.enc_rnn_size // self.num_directions
self.rnn = nn.GRU(
input_size, self.hidden_size,
num_layers=config.layers, dropout=config.dropout,
bidirectional=config.brnn, batch_first=True)
def forward(self, input_emb, lengths, hidden=None):
sorted_input_emb, sorted_lengths, sorted_idx = to_sorted_tensor(
input_emb, lengths, sort_dim=1, device=DEVICE)
emb = pack(sorted_input_emb, sorted_lengths, batch_first=False)
self.rnn.flatten_parameters()
outputs, hidden_t = self.rnn(emb, hidden)
outputs = unpack(outputs, batch_first=False)[0]
outputs = to_original_tensor(
outputs, sorted_idx, sort_dim=1, device=DEVICE)
return hidden_t, outputs
| true
| true
|
f7096c3f584c384ca7d480068e8e6659333d3299
| 356
|
py
|
Python
|
backend/backend/utils.py
|
reidy-p/DublinBusPredictions
|
a6b1fc8a5c28500a3292883ea0dfcde1770d78d1
|
[
"MIT"
] | null | null | null |
backend/backend/utils.py
|
reidy-p/DublinBusPredictions
|
a6b1fc8a5c28500a3292883ea0dfcde1770d78d1
|
[
"MIT"
] | null | null | null |
backend/backend/utils.py
|
reidy-p/DublinBusPredictions
|
a6b1fc8a5c28500a3292883ea0dfcde1770d78d1
|
[
"MIT"
] | null | null | null |
from dublinbus.serializers import UserSerializer
def my_jwt_response_handler(token, user=None, request=None):
''' JWT response handler
Adds a new ‘user’ field with the user’s serialized data when a token is generated
'''
response = UserSerializer(user, context={'request': request}).data
response["token"] = token
return response
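For context, a response handler like this is typically registered through the djangorestframework-jwt settings. A sketch of the corresponding settings.py entry follows; the dotted path is an assumption based on this file living at backend/backend/utils.py:

# settings.py (sketch): register the handler with djangorestframework-jwt
JWT_AUTH = {
    'JWT_RESPONSE_PAYLOAD_HANDLER': 'backend.utils.my_jwt_response_handler',
}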
| 32.363636
| 85
| 0.727528
|
from dublinbus.serializers import UserSerializer
def my_jwt_response_handler(token, user=None, request=None):
response = UserSerializer(user, context={'request': request}).data
response["token"] = token
return response
| true
| true
|
f7096d295eeb57b3737ef0aea08fc63bc3d5f3d0
| 4,713
|
py
|
Python
|
python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | 8
|
2016-08-15T07:02:27.000Z
|
2016-08-24T09:34:00.000Z
|
python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | 1
|
2022-01-28T07:23:22.000Z
|
2022-01-28T07:23:22.000Z
|
python/paddle/fluid/tests/unittests/distribution/test_distribution_dirichlet_static.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import scipy.stats
from config import ATOL, DEVICES, RTOL
from parameterize import TEST_CASE_NAME, parameterize_cls, place, xrand
paddle.enable_static()
@place(DEVICES)
@parameterize_cls((TEST_CASE_NAME, 'concentration'),
[('test-one-dim', np.random.rand(89) + 5.0)])
class TestDirichlet(unittest.TestCase):
def setUp(self):
self.program = paddle.static.Program()
self.executor = paddle.static.Executor()
with paddle.static.program_guard(self.program):
conc = paddle.static.data('conc', self.concentration.shape,
self.concentration.dtype)
self._paddle_diric = paddle.distribution.Dirichlet(conc)
self.feeds = {'conc': self.concentration}
def test_mean(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.mean])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.mean(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_variance(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.variance])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.var(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_prob(self):
with paddle.static.program_guard(self.program):
random_number = np.random.rand(*self.concentration.shape)
random_number = random_number / random_number.sum()
feeds = dict(self.feeds, value=random_number)
value = paddle.static.data('value', random_number.shape,
random_number.dtype)
out = self._paddle_diric.prob(value)
[out] = self.executor.run(self.program,
feed=feeds,
fetch_list=[out])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.pdf(random_number, self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_log_prob(self):
with paddle.static.program_guard(self.program):
random_number = np.random.rand(*self.concentration.shape)
random_number = random_number / random_number.sum()
feeds = dict(self.feeds, value=random_number)
value = paddle.static.data('value', random_number.shape,
random_number.dtype)
out = self._paddle_diric.log_prob(value)
[out] = self.executor.run(self.program,
feed=feeds,
fetch_list=[out])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.logpdf(random_number, self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_entropy(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(
self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.entropy()])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.entropy(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
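For comparison, the same mean check is much shorter in the default dynamic (dygraph) mode; a sketch, assuming a fresh interpreter where paddle.enable_static() has not been called:

import numpy as np
import paddle
import scipy.stats

conc = np.random.rand(5) + 5.0
diric = paddle.distribution.Dirichlet(paddle.to_tensor(conc))
np.testing.assert_allclose(diric.mean.numpy(),
                           scipy.stats.dirichlet.mean(conc),
                           rtol=1e-6, atol=1e-6)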
| 44.046729
| 80
| 0.591767
|
import unittest
import numpy as np
import paddle
import scipy.stats
from config import ATOL, DEVICES, RTOL
from parameterize import TEST_CASE_NAME, parameterize_cls, place, xrand
paddle.enable_static()
@place(DEVICES)
@parameterize_cls((TEST_CASE_NAME, 'concentration'),
[('test-one-dim', np.random.rand(89) + 5.0)])
class TestDirichlet(unittest.TestCase):
def setUp(self):
self.program = paddle.static.Program()
self.executor = paddle.static.Executor()
with paddle.static.program_guard(self.program):
conc = paddle.static.data('conc', self.concentration.shape,
self.concentration.dtype)
self._paddle_diric = paddle.distribution.Dirichlet(conc)
self.feeds = {'conc': self.concentration}
def test_mean(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.mean])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.mean(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_variance(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.variance])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.var(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_prob(self):
with paddle.static.program_guard(self.program):
random_number = np.random.rand(*self.concentration.shape)
random_number = random_number / random_number.sum()
feeds = dict(self.feeds, value=random_number)
value = paddle.static.data('value', random_number.shape,
random_number.dtype)
out = self._paddle_diric.prob(value)
[out] = self.executor.run(self.program,
feed=feeds,
fetch_list=[out])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.pdf(random_number, self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_log_prob(self):
with paddle.static.program_guard(self.program):
random_number = np.random.rand(*self.concentration.shape)
random_number = random_number / random_number.sum()
feeds = dict(self.feeds, value=random_number)
value = paddle.static.data('value', random_number.shape,
random_number.dtype)
out = self._paddle_diric.log_prob(value)
[out] = self.executor.run(self.program,
feed=feeds,
fetch_list=[out])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.logpdf(random_number, self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
def test_entropy(self):
with paddle.static.program_guard(self.program):
[out] = self.executor.run(
self.program,
feed=self.feeds,
fetch_list=[self._paddle_diric.entropy()])
np.testing.assert_allclose(
out,
scipy.stats.dirichlet.entropy(self.concentration),
rtol=RTOL.get(str(self.concentration.dtype)),
atol=ATOL.get(str(self.concentration.dtype)))
| true
| true
|
f7096ddd23613e7d847f5b1698c484bd8f0b6508
| 7,244
|
py
|
Python
|
src/datadog_api_client/v2/model/api_key_update_data.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v2/model/api_key_update_data.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v2/model/api_key_update_data.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | null | null | null |
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v2.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v2.model.api_key_update_attributes import APIKeyUpdateAttributes
from datadog_api_client.v2.model.api_keys_type import APIKeysType
globals()["APIKeyUpdateAttributes"] = APIKeyUpdateAttributes
globals()["APIKeysType"] = APIKeysType
class APIKeyUpdateData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
"attributes": (APIKeyUpdateAttributes,), # noqa: E501
"id": (str,), # noqa: E501
"type": (APIKeysType,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"attributes": "attributes", # noqa: E501
"id": "id", # noqa: E501
"type": "type", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, attributes, id, type, *args, **kwargs): # noqa: E501
"""APIKeyUpdateData - a model defined in OpenAPI
Args:
attributes (APIKeyUpdateAttributes):
id (str): ID of the API key.
type (APIKeysType):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.attributes = attributes
self.id = id
self.type = type
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
| 39.369565
| 108
| 0.587659
|
import re
import sys
from datadog_api_client.v2.model_utils import (
    ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v2.model.api_key_update_attributes import APIKeyUpdateAttributes
from datadog_api_client.v2.model.api_keys_type import APIKeysType
globals()["APIKeyUpdateAttributes"] = APIKeyUpdateAttributes
globals()["APIKeysType"] = APIKeysType
class APIKeyUpdateData(ModelNormal):
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
"attributes": (APIKeyUpdateAttributes,), "id": (str,), "type": (APIKeysType,), }
@cached_property
def discriminator():
return None
attribute_map = {
"attributes": "attributes", "id": "id", "type": "type", }
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, attributes, id, type, *args, **kwargs):
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.attributes = attributes
self.id = id
self.type = type
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
continue
setattr(self, var_name, var_value)
| true
| true
|
f7096dfb774ef917c1fa89ce59bb06b35dcfd16b
| 13,101
|
py
|
Python
|
nmt/utils/iterator_utils_test.py
|
luckmoon/nmt
|
4f6a4acf8d8e086f9d894444a2877ac1f0856ad0
|
[
"Apache-2.0"
] | null | null | null |
nmt/utils/iterator_utils_test.py
|
luckmoon/nmt
|
4f6a4acf8d8e086f9d894444a2877ac1f0856ad0
|
[
"Apache-2.0"
] | null | null | null |
nmt/utils/iterator_utils_test.py
|
luckmoon/nmt
|
4f6a4acf8d8e086f9d894444a2877ac1f0856ad0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for iterator_utils.py"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from ..utils import iterator_utils
class IteratorUtilsTest(tf.test.TestCase):
def testGetIterator(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["f e a g", "c c a", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c", "a b", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0], # "f" == unknown, "e" == unknown, a
[2, 0, 3]], # c a eos -- eos is padding
source_v)
self.assertAllEqual([3, 2], src_len_v)
self.assertAllEqual(
[[4, 2, 2], # sos c c
[4, 1, 2]], # sos b c
target_input_v)
self.assertAllEqual(
[[2, 2, 3], # c c eos
[1, 2, 3]], # b c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 2, 0]], # c c a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 0, 1]], # sos a b
target_input_v)
self.assertAllEqual(
[[0, 1, 3]], # a b eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetIteratorWithShard(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "f e a g", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["a b", "c c", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
num_shards=2,
shard_index=1,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0], # "f" == unknown, "e" == unknown, a
[2, 0, 3]], # c a eos -- eos is padding
source_v)
self.assertAllEqual([3, 2], src_len_v)
self.assertAllEqual(
[[4, 2, 2], # sos c c
[4, 1, 2]], # sos b c
target_input_v)
self.assertAllEqual(
[[2, 2, 3], # c c eos
[1, 2, 3]], # b c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetIteratorWithSkipCount(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c a", "c c a", "d", "f e a g"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["b c", "a b", "", "c c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
skip_count = tf.placeholder(shape=(), dtype=tf.int64)
iterator = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
skip_count=skip_count,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
source = iterator.source
target_input = iterator.target_input
target_output = iterator.target_output
src_seq_len = iterator.source_sequence_length
tgt_seq_len = iterator.target_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None, None], target_input.shape.as_list())
self.assertEqual([None, None], target_output.shape.as_list())
self.assertEqual([None], src_seq_len.shape.as_list())
self.assertEqual([None], tgt_seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer, feed_dict={skip_count: 3})
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 2, 2]], # sos c c
target_input_v)
self.assertAllEqual(
[[2, 2, 3]], # c c eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
# Re-init iterator with skip_count=0.
sess.run(iterator.initializer, feed_dict={skip_count: 0})
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 0, 3], # c a eos -- eos is padding
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([2, 3], src_len_v)
self.assertAllEqual(
[[4, 1, 2], # sos b c
[4, 2, 2]], # sos c c
target_input_v)
self.assertAllEqual(
[[1, 2, 3], # b c eos
[2, 2, 3]], # c c eos
target_output_v)
self.assertAllEqual([3, 3], tgt_len_v)
(source_v, src_len_v, target_input_v, target_output_v, tgt_len_v) = (
sess.run((source, src_seq_len, target_input, target_output,
tgt_seq_len)))
self.assertAllEqual(
[[2, 2, 0]], # c c a
source_v)
self.assertAllEqual([3], src_len_v)
self.assertAllEqual(
[[4, 0, 1]], # sos a b
target_input_v)
self.assertAllEqual(
[[0, 1, 3]], # a b eos
target_output_v)
self.assertAllEqual([3], tgt_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run(source)
def testGetInferIterator(self):
src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "c a", "d", "f e a g"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
iterator = iterator_utils.get_infer_iterator(
src_dataset=src_dataset,
src_vocab_table=src_vocab_table,
batch_size=batch_size,
eos=hparams.eos,
src_max_len=src_max_len)
table_initializer = tf.tables_initializer()
source = iterator.source
seq_len = iterator.source_sequence_length
self.assertEqual([None, None], source.shape.as_list())
self.assertEqual([None], seq_len.shape.as_list())
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
(source_v, seq_len_v) = sess.run((source, seq_len))
self.assertAllEqual(
[[2, 2, 0], # c c a
[2, 0, 3]], # c a eos
source_v)
self.assertAllEqual([3, 2], seq_len_v)
(source_v, seq_len_v) = sess.run((source, seq_len))
self.assertAllEqual(
[[-1, 3, 3], # "d" == unknown, eos eos
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
source_v)
self.assertAllEqual([1, 3], seq_len_v)
with self.assertRaisesOpError("End of sequence"):
sess.run((source, seq_len))
if __name__ == "__main__":
tf.test.main()
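# Illustrative aside (a sketch, not part of the original test file): the -1
# ids asserted throughout these tests are the default_value that
# lookup_ops.index_table_from_tensor assigns to out-of-vocabulary tokens.
# A minimal standalone check using the same TF 1.x APIs as above:
#
#   import tensorflow as tf
#   from tensorflow.python.ops import lookup_ops
#
#   table = lookup_ops.index_table_from_tensor(tf.constant(["a", "b", "c"]))
#   ids = table.lookup(tf.constant(["c", "f"]))
#   with tf.Session() as sess:
#       sess.run(tf.tables_initializer())
#       print(sess.run(ids))  # [ 2 -1] -- "f" is out of vocabulary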
avg_line_length: 40.813084 | max_line_length: 81 | alphanum_fraction: 0.558507
f7096e12c17cddb1193c96063117037bd1a0d6c7 | 1,392 | py | Python | misc/graph.py
OlegSomov/light-motion-analysis @ 4f510250aaa32929a6ccff3c796b53151addb9e9 | licenses: ["MIT"] | stars: null | issues: null | forks: null
avg_line_length: 30.933333 | max_line_length: 121 | alphanum_fraction: 0.631466
import os
import matplotlib
import json
from datetime import datetime
from matplotlib import pyplot
def show_results_graph(timer, name=None):
    with open('light_plot.json', 'r') as f:
        data = json.load(f)
    # NB: the 'imporved' misspelling below matches the file name written by
    # the producer, so it is deliberately left as-is.
    with open('light_plot_imporved.json', 'r') as f:
        data_improved = json.load(f)
    os.remove('light_plot.json')
    os.remove('light_plot_imporved.json')
x = []
y = []
x_improved = []
y_improved = []
for item in data:
date = datetime.strptime(item['x'], "%Y-%m-%d %H:%M:%S")
x.append(date)
        if item['y'] == 1:
            # Offset by 0.1 to visually distinguish the normal light state
            # from the improved one when both lines sit at 1.
            y.append(item['y'] + 0.1)
        else:
            y.append(item['y'])
for item in data_improved:
date = datetime.strptime(item['x'], "%Y-%m-%d %H:%M:%S")
x_improved.append(date)
y_improved.append(item['y'])
dates_normal = matplotlib.dates.date2num(x)
dates_improved = matplotlib.dates.date2num(x_improved)
    matplotlib.pyplot.plot_date(dates_normal, y, 'b-', label="Regular data", linewidth=2)
    # Use a bare '-' line style here: the original 'b-' format requested blue,
    # only for the explicit color kwarg to override it with red.
    matplotlib.pyplot.plot_date(dates_improved, y_improved, '-', color="red", label="Possible improvement", linewidth=2)
pyplot.title("Compare actual data and possible improvement ({} minutes)".format(timer))
pyplot.legend()
    if name:
        pyplot.savefig(name)  # save under the caller-supplied file name
pyplot.show()
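# A minimal sketch of the input show_results_graph() is assumed to consume,
# inferred from the parsing above (a list of {"x": timestamp, "y": 0/1}
# points); the producer of light_plot.json is not shown in this file:
#
#   sample = [
#       {"x": "2021-01-01 12:00:00", "y": 0},
#       {"x": "2021-01-01 12:05:00", "y": 1},
#   ]
#   with open('light_plot.json', 'w') as f:
#       json.dump(sample, f)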
f7096eb7eef28d54b1ae656d37094eb075a9cb64 | 6,618 | py | Python | horovod/common/elastic.py
Infi-zc/horovod @ 94cd8561a21d449fc8c80c8fef422025b84dfc22 | licenses: ["Apache-2.0"] | stars: 7,676 (2019-02-12..2022-03-31) | issues: 2,431 (2019-02-12..2022-03-31) | forks: 1,557 (2019-02-12..2022-03-31)
avg_line_length: 37.602273 | max_line_length: 109 | alphanum_fraction: 0.663342
# Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import queue
from horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt
from horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager
notification_manager = WorkerNotificationManager()
class State(object):
"""State representation used for tracking in memory state across workers.
Args:
bcast_object: Function used to broadcast a variable from rank 0 to the other workers.
get_rank: Function that returns the current rank of this worker.
"""
def __init__(self, bcast_object, get_rank):
self._bcast_object = bcast_object
self._rank = get_rank
self._host_messages = queue.Queue()
self._last_updated_timestamp = 0
self._reset_callbacks = []
def register_reset_callbacks(self, callbacks):
"""Register callbacks that will be invoked following a reset event (worker added or removed).
For example, a common use of a reset callback would be to update the learning rate scale with the
new number of workers.
Args:
callbacks: list of functions to execute.
"""
self._reset_callbacks.extend(callbacks)
def on_reset(self):
self._host_messages = queue.Queue()
self.reset()
for callback in self._reset_callbacks:
callback()
def on_hosts_updated(self, timestamp, update_res):
self._host_messages.put((timestamp, update_res))
def commit(self):
"""Commits all modifications to state tracked by this object to host memory.
This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`
if any were detected.
        Because commits are a heavy operation involving data copy (potentially from GPU to host), it is
        recommended to commit less frequently than once per batch. This lets users trade off
        per-batch execution time against lost training steps in the event of a worker failure.
"""
self.save()
self.check_host_updates()
def check_host_updates(self):
"""Checks that a notification has been sent indicating that hosts can be added or will be removed.
Raises a `HostsUpdatedInterrupt` if such a notification has been received.
"""
        # Iterate through the update messages sent from the server. If the update timestamp
        # is greater than the last update timestamp, then trigger a HostsUpdatedInterrupt.
last_updated_timestamp = prev_timestamp = self._last_updated_timestamp
all_update = HostUpdateResult.no_update
while not self._host_messages.empty():
timestamp, update = self._host_messages.get()
if timestamp > last_updated_timestamp:
last_updated_timestamp = timestamp
all_update |= update
# In order to ensure all workers raise the exception at the same time, we need to sync
# the updated state across all the workers.
# TODO(travis): this should be a max allreduce to account for changes in rank 0
prev_timestamp, self._last_updated_timestamp, all_update = \
self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))
# At this point, updated state is globally consistent across all ranks.
if self._last_updated_timestamp > prev_timestamp:
raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)
def save(self):
"""Saves state to host memory."""
raise NotImplementedError()
def restore(self):
"""Restores the last committed state, undoing any uncommitted modifications."""
raise NotImplementedError()
def sync(self):
"""Synchronize state across workers."""
raise NotImplementedError()
def reset(self):
"""Reset objects and variables following a reset event (before synchronization)."""
pass
class ObjectState(State):
"""State for simple Python objects.
Every object is specified as a keyword argument, and will be assigned as an attribute.
Args:
bcast_object: Horovod broadcast object function used to sync state dictionary.
get_rank: Horovod rank function used to identify is this process is the coordinator.
kwargs: Properties to sync, will be exposed as attributes of the object.
"""
def __init__(self, bcast_object, get_rank, **kwargs):
self._bcast_object = bcast_object
self._saved_state = kwargs
self._set_attrs()
super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)
def save(self):
new_state = {}
for attr in self._saved_state.keys():
new_state[attr] = getattr(self, attr)
self._saved_state = new_state
def restore(self):
self._set_attrs()
def sync(self):
if self._saved_state:
self._saved_state = self._bcast_object(self._saved_state)
self._set_attrs()
def _set_attrs(self):
for attr, value in self._saved_state.items():
setattr(self, attr, value)
def run_fn(func, reset):
@functools.wraps(func)
def wrapper(state, *args, **kwargs):
notification_manager.init()
notification_manager.register_listener(state)
skip_sync = False
try:
while True:
try:
if not skip_sync:
state.sync()
return func(state, *args, **kwargs)
except HorovodInternalError:
state.restore()
skip_sync = False
except HostsUpdatedInterrupt as e:
skip_sync = e.skip_sync
reset()
state.on_reset()
finally:
notification_manager.remove_listener(state)
return wrapper
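# Hedged usage sketch (not part of this module): how run_fn and ObjectState
# are typically consumed through a framework binding. hvd.elastic.run and
# hvd.elastic.TensorFlowState are assumed from horovod's public TensorFlow
# API; train_one_batch and num_batches stand in for user code.
#
#   import horovod.tensorflow as hvd
#
#   @hvd.elastic.run                     # framework wrapper built on run_fn
#   def train(state):
#       for batch in range(state.batch, num_batches):
#           train_one_batch(batch)
#           state.batch = batch
#           state.commit()               # save() + check_host_updates()
#
#   state = hvd.elastic.TensorFlowState(batch=0)  # an ObjectState subclass
#   train(state)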
f70970e710b91e652b69cabb5611ea3fb3a4a51b | 3,331 | py | Python | tests/performance/WorkerThreadsBuildTimePerformance/run.py
xpybuild/xpybuild @ c71a73e47414871c8192381d0356ab62f5a58127 | licenses: ["Apache-2.0"] | stars: 9 (2017-02-06..2021-12-05) | issues: 15 (2019-01-11..2022-01-08) | forks: 5 (2017-02-06..2020-12-02)
avg_line_length: 40.621951 | max_line_length: 223 | alphanum_fraction: 0.709997
import multiprocessing
import random
import time
from pysys.constants import *
from xpybuild.xpybuild_basetest import XpybuildBaseTest
class PySysTest(XpybuildBaseTest):
buildRoot = None # can override this with -XbuildRoot=path to measure your own build
def execute(self):
buildroot = self.buildRoot if self.buildRoot else self.input
        assert os.path.isdir(buildroot), buildroot
cpus = multiprocessing.cpu_count()
pending = set()
pending.add(1)
pending.add(cpus*1//5)
pending.add(cpus*2//5)
pending.add(cpus*3//5)
pending.add(cpus*4//5)
pending.add(cpus)
#for i in range(1, (cpus)/4 + 1):
# pending.add(i*4)
#pending.add(1)
pending = sorted(p for p in pending if p > 0)
self.log.info('This machine has %d CPUs', cpus)
self.log.info('Planning to run with workers=%s', pending)
random.shuffle(pending) # shuffle to reduce impact of caching; also means if we cycle this test we'll get more useful data
self.bestSoFar = 10000000000
self.bestSoFarWorkers = 1
self.results = {}
starttime = time.time()
def runbuild(workers):
assert workers <= cpus, workers
assert workers > 0
self.log.info('(%d/%d) Building with workers=%d (approx %0.1f hours left)', len(self.results)+1, len(pending), workers,
-1 if (len(self.results)==0) else ( # avoid div by zero on first one
(len(pending)-len(self.results) + 2) # number left; add 2 for possible extra runs
*(time.time()-starttime)/len(self.results) # average time per result
/60.0/60.0 # secs to hours
)
)
t = time.time()
#time.sleep(1)
env = dict(os.environ) if self.buildRoot else None # inherit full parent env for custom builds
self.xpybuild(args=['--workers', str(workers),
'%s=%s'%(getattr(self, 'buildOutputDirProperty', 'OUTPUT_DIR'), self.output+'/output%d'%workers)], buildfile=buildroot+'/root.xpybuild.py', stdouterr='xpybuild-j%d'%workers, timeout=2*60*60, env=env, setOutputDir=False)
t = time.time()-t
self.reportPerformanceResult(t, 'Total build time with %d worker threads'%workers, 's', resultDetails={'workers':workers})
self.results[workers] = t
if t < self.bestSoFar:
self.bestSoFar, self.bestSoFarWorkers = t, workers
self.deletedir(self.output+'/output%d'%workers)
self.log.info('')
for w in pending:
runbuild(w)
# explore slightly more or less than the best to find the optimum, even if not in the pending list
while self.bestSoFarWorkers < cpus and self.bestSoFarWorkers+1 not in self.results:
self.log.info('Best so far is %d; running an extra test for one extra worker', self.bestSoFarWorkers)
runbuild(self.bestSoFarWorkers+1)
while self.bestSoFarWorkers>1 and self.bestSoFarWorkers-1 not in self.results:
self.log.info('Best so far is %d; running an extra test for one less worker', self.bestSoFarWorkers)
runbuild(self.bestSoFarWorkers-1)
for w in sorted(self.results):
self.log.info('Time for % 2d workers: %0.1f', w, self.results[w])
self.log.info('')
self.log.info('Optimum number of workers is %d', self.bestSoFarWorkers)
self.log.info('... which is a multiplier of %0.2f for this %d CPU machine', self.bestSoFarWorkers/float(cpus), cpus)
self.log.info('(for a more accurate result, run with multiple cycles and plot the results .csv in a spreadsheet)')
def validate(self):
pass
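# Worked example of the ETA estimate computed in runbuild() above (numbers
# are hypothetical): with 6 pending worker counts, 2 results collected, and
# 600s elapsed, the estimate is (6 - 2 + 2) * (600 / 2) / 60 / 60 = 0.5
# hours; the "+ 2" pads for the extra exploratory runs around the best result.
#
#   pending_n, done_n, elapsed = 6, 2, 600.0
#   eta_hours = (pending_n - done_n + 2) * (elapsed / done_n) / 60.0 / 60.0
#   assert eta_hours == 0.5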
f709711aa64e6d3bbb5a25ff10f73af015d6369a | 723 | py | Python | karmagrambot/config.py
patrickelectric/karmagrambot @ b76d79172905c5ce906c0ff60591d75dd2994565 | licenses: ["MIT"] | stars: 6 (2019-05-31..2021-05-03) | issues: 20 (2019-05-23..2021-11-19) | forks: 8 (2019-05-31..2021-11-18)
avg_line_length: 21.264706 | max_line_length: 76 | alphanum_fraction: 0.679115
import json
from os import environ
from pathlib import Path
try:
CONFIG_DIR = Path(environ['XDG_CONFIG_HOME'], __package__)
except KeyError:
CONFIG_DIR = Path.home() / '.config' / __package__
if not CONFIG_DIR.exists():
CONFIG_DIR.mkdir()
CONFIG_FILE = CONFIG_DIR / 'config.json'
with open(CONFIG_FILE) as f:
config = json.load(f)
TOKEN = config['token']
try:
DB_PATH = Path(config['db-path'])
except KeyError:
try:
DB_PATH = Path(environ['XDG_DATA_HOME'], __package__, 'messages.db')
except KeyError:
DB_PATH = Path.home() / '.local/share' / __package__ / 'messages.db'
DB_DIR = DB_PATH.parent
if not DB_DIR.exists():
DB_DIR.mkdir()
DB_URI = f'sqlite:///{DB_PATH}'
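# A minimal config.json matching the keys read above -- "token" is required,
# "db-path" is optional (placeholder values, not real credentials):
#
#   {
#       "token": "123456:ABC-DEF-placeholder",
#       "db-path": "/home/user/.local/share/karmagrambot/messages.db"
#   }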
f709713726d342f65a2d86eaded3bf3a8690609d | 230 | py | Python | tests/themes/golden/demo_basic_button.py
Rahuum/glooey @ 932edca1c8fdd710f1941038e47ac8d25a31a1a8 | licenses: ["MIT"] | stars: 86 (2016-11-28..2022-03-17) | issues: 57 (2017-03-07..2022-01-16) | forks: 9 (2017-03-15..2022-02-17)
avg_line_length: 15.333333 | max_line_length: 42 | alphanum_fraction: 0.756522
#!/usr/bin/env python3
import pyglet
import run_demos
import glooey.themes.golden as golden
window = pyglet.window.Window()
gui = golden.Gui(window)
button = golden.BasicButton('Lorem Ipsum')
gui.add(button)
pyglet.app.run()
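# Hedged extension sketch (not in the original demo): glooey buttons are
# pyglet event dispatchers, so a click handler could be attached before
# pyglet.app.run(); the on_click event name is assumed from glooey's Button
# API.
#
#   @button.event
#   def on_click(widget):
#       print("clicked:", widget)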
f70971933fba0cc7545a7ef491245651e48915fc | 16,353 | py | Python | tests/test_lex.py
Chaformbintrano/hy @ 1b220a93a509acd2c5c3c4159e841b5eb12a7a95 | licenses: ["MIT"] | stars: null | issues: null | forks: null
avg_line_length: 33.237805 | max_line_length: 105 | alphanum_fraction: 0.597994
# Copyright 2019 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import sys
import traceback
import pytest
from math import isnan
from hy.models import (HyExpression, HyInteger, HyFloat, HyComplex, HySymbol,
HyString, HyDict, HyList, HySet, HyKeyword)
from hy.lex import tokenize
from hy.lex.exceptions import LexException, PrematureEndOfInput
from hy.errors import hy_exc_handler
def peoi(): return pytest.raises(PrematureEndOfInput)
def lexe(): return pytest.raises(LexException)
def check_ex(execinfo, expected):
output = traceback.format_exception_only(execinfo.type, execinfo.value)
assert output[:-1] == expected[:-1]
# Python 2.7 doesn't give the full exception name, so we compensate.
assert output[-1].endswith(expected[-1])
def check_trace_output(capsys, execinfo, expected):
sys.__excepthook__(execinfo.type, execinfo.value, execinfo.tb)
captured_wo_filtering = capsys.readouterr()[-1].strip('\n')
hy_exc_handler(execinfo.type, execinfo.value, execinfo.tb)
captured_w_filtering = capsys.readouterr()[-1].strip('\n')
output = captured_w_filtering.split('\n')
# Make sure the filtered frames aren't the same as the unfiltered ones.
assert output[:-1] != captured_wo_filtering.split('\n')[:-1]
# Remove the origin frame lines.
assert output[3:-1] == expected[:-1]
# Python 2.7 doesn't give the full exception name, so we compensate.
assert output[-1].endswith(expected[-1])
def test_lex_exception():
""" Ensure tokenize throws a fit on a partial input """
with peoi(): tokenize("(foo")
with peoi(): tokenize("{foo bar")
with peoi(): tokenize("(defn foo [bar]")
with peoi(): tokenize("(foo \"bar")
def test_unbalanced_exception():
"""Ensure the tokenization fails on unbalanced expressions"""
with lexe(): tokenize("(bar))")
with lexe(): tokenize("(baz [quux]])")
def test_lex_single_quote_err():
"Ensure tokenizing \"' \" throws a LexException that can be stringified"
# https://github.com/hylang/hy/issues/1252
with lexe() as execinfo:
tokenize("' ")
check_ex(execinfo, [
' File "<string>", line 1\n',
" '\n",
' ^\n',
'LexException: Could not identify the next token.\n'])
def test_lex_expression_symbols():
""" Make sure that expressions produce symbols """
objs = tokenize("(foo bar)")
assert objs == [HyExpression([HySymbol("foo"), HySymbol("bar")])]
def test_lex_expression_strings():
""" Test that expressions can produce strings """
objs = tokenize("(foo \"bar\")")
assert objs == [HyExpression([HySymbol("foo"), HyString("bar")])]
def test_lex_expression_integer():
""" Make sure expressions can produce integers """
objs = tokenize("(foo 2)")
assert objs == [HyExpression([HySymbol("foo"), HyInteger(2)])]
def test_lex_symbols():
""" Make sure that symbols are valid expressions"""
objs = tokenize("foo ")
assert objs == [HySymbol("foo")]
def test_lex_strings():
""" Make sure that strings are valid expressions"""
objs = tokenize('"foo"')
assert objs == [HyString("foo")]
# Make sure backslash-escaped newlines work (see issue #831)
objs = tokenize(r"""
"a\
bc"
""")
assert objs == [HyString("abc")]
def test_lex_strings_exception():
""" Make sure tokenize throws when codec can't decode some bytes"""
with lexe() as execinfo:
tokenize('\"\\x8\"')
check_ex(execinfo, [
' File "<string>", line 1\n',
' "\\x8"\n',
' ^\n',
'LexException: Can\'t convert "\\x8" to a HyString\n'])
def test_lex_bracket_strings():
objs = tokenize("#[my delim[hello world]my delim]")
assert objs == [HyString("hello world")]
assert objs[0].brackets == "my delim"
objs = tokenize("#[[squid]]")
assert objs == [HyString("squid")]
assert objs[0].brackets == ""
def test_lex_integers():
""" Make sure that integers are valid expressions"""
objs = tokenize("42 ")
assert objs == [HyInteger(42)]
def test_lex_fractions():
""" Make sure that fractions are valid expressions"""
objs = tokenize("1/2")
assert objs == [HyExpression([HySymbol("fraction"), HyInteger(1),
HyInteger(2)])]
def test_lex_expression_float():
""" Make sure expressions can produce floats """
objs = tokenize("(foo 2.)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(2.)])]
objs = tokenize("(foo -0.5)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(-0.5)])]
objs = tokenize("(foo 1.e7)")
assert objs == [HyExpression([HySymbol("foo"), HyFloat(1.e7)])]
def test_lex_big_float():
# https://github.com/hylang/hy/issues/1448
assert tokenize("1e900") == [HyFloat(1e900)]
assert tokenize("1e900-1e900j") == [HyComplex(1e900, -1e900)]
def test_lex_nan_and_inf():
assert isnan(tokenize("NaN")[0])
assert tokenize("Nan") == [HySymbol("Nan")]
assert tokenize("nan") == [HySymbol("nan")]
assert tokenize("NAN") == [HySymbol("NAN")]
assert tokenize("Inf") == [HyFloat(float("inf"))]
assert tokenize("inf") == [HySymbol("inf")]
assert tokenize("INF") == [HySymbol("INF")]
assert tokenize("-Inf") == [HyFloat(float("-inf"))]
assert tokenize("-inf") == [HySymbol("-inf")]
assert tokenize("-INF") == [HySymbol("-INF")]
def test_lex_expression_complex():
""" Make sure expressions can produce complex """
def t(x): return tokenize("(foo {})".format(x))
def f(x): return [HyExpression([HySymbol("foo"), x])]
assert t("2.j") == f(HyComplex(2.j))
assert t("-0.5j") == f(HyComplex(-0.5j))
assert t("1.e7j") == f(HyComplex(1e7j))
assert t("j") == f(HySymbol("j"))
assert isnan(t("NaNj")[0][1].imag)
assert t("nanj") == f(HySymbol("nanj"))
assert t("Inf+Infj") == f(HyComplex(complex(float("inf"), float("inf"))))
assert t("Inf-Infj") == f(HyComplex(complex(float("inf"), float("-inf"))))
assert t("Inf-INFj") == f(HySymbol("Inf-INFj"))
def test_lex_digit_separators():
assert tokenize("1_000_000") == [HyInteger(1000000)]
assert tokenize("1,000,000") == [HyInteger(1000000)]
assert tokenize("1,000_000") == [HyInteger(1000000)]
assert tokenize("1_000,000") == [HyInteger(1000000)]
assert tokenize("0x_af") == [HyInteger(0xaf)]
assert tokenize("0x,af") == [HyInteger(0xaf)]
assert tokenize("0b_010") == [HyInteger(0b010)]
assert tokenize("0b,010") == [HyInteger(0b010)]
assert tokenize("0o_373") == [HyInteger(0o373)]
assert tokenize("0o,373") == [HyInteger(0o373)]
assert tokenize('1_2.3,4') == [HyFloat(12.34)]
assert tokenize('1_2e3,4') == [HyFloat(12e34)]
assert (tokenize("1,2/3_4") ==
[HyExpression([HySymbol("fraction"),
HyInteger(12), HyInteger(34)])])
assert tokenize("1,0_00j") == [HyComplex(1000j)]
assert tokenize("1,,,,___,____,,__,,2__,,,__") == [HyInteger(12)]
assert (tokenize("_1,,,,___,____,,__,,2__,,,__") ==
[HySymbol("_1,,,,___,____,,__,,2__,,,__")])
assert (tokenize("1,,,,___,____,,__,,2__,q,__") ==
[HySymbol("1,,,,___,____,,__,,2__,q,__")])
def test_lex_bad_attrs():
with lexe() as execinfo:
tokenize("1.foo")
check_ex(execinfo, [
' File "<string>", line 1\n',
' 1.foo\n',
' ^\n',
'LexException: Cannot access attribute on anything other'
' than a name (in order to get attributes of expressions,'
' use `(. <expression> <attr>)` or `(.<attr> <expression>)`)\n'])
with lexe(): tokenize("0.foo")
with lexe(): tokenize("1.5.foo")
with lexe(): tokenize("1e3.foo")
with lexe(): tokenize("5j.foo")
with lexe(): tokenize("3+5j.foo")
with lexe(): tokenize("3.1+5.1j.foo")
assert tokenize("j.foo")
with lexe(): tokenize("3/4.foo")
assert tokenize("a/1.foo")
assert tokenize("1/a.foo")
with lexe(): tokenize(":hello.foo")
def test_lex_line_counting():
""" Make sure we can count lines / columns """
entry = tokenize("(foo (one two))")[0]
assert entry.start_line == 1
assert entry.start_column == 1
assert entry.end_line == 1
assert entry.end_column == 15
entry = entry[1]
assert entry.start_line == 1
assert entry.start_column == 6
assert entry.end_line == 1
assert entry.end_column == 14
def test_lex_line_counting_multi():
""" Make sure we can do multi-line tokenization """
entries = tokenize("""
(foo (one two))
(foo bar)
""")
entry = entries[0]
assert entry.start_line == 2
assert entry.start_column == 1
assert entry.end_line == 2
assert entry.end_column == 15
entry = entries[1]
assert entry.start_line == 3
assert entry.start_column == 1
assert entry.end_line == 3
assert entry.end_column == 9
def test_lex_line_counting_multi_inner():
""" Make sure we can do multi-line tokenization (inner) """
entry = tokenize("""(foo
bar)""")[0]
inner = entry[0]
assert inner.start_line == 1
assert inner.start_column == 2
inner = entry[1]
assert inner.start_line == 2
assert inner.start_column == 5
def test_dicts():
""" Ensure that we can tokenize a dict. """
objs = tokenize("{foo bar bar baz}")
assert objs == [HyDict(["foo", "bar", "bar", "baz"])]
objs = tokenize("(bar {foo bar bar baz})")
assert objs == [HyExpression([HySymbol("bar"),
HyDict(["foo", "bar",
"bar", "baz"])])]
objs = tokenize("{(foo bar) (baz quux)}")
assert objs == [HyDict([
HyExpression([HySymbol("foo"), HySymbol("bar")]),
HyExpression([HySymbol("baz"), HySymbol("quux")])
])]
def test_sets():
""" Ensure that we can tokenize a set. """
objs = tokenize("#{1 2}")
assert objs == [HySet([HyInteger(1), HyInteger(2)])]
objs = tokenize("(bar #{foo bar baz})")
assert objs == [HyExpression([HySymbol("bar"),
HySet(["foo", "bar", "baz"])])]
objs = tokenize("#{(foo bar) (baz quux)}")
assert objs == [HySet([
HyExpression([HySymbol("foo"), HySymbol("bar")]),
HyExpression([HySymbol("baz"), HySymbol("quux")])
])]
# Duplicate items in a literal set should be okay (and should
# be preserved).
objs = tokenize("#{1 2 1 1 2 1}")
assert objs == [HySet([HyInteger(n) for n in [1, 2, 1, 1, 2, 1]])]
assert len(objs[0]) == 6
# https://github.com/hylang/hy/issues/1120
objs = tokenize("#{a 1}")
assert objs == [HySet([HySymbol("a"), HyInteger(1)])]
def test_nospace():
""" Ensure we can tokenize without spaces if we have to """
entry = tokenize("(foo(one two))")[0]
assert entry.start_line == 1
assert entry.start_column == 1
assert entry.end_line == 1
assert entry.end_column == 14
entry = entry[1]
assert entry.start_line == 1
assert entry.start_column == 5
assert entry.end_line == 1
assert entry.end_column == 13
def test_escapes():
""" Ensure we can escape things """
entry = tokenize(r"""(foo "foo\n")""")[0]
assert entry[1] == "foo\n"
entry = tokenize(r"""(foo r"foo\s")""")[0]
assert entry[1] == r"foo\s"
def test_unicode_escapes():
"""Ensure unicode escapes are handled correctly"""
s = r'"a\xac\u1234\u20ac\U00008000"'
assert len(s) == 29
entry = tokenize(s)[0]
assert len(entry) == 5
assert [ord(x) for x in entry] == [97, 172, 4660, 8364, 32768]
def test_complex():
"""Ensure we tokenize complex numbers properly"""
# This is a regression test for #143
entry = tokenize("(1j)")[0][0]
assert entry == HyComplex("1.0j")
entry = tokenize("(j)")[0][0]
assert entry == HySymbol("j")
def test_tag_macro():
"""Ensure tag macros are handled properly"""
entry = tokenize("#^()")
assert entry[0][0] == HySymbol("dispatch-tag-macro")
assert entry[0][1] == HyString("^")
assert len(entry[0]) == 3
def test_lex_comment_382():
"""Ensure that we can tokenize sources with a comment at the end"""
entry = tokenize("foo ;bar\n;baz")
assert entry == [HySymbol("foo")]
def test_discard():
"""Check that discarded terms are removed properly."""
# empty
assert tokenize("") == []
# single
assert tokenize("#_1") == []
# multiple
assert tokenize("#_1 #_2") == []
assert tokenize("#_1 #_2 #_3") == []
# nested discard
assert tokenize("#_ #_1 2") == []
assert tokenize("#_ #_ #_1 2 3") == []
# trailing
assert tokenize("0") == [0]
assert tokenize("0 #_1") == [0]
assert tokenize("0 #_1 #_2") == [0]
# leading
assert tokenize("2") == [2]
assert tokenize("#_1 2") == [2]
assert tokenize("#_0 #_1 2") == [2]
assert tokenize("#_ #_0 1 2") == [2]
# both
assert tokenize("#_1 2 #_3") == [2]
assert tokenize("#_0 #_1 2 #_ #_3 4") == [2]
# inside
assert tokenize("0 #_1 2") == [0, 2]
assert tokenize("0 #_1 #_2 3") == [0, 3]
assert tokenize("0 #_ #_1 2 3") == [0, 3]
# in HyList
assert tokenize("[]") == [HyList([])]
assert tokenize("[#_1]") == [HyList([])]
assert tokenize("[#_1 #_2]") == [HyList([])]
assert tokenize("[#_ #_1 2]") == [HyList([])]
assert tokenize("[0]") == [HyList([HyInteger(0)])]
assert tokenize("[0 #_1]") == [HyList([HyInteger(0)])]
assert tokenize("[0 #_1 #_2]") == [HyList([HyInteger(0)])]
assert tokenize("[2]") == [HyList([HyInteger(2)])]
assert tokenize("[#_1 2]") == [HyList([HyInteger(2)])]
assert tokenize("[#_0 #_1 2]") == [HyList([HyInteger(2)])]
assert tokenize("[#_ #_0 1 2]") == [HyList([HyInteger(2)])]
# in HySet
assert tokenize("#{}") == [HySet()]
assert tokenize("#{#_1}") == [HySet()]
assert tokenize("#{0 #_1}") == [HySet([HyInteger(0)])]
assert tokenize("#{#_1 0}") == [HySet([HyInteger(0)])]
# in HyDict
assert tokenize("{}") == [HyDict()]
assert tokenize("{#_1}") == [HyDict()]
assert tokenize("{#_0 1 2}") == [HyDict([HyInteger(1), HyInteger(2)])]
assert tokenize("{1 #_0 2}") == [HyDict([HyInteger(1), HyInteger(2)])]
assert tokenize("{1 2 #_0}") == [HyDict([HyInteger(1), HyInteger(2)])]
# in HyExpression
assert tokenize("()") == [HyExpression()]
assert tokenize("(#_foo)") == [HyExpression()]
assert tokenize("(#_foo bar)") == [HyExpression([HySymbol("bar")])]
assert tokenize("(foo #_bar)") == [HyExpression([HySymbol("foo")])]
assert tokenize("(foo :bar 1)") == [HyExpression([HySymbol("foo"), HyKeyword("bar"), HyInteger(1)])]
assert tokenize("(foo #_:bar 1)") == [HyExpression([HySymbol("foo"), HyInteger(1)])]
assert tokenize("(foo :bar #_1)") == [HyExpression([HySymbol("foo"), HyKeyword("bar")])]
# discard term with nesting
assert tokenize("[1 2 #_[a b c [d e [f g] h]] 3 4]") == [
HyList([HyInteger(1), HyInteger(2), HyInteger(3), HyInteger(4)])
]
# discard with other prefix syntax
assert tokenize("a #_'b c") == [HySymbol("a"), HySymbol("c")]
assert tokenize("a '#_b c") == [HySymbol("a"), HyExpression([HySymbol("quote"), HySymbol("c")])]
assert tokenize("a '#_b #_c d") == [HySymbol("a"), HyExpression([HySymbol("quote"), HySymbol("d")])]
assert tokenize("a '#_ #_b c d") == [HySymbol("a"), HyExpression([HySymbol("quote"), HySymbol("d")])]
def test_lex_exception_filtering(capsys):
"""Confirm that the exception filtering works for lexer errors."""
# First, test for PrematureEndOfInput
with peoi() as execinfo:
tokenize(" \n (foo\n \n")
check_trace_output(capsys, execinfo, [
' File "<string>", line 2',
' (foo',
' ^',
'PrematureEndOfInput: Premature end of input'])
# Now, for a generic LexException
with lexe() as execinfo:
tokenize(" \n\n 1.foo ")
check_trace_output(capsys, execinfo, [
' File "<string>", line 3',
' 1.foo',
' ^',
'LexException: Cannot access attribute on anything other'
' than a name (in order to get attributes of expressions,'
' use `(. <expression> <attr>)` or `(.<attr> <expression>)`)'])
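# Hedged interactive sketch (assumes a Hy checkout matching the imports at
# the top of this file): tokenize() returns Hy model objects carrying the
# source positions that the line-counting tests above assert on.
#
#   >>> from hy.lex import tokenize
#   >>> expr = tokenize("(foo bar)")[0]
#   >>> [type(x).__name__ for x in expr]
#   ['HySymbol', 'HySymbol']
#   >>> (expr.start_line, expr.start_column, expr.end_line, expr.end_column)
#   (1, 1, 1, 9)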
f70971dadca4fb414382ea607136ad5493b59e79
| 3,068
|
py
|
Python
|
vendor/github.com/elastic/beats/metricbeat/tests/system/test_kubernetes.py
|
plinde/mqttbeat
|
64e3bf19a45686a5cc6aecae58d9d2e140e7c915
|
[
"Apache-2.0"
] | 35
|
2017-07-05T07:48:03.000Z
|
2022-01-10T16:29:37.000Z
|
vendor/github.com/elastic/beats/metricbeat/tests/system/test_kubernetes.py
|
plinde/mqttbeat
|
64e3bf19a45686a5cc6aecae58d9d2e140e7c915
|
[
"Apache-2.0"
] | 8
|
2017-07-20T21:13:17.000Z
|
2020-01-10T08:08:34.000Z
|
vendor/github.com/elastic/beats/metricbeat/tests/system/test_kubernetes.py
|
plinde/mqttbeat
|
64e3bf19a45686a5cc6aecae58d9d2e140e7c915
|
[
"Apache-2.0"
] | 19
|
2017-08-03T16:49:27.000Z
|
2022-01-27T01:48:35.000Z
|
import os
import metricbeat
import unittest
KUBERNETES_FIELDS = metricbeat.COMMON_FIELDS + ["kubernetes"]
class Test(metricbeat.BaseTest):
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_node(self):
""" Kubernetes kubelet node metricset tests """
self._test_metricset('node', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_system(self):
""" Kubernetes kubelet system metricset tests """
self._test_metricset('system', 2, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_pod(self):
""" Kubernetes kubelet pod metricset tests """
self._test_metricset('pod', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_container(self):
""" Kubernetes kubelet container metricset tests """
self._test_metricset('container', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_node(self):
""" Kubernetes state node metricset tests """
self._test_metricset('state_node', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_pod(self):
""" Kubernetes state pod metricset tests """
self._test_metricset('state_pod', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_container(self):
""" Kubernetes state container metricset tests """
self._test_metricset('state_container', 1, self.get_kube_state_hosts())
def _test_metricset(self, metricset, expected_events, hosts):
self.render_config_template(modules=[{
"name": "kubernetes",
"enabled": "true",
"metricsets": [metricset],
"hosts": hosts,
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
# Ensure no errors or warnings exist in the log.
log = self.get_log()
self.assertNotRegexpMatches(log.replace("WARN BETA", ""), "ERR|WARN")
output = self.read_output_json()
self.assertEqual(len(output), expected_events)
evt = output[0]
self.assertItemsEqual(self.de_dot(KUBERNETES_FIELDS), evt.keys(), evt)
self.assert_fields_are_documented(evt)
@classmethod
def get_kubelet_hosts(cls):
return [
"http://" +
os.getenv('KUBELET_HOST', 'localhost') + ':' +
os.getenv('KUBELET_PORT', '10255')
]
@classmethod
def get_kube_state_hosts(cls):
return [
"http://" +
os.getenv('KUBE_STATE_METRICS_HOST', 'localhost') + ':' +
os.getenv('KUBE_STATE_METRICS_PORT', '18080')
]
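    # Usage sketch (values assumed): with KUBELET_HOST=10.0.0.5 and
    # KUBELET_PORT=10250 exported, get_kubelet_hosts() returns
    # ["http://10.0.0.5:10250"]; unset variables fall back to the
    # localhost defaults above.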
| 36.094118
| 79
| 0.659061
|
import os
import metricbeat
import unittest
KUBERNETES_FIELDS = metricbeat.COMMON_FIELDS + ["kubernetes"]
class Test(metricbeat.BaseTest):
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_node(self):
self._test_metricset('node', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_system(self):
self._test_metricset('system', 2, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_pod(self):
self._test_metricset('pod', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_kubelet_container(self):
self._test_metricset('container', 1, self.get_kubelet_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_node(self):
self._test_metricset('state_node', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_pod(self):
self._test_metricset('state_pod', 1, self.get_kube_state_hosts())
@unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
def test_state_container(self):
self._test_metricset('state_container', 1, self.get_kube_state_hosts())
def _test_metricset(self, metricset, expected_events, hosts):
self.render_config_template(modules=[{
"name": "kubernetes",
"enabled": "true",
"metricsets": [metricset],
"hosts": hosts,
"period": "5s"
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
log = self.get_log()
self.assertNotRegexpMatches(log.replace("WARN BETA", ""), "ERR|WARN")
output = self.read_output_json()
self.assertEqual(len(output), expected_events)
evt = output[0]
self.assertItemsEqual(self.de_dot(KUBERNETES_FIELDS), evt.keys(), evt)
self.assert_fields_are_documented(evt)
@classmethod
def get_kubelet_hosts(cls):
return [
"http://" +
os.getenv('KUBELET_HOST', 'localhost') + ':' +
os.getenv('KUBELET_PORT', '10255')
]
@classmethod
def get_kube_state_hosts(cls):
return [
"http://" +
os.getenv('KUBE_STATE_METRICS_HOST', 'localhost') + ':' +
os.getenv('KUBE_STATE_METRICS_PORT', '18080')
]
| true
| true
|
f709722cba9996c6b9ab92c49c7def3b7f426a8a
| 2,240
|
py
|
Python
|
oneflow/python/test/ops/test_function_input_output.py
|
wanghongsheng01/framework_enflame
|
debf613e05e3f5ea8084c3e79b60d0dd9e349526
|
[
"Apache-2.0"
] | 1
|
2021-04-14T03:19:35.000Z
|
2021-04-14T03:19:35.000Z
|
oneflow/python/test/ops/test_function_input_output.py
|
wanghongsheng01/framework_enflame
|
debf613e05e3f5ea8084c3e79b60d0dd9e349526
|
[
"Apache-2.0"
] | 1
|
2021-06-16T08:37:50.000Z
|
2021-06-16T08:37:50.000Z
|
oneflow/python/test/ops/test_function_input_output.py
|
wanghongsheng01/framework_enflame
|
debf613e05e3f5ea8084c3e79b60d0dd9e349526
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.typing as oft
import oneflow._oneflow_internal
from typing import Tuple
@flow.unittest.skip_unless_1n4d()
class TestFunctionInputOutput(flow.unittest.TestCase):
def test_FixedTensorDef(test_case):
@flow.global_function()
def Foo(x: oft.Numpy.Placeholder((2, 5))):
return x
data = np.ones((2, 5), dtype=np.float32)
of_ret = Foo(data).get()
test_case.assertEqual(of_ret.numpy().max(), 1)
test_case.assertEqual(of_ret.numpy().min(), 1)
test_case.assertTrue(np.allclose(of_ret.numpy(), data))
def test_FixedTensorDef_2_device(test_case):
flow.config.gpu_device_num(2)
@flow.global_function()
def Foo(x: oft.Numpy.Placeholder((2, 5))):
return x
data = np.ones((2, 5), dtype=np.float32)
of_ret = Foo(data).get()
test_case.assertEqual(of_ret.numpy().max(), 1)
test_case.assertEqual(of_ret.numpy().min(), 1)
test_case.assertTrue(np.allclose(of_ret.numpy(), data))
def test_MirroredTensorDef(test_case):
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def Foo(x: oft.ListNumpy.Placeholder((2, 5))):
return x
data = np.ones((1, 5), dtype=np.float32)
ndarray_list = Foo([data]).get().numpy_list()
test_case.assertEqual(len(ndarray_list), 1)
test_case.assertTrue(np.allclose(ndarray_list[0], data))
if __name__ == "__main__":
unittest.main()
| 33.939394
| 72
| 0.691071
|
import unittest
import numpy as np
import oneflow as flow
import oneflow.typing as oft
import oneflow._oneflow_internal
from typing import Tuple
@flow.unittest.skip_unless_1n4d()
class TestFunctionInputOutput(flow.unittest.TestCase):
def test_FixedTensorDef(test_case):
@flow.global_function()
def Foo(x: oft.Numpy.Placeholder((2, 5))):
return x
data = np.ones((2, 5), dtype=np.float32)
of_ret = Foo(data).get()
test_case.assertEqual(of_ret.numpy().max(), 1)
test_case.assertEqual(of_ret.numpy().min(), 1)
test_case.assertTrue(np.allclose(of_ret.numpy(), data))
def test_FixedTensorDef_2_device(test_case):
flow.config.gpu_device_num(2)
@flow.global_function()
def Foo(x: oft.Numpy.Placeholder((2, 5))):
return x
data = np.ones((2, 5), dtype=np.float32)
of_ret = Foo(data).get()
test_case.assertEqual(of_ret.numpy().max(), 1)
test_case.assertEqual(of_ret.numpy().min(), 1)
test_case.assertTrue(np.allclose(of_ret.numpy(), data))
def test_MirroredTensorDef(test_case):
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def Foo(x: oft.ListNumpy.Placeholder((2, 5))):
return x
data = np.ones((1, 5), dtype=np.float32)
ndarray_list = Foo([data]).get().numpy_list()
test_case.assertEqual(len(ndarray_list), 1)
test_case.assertTrue(np.allclose(ndarray_list[0], data))
if __name__ == "__main__":
unittest.main()
| true
| true
|
f70972932e906ce90dd1294411cbbd58bc445a32
| 158
|
py
|
Python
|
tests/test_contrib_sentry.py
|
gtors/stories
|
0614624f472151f20617afa4e6c4a0af9b409b6d
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_contrib_sentry.py
|
gtors/stories
|
0614624f472151f20617afa4e6c4a0af9b409b6d
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_contrib_sentry.py
|
gtors/stories
|
0614624f472151f20617afa4e6c4a0af9b409b6d
|
[
"BSD-2-Clause"
] | null | null | null |
import pytest
pytest.importorskip("foo")
from stories.contrib.sentry.django import ( # FIXME: # isort:skip # pragma: no cover # noqa
DjangoClient,
)
| 19.75
| 94
| 0.708861
|
import pytest
pytest.importorskip("foo")
from stories.contrib.sentry.django import (
    DjangoClient,
)
| true
| true
|
f7097307a99544d97a4ccf6bbdb873a3320a9c8c
| 8,779
|
py
|
Python
|
scripts/update_forecast_hub.py
|
ConsultingMD/covid-data-public
|
2b7091f7cc3877df45a7887709e999b0ebdf30ec
|
[
"MIT"
] | null | null | null |
scripts/update_forecast_hub.py
|
ConsultingMD/covid-data-public
|
2b7091f7cc3877df45a7887709e999b0ebdf30ec
|
[
"MIT"
] | null | null | null |
scripts/update_forecast_hub.py
|
ConsultingMD/covid-data-public
|
2b7091f7cc3877df45a7887709e999b0ebdf30ec
|
[
"MIT"
] | null | null | null |
import enum
from typing import Any
import click
import pandas as pd
import numpy as np
import structlog
import pathlib
import pydantic
import datetime
import zoltpy.util
from covidactnow.datapublic import common_init, common_df
from scripts import helpers
from covidactnow.datapublic.common_fields import (
GetByValueMixin,
CommonFields,
FieldNameAndCommonField,
)
DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"
_logger = structlog.get_logger(__name__)
class ForecastModel(enum.Enum):
""""""
ENSEMBLE = "COVIDhub-ensemble"
BASELINE = "COVIDhub-baseline"
GOOGLE = "Google_Harvard-CPF"
class Fields(GetByValueMixin, FieldNameAndCommonField, enum.Enum):
MODEL_ABBR = "model_abbr", CommonFields.MODEL_ABBR
REGION = "unit", CommonFields.FIPS
FORECAST_DATE = "forecast_date", CommonFields.FORECAST_DATE
TARGET_DATE = "target_date", CommonFields.DATE
QUANTILE = "quantile", CommonFields.QUANTILE
WEEKLY_NEW_CASES = "case", CommonFields.WEEKLY_NEW_CASES
WEEKLY_NEW_DEATHS = "death", CommonFields.WEEKLY_NEW_DEATHS
class ForecastHubUpdater(pydantic.BaseModel):
"""Updates Forecast Lab Data Set with the Latest Available Forecast
"""
FORECAST_PROJECT_NAME = "COVID-19 Forecasts"
RAW_CSV_FILENAME = "raw.csv"
conn: Any # A valid zoltpy connection
model: ForecastModel # The model to cache from Zoltar
raw_data_root: pathlib.Path
timeseries_output_path: pathlib.Path
@classmethod
def make_with_data_root(
cls, model: ForecastModel, conn: Any, data_root: pathlib.Path,
) -> "ForecastHubUpdater":
return cls(
model=model,
conn=conn,
raw_data_root=data_root / "forecast-hub",
timeseries_output_path=data_root / "forecast-hub" / "timeseries-common.csv",
)
@property
def raw_path(self):
return self.raw_data_root / self.RAW_CSV_FILENAME
def write_version_file(self, forecast_date) -> None:
stamp = datetime.datetime.utcnow().isoformat()
version_path = self.raw_data_root / "version.txt"
with version_path.open("w") as vf:
vf.write(f"Updated on {stamp}\n")
vf.write(f"Using forecast from {forecast_date}\n")
def update_source_data(self):
"""
See https://github.com/reichlab/zoltpy/tree/master for instructions.
Note: Requires environment variables for Z_USERNAME and Z_PASSWORD with correct
permissions.
"""
_logger.info(f"Updating {self.model.name} from ForecastHub")
latest_forecast_date = get_latest_forecast_date(
self.conn, self.FORECAST_PROJECT_NAME, self.model.value
)
# TODO: Save a call to the Forecast Hub by checking if latest_forecast_date is newer than
# the current one saved in version.txt. We expect the cache to be invalidated only once a
# week.
ensemble = zoltpy.util.download_forecast(
self.conn, self.FORECAST_PROJECT_NAME, self.model.value, latest_forecast_date
)
df = zoltpy.util.dataframe_from_json_io_dict(ensemble)
df["forecast_date"] = pd.to_datetime(latest_forecast_date)
df["model_abbr"] = self.model.value
df.to_csv(self.raw_path, index=False)
self.write_version_file(forecast_date=latest_forecast_date)
def load_source_data(self) -> pd.DataFrame:
_logger.info("Updating ForecastHub Ensemble dataset.")
data = pd.read_csv(
self.raw_path, parse_dates=["forecast_date"], dtype={"unit": str}, low_memory=False
)
return data
@staticmethod
def transform(df: pd.DataFrame) -> pd.DataFrame:
df["target_date"] = df.apply(
lambda x: x.forecast_date + pd.Timedelta(weeks=int(x.target.split(" ")[0])),
axis="columns",
)
# The targets have the form "X wk inc/cum cases/deaths"
# Take the final split (death/cases) and use that as target type
df["target_type"] = df.target.str.split(" ").str[-1]
# Take the penultimate split (inc/cum) and use that as aggregation type
df["target_summation"] = df.target.str.split(" ").str[-2]
masks = [
df["unit"] != "US", # Drop the national forecast
df["quantile"].notna(), # Point forecasts are duplicate of quantile = 0.5
df["target_summation"] == "inc", # Only return incidence values
# Some models return both incidence and cumulative values
# Only keep incidence targets (drop cumulative targets)
df["target_date"] <= df["forecast_date"] + pd.Timedelta(weeks=4)
# Time Horizon - Only keep up to 4 week forecasts.
# Almost all forecasts only provide 4 wks.
]
mask = np.logical_and.reduce(masks)
# The raw data is in long form and we need to pivot this to create a column for
# WEEKLY_NEW_CASES and WEEKLY_NEW_DEATHS. "target_type" has either death or cases. "value"
# has the predicted value. The rest of the columns create a unique index. For right now only
# one model and one forecast_date are being served, but we need to maintain the option of
# multiple values.
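        # Illustrative shape (values assumed): two long rows sharing one index,
        #   (..., target_type="case",  value=10.0)
        #   (..., target_type="death", value=2.0)
        # pivot into a single wide row with case=10.0 and death=2.0.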
COLUMNS = [
Fields.MODEL_ABBR,
Fields.REGION,
Fields.FORECAST_DATE,
Fields.TARGET_DATE,
"target_type",
Fields.QUANTILE,
"value",
]
df = df[mask][COLUMNS].copy()
df = df.set_index(
[
Fields.MODEL_ABBR,
Fields.REGION,
Fields.FORECAST_DATE,
Fields.TARGET_DATE,
Fields.QUANTILE,
]
)
pivot = df.pivot(columns="target_type")
pivot = pivot.droplevel(level=0, axis=1).reset_index()
# This cleans up a MultiIndex Column that is an artifact of the pivot in preparation for a
# standard csv dump.
# Rename and remove any columns without a CommonField
data = helpers.rename_fields(pivot, Fields, set(), _logger)
# Need to make the quantiles into a wide form for easier downstream processing
# Mangling the column names into f"weekly_new_{cases/deaths}_{quantile}". This
# would be a good candidate to handle in long/tidy-form and we could remove both pivots.
# Using common_field because this is done after helpers.rename_fields
# TODO(michael): Not sure why pylint is confused about the common_field member not existing.
# pylint: disable=no-member
wide_df = data.set_index(
[
Fields.REGION.common_field,
Fields.TARGET_DATE.common_field,
Fields.MODEL_ABBR.common_field,
Fields.FORECAST_DATE.common_field,
]
).pivot(columns=Fields.QUANTILE.common_field)
# TODO: Once requirements have settled, explicitly pass only the quantiles needed.
wide_df.columns = [x[0] + "_" + str(x[1]) for x in wide_df.columns.to_flat_index()]
wide_df = wide_df.reset_index()
return wide_df
def get_latest_forecast_date(conn, project_name: str, model_abbr: str) -> str:
"""
Return the date string 'YYYY-MM-DD' of the latest submitted forecast for a given model in a
given zoltar project
https://github.com/reichlab/zoltpy/issues/42
Return the str date representation of the latest forecast if available, else the empty string.
"""
project = [project for project in conn.projects if project.name == project_name][0]
model = [model for model in project.models if model.abbreviation == model_abbr][0]
latest_forecast_date = model.latest_forecast.timezero.timezero_date
# Note: model.latest_forecast.timezero.timezero_date is of type datetime.datetime or None
if latest_forecast_date:
_logger.info(f"Latest forecast for {model_abbr} is {latest_forecast_date}")
return str(latest_forecast_date)
else:
_logger.info(f"No forecasts found for {model_abbr} in {project_name}")
return ""
@click.command()
@click.option("--fetch/--no-fetch", default=True)
def main(fetch: bool):
common_init.configure_logging()
connection = zoltpy.util.authenticate()
transformer = ForecastHubUpdater.make_with_data_root(
ForecastModel.ENSEMBLE, connection, DATA_ROOT
)
if fetch:
_logger.info("Fetching new data.")
transformer.update_source_data()
data = transformer.load_source_data()
data = transformer.transform(data)
common_df.write_csv(data, transformer.timeseries_output_path, _logger)
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
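# Invocation sketch (flag defined by the click option above):
#   python scripts/update_forecast_hub.py --no-fetch
# transforms the cached raw.csv instead of downloading from Zoltar again.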
| 37.517094
| 100
| 0.663971
|
import enum
from typing import Any
import click
import pandas as pd
import numpy as np
import structlog
import pathlib
import pydantic
import datetime
import zoltpy.util
from covidactnow.datapublic import common_init, common_df
from scripts import helpers
from covidactnow.datapublic.common_fields import (
GetByValueMixin,
CommonFields,
FieldNameAndCommonField,
)
DATA_ROOT = pathlib.Path(__file__).parent.parent / "data"
_logger = structlog.get_logger(__name__)
class ForecastModel(enum.Enum):
ENSEMBLE = "COVIDhub-ensemble"
BASELINE = "COVIDhub-baseline"
GOOGLE = "Google_Harvard-CPF"
class Fields(GetByValueMixin, FieldNameAndCommonField, enum.Enum):
MODEL_ABBR = "model_abbr", CommonFields.MODEL_ABBR
REGION = "unit", CommonFields.FIPS
FORECAST_DATE = "forecast_date", CommonFields.FORECAST_DATE
TARGET_DATE = "target_date", CommonFields.DATE
QUANTILE = "quantile", CommonFields.QUANTILE
WEEKLY_NEW_CASES = "case", CommonFields.WEEKLY_NEW_CASES
WEEKLY_NEW_DEATHS = "death", CommonFields.WEEKLY_NEW_DEATHS
class ForecastHubUpdater(pydantic.BaseModel):
FORECAST_PROJECT_NAME = "COVID-19 Forecasts"
RAW_CSV_FILENAME = "raw.csv"
conn: Any
model: ForecastModel
raw_data_root: pathlib.Path
timeseries_output_path: pathlib.Path
@classmethod
def make_with_data_root(
cls, model: ForecastModel, conn: Any, data_root: pathlib.Path,
) -> "ForecastHubUpdater":
return cls(
model=model,
conn=conn,
raw_data_root=data_root / "forecast-hub",
timeseries_output_path=data_root / "forecast-hub" / "timeseries-common.csv",
)
@property
def raw_path(self):
return self.raw_data_root / self.RAW_CSV_FILENAME
def write_version_file(self, forecast_date) -> None:
stamp = datetime.datetime.utcnow().isoformat()
version_path = self.raw_data_root / "version.txt"
with version_path.open("w") as vf:
vf.write(f"Updated on {stamp}\n")
vf.write(f"Using forecast from {forecast_date}\n")
def update_source_data(self):
_logger.info(f"Updating {self.model.name} from ForecastHub")
latest_forecast_date = get_latest_forecast_date(
self.conn, self.FORECAST_PROJECT_NAME, self.model.value
)
ensemble = zoltpy.util.download_forecast(
self.conn, self.FORECAST_PROJECT_NAME, self.model.value, latest_forecast_date
)
df = zoltpy.util.dataframe_from_json_io_dict(ensemble)
df["forecast_date"] = pd.to_datetime(latest_forecast_date)
df["model_abbr"] = self.model.value
df.to_csv(self.raw_path, index=False)
self.write_version_file(forecast_date=latest_forecast_date)
def load_source_data(self) -> pd.DataFrame:
_logger.info("Updating ForecastHub Ensemble dataset.")
data = pd.read_csv(
self.raw_path, parse_dates=["forecast_date"], dtype={"unit": str}, low_memory=False
)
return data
@staticmethod
def transform(df: pd.DataFrame) -> pd.DataFrame:
df["target_date"] = df.apply(
lambda x: x.forecast_date + pd.Timedelta(weeks=int(x.target.split(" ")[0])),
axis="columns",
)
df["target_type"] = df.target.str.split(" ").str[-1]
df["target_summation"] = df.target.str.split(" ").str[-2]
masks = [
df["unit"] != "US", df["quantile"].notna(), df["target_summation"] == "inc", df["target_date"] <= df["forecast_date"] + pd.Timedelta(weeks=4)
]
mask = np.logical_and.reduce(masks)
COLUMNS = [
Fields.MODEL_ABBR,
Fields.REGION,
Fields.FORECAST_DATE,
Fields.TARGET_DATE,
"target_type",
Fields.QUANTILE,
"value",
]
df = df[mask][COLUMNS].copy()
df = df.set_index(
[
Fields.MODEL_ABBR,
Fields.REGION,
Fields.FORECAST_DATE,
Fields.TARGET_DATE,
Fields.QUANTILE,
]
)
pivot = df.pivot(columns="target_type")
pivot = pivot.droplevel(level=0, axis=1).reset_index()
data = helpers.rename_fields(pivot, Fields, set(), _logger)
wide_df = data.set_index(
[
Fields.REGION.common_field,
Fields.TARGET_DATE.common_field,
Fields.MODEL_ABBR.common_field,
Fields.FORECAST_DATE.common_field,
]
).pivot(columns=Fields.QUANTILE.common_field)
wide_df.columns = [x[0] + "_" + str(x[1]) for x in wide_df.columns.to_flat_index()]
wide_df = wide_df.reset_index()
return wide_df
def get_latest_forecast_date(conn, project_name: str, model_abbr: str) -> str:
project = [project for project in conn.projects if project.name == project_name][0]
model = [model for model in project.models if model.abbreviation == model_abbr][0]
latest_forecast_date = model.latest_forecast.timezero.timezero_date
if latest_forecast_date:
_logger.info(f"Latest forecast for {model_abbr} is {latest_forecast_date}")
return str(latest_forecast_date)
else:
_logger.info(f"No forecasts found for {model_abbr} in {project_name}")
return ""
@click.command()
@click.option("--fetch/--no-fetch", default=True)
def main(fetch: bool):
common_init.configure_logging()
connection = zoltpy.util.authenticate()
transformer = ForecastHubUpdater.make_with_data_root(
ForecastModel.ENSEMBLE, connection, DATA_ROOT
)
if fetch:
_logger.info("Fetching new data.")
transformer.update_source_data()
data = transformer.load_source_data()
data = transformer.transform(data)
common_df.write_csv(data, transformer.timeseries_output_path, _logger)
if __name__ == "__main__":
main()
| true
| true
|
f709733239a4148a5fa9de69a8f2a8c2f75394e3
| 1,280
|
py
|
Python
|
tethys/core/pipes/filters/filter_base.py
|
tethys-platform/tethys
|
c27daf5a832b05f9d771b04355001c331bc08766
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-05-20T19:03:14.000Z
|
2020-06-03T20:43:34.000Z
|
tethys/core/pipes/filters/filter_base.py
|
tethys-platform/tethys
|
c27daf5a832b05f9d771b04355001c331bc08766
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tethys/core/pipes/filters/filter_base.py
|
tethys-platform/tethys
|
c27daf5a832b05f9d771b04355001c331bc08766
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import Any
from serobj.utils.serobj_calls import SerobjCallsBase
class FilterBase(SerobjCallsBase):
"""
Base class for all pipe filters. Each filter should provide
an `execute` method that returns the score.
"""
_SEROBJ__ATTRS = []
@abstractmethod
def execute(self, data_packet: Any, **kwargs) -> float:
"""
The method that pipe executes before data sending.
:param data_packet: any data object
:return: score (:class:`float`) that contains any user's value.
For many cases you can use [1, 0] values.
"""
raise NotImplementedError
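# Minimal sketch of the contract described above (not part of tethys): a
# filter scoring truthy packets 1.0 and everything else 0.0, using the
# [1, 0] convention the docstring suggests.
class TruthyFilter(FilterBase):
    """Example filter returning a binary score."""
    def execute(self, data_packet: Any, **kwargs) -> float:
        # Any non-empty / non-zero packet passes the filter.
        return 1.0 if data_packet else 0.0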
| 32
| 74
| 0.70625
|
from abc import abstractmethod
from typing import Any
from serobj.utils.serobj_calls import SerobjCallsBase
class FilterBase(SerobjCallsBase):
_SEROBJ__ATTRS = []
@abstractmethod
def execute(self, data_packet: Any, **kwargs) -> float:
raise NotImplementedError
| true
| true
|
f70973aa106803b09683a40d97d6be0f8cce325f
| 369
|
py
|
Python
|
2017/future/first_unique_character.py
|
littlepea/beijing-python-meetup
|
393d7723bc092ae548fe4e6ed82aa30ee3c7801d
|
[
"MIT"
] | 10
|
2016-11-15T10:39:36.000Z
|
2020-01-14T04:59:08.000Z
|
2017/future/first_unique_character.py
|
littlepea/beijing-python-meetup
|
393d7723bc092ae548fe4e6ed82aa30ee3c7801d
|
[
"MIT"
] | 7
|
2017-01-10T05:40:05.000Z
|
2020-06-28T05:59:20.000Z
|
2017/future/first_unique_character.py
|
littlepea/beijing-python-meetup
|
393d7723bc092ae548fe4e6ed82aa30ee3c7801d
|
[
"MIT"
] | 3
|
2017-09-19T09:12:31.000Z
|
2018-10-31T06:35:21.000Z
|
"""
Find the first non-repeated character in a string
https://www.codeeval.com/open_challenges/12/
"""
import unittest
def first_unique_character(s):
    return next((c for c in s if s.count(c) == 1), None)
class FirstUniqueCharacterTest(unittest.TestCase):
def test_yellow(self):
        self.assertEqual('y', first_unique_character('yellow'))
if __name__ == '__main__':
unittest.main(exit=False)
| 17.571429
| 64
| 0.731707
|
import unittest
def first_unique_character(s):
    return next((c for c in s if s.count(c) == 1), None)
class FirstUniqueCharacterTest(unittest.TestCase):
def test_yellow(self):
        self.assertEqual('y', first_unique_character('yellow'))
if __name__ == '__main__':
unittest.main(exit=False)
| true
| true
|
f70974bd8c96a08043d013ab757fa388f47f81f6
| 1,832
|
py
|
Python
|
tests/models.py
|
charkins/boolean_parser
|
10d42610cd13e4af5afec1fdfda53a4de0e2d00c
|
[
"BSD-3-Clause"
] | 6
|
2020-09-25T06:55:37.000Z
|
2022-03-24T20:17:11.000Z
|
tests/models.py
|
charkins/boolean_parser
|
10d42610cd13e4af5afec1fdfda53a4de0e2d00c
|
[
"BSD-3-Clause"
] | 6
|
2020-11-10T16:32:30.000Z
|
2022-03-31T20:59:33.000Z
|
tests/models.py
|
charkins/boolean_parser
|
10d42610cd13e4af5afec1fdfda53a4de0e2d00c
|
[
"BSD-3-Clause"
] | 1
|
2020-12-16T12:19:11.000Z
|
2020-12-16T12:19:11.000Z
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: models.py
# Project: tests
# Author: Brian Cherinka
# Created: Friday, 15th February 2019 2:44:13 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2019 Brian Cherinka
# Last Modified: Sunday, 3rd March 2019 4:47:18 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
from sqlalchemy import Column, String, BigInteger, Integer, Float
from .database import Base, engine, Session
import factory
import factory.fuzzy
from pytest_factoryboy import register
class ModelA(Base):
__tablename__ = 'modela'
pk = Column(BigInteger, primary_key=True)
name = Column(String, nullable=False)
x = Column(Integer, nullable=False)
y = Column(Integer, nullable=False)
def __repr__(self):
return f'<ModelA(pk={self.pk},name={self.name},x={self.x},y={self.y})>'
class ModelB(Base):
__tablename__ = 'modelb'
pk = Column(BigInteger, primary_key=True)
z = Column(Float, nullable=False)
def __repr__(self):
return f'<ModelB(pk={self.pk},z={self.z})>'
@register
class ModelAFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = ModelA
sqlalchemy_session = Session # the SQLAlchemy session object
pk = factory.Sequence(lambda n: n)
x = factory.Faker('pyint', min_value=0, max_value=20)
y = factory.Faker('pyint', min_value=0, max_value=20)
name = factory.fuzzy.FuzzyText(prefix='model', length=3)
@register
class ModelBFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = ModelB
sqlalchemy_session = Session # the SQLAlchemy session object
pk = factory.Sequence(lambda n: n)
z = factory.Faker('pyint', min_value=0, max_value=20)
Base.metadata.create_all(engine)
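# Usage sketch (assumed test context): pytest-factoryboy's @register exposes
# these factories as fixtures, and calling one stages a row in Session:
#   obj = ModelAFactory()
#   assert 0 <= obj.x <= 20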
| 29.079365
| 79
| 0.704694
|
from __future__ import print_function, division, absolute_import
from sqlalchemy import Column, String, BigInteger, Integer, Float
from .database import Base, engine, Session
import factory
import factory.fuzzy
from pytest_factoryboy import register
class ModelA(Base):
__tablename__ = 'modela'
pk = Column(BigInteger, primary_key=True)
name = Column(String, nullable=False)
x = Column(Integer, nullable=False)
y = Column(Integer, nullable=False)
def __repr__(self):
return f'<ModelA(pk={self.pk},name={self.name},x={self.x},y={self.y})>'
class ModelB(Base):
__tablename__ = 'modelb'
pk = Column(BigInteger, primary_key=True)
z = Column(Float, nullable=False)
def __repr__(self):
return f'<ModelB(pk={self.pk},z={self.z})>'
@register
class ModelAFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = ModelA
        sqlalchemy_session = Session
    pk = factory.Sequence(lambda n: n)
x = factory.Faker('pyint', min_value=0, max_value=20)
y = factory.Faker('pyint', min_value=0, max_value=20)
name = factory.fuzzy.FuzzyText(prefix='model', length=3)
@register
class ModelBFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = ModelB
        sqlalchemy_session = Session
    pk = factory.Sequence(lambda n: n)
z = factory.Faker('pyint', min_value=0, max_value=20)
Base.metadata.create_all(engine)
| true
| true
|
f7097552e3f4f3c653e56165e572f774956c3add
| 6,585
|
py
|
Python
|
env/lib/python3.6/site-packages/scipy/optimize/tests/test__spectral.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
venv/lib/python3.7/site-packages/scipy/optimize/tests/test__spectral.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
venv/lib/python3.7/site-packages/scipy/optimize/tests/test__spectral.py
|
John1001Song/Big-Data-Robo-Adviser
|
9444dce96954c546333d5aecc92a06c3bfd19aa5
|
[
"MIT"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
from __future__ import division, absolute_import, print_function
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
# Compare performance results to those listed in
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
# and
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
# and those produced by dfsane.f from M. Raydan's website.
#
# Where the results disagree, the largest limits are taken.
e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
def test_linear_definite():
# The DF-SANE paper proves convergence for "strongly isolated"
# solutions.
#
# For linear systems F(x) = A x - b = 0, with A positive or
# negative definite, the solution is strongly isolated.
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
# Test linear pos.def. system
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
# Test linear neg.def. system
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
# Some of the test functions and initial guesses listed in
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
# Note: the first line is typoed in some of the references;
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
def F_6(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
| 31.208531
| 120
| 0.528929
|
from __future__ import division, absolute_import, print_function
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
#
# Where the results disagree, the largest limits are taken.
e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
def test_linear_definite():
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
def F_6(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
| true
| true
|
f70975b2a600c129a02d2b7302e2595d0a4aa9c1
| 138
|
py
|
Python
|
src/apps/cecyrd/config.py
|
SGC-Tlaxcala/cerebro
|
6c842f66d849065a70002fccdb1eaca1e3d61d99
|
[
"MIT"
] | null | null | null |
src/apps/cecyrd/config.py
|
SGC-Tlaxcala/cerebro
|
6c842f66d849065a70002fccdb1eaca1e3d61d99
|
[
"MIT"
] | 48
|
2017-04-21T17:35:23.000Z
|
2020-08-29T04:19:35.000Z
|
src/apps/cecyrd/config.py
|
SGC-Tlaxcala/cerebro
|
6c842f66d849065a70002fccdb1eaca1e3d61d99
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CecyrdConfig(AppConfig):
name = 'apps.cecyrd'
verbose_name = 'Evaluación del proveedor'
| 19.714286
| 45
| 0.746377
|
from django.apps import AppConfig
class CecyrdConfig(AppConfig):
name = 'apps.cecyrd'
verbose_name = 'Evaluación del proveedor'
| true
| true
|
f70975e19b9a44269656d1320aa5ab33699c0fbe
| 120,184
|
py
|
Python
|
pandas/core/indexes/multi.py
|
FlingeR/pandas
|
01f399854f9febefa9e97005f3720aa312409b98
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/indexes/multi.py
|
FlingeR/pandas
|
01f399854f9febefa9e97005f3720aa312409b98
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/indexes/multi.py
|
FlingeR/pandas
|
01f399854f9febefa9e97005f3720aa312409b98
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike, ArrayLike, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
ensure_index,
)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import Series # noqa:F401
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass="MultiIndex", target_klass="MultiIndex or list of tuples")
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
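# Illustrative sketch (not pandas internals; bit widths assumed): the same
# shift-then-OR packing _codes_to_ints performs with its pre-computed
# self.offsets, shown for two levels where the low level gets 8 bits.
def _pack_two_codes_demo(code_a: int, code_b: int) -> int:
    # code_a occupies the high bits, code_b the low 8 bits, so the packed
    # integer is strictly monotonic in the lexicographic (code_a, code_b).
    return (code_a << 8) | code_b
# _pack_two_codes_demo(3, 5) == (3 << 8) | 5 == 773. The object-dtype engine
# below applies the same idea with Python ints to avoid uint64 overflow.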
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each).
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype("object") << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_locs
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html>`_
for more.
Examples
--------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
"""
_deprecations = Index._deprecations | frozenset()
# initialize to zero-length tuples to make everything work
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
_tuples = None
sortorder: Optional[int]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
_set_identity: bool = True,
):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(MultiIndex)
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
if _set_identity:
result._reset_identity()
return result
def _validate_codes(self, level: List, code: List):
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : list
Code to reassign.
level : list
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(
self, codes: Optional[List] = None, levels: Optional[List] = None
):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
Returns
-------
new codes where code value = -1 if it corresponds to a
NaN level.
"""
        # NOTE: Currently does not check, among other things, that the cached
        # nlevels matches nor that sortorder matches the actual sortorder.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > self._lexsort_depth():
raise ValueError(
"Value for sortorder must be inferior or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {self._lexsort_depth()}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default):
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = zip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=lib.no_default):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
.. versionchanged:: 1.0.0
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@property
def _values(self):
# We override here, since our parent uses _data, which we don't use.
return self.values
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals):
vals = vals._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
self._tuples = lib.fast_zip(values)
return self._tuples
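    # Illustrative sketch (added commentary, not part of the original source;
    # assumes pandas imported as ``pd``): ``.values`` materializes the index
    # as an object ndarray of tuples, e.g.
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
    #   >>> mi.values
    #   array([(1, 'a'), (2, 'b')], dtype=object)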
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
# overriding the base Index.shape definition to avoid materializing
# the values (GH-27384, GH-27775)
return (len(self),)
def __len__(self) -> int:
return len(self.codes[0])
# --------------------------------------------------------------------
# Levels Methods
@cache_readonly
def levels(self):
# Use cache_readonly to ensure that self.get_locs doesn't repeatedly
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [
x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self, levels, level=None, copy=False, validate=True, verify_integrity=False
):
        # This is NOT part of the levels property because setting levels
        # should not be allowed externally. User beware if you change
        # _levels directly
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._shallow_copy() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels[lev_num] = ensure_index(lev, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning new index.
Parameters
----------
levels : sequence or list of sequence
New level(s) to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
(2, 'one'), (2, 'two'),
(3, 'one'), (3, 'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2]])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2),
('c', 1),
('c', 2)],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b', 'c'], level=0)
MultiIndex([('a', 'one'),
('a', 'two'),
('b', 'one'),
('b', 'two'),
('c', 'one'),
('c', 'two')],
names=['foo', 'bar'])
>>> idx.set_levels(['a', 'b'], level='bar')
MultiIndex([(1, 'a'),
(1, 'b'),
(2, 'a'),
(2, 'b'),
(3, 'a'),
(3, 'b')],
names=['foo', 'bar'])
If any of the levels passed to ``set_levels()`` exceeds the
existing length, all of the values from that argument will
be stored in the MultiIndex levels, though the values will
be truncated in the MultiIndex output.
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1])
MultiIndex([('a', 1),
('a', 2),
('b', 1),
('b', 2)],
names=['foo', 'bar'])
>>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
"""
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
"""
Integer number of levels in this MultiIndex.
"""
return len(self._levels)
@property
def levshape(self):
"""
A tuple with the length of each level.
"""
return tuple(len(x) for x in self.levels)
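    # Illustrative sketch (added commentary, not part of the original source):
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']])
    #   >>> mi.nlevels
    #   2
    #   >>> mi.levshape  # two distinct values in each level
    #   (2, 2)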
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self, codes, level=None, copy=False, validate=True, verify_integrity=False
):
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy)
new_codes = FrozenList(new_codes)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._tuples = None
self._reset_cache()
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
"""
Set new codes on MultiIndex. Defaults to returning
new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
inplace : bool
If True, mutates in place.
verify_integrity : bool (default True)
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level='bar')
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(codes):
raise TypeError("Codes must be list-like")
if is_list_like(codes[0]):
raise TypeError("Codes must be list-like")
level = [level]
codes = [codes]
elif level is None or is_list_like(level):
if not is_list_like(codes) or not is_list_like(codes[0]):
raise TypeError("Codes must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
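    # Worked example of the bit-packing above (added commentary, not part of
    # the original source): for two levels of sizes 3 and 5,
    # sizes = ceil(log2([4, 6])) = [2., 3.], so lev_bits = [5., 3.] and
    # offsets = [3, 0]; level-0 codes are shifted left by 3 bits, level-1
    # codes by 0 bits, and since lev_bits[0] = 5 <= 64 the uint64 engine
    # is selected.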
@property
def _constructor(self):
return MultiIndex.from_tuples
@Appender(Index._shallow_copy.__doc__)
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
names = kwargs.pop("names", kwargs.pop("name", self.names))
# discards freq
kwargs.pop("freq", None)
return MultiIndex.from_tuples(values, names=names, **kwargs)
return self.copy(**kwargs)
def _shallow_copy_with_infer(self, values, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
**kwargs,
)
return self._shallow_copy(values, **kwargs)
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
_set_identity=False,
):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
codes : sequence, optional
deep : bool, default False
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
else:
if levels is None:
levels = self.levels
if codes is None:
codes = self.codes
return MultiIndex(
levels=levels,
codes=codes,
names=names,
sortorder=self.sortorder,
verify_integrity=False,
_set_identity=_set_identity,
)
def __array__(self, dtype=None) -> np.ndarray:
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
""" return a boolean if we need a qualified .info display """
def f(l):
return "mixed" in l or "string" in l or "unicode" in l
return any(f(l) for l in self._inferred_type_levels)
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
"""
Formats each item in tup according to its level's formatter function.
"""
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi.values
def format(
self,
space=2,
sparsify=None,
adjoin=True,
names=False,
na_rep=None,
formatter=None,
):
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_1d(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(name, escape_chars=("\t", "\r", "\n"))
if name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthy" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
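    # Illustrative sketch (added commentary, not part of the original source):
    # with sparsify enabled, labels repeated from the previous row are
    # replaced by the sentinel (blank by default):
    #
    #   >>> pd.MultiIndex.from_product([['a'], [1, 2]]).format()
    #   ['a  1', '   2']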
# --------------------------------------------------------------------
# Names Methods
def _get_names(self):
return FrozenList(self._names)
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
        names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
self._names[lev] = name
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
# --------------------------------------------------------------------
@Appender(Index._get_grouper_for_level.__doc__)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
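    # Illustrative sketch (added commentary, not part of the original source):
    # for an index with names ['foo', 'bar'], _get_level_number('bar') -> 1
    # and _get_level_number(-1) -> 1, while an out-of-range level such as 2
    # raises IndexError.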
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
"""
        Return True if the index is monotonic increasing (only equal or
        increasing values).
"""
if all(x.is_monotonic for x in self.levels):
# If each level is sorted, we can operate on the codes directly. GH27495
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
# reversed() because lexsort() wants the most significant key last.
values = [
self._get_level_values(i).values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
        Return True if the index is monotonic decreasing (only equal or
        decreasing values).
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
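    # Illustrative sketch (added commentary, not part of the original source):
    #
    #   >>> mi = pd.MultiIndex.from_tuples([(0, 'a'), (0, 'a'), (1, 'b')])
    #   >>> mi.duplicated()
    #   array([False,  True, False])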
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError("isna is not defined for MultiIndex")
@Appender(Index.dropna.__doc__)
def dropna(self, how="any"):
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.copy(codes=new_codes, deep=True)
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
unique : bool, default False
if True, drop duplicated values
Returns
-------
        values : Index
"""
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@Appender(Index.unique.__doc__)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True, name=None):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
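    # Illustrative sketch (added commentary, not part of the original source):
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #   ...                                names=['num', 'char'])
    #   >>> mi.to_frame(index=False)
    #      num char
    #   0    1    a
    #   1    2    b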
def to_flat_index(self):
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self.values, tupleize_cols=False)
@property
def is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
"""
Return True if the codes are lexicographically sorted.
Returns
-------
bool
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
return self.sortorder
return self._lexsort_depth()
def _lexsort_depth(self) -> int:
"""
Compute and return the lexsort_depth, the number of levels of the
MultiIndex that are sorted lexically
Returns
-------
int
"""
int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]
for k in range(self.nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
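    # Illustrative sketch (added commentary, not part of the original source):
    # codes [[0, 0, 1], [1, 0, 0]] are sorted on level 0 but not within the
    # first level-0 group, so _lexsort_depth() returns 1 and is_lexsorted()
    # returns False for this 2-level index.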
def _sort_levels_monotonic(self):
"""
This is an *internal* function.
        Create a new MultiIndex from the current one, with the items IN the
        levels sorted monotonically. This does not actually make the entire
        MultiIndex monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.sort_values()
MultiIndex([('a', 'aa'),
('a', 'bb'),
('b', 'aa'),
('b', 'bb')],
)
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_1d(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self):
"""
        Create a new MultiIndex from the current one, removing unused level
        values, i.e. level values that are not expressed in the codes.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> mi = pd.MultiIndex.from_product([range(2), list('ab')])
>>> mi
MultiIndex([(0, 'a'),
(0, 'b'),
(1, 'a'),
(1, 'b')],
)
>>> mi[2:]
MultiIndex([(1, 'a'),
(1, 'b')],
)
The 0 from the first level is not represented
and can be removed
>>> mi2 = mi[2:].remove_unused_levels()
>>> mi2.levels
FrozenList([[1], ['a', 'b']])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
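            # Worked example (added commentary, not part of the original
            # source): for level_codes [1, 1], bincount(level_codes + 1) is
            # [0, 0, 2], np.where(... > 0)[0] is [2], and subtracting 1
            # gives uniques = [1]: only level value 1 is used, with no NA.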
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
# --------------------------------------------------------------------
# Pickling Methods
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(
levels=list(self.levels),
codes=list(self.codes),
sortorder=self.sortorder,
names=list(self.names),
)
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
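    # Illustrative sketch (added commentary, not part of the original source):
    # scalar keys return a tuple, array-like keys a new MultiIndex:
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
    #   >>> mi[0]
    #   (1, 'a')
    #   >>> mi[[1]]
    #   MultiIndex([(2, 'b')],
    #              )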
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
taken = self._assert_take_fillable(
self.codes,
indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1,
)
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def _assert_take_fillable(
self, values, indices, allow_fill=True, fill_value=None, na_value=None
):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.codes]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.codes]
return taken
def append(self, other):
"""
        Append a collection of Index objects together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
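    # Illustrative sketch (added commentary, not part of the original source):
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1], ['a']])
    #   >>> mi.append(mi)
    #   MultiIndex([(1, 'a'),
    #               (1, 'a')],
    #              )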
def argsort(self, *args, **kwargs) -> np.ndarray:
return self.values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
repeats = ensure_platform_int(repeats)
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
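    # Illustrative sketch (added commentary, not part of the original source):
    # each entry is repeated consecutively, so repeating
    # [(1, 'a'), (2, 'b')] twice yields
    # [(1, 'a'), (1, 'a'), (2, 'b'), (2, 'b')].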
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for MultiIndex operations")
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(range(loc.start, loc.stop))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
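    # Illustrative sketch (added commentary, not part of the original source):
    #
    #   >>> mi = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one')])
    #   >>> mi.drop([(1, 'one')])
    #   MultiIndex([(1, 'two'),
    #               (2, 'one')],
    #              )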
def _drop_from_level(self, codes, level, errors="raise"):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
if mask.all() and errors != "ignore":
raise KeyError(f"labels {codes} not found in level")
return self[mask]
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
Returns
-------
MultiIndex
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
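    # Illustrative sketch (added commentary, not part of the original source):
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    #   ...                                names=['x', 'y'])
    #   >>> mi.reorder_levels(['y', 'x'])
    #   MultiIndex([('a', 1),
    #               ('b', 2)],
    #              names=['y', 'x'])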
def _get_codes_for_sorting(self):
"""
        We categorize our codes using the available categories (all, not
        just the observed ones), excluding any missing codes (-1); this is
        in preparation for sorting, where we need to disambiguate that -1
        is not a valid value.
"""
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level.
If list-like must be names or ints of levels.
ascending : bool, default True
False to sort in descending order.
Can also be a list to specify a directed ordering.
        sort_remaining : bool, default True
            If True, also sort by the remaining levels after sorting by
            ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index.
indexer : np.ndarray
Indices of output values in original index.
"""
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
            # Reverse sorted to retain the order of
            # smaller indices that need to be removed
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
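    # Illustrative sketch (added commentary, not part of the original source):
    # sorting [[2, 1], ['b', 'a']] by level 0 returns the reordered index
    # [(1, 'a'), (2, 'b')] together with the indexer array([1, 0]).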
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop("freq", None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", return_indexers=True, keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def get_value(self, series, key):
# Label-based
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
try:
loc = self.get_loc(key)
except KeyError:
if is_integer(key):
loc = key
else:
raise
return self._get_values_for_loc(series, loc, key)
def _get_values_for_loc(self, series: "Series", loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
"""
new_values = series._values[loc]
if is_scalar(loc):
return new_values
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
        indexer is an ndarray, or None if the keys cannot be converted
keyarr are tuple-safe keys
"""
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(self.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
if not self.is_unique:
raise ValueError("Reindexing only valid with uniquely valued Index objects")
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
indexer = self._engine.get_indexer(target, method, limit)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target)
return ensure_platform_int(indexer)
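    # Illustrative sketch (added commentary, not part of the original source):
    # positions are returned for matching tuples and -1 for misses:
    #
    #   >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
    #   >>> mi.get_indexer([(2, 'b'), (3, 'c')])
    #   array([ 1, -1])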
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super().get_indexer_non_unique(target)
def get_slice_bound(
self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
kind : {'loc', 'getitem'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbc'), list('gefd')])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound('b', side="left", kind="loc")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(('b','f'), side="right", kind="loc")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self.lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
        If key is an NA value, the location is uniformly -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
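    # Illustrative sketch (added commentary, not part of the original source):
    # NA keys short-circuit to -1, matching the -1 sentinel used in codes,
    # e.g. _get_loc_single_level_index(pd.Index(['a', 'b']), np.nan) -> -1,
    # while 'b' -> 1.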
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
"""
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != "int64":
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, (tuple, list)):
# not including list here breaks some indexing, xref #30892
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype="int64")
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
"""
# different name to distinguish from maybe_droplevels
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
            # kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
                # We have an indexer that maps locations within the labels we
                # have already selected (it is not an indexer for the entire
                # set); re-examining everything would be wasteful, so we only
                # examine locations that are in this set. The only magic here
                # is that the result is the mapping back to the set we have
                # selected.
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._ndarray_values
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need right-inclusive semantics here, matching the slice
                # case above, so search with stop + 1 (so that stop itself
                # is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
code = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == code, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
i = level_codes.searchsorted(code, side="left")
j = level_codes.searchsorted(code, side="right")
if i == j:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(i, j)
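    # Illustrative sketch (not part of the original source): for a level that
    # is still within the lexsort depth, _get_level_indexer can return a cheap
    # slice view, while deeper levels fall back to a boolean mask. Assuming a
    # hypothetical mi = MultiIndex.from_arrays([list("aabb"), list("xyxy")]):
    #   mi._get_level_indexer("a", level=0)  ->  slice(0, 2, None)
    #   mi._get_level_indexer("x", level=1)  ->  array([True, False, True, False])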
def get_locs(self, seq):
"""
Get location for a sequence of labels.
Parameters
----------
seq : label, slice, list, mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
numpy.ndarray
NumPy array of integers suitable for passing to iloc.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b') # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']]) # doctest: +SKIP
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')]) # doctest: +SKIP
array([2], dtype=int64)
"""
from pandas.core.indexes.numeric import Int64Index
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self.lexsort_depth}"
)
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = idxrs if indexers is None else indexers | idxrs
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
# no matches we are done
return Int64Index([])._ndarray_values
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
)
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
indexer = self._reorder_indexer(seq, indexer)
return indexer._ndarray_values
def _reorder_indexer(
self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike
) -> ArrayLike:
"""
        Reorder an indexer of a MultiIndex (self) so that the labels are in the
        same order as given in seq
Parameters
----------
seq : label/slice/list/mask or a sequence of such
indexer: an Int64Index indexer of self
Returns
-------
indexer : a sorted Int64Index indexer of self ordered as seq
"""
# If the index is lexsorted and the list_like label in seq are sorted
# then we do not need to sort
if self.is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: Tuple[np.ndarray, ...] = tuple()
        # For each level of the sequence in seq, map the level codes to the
        # order in which they appear in the list-like sequence.
        # This mapping is then used to reorder the indexer.
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
else:
                # For all other cases, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
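    # Minimal sketch (illustrative, not from the original source): this
    # reordering is what makes list-of-labels selection order-preserving.
    # With a hypothetical mi = MultiIndex.from_arrays([list("aabb"), list("xyxy")]):
    #   mi.get_locs([["b", "a"], slice(None)])  ->  array([2, 3, 0, 1])
    # i.e. the "b" rows come first because "b" came first in the given list.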
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False)
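    # Hedged usage sketch (hypothetical index, not from the original source):
    #   mi = MultiIndex.from_arrays([list("aabb"), list("xyxy")])
    #   mi.truncate(after="a")  ->  rows ("a", "x") and ("a", "y") only
    # Note that truncation is driven by slice_locs on the *first* level.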
def equals(self, other) -> bool:
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not is_object_dtype(other.dtype):
if self.nlevels != other.nlevels:
return False
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(
np.asarray(self.levels[i]._values), self_codes, allow_fill=False
)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values), other_codes, allow_fill=False
)
# since we use NaT both datetime64 and timedelta64
# we can have a situation where a level is typed say
# timedelta64 in self (IOW it has other values than NaT)
# but types datetime64 in other (where its all NaT)
# but these are equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other) -> bool:
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
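    # Illustrative sketch of the equals/equal_levels distinction (hypothetical
    # inputs, not from the original source): unused level values do not affect
    # labeling equality.
    #   a = MultiIndex(levels=[["x", "y", "z"]], codes=[[0, 1]])
    #   b = MultiIndex(levels=[["x", "y"]], codes=[[0, 1]])
    #   a.equals(b)        ->  True   (same labels row by row)
    #   a.equal_levels(b)  ->  False  (levels differ: "z" is unused in a)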
# --------------------------------------------------------------------
# Set Methods
def union(self, other, sort=None):
"""
Form the union of two MultiIndex objects
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
Index
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
# TODO: Index.union returns other when `len(self)` is 0.
uniq_tuples = lib.fast_unique_multiple(
[self._ndarray_values, other._ndarray_values], sort=sort
)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
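    # Hedged usage sketch (hypothetical inputs, not from the original source):
    #   a = MultiIndex.from_tuples([(1, "x"), (2, "y")])
    #   b = MultiIndex.from_tuples([(2, "y"), (3, "z")])
    #   a.union(b)  ->  tuples (1, "x"), (2, "y"), (3, "z"), sorted because
    #                   sort=None and none of the short-circuit cases apply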
def intersection(self, other, sort=False):
"""
Form the intersection of two MultiIndex objects.
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default False
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default from ``True`` to ``False``, to match
behaviour from before 0.24.0
Returns
-------
Index
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
lvals = self._ndarray_values
rvals = other._ndarray_values
        uniq_tuples = None  # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
uniq_tuples = self._inner_indexer(lvals, rvals)[0]
sort = False # uniq_tuples is already sorted
except TypeError:
pass
if uniq_tuples is None:
other_uniq = set(rvals)
seen = set()
uniq_tuples = [
x for x in lvals if x in other_uniq and not (x in seen or seen.add(x))
]
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def difference(self, other, sort=None):
"""
Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this.values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
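    # Sketch of the remaining set ops on the same hypothetical inputs as the
    # union example above (illustrative only):
    #   a.intersection(b)  ->  MultiIndex of [(2, "y")]
    #   a.difference(b)    ->  MultiIndex of [(1, "x")]
    # Both keep the names only when self and other agree on them, via
    # _convert_can_do_setop below.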
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, "names"):
if len(other) == 0:
other = MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
verify_integrity=False,
)
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other)
except TypeError as err:
raise TypeError(msg) from err
else:
result_names = self.names if self.names == other.names else None
return other, result_names
# --------------------------------------------------------------------
@Appender(Index.astype.__doc__)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
f"Setting {type(self)} dtype to anything other "
"than object is not supported"
)
elif copy is True:
return self._shallow_copy()
return self
def insert(self, loc: int, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
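    # Minimal sketch of the padding behavior documented above (hypothetical
    # index, not from the original source):
    #   mi = MultiIndex.from_tuples([("a", "x"), ("b", "y")])
    #   mi.insert(1, "c")  ->  [("a", "x"), ("c", ""), ("b", "y")]
    # A scalar item is widened to a tuple padded with "" on the lower levels.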
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values, names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start: int = 0, sentinel=""):
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
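# Worked sketch of _sparsify (illustrative input; label_list holds one
# already-stringified sequence per level, and the result is re-pivoted back
# to per-level tuples):
#   _sparsify([list("aab"), list("xyy")])
#     ->  [("a", "", "b"), ("x", "y", "y")]
# The repeated "a" prefix is blanked with the sentinel, while the change to
# "b" in the outer level forces the full row to be shown again.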
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index, key):
"""
Attempt to drop level or levels from the given index.
Parameters
----------
index: Index
key : scalar or tuple
Returns
-------
Index
"""
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except ValueError:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except ValueError:
pass
return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
"""
Coerce the array_like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
"""
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
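# Illustrative sketch (hypothetical call, not from the original source): codes
# are narrowed to the smallest integer dtype that can hold the categories, and
# the result is frozen against mutation.
#   out = _coerce_indexer_frozen(np.array([0, 1, 0]), categories=["x", "y"])
#   out.dtype   ->  int8 (two categories need only a narrow integer dtype)
#   out[0] = 1  ->  raises ValueError, since the array is read-only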
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, index as libindex, lib
from pandas._libs.hashtable import duplicated_int64
from pandas._typing import AnyArrayLike, ArrayLike, Scalar
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
is_categorical_dtype,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import factorize_from_iterables
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
ensure_index,
)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.sorting import (
get_group_index,
indexer_from_factorized,
lexsort_indexer,
)
from pandas.io.formats.printing import (
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import Series
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass="MultiIndex", target_klass="MultiIndex or list of tuples")
)
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine):
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
codes <<= self.offsets
if codes.ndim == 1:
return np.bitwise_or.reduce(codes)
return np.bitwise_or.reduce(codes, axis=1)
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine):
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
codes = codes.astype("object") << self.offsets
if codes.ndim == 1:
return np.bitwise_or.reduce(codes)
return np.bitwise_or.reduce(codes, axis=1)
class MultiIndex(Index):
_deprecations = Index._deprecations | frozenset()
_typ = "multiindex"
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ["names"]
rename = Index.set_names
_tuples = None
sortorder: Optional[int]
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
_set_identity: bool = True,
):
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must be the same.")
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
result = object.__new__(MultiIndex)
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
result._names = [None] * len(levels)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
new_codes = result._verify_integrity()
result._codes = new_codes
if _set_identity:
result._reset_identity()
return result
def _validate_codes(self, level: List, code: List):
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
def _verify_integrity(
self, codes: Optional[List] = None, levels: Optional[List] = None
):
        # NOTE: Currently does not check, among other things, that the cached
        # nlevels matches or that sortorder matches the actual sort order.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError(
"Length of levels and codes must match. NOTE: "
"this index is in an inconsistent state."
)
codes_length = len(codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError(
f"Unequal code lengths: {[len(code_) for code_ in codes]}"
)
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError(
f"On level {i}, code max ({level_codes.max()}) >= length of "
f"level ({len(level)}). NOTE: this index is in an "
"inconsistent state"
)
if len(level_codes) and level_codes.min() < -1:
raise ValueError(f"On level {i}, code value ({level_codes.min()}) < -1")
if not level.is_unique:
raise ValueError(
f"Level values must be unique: {list(level)} on level {i}"
)
if self.sortorder is not None:
if self.sortorder > self._lexsort_depth():
                raise ValueError(
                    "Value for sortorder must be less than or equal to actual "
f"lexsort_depth: sortorder {self.sortorder} "
f"with lexsort_depth {self._lexsort_depth()}"
)
codes = [
self._validate_codes(level, code) for level, code in zip(levels, codes)
]
new_codes = FrozenList(codes)
return new_codes
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=lib.no_default):
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
raise TypeError(error_msg)
elif is_iterator(arrays):
arrays = list(arrays)
# Check if elements of array are list-like
for array in arrays:
if not is_list_like(array):
raise TypeError(error_msg)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError("all arrays must be same length")
codes, levels = factorize_from_iterables(arrays)
if names is lib.no_default:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
verify_integrity=False,
)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
if not is_list_like(tuples):
raise TypeError("Input must be a list / sequence of tuple-likes.")
elif is_iterator(tuples):
tuples = list(tuples)
if len(tuples) == 0:
if names is None:
raise TypeError("Cannot infer number of levels from empty list")
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = zip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=lib.no_default):
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@property
def _values(self):
# We override here, since our parent uses _data, which we don't use.
return self.values
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals):
vals = vals._internal_get_values()
if isinstance(vals.dtype, ExtensionDtype) or hasattr(vals, "_box_values"):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
self._tuples = lib.fast_zip(values)
return self._tuples
@property
def array(self):
raise ValueError(
"MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples."
)
@property
def shape(self):
return (len(self),)
def __len__(self) -> int:
return len(self.codes[0])
@cache_readonly
def levels(self):
# create new IndexEngine
# https://github.com/pandas-dev/pandas/issues/31648
result = [
x._shallow_copy(name=name) for x, name in zip(self._levels, self._names)
]
for level in result:
# disallow midx.levels[0].name = "foo"
level._no_setting_name = True
return FrozenList(result)
def _set_levels(
self, levels, level=None, copy=False, validate=True, verify_integrity=False
):
        # This is NOT part of the levels property because it should not be
        # possible to set levels from outside this class. User beware if you
        # change _levels directly.
if validate:
if len(levels) == 0:
raise ValueError("Must set non-zero number of levels.")
if level is None and len(levels) != self.nlevels:
raise ValueError("Length of levels must match number of levels.")
if level is not None and len(levels) != len(level):
raise ValueError("Length of levels must match length of level.")
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._shallow_copy() for lev in levels
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_levels = list(self._levels)
for lev_num, lev in zip(level_numbers, levels):
new_levels[lev_num] = ensure_index(lev, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
new_codes = self._verify_integrity(levels=new_levels)
self._codes = new_codes
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(
levels, level=level, validate=True, verify_integrity=verify_integrity
)
if not inplace:
return idx
@property
def nlevels(self) -> int:
return len(self._levels)
@property
def levshape(self):
return tuple(len(x) for x in self.levels)
# --------------------------------------------------------------------
# Codes Methods
@property
def codes(self):
return self._codes
def _set_codes(
self, codes, level=None, copy=False, validate=True, verify_integrity=False
):
if validate:
if level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if level is not None and len(codes) != len(level):
raise ValueError("Length of codes must match length of levels.")
if level is None:
new_codes = FrozenList(
_coerce_indexer_frozen(level_codes, lev, copy=copy).view()
for lev, level_codes in zip(self._levels, codes)
)
else:
level_numbers = [self._get_level_number(lev) for lev in level]
new_codes = list(self._codes)
for lev_num, level_codes in zip(level_numbers, codes):
lev = self.levels[lev_num]
new_codes[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy)
new_codes = FrozenList(new_codes)
if verify_integrity:
new_codes = self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._tuples = None
self._reset_cache()
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
if level is not None and not is_list_like(level):
if not is_list_like(codes):
raise TypeError("Codes must be list-like")
if is_list_like(codes[0]):
raise TypeError("Codes must be list-like")
level = [level]
codes = [codes]
elif level is None or is_list_like(level):
if not is_list_like(codes) or not is_list_like(codes[0]):
raise TypeError("Codes must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# --------------------------------------------------------------------
# Index Internals
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype("uint64")
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
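    # Worked sketch of the packing above (illustrative sizes, not from the
    # original source): with level sizes (3, 5),
    #   sizes    = ceil(log2([3 + 1, 5 + 1])) = [2., 3.]
    #   lev_bits = [5., 3.]   (cumulative sum taken right-to-left)
    #   offsets  = [3, 0]
    # so the code pair (c0, c1) packs into the single integer (c0 << 3) | c1,
    # and lev_bits[0] == 5 <= 64 keeps us on the uint64 engine.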
@property
def _constructor(self):
return MultiIndex.from_tuples
@Appender(Index._shallow_copy.__doc__)
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
names = kwargs.pop("names", kwargs.pop("name", self.names))
# discards freq
kwargs.pop("freq", None)
return MultiIndex.from_tuples(values, names=names, **kwargs)
return self.copy(**kwargs)
def _shallow_copy_with_infer(self, values, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(
levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
**kwargs,
)
return self._shallow_copy(values, **kwargs)
# --------------------------------------------------------------------
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
_set_identity=False,
):
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
else:
if levels is None:
levels = self.levels
if codes is None:
codes = self.codes
return MultiIndex(
levels=levels,
codes=codes,
names=names,
sortorder=self.sortorder,
verify_integrity=False,
_set_identity=_set_identity,
)
def __array__(self, dtype=None) -> np.ndarray:
return self.values
def view(self, cls=None):
result = self.copy()
result._id = self._id
return result
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError, ValueError):
return False
@cache_readonly
def dtype(self) -> np.dtype:
return np.dtype("O")
def _is_memory_usage_qualified(self) -> bool:
def f(l):
return "mixed" in l or "string" in l or "unicode" in l
return any(f(l) for l in self._inferred_type_levels)
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep: bool = False) -> int:
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self) -> int:
return self._nbytes(False)
def _nbytes(self, deep: bool = False) -> int:
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _formatter_func(self, tup):
formatter_funcs = [level._formatter_func for level in self.levels]
return tuple(func(val) for func, val in zip(formatter_funcs, tup))
def _format_data(self, name=None):
return format_object_summary(
self, self._formatter_func, name=name, line_break_each_value=True
)
def _format_attrs(self):
return format_object_attrs(self, include_dtype=False)
def _format_native_types(self, na_rep="nan", **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = level_codes == -1
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
assert not level_codes.flags.writeable # i.e. copy is needed
level_codes = level_codes.copy() # make writeable
level_codes[mask] = nan_index
new_levels.append(level)
new_codes.append(level_codes)
if len(new_levels) == 1:
# a single-level multi-index
return Index(new_levels[0].take(new_codes[0]))._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(
levels=new_levels,
codes=new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
return mi.values
def format(
self,
space=2,
sparsify=None,
adjoin=True,
names=False,
na_rep=None,
formatter=None,
):
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [
pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
for x in algos.take_1d(lev._values, level_codes)
]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(
pprint_thing(name, escape_chars=("\t", "\r", "\n"))
if name is not None
else ""
)
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ""
# GH3547
            # use value of sparsify as sentinel, unless it's an obvious
            # "Truthy" value
if sparsify not in [True, 1]:
sentinel = sparsify
result_levels = _sparsify(
result_levels, start=int(names), sentinel=sentinel
)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
def _get_names(self):
return FrozenList(self._names)
def _set_names(self, names, level=None, validate=True):
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if validate:
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(lev) for lev in level]
# set the name
for lev, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
self._names[lev] = name
# If .levels has been accessed, the names in our cache will be stale.
self._reset_cache()
names = property(
fset=_set_names, fget=_get_names, doc="""\nNames of levels in MultiIndex.\n"""
)
# --------------------------------------------------------------------
@Appender(Index._get_grouper_for_level.__doc__)
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
else:
# break references back to us so that setting the name
# on the output of a groupby doesn't reflect back here.
level_index = level_index.copy()
if level_index._can_hold_na:
grouper = level_index.take(codes, fill_value=True)
else:
grouper = level_index.take(codes)
return grouper, codes, level_index
@cache_readonly
def inferred_type(self) -> str:
return "mixed"
def _get_level_number(self, level) -> int:
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError(
f"The name {level} occurs multiple times, use a level number"
)
try:
level = self.names.index(level)
except ValueError as err:
if not is_integer(level):
raise KeyError(f"Level {level} not found") from err
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"{orig_level} is not a valid level number"
) from err
elif level >= self.nlevels:
raise IndexError(
f"Too many levels: Index has only {self.nlevels} levels, "
f"not {level + 1}"
) from err
return level
@property
def _has_complex_internals(self) -> bool:
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
if all(x.is_monotonic for x in self.levels):
return libalgos.is_lexsorted(
[x.astype("int64", copy=False) for x in self.codes]
)
values = [
self._get_level_values(i).values for i in reversed(range(len(self.levels)))
]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self[::-1].is_monotonic_increasing
@cache_readonly
def _inferred_type_levels(self):
return [i.inferred_type for i in self.levels]
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep="first"):
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
def fillna(self, value=None, downcast=None):
        raise NotImplementedError("fillna is not defined for MultiIndex")
@Appender(Index.dropna.__doc__)
def dropna(self, how="any"):
nans = [level_codes == -1 for level_codes in self.codes]
if how == "any":
indexer = np.any(nans, axis=0)
elif how == "all":
indexer = np.all(nans, axis=0)
else:
raise ValueError(f"invalid how option: {how}")
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.copy(codes=new_codes, deep=True)
def _get_level_values(self, level, unique=False):
lev = self.levels[level]
level_codes = self.codes[level]
name = self._names[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value)
return lev._shallow_copy(filled, name=name)
def get_level_values(self, level):
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@Appender(Index.unique.__doc__)
def unique(self, level=None):
if level is None:
return super().unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def _to_safe_for_reshape(self):
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True, name=None):
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self.names
result = DataFrame(
{
(level if lvlname is None else lvlname): self._get_level_values(level)
for lvlname, level in zip(idx_names, range(len(self.levels)))
},
copy=False,
)
if index:
result.index = self
return result
def to_flat_index(self):
return Index(self.values, tupleize_cols=False)
@property
def is_all_dates(self) -> bool:
return False
def is_lexsorted(self) -> bool:
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
return self.sortorder
return self._lexsort_depth()
def _lexsort_depth(self) -> int:
int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]
for k in range(self.nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
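    # Sketch (illustrative code arrays, not from the original source):
    # _lexsort_depth counts how many leading levels are already sorted
    # lexicographically.
    #   codes [[0, 0, 1], [1, 0, 0]]  ->  depth 1 (level 0 sorted, pair not)
    #   codes [[0, 0, 1], [0, 1, 0]]  ->  depth 2 (fully lexsorted)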
def _sort_levels_monotonic(self):
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
indexer = ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_1d(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(
new_levels,
new_codes,
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def remove_unused_levels(self):
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
changed = True
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
code_mapping = np.zeros(len(lev) + has_na)
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self.view()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
def __reduce__(self):
d = dict(
levels=list(self.levels),
codes=list(self.codes),
sortorder=self.sortorder,
names=list(self.names),
)
return ibase._new_Index, (type(self), d), None
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
else:
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
taken = self._assert_take_fillable(
self.codes,
indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1,
)
return MultiIndex(
levels=self.levels, codes=taken, names=self.names, verify_integrity=False
)
def _assert_take_fillable(
self, values, indices, allow_fill=True, fill_value=None, na_value=None
):
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.codes]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.codes]
return taken
def append(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
if all(
(isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs) -> np.ndarray:
return self.values.argsort(*args, **kwargs)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
repeats = ensure_platform_int(repeats)
return MultiIndex(
levels=self.levels,
codes=[
level_codes.view(np.ndarray).astype(np.intp).repeat(repeats)
for level_codes in self.codes
],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False,
)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for MultiIndex operations")
def drop(self, codes, level=None, errors="raise"):
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(range(loc.start, loc.stop))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
def _drop_from_level(self, codes, level, errors="raise"):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
if mask.all() and errors != "ignore":
raise KeyError(f"labels {codes} not found in level")
return self[mask]
def swaplevel(self, i=-2, j=-1):
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def reorder_levels(self, order):
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(
f"Length of order must be same as number of levels ({self.nlevels}), "
f"got {len(order)}"
)
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
def _get_codes_for_sorting(self):
def cats(level_codes):
return np.arange(
np.array(level_codes).max() + 1 if len(level_codes) else 0,
dtype=level_codes.dtype,
)
return [
Categorical.from_codes(level_codes, cats(level_codes), ordered=True)
for level_codes in self.codes
]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
indexer = lexsort_indexer(
[self.codes[lev] for lev in level], orders=ascending
)
else:
codes = list(self.codes)
shape = list(self.levshape)
primary = tuple(codes[lev] for lev in level)
primshp = tuple(shape[lev] for lev in level)
for lev in sorted(level, reverse=True):
codes.pop(lev)
shape.pop(lev)
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp, compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(
codes=new_codes,
levels=self.levels,
names=self.names,
sortorder=sortorder,
verify_integrity=False,
)
return new_index, indexer
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
preserve_names = not hasattr(target, "names")
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
                attrs.pop("freq", None)  # don't preserve freq
                target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(
target, level, how="right", return_indexers=True, keep_order=False
)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
                # some targets were missing; fall back to rebuilding a
                # MultiIndex from the tuples
target = MultiIndex.from_tuples(target)
if (
preserve_names
and target.nlevels == self.nlevels
and target.names != self.names
):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
# --------------------------------------------------------------------
# Indexing Methods
def get_value(self, series, key):
# Label-based
if not is_hashable(key) or is_iterator(key):
# We allow tuples if they are hashable, whereas other Index
# subclasses require scalar.
# We have to explicitly exclude generators, as these are hashable.
raise InvalidIndexError(key)
try:
loc = self.get_loc(key)
except KeyError:
if is_integer(key):
loc = key
else:
raise
return self._get_values_for_loc(series, loc, key)
def _get_values_for_loc(self, series: "Series", loc, key):
new_values = series._values[loc]
if is_scalar(loc):
return new_values
new_index = self[loc]
new_index = maybe_droplevels(new_index, key)
new_ser = series._constructor(new_values, index=new_index, name=series.name)
return new_ser.__finalize__(series)
def _convert_listlike_indexer(self, keyarr):
indexer, keyarr = super()._convert_listlike_indexer(keyarr)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError(f"{keyarr[mask]} not in index")
return indexer, keyarr
def _get_partial_string_timestamp_match_key(self, key):
# GH#10331
if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(self.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if (
isinstance(component, str)
and self.levels[i]._supports_partial_string_indexing
):
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
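    # Sketch of the key rewriting above for a 2-level index whose first level
    # supports partial string indexing, e.g. a DatetimeIndex (hypothetical):
    #   "2016-01-01"
    #     ->  ("2016-01-01", slice(None))                           # pad levels
    #     ->  (slice("2016-01-01", "2016-01-01", None), slice(None))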
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
if not self.is_unique:
raise ValueError("Reindexing only valid with uniquely valued Index objects")
if method == "pad" or method == "backfill":
if tolerance is not None:
raise NotImplementedError(
"tolerance not implemented yet for MultiIndex"
)
indexer = self._engine.get_indexer(target, method, limit)
elif method == "nearest":
raise NotImplementedError(
"method='nearest' not implemented yet "
"for MultiIndex; see GitHub issue 9365"
)
else:
indexer = self._engine.get_indexer(target)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super().get_indexer_non_unique(target)
def get_slice_bound(
self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str
) -> int:
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
return super().slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self.lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
if is_scalar(key) and isna(key):
return -1
else:
return level_index.get_loc(key)
def get_loc(self, key, method=None):
if method is not None:
raise NotImplementedError(
"only the default get_loc method is "
"currently supported for MultiIndex"
)
def _maybe_to_slice(loc):
if not isinstance(loc, np.ndarray) or loc.dtype != "int64":
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype="bool")
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, (tuple, list)):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError(
f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
)
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (
self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))
)
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn(
"indexing past lexsort depth may impact performance.",
PerformanceWarning,
stacklevel=10,
)
loc = np.arange(start, stop, dtype="int64")
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self._get_loc_single_level_index(
self.levels[i], k
)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level: bool = True):
def maybe_mi_droplevels(indexer, levels, drop_level: bool):
if not drop_level:
return self[indexer]
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except ValueError:
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError(
"Key for location must have same length as number of levels"
)
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_mi_droplevels(result, level, drop_level)
level = self._get_level_number(level)
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_mi_droplevels(indexer, [0], drop_level)
return indexer, new_index
except (TypeError, InvalidIndexError):
pass
if not any(isinstance(k, slice) for k in key):
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [
i for i in range(len(key)) if key[i] != slice(None, None)
]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
if len(key) == self.nlevels and self.is_unique:
try:
return (self._engine.get_loc(key), None)
except KeyError as e:
raise KeyError(key) from e
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else:
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)]
return indexer, maybe_mi_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_mi_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._ndarray_values
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
return convert_indexer(start, stop + 1, step)
else:
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
code = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
locs = np.array(level_codes == code, dtype=bool, copy=False)
if not locs.any():
raise KeyError(key)
return locs
i = level_codes.searchsorted(code, side="left")
j = level_codes.searchsorted(code, side="right")
if i == j:
raise KeyError(key)
return slice(i, j)
def get_locs(self, seq):
from pandas.core.indexes.numeric import Int64Index
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError(
"MultiIndex slicing requires the index to be lexsorted: slicing "
f"on levels {true_slices}, lexsort depth {self.lexsort_depth}"
)
n = len(self)
indexer = None
def _convert_to_indexer(r):
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError(
"cannot index with a boolean indexer "
"that is not the same length as the "
"index"
)
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer)
elif is_list_like(k):
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i, indexer=indexer)
)
indexers = idxrs if indexers is None else indexers | idxrs
except KeyError:
# ignore keys that are not found
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
# no matches; we are done
return Int64Index([])._ndarray_values
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(
_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)
),
indexer=indexer,
)
else:
# a single label
indexer = _update_indexer(
_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]
),
indexer=indexer,
)
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
indexer = self._reorder_indexer(seq, indexer)
return indexer._ndarray_values
def _reorder_indexer(
self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], indexer: ArrayLike
) -> ArrayLike:
# If the index is lexsorted and the list_like label in seq are sorted
# then we do not need to sort
if self.is_lexsorted():
need_sort = False
for i, k in enumerate(seq):
if is_list_like(k):
if not need_sort:
k_codes = self.levels[i].get_indexer(k)
k_codes = k_codes[k_codes >= 0] # Filter absent keys
# True if the given codes are not ordered
need_sort = (k_codes[:-1] > k_codes[1:]).any()
# Bail out if both index and seq are sorted
if not need_sort:
return indexer
n = len(self)
keys: Tuple[np.ndarray, ...] = tuple()
# For each level of the sequence in seq, map the level codes with the
# order they appear in a list-like sequence
# This mapping is then used to reorder the indexer
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
new_order = np.arange(n)[indexer]
elif is_list_like(k):
# Generate a map with all level codes as sorted initially
key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(
self.levels[i]
)
# Set order as given in the indexer list
level_indexer = self.levels[i].get_indexer(k)
level_indexer = level_indexer[level_indexer >= 0] # Filter absent keys
key_order_map[level_indexer] = np.arange(len(level_indexer))
new_order = key_order_map[self.codes[i][indexer]]
else:
# For all other cases, use the same order as the level
new_order = np.arange(n)[indexer]
keys = (new_order,) + keys
# Find the reordering using lexsort on the keys mapping
ind = np.lexsort(keys)
return indexer[ind]
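# A hedged sketch of the idea above (illustrative only): for a request such as
# seq = (['b', 'a'], slice(None)), key_order_map assigns 'b' a smaller rank than
# 'a', and np.lexsort over the remapped level codes reorders the indexer so the
# results come back in the order the labels were asked for.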
def truncate(self, before=None, after=None):
if after and before and after < before:
raise ValueError("after < before")
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False)
def equals(self, other) -> bool:
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
# d-level MultiIndex can equal d-tuple Index
if not is_object_dtype(other.dtype):
if self.nlevels != other.nlevels:
return False
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(
np.asarray(self.levels[i]._values), self_codes, allow_fill=False
)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values), other_codes, allow_fill=False
)
# since we use NaT for both datetime64 and timedelta64,
# we can have a situation where a level is typed, say,
# timedelta64 in self (i.e. it has values other than NaT)
# but datetime64 in other (where it is all NaT);
# these levels are still equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other) -> bool:
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
# --------------------------------------------------------------------
# Set Methods
def union(self, other, sort=None):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
# TODO: Index.union returns other when `len(self)` is 0.
uniq_tuples = lib.fast_unique_multiple(
[self._ndarray_values, other._ndarray_values], sort=sort
)
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def intersection(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
lvals = self._ndarray_values
rvals = other._ndarray_values
uniq_tuples = None  # flag whether _inner_indexer was successful
if self.is_monotonic and other.is_monotonic:
try:
uniq_tuples = self._inner_indexer(lvals, rvals)[0]
sort = False # uniq_tuples is already sorted
except TypeError:
pass
if uniq_tuples is None:
other_uniq = set(rvals)
seen = set()
uniq_tuples = [
x for x in lvals if x in other_uniq and not (x in seen or seen.add(x))
]
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_arrays(
zip(*uniq_tuples), sortorder=0, names=result_names
)
def difference(self, other, sort=None):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(
levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
difference = this.values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names,
verify_integrity=False,
)
else:
return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, "names"):
if len(other) == 0:
other = MultiIndex(
levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
verify_integrity=False,
)
else:
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other)
except TypeError as err:
raise TypeError(msg) from err
else:
result_names = self.names if self.names == other.names else None
return other, result_names
# --------------------------------------------------------------------
@Appender(Index.astype.__doc__)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = "> 1 ndim Categorical are not supported at this time"
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
raise TypeError(
f"Setting {type(self)} dtype to anything other "
"than object is not supported"
)
elif copy is True:
return self._shallow_copy()
return self
def insert(self, loc: int, item):
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item,) + ("",) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError("Item must have length equal to number of levels.")
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
def delete(self, loc):
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(
levels=self.levels,
codes=new_codes,
names=self.names,
verify_integrity=False,
)
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values, names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.get_level_values(num)
if levs.size == 0:
return np.zeros(len(levs), dtype=np.bool_)
return levs.isin(values)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start: int = 0, sentinel=""):
pivoted = list(zip(*label_list))
k = len(label_list)
result = pivoted[: start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1 :]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return list(zip(*result))
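# A hedged example of what _sparsify produces (illustrative only): for
# label_list = [['a', 'a', 'b'], ['x', 'y', 'y']] the rows are ('a', 'x'),
# ('a', 'y'), ('b', 'y'); the repeated leading 'a' is replaced by the sentinel,
# giving ('a', 'x'), ('', 'y'), ('b', 'y') -- the display form in which outer
# MultiIndex labels are not repeated on consecutive rows.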
def _get_na_rep(dtype) -> str:
return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN")
def maybe_droplevels(index, key):
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except ValueError:
return original_index
else:
try:
index = index.droplevel(0)
except ValueError:
pass
return index
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
array_like = coerce_indexer_dtype(array_like, categories)
if copy:
array_like = array_like.copy()
array_like.flags.writeable = False
return array_like
| true
| true
|
f70976272c40dc0ae0eee0a064546febb929a4e0
| 347
|
py
|
Python
|
src/sql/master/stock_item_group.py
|
springmaple/yotimes_sql_bridge
|
0d1d17c63d867bb2c2ad286851343c1884fdddf8
|
[
"Apache-2.0"
] | null | null | null |
src/sql/master/stock_item_group.py
|
springmaple/yotimes_sql_bridge
|
0d1d17c63d867bb2c2ad286851343c1884fdddf8
|
[
"Apache-2.0"
] | 4
|
2020-06-02T14:29:17.000Z
|
2021-05-16T09:13:52.000Z
|
src/sql/master/stock_item_group.py
|
springmaple/yotimes_sql_bridge
|
0d1d17c63d867bb2c2ad286851343c1884fdddf8
|
[
"Apache-2.0"
] | null | null | null |
from sql.entity import Entity
class StockItemGroup(Entity):
def __init__(self, data):
super().__init__(data)
self.code = self._get_str('Code') # Primary Key
self.description = self._get_str('Description')
self.is_active = self._get_bool('IsActive')
self.last_modified = self._get_int('LastModified')
| 31.545455
| 58
| 0.67147
|
from sql.entity import Entity
class StockItemGroup(Entity):
def __init__(self, data):
super().__init__(data)
self.code = self._get_str('Code')
self.description = self._get_str('Description')
self.is_active = self._get_bool('IsActive')
self.last_modified = self._get_int('LastModified')
| true
| true
|
f70977cc983222645bc9a20cd05075c26c497868
| 456
|
py
|
Python
|
src/slime/core/utils.py
|
ihmeuw-msca/SLIME
|
255dfc6fc1880545f1ca9a5062eff823571cc025
|
[
"MIT"
] | null | null | null |
src/slime/core/utils.py
|
ihmeuw-msca/SLIME
|
255dfc6fc1880545f1ca9a5062eff823571cc025
|
[
"MIT"
] | null | null | null |
src/slime/core/utils.py
|
ihmeuw-msca/SLIME
|
255dfc6fc1880545f1ca9a5062eff823571cc025
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
utils
~~~~~
"""
import numpy as np
def sizes_to_indices(sizes):
"""Converting sizes to corresponding indices.
Args:
sizes (numpy.ndarray):
An array consisting of non-negative numbers.
Returns:
list of numpy.ndarray:
The index arrays corresponding to each size.
"""
u_id = np.cumsum(sizes)
l_id = np.insert(u_id[:-1], 0, 0)
return [
np.arange(l, u) for l, u in zip(l_id, u_id)
]
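# A hedged usage example (not part of the module): sizes_to_indices([2, 3])
# yields [array([0, 1]), array([2, 3, 4])], since np.cumsum gives the upper
# bounds [2, 5] and the shifted lower bounds are [0, 2].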
| 19
| 52
| 0.537281
|
import numpy as np
def sizes_to_indices(sizes):
u_id = np.cumsum(sizes)
l_id = np.insert(u_id[:-1], 0, 0)
return [
np.arange(l, u) for l, u in zip(l_id, u_id)
]
| true
| true
|
f70978299ac9e682f5cdb99a7396541fd08c115c
| 2,119
|
py
|
Python
|
youtube_dl/extractor/unistra.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 1
|
2015-02-19T13:13:47.000Z
|
2015-02-19T13:13:47.000Z
|
youtube_dl/extractor/unistra.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 2
|
2019-05-20T12:46:30.000Z
|
2020-11-07T12:50:32.000Z
|
youtube_dl/extractor/unistra.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | 5
|
2020-10-25T09:18:58.000Z
|
2021-05-23T22:57:55.000Z
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import qualities
class UnistraIE(InfoExtractor):
_VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
_TESTS = [
{
'url': 'http://utv.unistra.fr/video.php?id_video=154',
'md5': '736f605cfdc96724d55bb543ab3ced24',
'info_dict': {
'id': '154',
'ext': 'mp4',
'title': 'M!ss Yella',
'description': 'md5:104892c71bd48e55d70b902736b81bbf',
},
},
{
'url': 'http://utv.unistra.fr/index.php?id_video=437',
'md5': '1ddddd6cccaae76f622ce29b8779636d',
'info_dict': {
'id': '437',
'ext': 'mp4',
'title': 'Prix Louise Weiss 2014',
'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
files = set(re.findall(r'file\s*:\s*"([^"]+)"', webpage))
quality = qualities(['SD', 'HD'])
formats = []
for file_path in files:
format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD'
formats.append({
'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
'format_id': format_id,
'quality': quality(format_id)
})
title = self._html_search_regex(
r'<title>UTV - (.*?)</', webpage, 'title')
description = self._html_search_regex(
r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL)
thumbnail = self._search_regex(
r'image: "(.*?)"', webpage, 'thumbnail')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats
}
| 31.626866
| 97
| 0.511562
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import qualities
class UnistraIE(InfoExtractor):
_VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
_TESTS = [
{
'url': 'http://utv.unistra.fr/video.php?id_video=154',
'md5': '736f605cfdc96724d55bb543ab3ced24',
'info_dict': {
'id': '154',
'ext': 'mp4',
'title': 'M!ss Yella',
'description': 'md5:104892c71bd48e55d70b902736b81bbf',
},
},
{
'url': 'http://utv.unistra.fr/index.php?id_video=437',
'md5': '1ddddd6cccaae76f622ce29b8779636d',
'info_dict': {
'id': '437',
'ext': 'mp4',
'title': 'Prix Louise Weiss 2014',
'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
files = set(re.findall(r'file\s*:\s*"([^"]+)"', webpage))
quality = qualities(['SD', 'HD'])
formats = []
for file_path in files:
format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD'
formats.append({
'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
'format_id': format_id,
'quality': quality(format_id)
})
title = self._html_search_regex(
r'<title>UTV - (.*?)</', webpage, 'title')
description = self._html_search_regex(
r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL)
thumbnail = self._search_regex(
r'image: "(.*?)"', webpage, 'thumbnail')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats
}
| true
| true
|
f70978813547c167df85dc88b5887caeeabba537
| 2,408
|
py
|
Python
|
lab-12-2-char-seq-rnn.py
|
KANG91/Deep_Learning
|
e3e9de769ab835215d0ebeee79ff869afbe64ebf
|
[
"MIT"
] | null | null | null |
lab-12-2-char-seq-rnn.py
|
KANG91/Deep_Learning
|
e3e9de769ab835215d0ebeee79ff869afbe64ebf
|
[
"MIT"
] | null | null | null |
lab-12-2-char-seq-rnn.py
|
KANG91/Deep_Learning
|
e3e9de769ab835215d0ebeee79ff869afbe64ebf
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # reproducibility
sample = " if you want you"
idx2char = list(set(sample)) # index -> char
char2idx = {c: i for i, c in enumerate(idx2char)} # char -> index
# hyper parameters
dic_size = len(char2idx) # RNN input size (one hot size)
rnn_hidden_size = len(char2idx) # RNN output size
num_classes = len(char2idx) # final output size (RNN or softmax, etc.)
batch_size = 1 # one sample data, one batch
sequence_length = len(sample) - 1 # number of lstm rollings (unit #)
sample_idx = [char2idx[c] for c in sample] # char to index
x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell
y_data = [sample_idx[1:]] # Y label sample (1 ~ n) hello: ello
X = tf.placeholder(tf.int32, [None, sequence_length]) # X data
Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label
x_one_hot = tf.one_hot(X, num_classes) # one hot: 1 -> 0 1 0 0 0 0 0 0 0 0
cell = tf.contrib.rnn.BasicLSTMCell(
num_units=rnn_hidden_size, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, _states = tf.nn.dynamic_rnn(
cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)
weights = tf.ones([batch_size, sequence_length])
sequence_loss = tf.contrib.seq2seq.sequence_loss(
logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss)
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
prediction = tf.argmax(outputs, axis=2)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(3000):
l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})
result = sess.run(prediction, feed_dict={X: x_data})
# print char using dic
result_str = [idx2char[c] for c in np.squeeze(result)]
print(i, "loss:", l, "Prediction:", ''.join(result_str))
'''
0 loss: 2.29895 Prediction: nnuffuunnuuuyuy
1 loss: 2.29675 Prediction: nnuffuunnuuuyuy
2 loss: 2.29459 Prediction: nnuffuunnuuuyuy
3 loss: 2.29247 Prediction: nnuffuunnuuuyuy
...
1413 loss: 1.3745 Prediction: if you want you
1414 loss: 1.3743 Prediction: if you want you
1415 loss: 1.3741 Prediction: if you want you
1416 loss: 1.3739 Prediction: if you want you
1417 loss: 1.3737 Prediction: if you want you
1418 loss: 1.37351 Prediction: if you want you
1419 loss: 1.37331 Prediction: if you want you
'''
| 36.484848
| 75
| 0.71387
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)
sample = " if you want you"
idx2char = list(set(sample))
char2idx = {c: i for i, c in enumerate(idx2char)}
dic_size = len(char2idx)
rnn_hidden_size = len(char2idx)
num_classes = len(char2idx)
batch_size = 1
sequence_length = len(sample) - 1
sample_idx = [char2idx[c] for c in sample]
x_data = [sample_idx[:-1]]
y_data = [sample_idx[1:]]
X = tf.placeholder(tf.int32, [None, sequence_length])
Y = tf.placeholder(tf.int32, [None, sequence_length])
x_one_hot = tf.one_hot(X, num_classes)
cell = tf.contrib.rnn.BasicLSTMCell(
num_units=rnn_hidden_size, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, _states = tf.nn.dynamic_rnn(
cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)
weights = tf.ones([batch_size, sequence_length])
sequence_loss = tf.contrib.seq2seq.sequence_loss(
logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss)
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
prediction = tf.argmax(outputs, axis=2)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(3000):
l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})
result = sess.run(prediction, feed_dict={X: x_data})
result_str = [idx2char[c] for c in np.squeeze(result)]
print(i, "loss:", l, "Prediction:", ''.join(result_str))
| true
| true
|
f70979275ff6133599e91195e91f814fe1e7645d
| 10,392
|
py
|
Python
|
p2_continuous-control/agent_ddpg.py
|
zhulingchen/deep-reinforcement-learning
|
193486659e17861208fa0a8703487e7be5868ff9
|
[
"MIT"
] | null | null | null |
p2_continuous-control/agent_ddpg.py
|
zhulingchen/deep-reinforcement-learning
|
193486659e17861208fa0a8703487e7be5868ff9
|
[
"MIT"
] | 4
|
2020-09-26T00:50:40.000Z
|
2022-02-10T00:43:36.000Z
|
p2_continuous-control/agent_ddpg.py
|
zhulingchen/deep-reinforcement-learning
|
193486659e17861208fa0a8703487e7be5868ff9
|
[
"MIT"
] | null | null | null |
import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from model_ddpg import Actor, Critic
from replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
BUFFER_SIZE = int(1e6) # replay buffer size
START_SIZE = 1024 # when to start training
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-3 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
TRAIN_EVERY = 5 # how often to train a batch
TRAIN_STEPS = 3 # how many training steps when a batch is trained
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, num_agents, state_size, action_size, random_seed, use_per=False):
"""Initialize an Agent object.
Params
======
num_agents (int): number of agents
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
use_per (bool): whether to use prioritized replay buffer
"""
self.num_agents = num_agents
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.use_per = use_per
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
if use_per:
self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, BATCH_SIZE)
else:
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed)
# Initialize time step
self.t_step = 0
def get_critic_Q(self, states, actions, rewards, next_states, dones, gamma, is_train=True):
# Get max predicted Q values (for next states) from target model
if is_train:
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
else:
self.actor_local.eval()
self.actor_target.eval()
self.critic_local.eval()
self.critic_target.eval()
with torch.no_grad():
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
self.actor_local.train()
self.actor_target.train()
self.critic_local.train()
self.critic_target.train()
return Q_expected, Q_targets
def step(self, states, actions, rewards, next_states, dones):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
if self.use_per:
# Convert numpy array to torch tensor
states = torch.from_numpy(states).float().to(device)
actions = torch.from_numpy(actions).float().to(device)
rewards = torch.from_numpy(np.array(rewards)).float().unsqueeze(1).to(device)
next_states = torch.from_numpy(next_states).float().to(device)
dones = torch.from_numpy(np.array(dones).astype(np.uint8)).float().unsqueeze(1).to(device)
# Get max predicted Q values (for next states) from target model
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, GAMMA, is_train=False)
# Convert torch tensor to numpy array
states = states.cpu().data.numpy()
actions = actions.cpu().data.numpy()
rewards = rewards.cpu().data.numpy().squeeze(1).tolist()
next_states = next_states.cpu().data.numpy()
dones = dones.cpu().data.numpy().squeeze(1).astype(np.bool).tolist()
# Calculate error
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze(1)
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i], errors[i])
else:
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i])
# Update time step
self.t_step += 1
# If enough samples are available in memory,
if len(self.memory) >= START_SIZE:
# Get random subset and learn every TRAIN_EVERY time steps,
if self.t_step % TRAIN_EVERY == 0:
for _ in range(TRAIN_STEPS):
if self.use_per:
experiences, idx_tree, is_weight = self.memory.sample()
self.learn(experiences, GAMMA, idx_tree, is_weight)
else:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, states, add_noise=True):
"""Returns epsilon-greedy actions for given state as per current policy."""
states = torch.from_numpy(states).float().to(device)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(states).cpu().data.numpy()
self.actor_local.train()
if add_noise:
actions += np.concatenate([np.expand_dims(self.noise.sample(), axis=0) for _ in range(self.num_agents)], axis=0)
return np.clip(actions, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma, idx_tree=None, is_weight=None):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, gamma, is_train=True)
# Compute critic loss
if self.use_per:
assert ((is_weight is not None) and (is_weight.size > 0))
is_weight = torch.from_numpy(is_weight).float().to(device)
critic_loss = (is_weight * F.smooth_l1_loss(Q_expected, Q_targets, reduction='none').squeeze()).mean()
else:
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
# torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) # use gradient norm clipping
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
# update priority
if self.use_per:
assert((idx_tree is not None) and (len(idx_tree) > 0))
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze()
for i in range(self.memory.batch_size):
self.memory.update(idx_tree[i], errors[i])
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])
self.state = x + dx
return self.state
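# A hedged note on the update above (assumption: the uniform random.random()
# draws stand in for Gaussian increments): this is a discretized
# Ornstein-Uhlenbeck step, dx = theta * (mu - x) + sigma * eps, which
# mean-reverts toward mu while producing temporally correlated exploration noise.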
| 44.410256
| 124
| 0.610277
|
import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from model_ddpg import Actor, Critic
from replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
BUFFER_SIZE = int(1e6)
START_SIZE = 1024
BATCH_SIZE = 512
GAMMA = 0.99
TAU = 1e-3
LR_ACTOR = 1e-3
LR_CRITIC = 1e-3
WEIGHT_DECAY = 0
TRAIN_EVERY = 5
TRAIN_STEPS = 3
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
def __init__(self, num_agents, state_size, action_size, random_seed, use_per=False):
self.num_agents = num_agents
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.use_per = use_per
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
self.noise = OUNoise(action_size, random_seed)
if use_per:
self.memory = PrioritizedReplayBuffer(BUFFER_SIZE, BATCH_SIZE)
else:
self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, random_seed)
self.t_step = 0
def get_critic_Q(self, states, actions, rewards, next_states, dones, gamma, is_train=True):
if is_train:
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
else:
self.actor_local.eval()
self.actor_target.eval()
self.critic_local.eval()
self.critic_target.eval()
with torch.no_grad():
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
Q_targets = rewards + (gamma * (1 - dones) * Q_targets_next)
Q_expected = self.critic_local(states, actions)
self.actor_local.train()
self.actor_target.train()
self.critic_local.train()
self.critic_target.train()
return Q_expected, Q_targets
def step(self, states, actions, rewards, next_states, dones):
if self.use_per:
states = torch.from_numpy(states).float().to(device)
actions = torch.from_numpy(actions).float().to(device)
rewards = torch.from_numpy(np.array(rewards)).float().unsqueeze(1).to(device)
next_states = torch.from_numpy(next_states).float().to(device)
dones = torch.from_numpy(np.array(dones).astype(np.uint8)).float().unsqueeze(1).to(device)
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, GAMMA, is_train=False)
states = states.cpu().data.numpy()
actions = actions.cpu().data.numpy()
rewards = rewards.cpu().data.numpy().squeeze(1).tolist()
next_states = next_states.cpu().data.numpy()
dones = dones.cpu().data.numpy().squeeze(1).astype(np.bool).tolist()
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze(1)
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i], errors[i])
else:
for i in range(self.num_agents):
self.memory.add(states[i], actions[i], rewards[i], next_states[i], dones[i])
self.t_step += 1
if len(self.memory) >= START_SIZE:
if self.t_step % TRAIN_EVERY == 0:
for _ in range(TRAIN_STEPS):
if self.use_per:
experiences, idx_tree, is_weight = self.memory.sample()
self.learn(experiences, GAMMA, idx_tree, is_weight)
else:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, states, add_noise=True):
states = torch.from_numpy(states).float().to(device)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(states).cpu().data.numpy()
self.actor_local.train()
if add_noise:
actions += np.concatenate([np.expand_dims(self.noise.sample(), axis=0) for _ in range(self.num_agents)], axis=0)
return np.clip(actions, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma, idx_tree=None, is_weight=None):
states, actions, rewards, next_states, dones = experiences
Q_expected, Q_targets = self.get_critic_Q(states, actions, rewards, next_states, dones, gamma, is_train=True)
if self.use_per:
assert ((is_weight is not None) and (is_weight.size > 0))
is_weight = torch.from_numpy(is_weight).float().to(device)
critic_loss = (is_weight * F.smooth_l1_loss(Q_expected, Q_targets, reduction='none').squeeze()).mean()
else:
critic_loss = F.mse_loss(Q_expected, Q_targets)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
if self.use_per:
assert((idx_tree is not None) and (len(idx_tree) > 0))
errors = Q_expected - Q_targets
errors = errors.cpu().data.numpy().squeeze()
for i in range(self.memory.batch_size):
self.memory.update(idx_tree[i], errors[i])
def soft_update(self, local_model, target_model, tau):
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
self.state = copy.copy(self.mu)
def sample(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])
self.state = x + dx
return self.state
| true
| true
|
f7097ac4b4a333a6d31d512970f1db16724cdd27
| 30,578
|
py
|
Python
|
src/python/system/environment.py
|
acrofrank/clusterfuzz
|
4b3c8b6e3beff5299f642ee139cad57c1061974f
|
[
"Apache-2.0"
] | 1
|
2019-11-09T23:09:00.000Z
|
2019-11-09T23:09:00.000Z
|
src/python/system/environment.py
|
acrofrank/clusterfuzz
|
4b3c8b6e3beff5299f642ee139cad57c1061974f
|
[
"Apache-2.0"
] | null | null | null |
src/python/system/environment.py
|
acrofrank/clusterfuzz
|
4b3c8b6e3beff5299f642ee139cad57c1061974f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment functions."""
from builtins import range
import ast
import functools
import os
import re
import six
import socket
import sys
import yaml
try:
from shlex import quote
except ImportError:
from pipes import quote
# Tools supporting customization of options via ADDITIONAL_{TOOL_NAME}_OPTIONS.
# FIXME: Support ADDITIONAL_UBSAN_OPTIONS and ADDITIONAL_LSAN_OPTIONS in an
# ASAN instrumented build.
SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS = [
'HWASAN', 'ASAN', 'CFI', 'MSAN', 'TSAN', 'UBSAN'
]
SANITIZER_NAME_MAP = {
'ASAN': 'address',
'CFI': 'cfi',
'MSAN': 'memory',
'TSAN': 'thread',
'UBSAN': 'undefined',
}
COMMON_SANITIZER_OPTIONS = {
'handle_abort': 1,
'handle_segv': 1,
'handle_sigbus': 1,
'handle_sigfpe': 1,
'handle_sigill': 1,
'print_summary': 1,
'use_sigaltstack': 1,
}
def _eval_value(value_string):
"""Returns evaluated value."""
try:
return ast.literal_eval(value_string)
except:
# String fallback.
return value_string
def join_memory_tool_options(options):
"""Joins a dict holding memory tool options into a string that can be set in
the environment."""
return ':'.join('%s=%s' % (key, str(value))
for key, value in sorted(six.iteritems(options)))
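# A hedged example (illustrative only): join_memory_tool_options(
# {'redzone': 64, 'symbolize': 0}) returns 'redzone=64:symbolize=0',
# the colon-separated form expected in *SAN_OPTIONS environment variables.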
def _maybe_convert_to_int(value):
"""Returns the int representation contained by string |value| if it contains
one. Otherwise returns |value|."""
try:
return int(value)
except ValueError:
return value
# Matches anything that isn't an unquoted (ie: not between two single or two
# double quotes) colon.
UNQUOTED_COLON_REGEX = re.compile('((?:[^\'":]|\'[^\']*\'|"[^"]*")+)')
def _parse_memory_tool_options(options_str):
"""Parses memory tool options into a dict."""
parsed = {}
for item in UNQUOTED_COLON_REGEX.split(options_str):
# Regex split can give us empty strings at the beginning and the end. Skip
# these.
if not item:
continue
# Regex split gives us each ':'. Skip these.
if item == ':':
continue
values = item.split('=', 1)
if len(values) != 2:
# TODO(mbarbella): Factor this out of environment, and switch to logging
# an error and continuing. This error should be recoverable.
raise ValueError('Invalid memory tool option "%s"' % item)
option_name = values[0]
option_value = _maybe_convert_to_int(values[1])
parsed[option_name] = option_value
return parsed
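# A hedged example (illustrative only): _parse_memory_tool_options(
# 'redzone=64:strict_memcmp=0') returns {'redzone': 64, 'strict_memcmp': 0},
# with values converted to int where possible; colons inside quoted values
# are left intact by UNQUOTED_COLON_REGEX.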
def _quote_value_if_needed(value):
"""Quote environment value as needed for certain platforms like Windows."""
result = value
bot_platform = platform()
if bot_platform == 'WINDOWS':
result = '"%s"' % result
return result
def copy():
"""Return a safe copy of the environment."""
environment_copy = os.environ.copy()
return environment_copy
def get_asan_options(redzone_size, malloc_context_size, quarantine_size_mb,
bot_platform, leaks):
"""Generates default ASAN options."""
asan_options = {}
# Default options needed for all cases.
asan_options['alloc_dealloc_mismatch'] = 0
asan_options['print_scariness'] = 1
asan_options['strict_memcmp'] = 0
# Set provided redzone size.
if redzone_size:
asan_options['redzone'] = redzone_size
# This value is used in determining whether to report OOM crashes or not.
set_value('REDZONE', redzone_size)
# Set maximum number of stack frames to report.
if malloc_context_size:
asan_options['malloc_context_size'] = malloc_context_size
# Set quarantine size.
if quarantine_size_mb:
asan_options['quarantine_size_mb'] = quarantine_size_mb
# Test for leaks if this is an LSan-enabled job type.
if get_value('LSAN') and leaks:
lsan_options = join_memory_tool_options(get_lsan_options())
set_value('LSAN_OPTIONS', lsan_options)
asan_options['detect_leaks'] = 1
else:
remove_key('LSAN_OPTIONS')
asan_options['detect_leaks'] = 0
# FIXME: Support container overflow on Android.
if bot_platform == 'ANDROID':
asan_options['detect_container_overflow'] = 0
# Enable stack use-after-return.
asan_options['detect_stack_use_after_return'] = 1
asan_options['max_uar_stack_size_log'] = 16
# Other less important default options for all cases.
asan_options.update({
'allocator_may_return_null': 1,
'allow_user_segv_handler': 0,
'check_malloc_usable_size': 0,
'detect_odr_violation': 0,
'fast_unwind_on_fatal': 1,
'print_suppressions': 0,
})
# Add common sanitizer options.
asan_options.update(COMMON_SANITIZER_OPTIONS)
# FIXME: For Windows, rely on online symbolization since llvm-symbolizer.exe
# in build archive does not work.
asan_options['symbolize'] = int(bot_platform == 'WINDOWS')
# For Android, allow user defined segv handler to work.
if bot_platform == 'ANDROID':
asan_options['allow_user_segv_handler'] = 1
# Check if UBSAN is enabled as well for this ASAN build.
# If yes, set UBSAN_OPTIONS and enable suppressions.
if get_value('UBSAN'):
ubsan_options = get_ubsan_options()
# Remove |symbolize| explicitly to avoid overriding ASan defaults.
ubsan_options.pop('symbolize', None)
set_value('UBSAN_OPTIONS', join_memory_tool_options(ubsan_options))
return asan_options
def get_cpu_arch():
"""Return cpu architecture."""
if platform() == 'ANDROID':
# FIXME: Handle this import in a cleaner way.
from platforms import android
return android.settings.get_cpu_arch()
# FIXME: Add support for desktop architectures as needed.
return None
def get_current_memory_tool_var():
"""Get the environment variable name for the current job type's sanitizer."""
memory_tool_name = get_memory_tool_name(get_value('JOB_NAME'))
if not memory_tool_name:
return None
return memory_tool_name + '_OPTIONS'
def get_memory_tool_options(env_var, default_value=None):
"""Get the current memory tool options as a dict. Returns |default_value| if
|env_var| isn't set. Otherwise returns a dictionary containing the memory tool
options and their values."""
env_value = get_value(env_var)
if env_value is not None:
return _parse_memory_tool_options(env_value)
return default_value
def get_instrumented_libraries_paths():
"""Get the instrumented libraries path for the current sanitizer."""
memory_tool_name = get_memory_tool_name(get_value('JOB_NAME'))
if not memory_tool_name:
return None
if memory_tool_name == 'MSAN':
if 'no-origins' in get_value('BUILD_URL', ''):
memory_tool_name += '_NO_ORIGINS'
else:
memory_tool_name += '_CHAINED'
paths = get_value('INSTRUMENTED_LIBRARIES_PATHS_' + memory_tool_name)
if not paths:
return None
return paths.split(':')
def get_default_tool_path(tool_name):
"""Get the default tool for this platform (from scripts/ dir)."""
if platform().lower() == 'android':
# For android devices, we do symbolization on the host machine, which is
# linux. So, we use the linux version of llvm-symbolizer.
platform_override = 'linux'
else:
# No override needed, use default.
platform_override = None
tool_filename = get_executable_filename(tool_name)
tool_path = os.path.join(
get_platform_resources_directory(platform_override), tool_filename)
return tool_path
def get_environment_settings_as_string():
"""Return environment settings as a string. Includes settings for memory
debugging tools (e.g. ASAN_OPTIONS for ASAN), application binary revision,
application command line, etc."""
environment_string = ''
# Add Android specific variables.
if platform() == 'ANDROID':
# FIXME: Handle this import in a cleaner way.
from platforms import android
environment_string += '[Environment] Build fingerprint: %s\n' % (
get_value('BUILD_FINGERPRINT'))
environment_string += ('[Environment] Patch level: %s\n' %
android.settings.get_security_patch_level())
environment_string += (
'[Environment] Local properties file "%s" with contents:\n%s\n' %
(android.device.LOCAL_PROP_PATH,
android.adb.read_data_from_file(android.device.LOCAL_PROP_PATH)))
command_line = get_value('COMMAND_LINE_PATH')
if command_line:
environment_string += (
'[Environment] Command line file "%s" with contents:\n%s\n' %
(command_line, android.adb.read_data_from_file(command_line)))
asan_options = get_value('ASAN_OPTIONS')
if asan_options:
# FIXME: Need better documentation for Chrome builds. Chrome builds use
# asan_device_setup.sh and we send this options file path as an include
# to extra-options parameter.
sanitizer_options_file_path = (
android.sanitizer.get_options_file_path('ASAN'))
environment_string += (
'[Environment] ASAN options file "%s" with contents:\n%s\n' %
(sanitizer_options_file_path, asan_options))
else:
# For desktop platforms, add |*_OPTIONS| variables from environment.
for sanitizer_option in get_sanitizer_options_for_display():
environment_string += '[Environment] %s\n' % sanitizer_option
return environment_string
def get_sanitizer_options_for_display():
"""Return a list of sanitizer options with quoted values."""
result = []
for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
options_variable = tool + '_OPTIONS'
options_value = os.getenv(options_variable)
if not options_value:
continue
result.append('{options_variable}="{options_value}"'.format(
options_variable=options_variable, options_value=quote(options_value)))
return result
def get_llvm_symbolizer_path():
"""Get the path of the llvm-symbolizer binary."""
llvm_symbolizer_path = get_value('LLVM_SYMBOLIZER_PATH')
# Use default llvm symbolizer for the following:
# 1. If we don't have |LLVM_SYMBOLIZER_PATH| env variable set.
# 2. If this build is deleted, fall back to our own llvm symbolizer.
if not llvm_symbolizer_path or not os.path.exists(llvm_symbolizer_path):
llvm_symbolizer_path = get_default_tool_path('llvm-symbolizer')
# Make sure that llvm symbolizer binary exists.
if not os.path.exists(llvm_symbolizer_path):
return None
# Make sure that llvm symbolizer binary is executable.
os.chmod(llvm_symbolizer_path, 0o750)
return llvm_symbolizer_path
def get_root_directory():
"""Return root directory."""
return get_value('ROOT_DIR')
def get_startup_scripts_directory():
"""Return path to startup scripts."""
return os.path.join(get_value('ROOT_DIR'), 'src', 'python', 'bot', 'startup')
def get_config_directory():
"""Return the path to the configs directory."""
config_dir = get_value('CONFIG_DIR_OVERRIDE')
if config_dir:
return config_dir
if is_running_on_app_engine():
# Root is already src/appengine.
return 'config'
# Running on bot, give path to config folder inside appengine dir.
return os.path.join(get_root_directory(), 'src', 'appengine', 'config')
def get_gae_config_directory():
"""Return the path to the google appengine configs directory."""
return os.path.join(get_config_directory(), 'gae')
def get_gce_config_directory():
"""Return the path to the google compute engine configs directory."""
return os.path.join(get_config_directory(), 'gce')
def get_resources_directory():
"""Return the path to the resources directory."""
return os.path.join(get_root_directory(), 'resources')
def get_platform_resources_directory(platform_override=None):
"""Return the path to platform-specific resources directory."""
return os.path.join(get_resources_directory(), 'platform',
platform_override or platform().lower())
def get_suppressions_directory():
"""Return the path to the suppressions directory."""
return os.path.join(get_config_directory(), 'suppressions')
def get_suppressions_file(sanitizer, suffix='suppressions'):
"""Return the path to sanitizer suppressions file, if exists."""
sanitizer_suppressions_filename = '{sanitizer}_{suffix}.txt'.format(
sanitizer=sanitizer, suffix=suffix)
sanitizer_suppressions_file_path = os.path.join(
get_suppressions_directory(), sanitizer_suppressions_filename)
if not os.path.exists(sanitizer_suppressions_file_path):
return None
if not os.path.getsize(sanitizer_suppressions_file_path):
return None
return sanitizer_suppressions_file_path
def get_lsan_options():
"""Generates default LSAN options."""
lsan_suppressions_path = get_suppressions_file('lsan')
lsan_options = {
'print_suppressions': 0,
}
# Add common sanitizer options.
lsan_options.update(COMMON_SANITIZER_OPTIONS)
if lsan_suppressions_path:
lsan_options['suppressions'] = lsan_suppressions_path
return lsan_options
def get_msan_options():
"""Generates default MSAN options."""
msan_options = {'symbolize': 0}
# Add common sanitizer options.
msan_options.update(COMMON_SANITIZER_OPTIONS)
return msan_options
def get_platform_id():
"""Return a platform id as a lowercase string."""
bot_platform = platform()
if bot_platform == 'ANDROID':
# FIXME: Handle this import in a cleaner way.
from platforms import android
platform_id = get_value('PLATFORM_ID', android.settings.get_platform_id())
return platform_id.lower()
return bot_platform.lower()
def get_platform_group():
"""Return the platform group (specified via QUEUE_OVERRIDE) if it
exists, otherwise platform()."""
platform_group = get_value('QUEUE_OVERRIDE')
if platform_group:
return platform_group
return platform()
def get_memory_tool_name(job_name):
"""Figures out name of memory debugging tool."""
for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
if tool_matches(tool, job_name):
return tool
# If no tool specified, assume it is ASAN. Also takes care of LSAN job type.
return 'ASAN'
def get_memory_tool_display_string(job_name):
"""Return memory tool string for a testcase."""
memory_tool_name = get_memory_tool_name(job_name)
sanitizer_name = SANITIZER_NAME_MAP.get(memory_tool_name)
if not sanitizer_name:
return 'Memory Tool: %s' % memory_tool_name
return 'Sanitizer: %s (%s)' % (sanitizer_name, memory_tool_name)
def get_executable_filename(executable_name):
"""Return the filename for the given executable."""
if platform() != 'WINDOWS':
return executable_name
extension = '.exe'
if executable_name.endswith(extension):
return executable_name
return executable_name + extension
def get_tsan_options():
"""Generates default TSAN options."""
tsan_suppressions_path = get_suppressions_file('tsan')
tsan_options = {
'atexit_sleep_ms': 200,
'flush_memory_ms': 2000,
'history_size': 3,
'print_suppressions': 0,
'report_thread_leaks': 0,
'report_signal_unsafe': 0,
'stack_trace_format': 'DEFAULT',
'symbolize': 1,
}
# Add common sanitizer options.
tsan_options.update(COMMON_SANITIZER_OPTIONS)
if tsan_suppressions_path:
tsan_options['suppressions'] = tsan_suppressions_path
return tsan_options
def get_ubsan_options():
"""Generates default UBSAN options."""
# Note that UBSAN can work together with ASAN as well.
ubsan_suppressions_path = get_suppressions_file('ubsan')
ubsan_options = {
'halt_on_error': 1,
'print_stacktrace': 1,
'print_suppressions': 0,
# We use -fsanitize=unsigned-integer-overflow as an additional coverage
# signal and do not want those errors to be reported by UBSan as bugs.
# See https://github.com/google/oss-fuzz/issues/910 for additional info.
'silence_unsigned_overflow': 1,
'symbolize': 1,
}
# Add common sanitizer options.
ubsan_options.update(COMMON_SANITIZER_OPTIONS)
# TODO(crbug.com/877070): Make this code configurable on a per job basis.
if ubsan_suppressions_path and not is_chromeos_system_job():
ubsan_options['suppressions'] = ubsan_suppressions_path
return ubsan_options
def get_value(environment_variable, default_value=None):
"""Return an environment variable value."""
value_string = os.getenv(environment_variable)
# value_string will be None if the variable is not defined.
if value_string is None:
return default_value
  # Exception for ANDROID_SERIAL. Sometimes the serial can be just numbers,
  # so we don't want to eval it.
if environment_variable == 'ANDROID_SERIAL':
return value_string
# Evaluate the value of the environment variable with string fallback.
return _eval_value(value_string)
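# Illustrative usage (hypothetical variables in the process environment):
#   os.environ['THREAD_COUNT'] = '4'
#   get_value('THREAD_COUNT')        # -> 4, literal_eval parses it as an int.
#   os.environ['APP_ARGS'] = '--no-sandbox'
#   get_value('APP_ARGS')            # -> '--no-sandbox', string fallback.
#   get_value('UNSET_VARIABLE', 13)  # -> 13, default for undefined variables.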
def _job_substring_match(search_string, job_name):
"""Return a bool on whether a string exists in a provided job name or
use from environment if available (case insensitive)."""
job_name = job_name or get_value('JOB_NAME')
if not job_name:
return False
return search_string in job_name.lower()
def is_afl_job(job_name=None):
"""Return true if the current job uses AFL."""
# Prefix matching is not sufficient.
return _job_substring_match('afl', job_name)
def is_chromeos_job(job_name=None):
"""Return True if the current job is for ChromeOS."""
return _job_substring_match('chromeos', job_name)
def is_chromeos_system_job(job_name=None):
"""Return True if the current job is for ChromeOS system (i.e. not libFuzzer
or entire Chrome browser for Chrome on ChromeOS)."""
return is_chromeos_job(job_name) and get_value('CHROMEOS_SYSTEM')
def is_libfuzzer_job(job_name=None):
"""Return true if the current job uses libFuzzer."""
# Prefix matching is not sufficient.
return _job_substring_match('libfuzzer', job_name)
def is_engine_fuzzer_job(job_name=None):
"""Return if true is this is an engine fuzzer."""
return is_afl_job(job_name) or is_libfuzzer_job(job_name)
def is_posix():
"""Return true if we are on a posix platform (linux/unix and mac os)."""
return os.name == 'posix'
def is_trusted_host(ensure_connected=True):
"""Return whether or not the current bot is a trusted host."""
return get_value('TRUSTED_HOST') and (not ensure_connected or
get_value('WORKER_BOT_NAME'))
def is_untrusted_worker():
"""Return whether or not the current bot is an untrusted worker."""
return get_value('UNTRUSTED_WORKER')
def is_running_on_app_engine():
"""Return True if we are running on appengine (local or production)."""
return (is_running_on_app_engine_development() or
os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/'))
def is_running_on_app_engine_development():
"""Return True if running on the local development appengine server."""
return os.getenv('SERVER_SOFTWARE', '').startswith('Development/')
def parse_environment_definition(environment_string):
"""Parses a job's environment definition."""
if not environment_string:
return {}
  values = {}
  for line in environment_string.splitlines():
    if line.startswith('#') or not line.strip():
      continue
    m = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
    if m:
      key = m.group(1).strip()
      value = m.group(2).strip()
      values[key] = value
return values
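# Illustrative parse (hypothetical job environment definition):
#   parse_environment_definition('APP_NAME = app\n# comment\nTEST_TIMEOUT=25')
#       -> {'APP_NAME': 'app', 'TEST_TIMEOUT': '25'}
# Values stay raw strings here; evaluation only happens in get_value().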
def platform():
"""Return the operating system type, unless an override is provided."""
environment_override = get_value('OS_OVERRIDE')
if environment_override:
return environment_override.upper()
if sys.platform.startswith('win'):
return 'WINDOWS'
elif sys.platform.startswith('linux'):
return 'LINUX'
elif sys.platform == 'darwin':
return 'MAC'
raise ValueError('Unsupported platform "%s".' % sys.platform)
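# Illustrative results: with OS_OVERRIDE unset, sys.platform 'linux' maps to
# 'LINUX', 'win32' to 'WINDOWS' and 'darwin' to 'MAC'. Setting, for example,
# OS_OVERRIDE='android' makes platform() return 'ANDROID' regardless of the
# underlying system.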
def remove_key(key_name):
"""Remove environment |key| and its associated value."""
if not key_name:
return
if key_name not in os.environ:
return
del os.environ[key_name]
# Used by reset_environment to store the initial environment.
_initial_environment = None
def reset_environment():
"""Resets environment variables to their initial state. Saves the initial
state on first call."""
global _initial_environment
if _initial_environment is None:
_initial_environment = copy()
# There is nothing to reset if we are initializing for the first time.
else:
# Clean current environment.
os.environ.clear()
# Add shared variables with values from _initial_environment.
os.environ.update(_initial_environment)
if is_trusted_host():
from bot.untrusted_runner import environment as untrusted_env
untrusted_env.reset_environment()
def set_common_environment_variables():
"""Sets environment variables common for different memory debugging tools."""
# G_SLICE = always-malloc: make glib use system malloc.
# NSS_DISABLE_UNLOAD = 1: make nss skip dlclosing dynamically loaded modules,
# which would result in "obj:*" in backtraces.
# NSS_DISABLE_ARENA_FREE_LIST = 1: make nss use system malloc.
set_value('G_SLICE', 'always-malloc')
set_value('NSS_DISABLE_UNLOAD', 1)
set_value('NSS_DISABLE_ARENA_FREE_LIST', 1)
set_value('NACL_DANGEROUS_SKIP_QUALIFICATION_TEST', 1)
def set_memory_tool_options(env_var, options_dict):
"""Set current memory tool options."""
set_value(env_var, join_memory_tool_options(options_dict))
def set_environment_parameters_from_file(file_path):
"""Set environment variables from a file."""
if not os.path.exists(file_path):
return
with open(file_path, 'r') as f:
file_data = f.read()
for line in file_data.splitlines():
if line.startswith('#') or not line.strip():
continue
m = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
if m:
environment_variable = m.group(1)
environment_variable_value = m.group(2)
set_value(environment_variable, environment_variable_value)
def reset_current_memory_tool_options(redzone_size=0,
malloc_context_size=0,
leaks=True,
symbolize_inline_frames=False,
quarantine_size_mb=None):
"""Resets environment variables for memory debugging tool to default
values."""
# FIXME: Handle these imports in a cleaner way.
from platforms import android
# Set common environment variable useful for memory debugging tools.
set_common_environment_variables()
# Set memory tool name in our environment for easy access.
job_name = get_value('JOB_NAME')
tool_name = get_memory_tool_name(job_name)
set_value('MEMORY_TOOL', tool_name)
bot_platform = platform()
  # Default options for the memory debugging tool used.
if tool_name in ['ASAN', 'HWASAN']:
tool_options = get_asan_options(redzone_size, malloc_context_size,
quarantine_size_mb, bot_platform, leaks)
elif tool_name == 'MSAN':
tool_options = get_msan_options()
elif tool_name == 'TSAN':
tool_options = get_tsan_options()
elif tool_name in ['UBSAN', 'CFI']:
tool_options = get_ubsan_options()
# Additional options. These override the defaults.
additional_tool_options = get_value('ADDITIONAL_%s_OPTIONS' % tool_name)
if additional_tool_options:
tool_options.update(_parse_memory_tool_options(additional_tool_options))
if tool_options.get('symbolize') == 1:
if 'external_symbolizer_path' not in tool_options:
llvm_symbolizer_path_arg = _quote_value_if_needed(
get_llvm_symbolizer_path())
tool_options.update({
'external_symbolizer_path': llvm_symbolizer_path_arg
})
if 'symbolize_inline_frames' not in tool_options:
tool_options.update({
'symbolize_inline_frames': str(symbolize_inline_frames).lower()
})
# Join the options.
joined_tool_options = join_memory_tool_options(tool_options)
tool_options_variable_name = '%s_OPTIONS' % tool_name
set_value(tool_options_variable_name, joined_tool_options)
# CFI handles various signals through the UBSan runtime, so need to set
# UBSAN_OPTIONS explicitly. See crbug.com/716235#c25
if tool_name == 'CFI':
set_value('UBSAN_OPTIONS', joined_tool_options)
# For Android, we need to set shell property |asan.options|.
  # For engine-based fuzzers, it is not needed as the options variable is
  # directly passed to the shell.
if bot_platform == 'ANDROID' and not is_engine_fuzzer_job():
android.sanitizer.set_options(tool_name, joined_tool_options)
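# Illustrative effect (hypothetical job): with JOB_NAME='libfuzzer_asan_app',
# reset_current_memory_tool_options() picks ASAN, sets MEMORY_TOOL='ASAN' and
# writes ASAN_OPTIONS as a ':'-joined, alphabetically sorted option string.
# The exact contents depend on the arguments, platform and environment.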
def set_default_vars():
"""Set default environment vars and values."""
env_file_path = os.path.join(get_value('ROOT_DIR'), 'bot', 'env.yaml')
with open(env_file_path) as file_handle:
env_file_contents = file_handle.read()
env_vars_and_values = yaml.safe_load(env_file_contents)
for variable, value in six.iteritems(env_vars_and_values):
# We cannot call set_value here.
os.environ[variable] = str(value)
def set_bot_environment():
"""Set environment for the bots."""
root_dir = get_value('ROOT_DIR')
if not root_dir:
# Error, bail out.
return False
  # Reset our current working directory. Our last job might have left us in a
  # non-existent temp directory, or ROOT_DIR might have been deleted and
  # recreated.
os.chdir(root_dir)
  # Set some default directories. These can be overridden by config files
  # below.
bot_dir = os.path.join(root_dir, 'bot')
if is_trusted_host(ensure_connected=False):
worker_root_dir = os.environ['WORKER_ROOT_DIR']
os.environ['BUILDS_DIR'] = os.path.join(worker_root_dir, 'bot', 'builds')
else:
os.environ['BUILDS_DIR'] = os.path.join(bot_dir, 'builds')
os.environ['BUILD_URLS_DIR'] = os.path.join(bot_dir, 'build-urls')
os.environ['LOG_DIR'] = os.path.join(bot_dir, 'logs')
os.environ['CACHE_DIR'] = os.path.join(bot_dir, 'cache')
inputs_dir = os.path.join(bot_dir, 'inputs')
os.environ['INPUT_DIR'] = inputs_dir
os.environ['CRASH_STACKTRACES_DIR'] = os.path.join(inputs_dir, 'crash-stacks')
os.environ['FUZZERS_DIR'] = os.path.join(inputs_dir, 'fuzzers')
os.environ['DATA_BUNDLES_DIR'] = os.path.join(inputs_dir, 'data-bundles')
os.environ['FUZZ_INPUTS'] = os.path.join(inputs_dir, 'fuzzer-testcases')
os.environ['FUZZ_INPUTS_MEMORY'] = os.environ['FUZZ_INPUTS']
os.environ['FUZZ_INPUTS_DISK'] = os.path.join(inputs_dir,
'fuzzer-testcases-disk')
os.environ['MUTATOR_PLUGINS_DIR'] = os.path.join(inputs_dir,
'mutator-plugins')
os.environ['FUZZ_DATA'] = os.path.join(inputs_dir,
'fuzzer-common-data-bundles')
os.environ['IMAGES_DIR'] = os.path.join(inputs_dir, 'images')
os.environ['SYMBOLS_DIR'] = os.path.join(inputs_dir, 'symbols')
os.environ['USER_PROFILE_ROOT_DIR'] = os.path.join(inputs_dir,
'user-profile-dirs')
# Set bot name.
if not get_value('BOT_NAME'):
# If not defined, default to host name.
os.environ['BOT_NAME'] = socket.gethostname().lower()
# Set BOT_TMPDIR if not already set.
if not get_value('BOT_TMPDIR'):
os.environ['BOT_TMPDIR'] = os.path.join(bot_dir, 'tmp')
# Add common environment variables needed by Bazel test runner.
# See https://docs.bazel.build/versions/master/test-encyclopedia.html.
os.environ['TEST_TMPDIR'] = get_value('BOT_TMPDIR')
# Sets the default configuration. Can be overridden by job environment.
set_default_vars()
# Set environment variable from local project configuration.
from config import local_config
local_config.ProjectConfig().set_environment()
# Success.
return True
def set_tsan_max_history_size():
"""Sets maximum history size for TSAN tool."""
tsan_options = get_value('TSAN_OPTIONS')
if not tsan_options:
return
tsan_max_history_size = 7
for i in range(tsan_max_history_size):
tsan_options = (
tsan_options.replace('history_size=%d' % i,
'history_size=%d' % tsan_max_history_size))
set_value('TSAN_OPTIONS', tsan_options)
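# Illustrative rewrite: if TSAN_OPTIONS contains 'history_size=3:symbolize=1',
# the string becomes 'history_size=7:symbolize=1'. Only history_size=0 through
# history_size=6 are replaced, so an existing history_size=7 stays unchanged.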
def set_value(environment_variable, value):
"""Set an environment variable."""
value_str = str(value)
environment_variable_str = str(environment_variable)
value_str = value_str.replace('%ROOT_DIR%', os.environ['ROOT_DIR'])
os.environ[environment_variable_str] = value_str
if is_trusted_host():
from bot.untrusted_runner import environment as untrusted_env
untrusted_env.forward_environment_variable(environment_variable_str,
value_str)
def tool_matches(tool_name, job_name):
"""Return if the memory debugging tool is used in this job."""
match_prefix = '(.*[^a-zA-Z]|^)%s'
matches_tool = re.match(match_prefix % tool_name.lower(), job_name.lower())
return bool(matches_tool)
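# Illustrative matches (derived from the regex above):
#   tool_matches('ASAN', 'libfuzzer_asan_app')  -> True, '_' precedes 'asan'.
#   tool_matches('ASAN', 'hwasan_arm64')        -> False, the alphabetic 'hw'
#       prefix means this job is matched as HWASAN instead.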
def appengine_noop(func):
"""Wrap a function into no-op and return None if running on App Engine."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if is_running_on_app_engine():
return None
return func(*args, **kwargs)
return wrapper
def bot_noop(func):
"""Wrap a function into no-op and return None if running on bot."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
is_bot = not is_running_on_app_engine()
if is_bot:
return None
return func(*args, **kwargs)
return wrapper
def is_local_development():
"""Return true if running in local development environment (e.g. running
a bot locally, excludes tests)."""
return bool(get_value('LOCAL_DEVELOPMENT') and not get_value('PY_UNITTESTS'))
def local_noop(func):
"""Wrap a function into no-op and return None if running in local
development environment."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if (is_local_development() or is_running_on_app_engine_development()):
return None
return func(*args, **kwargs)
return wrapper
def is_ephemeral():
"""Return whether or not we are an ephemeral bot."""
return get_value('EPHEMERAL')
# File: python/paddle/fluid/dygraph/dygraph_to_static/loop_transformer.py
# Repo: muxiaoqi/Paddle @ 9291c7019ae6287b5b88da138bf6bb1d3d1d8bba (Apache-2.0)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import gast
from collections import defaultdict
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import generate_name_node
from paddle.fluid.dygraph.dygraph_to_static.utils import get_constant_variable_node
from paddle.fluid.dygraph.dygraph_to_static.utils import get_attribute_full_name
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node
__all__ = ['LoopTransformer', 'NameVisitor']
WHILE_CONDITION_PREFIX = 'while_condition'
WHILE_BODY_PREFIX = 'while_body'
FOR_CONDITION_PREFIX = 'for_loop_condition'
FOR_BODY_PREFIX = 'for_loop_body'
def create_while_node(condition_name, body_name, loop_var_names):
while_args = []
while_args.append(
gast.Name(
id=condition_name,
ctx=gast.Param(),
annotation=None,
type_comment=None))
while_args.append(
gast.Name(
id=body_name, ctx=gast.Param(), annotation=None, type_comment=None))
assign_targets = [
gast.Name(
id=var_name, ctx=gast.Param(), annotation=None, type_comment=None)
for var_name in loop_var_names
]
while_args.append(gast.List(elts=assign_targets, ctx=gast.Param()))
while_func_id = gast.parse('fluid.layers.while_loop').body[0].value
while_node = gast.Call(func=while_func_id, args=while_args, keywords=[])
assign_node = gast.Assign(
targets=[gast.Tuple(
elts=assign_targets, ctx=gast.Store())],
value=while_node)
return assign_node
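# Illustrative output (source form of the generated assignment; the names
# below are hypothetical):
#   create_while_node('while_cond_0', 'while_body_0', ['i', 'x'])
# builds the AST equivalent of:
#   i, x = fluid.layers.while_loop(while_cond_0, while_body_0, [i, x])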
class LogicalOpTransformer(gast.NodeTransformer):
"""
Transform python boolean op into Paddle logical op
"""
def __init__(self, node):
self.root = node
def transform(self):
return self.visit(self.root)
def visit_UnaryOp(self, node):
self.generic_visit(node)
if isinstance(node.op, gast.Not):
arg = ast_to_source_code(node.operand)
new_node_str = "fluid.layers.logical_not({})".format(arg)
# gast.parse returns Module(body=[expr(value=...)])
new_node = gast.parse(new_node_str).body[0].value
return new_node
return node
def visit_BoolOp(self, node):
self.generic_visit(node)
if isinstance(node.op, gast.And):
new_node = self._create_bool_op_node(node.values, 'and')
elif isinstance(node.op, gast.Or):
new_node = self._create_bool_op_node(node.values, 'or')
else:
raise TypeError(
"Only supports and/or syntax in control flow if statement.")
return new_node
def _create_bool_op_node(self, nodes, api_type):
assert len(
nodes
) > 1, "The length of BoolOp should be at least 2, but received {}.".format(
len(nodes))
if len(nodes) > 2:
# Creates logic_and/logic_or node recursively.
pre_assign_node = self._create_bool_op_node(nodes[:2], api_type)
nodes = [pre_assign_node] + nodes[2:]
args = [ast_to_source_code(child) for child in nodes]
new_node_str = "fluid.layers.logical_{}(x={}, y={})".format(
api_type, args[0], args[1])
# gast.parse return Module(body=[expr(...)])
new_node = gast.parse(new_node_str).body[0].value
return new_node
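# Illustrative rewrites produced by LogicalOpTransformer (source form):
#   `not x`       -> fluid.layers.logical_not(x)
#   `x and y`     -> fluid.layers.logical_and(x=x, y=y)
#   `x or y or z` -> fluid.layers.logical_or(
#                        x=fluid.layers.logical_or(x=x, y=y), y=z)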
class NameVisitor(gast.NodeVisitor):
    '''
    Analyzes name liveness for the loop transformer.
    '''
def __init__(self, root_node):
# Set of gast.Name or gast.Attribute for variables
self.current_seen_vars = set()
# List of gast.While/gast.For nodes
self.current_loop = []
# Mapping from gast.While/gast.For to variable nodes
self.before_loop_body_vars = defaultdict(set)
self.in_loop_vars = defaultdict(set)
self.static_analysis_visitor = StaticAnalysisVisitor(root_node)
self.node_to_wrapper_map = self.static_analysis_visitor.get_node_to_wrapper_map(
)
self.visit(root_node)
def is_control_flow_loop(self, node):
# TODO: make a better condition
return True
def get_loop_var_names(self, node):
assert isinstance(
node, (gast.While, gast.For)), "Input node is not gast loop node"
loop_var_names = set()
create_var_names = set()
read_context = {type(gast.Load()), type(gast.AugLoad())}
in_loop_vars = self.in_loop_vars[node]
in_loop_name_strs = self._var_nodes_to_names(in_loop_vars)
before_loop_body_vars = self.before_loop_body_vars[node]
before_loop_name_strs = self._var_nodes_to_names(before_loop_body_vars)
after_loop_vars = self.current_seen_vars - before_loop_body_vars - in_loop_vars
after_loop_name_strs = self._var_nodes_to_names(after_loop_vars,
read_context)
for name in in_loop_name_strs:
if name in before_loop_name_strs:
                # If a variable is used in the loop and created before the
                # loop, it should be in loop_var as input.
loop_var_names.add(name)
elif name in after_loop_name_strs:
                # If a variable is created in the while loop and read after
                # the loop, it should be in loop_var and we should create it.
loop_var_names.add(name)
create_var_names.add(name)
return loop_var_names, create_var_names
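    # Illustrative classification for a hypothetical snippet:
    #
    #   x = 0
    #   while x < 10:
    #       x += 1
    #       y = x
    #   z = y
    #
    # x is used in the loop and created before it; y is created in the loop
    # and read afterwards. Hence:
    #   loop_var_names = {'x', 'y'}, create_var_names = {'y'}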
def visit_Name(self, node):
if self._is_call_func_name_node(node):
self.generic_visit(node)
return
if node.id == "False" or node.id == "True":
self.generic_visit(node)
return
self.current_seen_vars.add(node)
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].add(node)
self.generic_visit(node)
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
ret = visitor(node)
return ret
def visit_Attribute(self, node):
if self._is_call_func_name_node(node):
return
attr_full_name = get_attribute_full_name(node)
self.current_seen_vars.add(node)
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].add(node)
# sub-nodes are visited during get_attribute_full_name and we shouldn't
# visit again
def visit_For(self, node):
self.current_loop.append(node)
self.visit(node.target)
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def visit_While(self, node):
self.current_loop.append(node)
self.visit(node.test)
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def _var_nodes_to_names(self, node_set, ctx_filter_set=None):
ret = set()
for node in node_set:
if ctx_filter_set is None or type(node.ctx) in ctx_filter_set:
if isinstance(node, gast.Name):
ret.add(node.id)
elif isinstance(node, gast.Attribute):
ret.add(get_attribute_full_name(node))
return ret
def _is_call_func_name_node(self, node):
parent_node = self.node_to_wrapper_map[node].parent.node
if isinstance(parent_node, gast.Call) and parent_node.func == node:
return True
return False
class LoopTransformer(gast.NodeTransformer):
"""
This class transforms python while/for statement into Static Graph Ast
"""
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Input non-AstNodeWrapper node for the initialization of WhileTransformer."
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
self.name_visitor = NameVisitor(self.root)
def transform(self):
self.visit(self.root)
def visit(self, node):
self.generic_visit(node)
# All parent nodes that may contain gast.While/gast.For
if hasattr(node, 'body'):
self.replace_stmt_list(node.body)
if hasattr(node, 'orelse'):
self.replace_stmt_list(node.orelse)
return node
def replace_stmt_list(self, body_list):
if not isinstance(body_list, list):
return
i = 0
while i < len(body_list):
if isinstance(body_list[i], gast.While):
new_stmts = self.get_while_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
elif isinstance(body_list[i], gast.For):
new_stmts = self.get_for_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
else:
i += 1
def get_for_range_node(self, node):
if not isinstance(node.iter, gast.Call):
return None
if not isinstance(node.iter.func, gast.Name):
return None
if node.iter.func.id != "range":
return None
return node.iter
def get_for_args_stmts(self, iter_name, args_list):
        '''
        Returns 3 gast stmt nodes for the range() arguments:
        1. Initialization of the iteration variable
        2. Condition for the loop
        3. Statement advancing the iteration variable during the loop
        NOTE(TODO): Python allows access to the iteration variable after the
        loop, e.g. "for i in range(10)" leaves i = 9 after the loop, but the
        current conversion makes i = 10. We should find a way to change it.
        '''
len_range_args = len(args_list)
assert len_range_args >= 1 and len_range_args <= 3, "range() function takes 1 to 3 arguments"
if len_range_args == 1:
init_stmt = get_constant_variable_node(iter_name, 0)
else:
init_stmt = gast.Assign(
targets=[
gast.Name(
id=iter_name,
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=args_list[0])
range_max_node = args_list[0] if len_range_args == 1 else args_list[1]
step_node = args_list[2] if len_range_args == 3 else gast.Constant(
value=1, kind=None)
cond_stmt = gast.Compare(
left=gast.BinOp(
left=gast.Name(
id=iter_name,
ctx=gast.Load(),
annotation=None,
type_comment=None),
op=gast.Add(),
right=step_node),
ops=[gast.LtE()],
comparators=[range_max_node])
change_stmt = gast.AugAssign(
target=gast.Name(
id=iter_name,
ctx=gast.Store(),
annotation=None,
type_comment=None),
op=gast.Add(),
value=step_node)
return init_stmt, cond_stmt, change_stmt
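    # Illustrative expansion (source form) for `for i in range(2, 10, 3)`:
    #   init:   i = 2
    #   cond:   i + 3 <= 10
    #   change: i += 3
    # With a single argument, e.g. range(10), init becomes a constant variable
    # node with value 0, cond is i + 1 <= 10 and change is i += 1.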
def get_for_stmt_nodes(self, node):
# TODO: consider for - else in python
if not self.name_visitor.is_control_flow_loop(node):
return [node]
# TODO: support non-range case
range_call_node = self.get_for_range_node(node)
if range_call_node is None:
return [node]
if not isinstance(node.target, gast.Name):
return [node]
iter_var_name = node.target.id
init_stmt, cond_stmt, change_stmt = self.get_for_args_stmts(
iter_var_name, range_call_node.args)
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
new_stmts = []
        # Python can create a variable in a loop and use it outside the loop,
        # e.g.
        #
        # for x in range(10):
        #     y += x
        # print(x) # x = 10
        #
        # We need to create static variables for those variables.
for name in create_var_names:
new_stmts.append(create_static_variable_gast_node(name))
new_stmts.append(init_stmt)
        # "for x in range(10)" in dygraph should be converted into
        # static tensor + 1 <= 10
for name in loop_var_names:
new_stmts.append(to_static_variable_gast_node(name))
condition_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=cond_stmt)],
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(condition_func_node)
new_body = node.body
new_body.append(change_stmt)
new_body.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load())))
body_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=new_body,
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(body_func_node)
while_loop_node = create_while_node(condition_func_node.name,
body_func_node.name, loop_var_names)
new_stmts.append(while_loop_node)
return new_stmts
def get_while_stmt_nodes(self, node):
# TODO: consider while - else in python
if not self.name_visitor.is_control_flow_loop(node):
return [node]
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
new_stmts = []
        # Python can create a variable in a loop and use it outside the loop,
        # e.g.
        #
        # while x < 10:
        #     x += 1
        #     y = x
        # z = y
        #
        # We need to create static variables for those variables.
for name in create_var_names:
new_stmts.append(create_static_variable_gast_node(name))
        # "while x < 10" in dygraph should be converted into static tensor < 10
for name in loop_var_names:
new_stmts.append(to_static_variable_gast_node(name))
logical_op_transformer = LogicalOpTransformer(node.test)
cond_value_node = logical_op_transformer.transform()
condition_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=cond_value_node)],
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(condition_func_node)
new_body = node.body
new_body.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load())))
body_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=new_body,
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(body_func_node)
while_loop_node = create_while_node(condition_func_node.name,
body_func_node.name, loop_var_names)
new_stmts.append(while_loop_node)
return new_stmts
| 36.476096
| 103
| 0.597837
|
from __future__ import print_function
import copy
import gast
from collections import defaultdict
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import generate_name_node
from paddle.fluid.dygraph.dygraph_to_static.utils import get_constant_variable_node
from paddle.fluid.dygraph.dygraph_to_static.utils import get_attribute_full_name
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_static_variable_gast_node
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import to_static_variable_gast_node
__all__ = ['LoopTransformer', 'NameVisitor']
WHILE_CONDITION_PREFIX = 'while_condition'
WHILE_BODY_PREFIX = 'while_body'
FOR_CONDITION_PREFIX = 'for_loop_condition'
FOR_BODY_PREFIX = 'for_loop_body'
def create_while_node(condition_name, body_name, loop_var_names):
while_args = []
while_args.append(
gast.Name(
id=condition_name,
ctx=gast.Param(),
annotation=None,
type_comment=None))
while_args.append(
gast.Name(
id=body_name, ctx=gast.Param(), annotation=None, type_comment=None))
assign_targets = [
gast.Name(
id=var_name, ctx=gast.Param(), annotation=None, type_comment=None)
for var_name in loop_var_names
]
while_args.append(gast.List(elts=assign_targets, ctx=gast.Param()))
while_func_id = gast.parse('fluid.layers.while_loop').body[0].value
while_node = gast.Call(func=while_func_id, args=while_args, keywords=[])
assign_node = gast.Assign(
targets=[gast.Tuple(
elts=assign_targets, ctx=gast.Store())],
value=while_node)
return assign_node
class LogicalOpTransformer(gast.NodeTransformer):
def __init__(self, node):
self.root = node
def transform(self):
return self.visit(self.root)
def visit_UnaryOp(self, node):
self.generic_visit(node)
if isinstance(node.op, gast.Not):
arg = ast_to_source_code(node.operand)
new_node_str = "fluid.layers.logical_not({})".format(arg)
new_node = gast.parse(new_node_str).body[0].value
return new_node
return node
def visit_BoolOp(self, node):
self.generic_visit(node)
if isinstance(node.op, gast.And):
new_node = self._create_bool_op_node(node.values, 'and')
elif isinstance(node.op, gast.Or):
new_node = self._create_bool_op_node(node.values, 'or')
else:
raise TypeError(
"Only supports and/or syntax in control flow if statement.")
return new_node
def _create_bool_op_node(self, nodes, api_type):
assert len(
nodes
) > 1, "The length of BoolOp should be at least 2, but received {}.".format(
len(nodes))
if len(nodes) > 2:
pre_assign_node = self._create_bool_op_node(nodes[:2], api_type)
nodes = [pre_assign_node] + nodes[2:]
args = [ast_to_source_code(child) for child in nodes]
new_node_str = "fluid.layers.logical_{}(x={}, y={})".format(
api_type, args[0], args[1])
new_node = gast.parse(new_node_str).body[0].value
return new_node
class NameVisitor(gast.NodeVisitor):
def __init__(self, root_node):
self.current_seen_vars = set()
self.current_loop = []
self.before_loop_body_vars = defaultdict(set)
self.in_loop_vars = defaultdict(set)
self.static_analysis_visitor = StaticAnalysisVisitor(root_node)
self.node_to_wrapper_map = self.static_analysis_visitor.get_node_to_wrapper_map(
)
self.visit(root_node)
def is_control_flow_loop(self, node):
return True
def get_loop_var_names(self, node):
assert isinstance(
node, (gast.While, gast.For)), "Input node is not gast loop node"
loop_var_names = set()
create_var_names = set()
read_context = {type(gast.Load()), type(gast.AugLoad())}
in_loop_vars = self.in_loop_vars[node]
in_loop_name_strs = self._var_nodes_to_names(in_loop_vars)
before_loop_body_vars = self.before_loop_body_vars[node]
before_loop_name_strs = self._var_nodes_to_names(before_loop_body_vars)
after_loop_vars = self.current_seen_vars - before_loop_body_vars - in_loop_vars
after_loop_name_strs = self._var_nodes_to_names(after_loop_vars,
read_context)
for name in in_loop_name_strs:
if name in before_loop_name_strs:
loop_var_names.add(name)
elif name in after_loop_name_strs:
loop_var_names.add(name)
create_var_names.add(name)
return loop_var_names, create_var_names
def visit_Name(self, node):
if self._is_call_func_name_node(node):
self.generic_visit(node)
return
if node.id == "False" or node.id == "True":
self.generic_visit(node)
return
self.current_seen_vars.add(node)
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].add(node)
self.generic_visit(node)
def visit(self, node):
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
ret = visitor(node)
return ret
def visit_Attribute(self, node):
if self._is_call_func_name_node(node):
return
attr_full_name = get_attribute_full_name(node)
self.current_seen_vars.add(node)
for loop_node in self.current_loop:
self.in_loop_vars[loop_node].add(node)
# visit again
def visit_For(self, node):
self.current_loop.append(node)
self.visit(node.target)
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def visit_While(self, node):
self.current_loop.append(node)
self.visit(node.test)
self.before_loop_body_vars[node] = copy.copy(self.current_seen_vars)
self.generic_visit(node)
self.current_loop.pop()
def _var_nodes_to_names(self, node_set, ctx_filter_set=None):
ret = set()
for node in node_set:
if ctx_filter_set is None or type(node.ctx) in ctx_filter_set:
if isinstance(node, gast.Name):
ret.add(node.id)
elif isinstance(node, gast.Attribute):
ret.add(get_attribute_full_name(node))
return ret
def _is_call_func_name_node(self, node):
parent_node = self.node_to_wrapper_map[node].parent.node
if isinstance(parent_node, gast.Call) and parent_node.func == node:
return True
return False
class LoopTransformer(gast.NodeTransformer):
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Input non-AstNodeWrapper node for the initialization of WhileTransformer."
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
self.name_visitor = NameVisitor(self.root)
def transform(self):
self.visit(self.root)
def visit(self, node):
self.generic_visit(node)
# All parent nodes that may contain gast.While/gast.For
if hasattr(node, 'body'):
self.replace_stmt_list(node.body)
if hasattr(node, 'orelse'):
self.replace_stmt_list(node.orelse)
return node
def replace_stmt_list(self, body_list):
if not isinstance(body_list, list):
return
i = 0
while i < len(body_list):
if isinstance(body_list[i], gast.While):
new_stmts = self.get_while_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
elif isinstance(body_list[i], gast.For):
new_stmts = self.get_for_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
else:
i += 1
def get_for_range_node(self, node):
if not isinstance(node.iter, gast.Call):
return None
if not isinstance(node.iter.func, gast.Name):
return None
if node.iter.func.id != "range":
return None
return node.iter
def get_for_args_stmts(self, iter_name, args_list):
len_range_args = len(args_list)
assert len_range_args >= 1 and len_range_args <= 3, "range() function takes 1 to 3 arguments"
if len_range_args == 1:
init_stmt = get_constant_variable_node(iter_name, 0)
else:
init_stmt = gast.Assign(
targets=[
gast.Name(
id=iter_name,
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=args_list[0])
range_max_node = args_list[0] if len_range_args == 1 else args_list[1]
step_node = args_list[2] if len_range_args == 3 else gast.Constant(
value=1, kind=None)
cond_stmt = gast.Compare(
left=gast.BinOp(
left=gast.Name(
id=iter_name,
ctx=gast.Load(),
annotation=None,
type_comment=None),
op=gast.Add(),
right=step_node),
ops=[gast.LtE()],
comparators=[range_max_node])
change_stmt = gast.AugAssign(
target=gast.Name(
id=iter_name,
ctx=gast.Store(),
annotation=None,
type_comment=None),
op=gast.Add(),
value=step_node)
return init_stmt, cond_stmt, change_stmt
def get_for_stmt_nodes(self, node):
# TODO: consider for - else in python
if not self.name_visitor.is_control_flow_loop(node):
return [node]
# TODO: support non-range case
range_call_node = self.get_for_range_node(node)
if range_call_node is None:
return [node]
if not isinstance(node.target, gast.Name):
return [node]
iter_var_name = node.target.id
init_stmt, cond_stmt, change_stmt = self.get_for_args_stmts(
iter_var_name, range_call_node.args)
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
new_stmts = []
        # Python can create a variable in a loop and use it outside the loop, e.g.
#
# for x in range(10):
# y += x
# print(x) # x = 10
#
        # We need to create static variables for those variables
for name in create_var_names:
new_stmts.append(create_static_variable_gast_node(name))
new_stmts.append(init_stmt)
        # for x in range(10) in dygraph should be converted into static tensor + 1 <= 10
for name in loop_var_names:
new_stmts.append(to_static_variable_gast_node(name))
condition_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=cond_stmt)],
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(condition_func_node)
new_body = node.body
new_body.append(change_stmt)
new_body.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load())))
body_func_node = gast.FunctionDef(
name=unique_name.generate(FOR_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=new_body,
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(body_func_node)
while_loop_node = create_while_node(condition_func_node.name,
body_func_node.name, loop_var_names)
new_stmts.append(while_loop_node)
return new_stmts
def get_while_stmt_nodes(self, node):
# TODO: consider while - else in python
if not self.name_visitor.is_control_flow_loop(node):
return [node]
loop_var_names, create_var_names = self.name_visitor.get_loop_var_names(
node)
new_stmts = []
        # Python can create a variable in a loop and use it outside the loop, e.g.
#
# while x < 10:
# x += 1
# y = x
# z = y
#
        # We need to create static variables for those variables
for name in create_var_names:
new_stmts.append(create_static_variable_gast_node(name))
        # while x < 10 in dygraph should be converted into static tensor < 10
for name in loop_var_names:
new_stmts.append(to_static_variable_gast_node(name))
logical_op_transformer = LogicalOpTransformer(node.test)
cond_value_node = logical_op_transformer.transform()
condition_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_CONDITION_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=[gast.Return(value=cond_value_node)],
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(condition_func_node)
new_body = node.body
new_body.append(
gast.Return(value=generate_name_node(
loop_var_names, ctx=gast.Load())))
body_func_node = gast.FunctionDef(
name=unique_name.generate(WHILE_BODY_PREFIX),
args=gast.arguments(
args=[
gast.Name(
id=name,
ctx=gast.Param(),
annotation=None,
type_comment=None) for name in loop_var_names
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=None,
kwarg=None,
defaults=[]),
body=new_body,
decorator_list=[],
returns=None,
type_comment=None)
new_stmts.append(body_func_node)
while_loop_node = create_while_node(condition_func_node.name,
body_func_node.name, loop_var_names)
new_stmts.append(while_loop_node)
return new_stmts
| true
| true
|
f7097b887ec1a020df4865f96866666a50dcd027
| 1,589
|
py
|
Python
|
api/tests/test_models.py
|
SP-Vita-Tolstikova/avt_checktwfriends
|
2b5a77b1284b24ed189d8fb741a25221a15d0ab5
|
[
"MIT"
] | null | null | null |
api/tests/test_models.py
|
SP-Vita-Tolstikova/avt_checktwfriends
|
2b5a77b1284b24ed189d8fb741a25221a15d0ab5
|
[
"MIT"
] | null | null | null |
api/tests/test_models.py
|
SP-Vita-Tolstikova/avt_checktwfriends
|
2b5a77b1284b24ed189d8fb741a25221a15d0ab5
|
[
"MIT"
] | null | null | null |
"""
Test module for NotFollowerTwFriend model
"""
from django.test import TestCase
from ..models import NotFollowerTwFriend
# Create your tests here.
class NotFollowerTwFriendTestCase(TestCase):
"""
Test class for NotFollowerTwFriend model
"""
def setUp(self):
NotFollowerTwFriend.objects.create(
id_str='123456789',
screen_name='tw_user',
name='Twitter User',
created_at='Mon Jan 01 00:00:00 +0000 2018'
)
def test_create_not_follower_tw_friend(self):
self.assertEqual(NotFollowerTwFriend.objects.count(), 1)
self.assertEqual(NotFollowerTwFriend.objects.get().id_str, '123456789')
self.assertEqual(NotFollowerTwFriend.objects.get().screen_name, 'tw_user')
self.assertEqual(NotFollowerTwFriend.objects.get().name, 'Twitter User')
self.assertEqual(NotFollowerTwFriend.objects.get().description, '')
self.assertEqual(NotFollowerTwFriend.objects.get().statuses_count, 0)
self.assertEqual(NotFollowerTwFriend.objects.get().followers_count, 0)
self.assertEqual(NotFollowerTwFriend.objects.get().friends_count, 0)
self.assertEqual(
NotFollowerTwFriend.objects.get().created_at, 'Mon Jan 01 00:00:00 +0000 2018')
self.assertEqual(NotFollowerTwFriend.objects.get().location, '')
self.assertEqual(NotFollowerTwFriend.objects.get().avg_tweetsperday, 0.00)
self.assertEqual(NotFollowerTwFriend.objects.get().tff_ratio, 0.00)
self.assertEqual(NotFollowerTwFriend.objects.get().need_unfollow, True)
| 44.138889
| 91
| 0.709251
|
from django.test import TestCase
from ..models import NotFollowerTwFriend
class NotFollowerTwFriendTestCase(TestCase):
def setUp(self):
NotFollowerTwFriend.objects.create(
id_str='123456789',
screen_name='tw_user',
name='Twitter User',
created_at='Mon Jan 01 00:00:00 +0000 2018'
)
def test_create_not_follower_tw_friend(self):
self.assertEqual(NotFollowerTwFriend.objects.count(), 1)
self.assertEqual(NotFollowerTwFriend.objects.get().id_str, '123456789')
self.assertEqual(NotFollowerTwFriend.objects.get().screen_name, 'tw_user')
self.assertEqual(NotFollowerTwFriend.objects.get().name, 'Twitter User')
self.assertEqual(NotFollowerTwFriend.objects.get().description, '')
self.assertEqual(NotFollowerTwFriend.objects.get().statuses_count, 0)
self.assertEqual(NotFollowerTwFriend.objects.get().followers_count, 0)
self.assertEqual(NotFollowerTwFriend.objects.get().friends_count, 0)
self.assertEqual(
NotFollowerTwFriend.objects.get().created_at, 'Mon Jan 01 00:00:00 +0000 2018')
self.assertEqual(NotFollowerTwFriend.objects.get().location, '')
self.assertEqual(NotFollowerTwFriend.objects.get().avg_tweetsperday, 0.00)
self.assertEqual(NotFollowerTwFriend.objects.get().tff_ratio, 0.00)
self.assertEqual(NotFollowerTwFriend.objects.get().need_unfollow, True)
| true
| true
|
f7097b8c2b03efc35c14a367968343a8798ac543
| 1,798
|
py
|
Python
|
app/templating/summary/block.py
|
uk-gov-mirror/ONSdigital.eq-survey-runner
|
b3a67a82347d024177f7fa6bf05499f47ece7ea5
|
[
"MIT"
] | 27
|
2015-10-02T17:27:54.000Z
|
2021-04-05T12:39:16.000Z
|
app/templating/summary/block.py
|
uk-gov-mirror/ONSdigital.eq-survey-runner
|
b3a67a82347d024177f7fa6bf05499f47ece7ea5
|
[
"MIT"
] | 1,836
|
2015-09-16T09:59:03.000Z
|
2022-03-30T14:27:06.000Z
|
app/templating/summary/block.py
|
uk-gov-mirror/ONSdigital.eq-survey-runner
|
b3a67a82347d024177f7fa6bf05499f47ece7ea5
|
[
"MIT"
] | 20
|
2016-09-09T16:56:12.000Z
|
2021-11-12T06:09:27.000Z
|
from flask import url_for
from app.questionnaire.rules import evaluate_skip_conditions
from app.templating.summary.question import Question
class Block:
def __init__(self, block_schema, group_id, answer_store, metadata, schema, group_instance):
self.id = block_schema['id']
self.title = block_schema.get('title')
self.number = block_schema.get('number')
self.link = self._build_link(block_schema, group_id, metadata, group_instance)
self.questions = self._build_questions(block_schema, answer_store, metadata, schema, group_instance)
@staticmethod
def _build_link(block_schema, group_id, metadata, group_instance):
return url_for('questionnaire.get_block',
eq_id=metadata['eq_id'],
form_type=metadata['form_type'],
collection_id=metadata['collection_exercise_sid'],
group_id=group_id,
group_instance=group_instance,
block_id=block_schema['id'])
@staticmethod
def _build_questions(block_schema, answer_store, metadata, schema, group_instance):
questions = []
for question_schema in block_schema.get('questions', []):
is_skipped = evaluate_skip_conditions(question_schema.get('skip_conditions'), schema, metadata, answer_store)
if not is_skipped:
question = Question(question_schema, answer_store, metadata, schema, group_instance).serialize()
questions.append(question)
return questions
def serialize(self):
return {
'id': self.id,
'title': self.title,
'number': self.number,
'link': self.link,
'questions': self.questions,
}
| 40.863636
| 121
| 0.640156
|
from flask import url_for
from app.questionnaire.rules import evaluate_skip_conditions
from app.templating.summary.question import Question
class Block:
def __init__(self, block_schema, group_id, answer_store, metadata, schema, group_instance):
self.id = block_schema['id']
self.title = block_schema.get('title')
self.number = block_schema.get('number')
self.link = self._build_link(block_schema, group_id, metadata, group_instance)
self.questions = self._build_questions(block_schema, answer_store, metadata, schema, group_instance)
@staticmethod
def _build_link(block_schema, group_id, metadata, group_instance):
return url_for('questionnaire.get_block',
eq_id=metadata['eq_id'],
form_type=metadata['form_type'],
collection_id=metadata['collection_exercise_sid'],
group_id=group_id,
group_instance=group_instance,
block_id=block_schema['id'])
@staticmethod
def _build_questions(block_schema, answer_store, metadata, schema, group_instance):
questions = []
for question_schema in block_schema.get('questions', []):
is_skipped = evaluate_skip_conditions(question_schema.get('skip_conditions'), schema, metadata, answer_store)
if not is_skipped:
question = Question(question_schema, answer_store, metadata, schema, group_instance).serialize()
questions.append(question)
return questions
def serialize(self):
return {
'id': self.id,
'title': self.title,
'number': self.number,
'link': self.link,
'questions': self.questions,
}
| true
| true
|
f7097c0f0380fec1555929058faf1457682eb112
| 15,917
|
py
|
Python
|
logicqubit/gates.py
|
clnrp/logicqubit
|
516022186e2611eaf64a0683bcd70500695c6bbd
|
[
"Apache-2.0"
] | null | null | null |
logicqubit/gates.py
|
clnrp/logicqubit
|
516022186e2611eaf64a0683bcd70500695c6bbd
|
[
"Apache-2.0"
] | null | null | null |
logicqubit/gates.py
|
clnrp/logicqubit
|
516022186e2611eaf64a0683bcd70500695c6bbd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Author Cleoner S. Pietralonga
# e-mail: cleonerp@gmail.com
# Apache License
from cmath import *
from logicqubit.hilbert import *
"""
This class defines the operator matrices numerically; the quantum gate
methods then take the tensor product of these matrices in the correct order.
The qubit id must be passed as an input parameter.
"""
class Gates(Hilbert):
def __init__(self, number_of_qubits=1):
self.__number_of_qubits = number_of_qubits
# basic matrices for the generation of operators
# .......................................
def ID(self):
M = Matrix([[1, 0], [0, 1]], self.getCuda())
return M
def P0(self):
M = Matrix([[1, 0], [0, 0]], self.getCuda()) # |0><0|
return M
def P1(self):
M = Matrix([[0, 0], [0, 1]], self.getCuda()) # |1><1|
return M
def L0(self):
M = Matrix([[0, 1], [0, 0]], self.getCuda()) # |0><1|
return M
def L1(self):
M = Matrix([[0, 0], [1, 0]], self.getCuda()) # |1><0|
return M
# One qubit gates
# input parameters: target
# .......................................
def X(self, target=1):
M = Matrix([[0, 1], [1, 0]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def Y(self, target=1):
M = Matrix([[0, -1j], [1j, 0]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def Z(self, target=1):
M = Matrix([[1, 0], [0, -1]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def V(self, target=1, adjoint=False):
        M = Matrix([[1, -1j], [-1j, 1]], self.getCuda()) * ((1j + 1) / 2) # sqrt(X) or sqrt(NOT)
if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def S(self, target=1, adjoint=False):
M = Matrix([[1, 0], [0, 1j]], self.getCuda()) # sqrt(Z)
if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def T(self, target=1, adjoint=False):
M = Matrix([[1, 0], [0, (1 + 1j) / sqrt(2)]], self.getCuda()) # sqrt(S)
if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def H(self, target=1):
M = Matrix([[1, 1], [1, -1]], self.getCuda()) * (1 / sqrt(2))
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U(self, target, *argv): # U or theta, phi and _lambda
if len(argv) == 1:
M = Matrix(argv[0][0], self.getCuda())
else:
theta = argv[0]
phi = argv[1]
_lambda = argv[2]
M = Matrix(
[[exp(-1j * (phi + _lambda) / 2) * cos(theta / 2), -exp(-1j * (phi - _lambda) / 2) * sin(theta / 2)],
[exp(-1j * (phi - _lambda) / 2) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]],
self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U3(self, target, theta, phi, _lambda):
M = Matrix([[cos(theta / 2), -exp(1j * _lambda) * sin(theta / 2)],
[exp(1j * phi) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U2(self, target, phi, _lambda):
M = Matrix([[1, -exp(1j * _lambda)], [exp(1j * phi), exp(1j * (phi + _lambda))]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U1(self, target, _lambda):
M = Matrix([[1, 0], [0, exp(1j * _lambda)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RX(self, target, theta):
M = Matrix([[cos(theta / 2), -1j * sin(theta / 2)],
[-1j * sin(theta / 2), cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RY(self, target, theta):
M = Matrix([[cos(theta / 2), -sin(theta / 2)],
[sin(theta / 2), cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RZ(self, target, phi):
M = Matrix([[exp(-1j * phi / 2), 0], [0, exp(1j * phi / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
# Two qubit gates
# input parameters: control and target
# .......................................
def CX(self, control, target):
M = Matrix([[0, 1], [1, 0]], self.getCuda()) # X
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
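    # Editor's note (illustrative identity, not library code): the sum above
    # realises a controlled gate as CU = P0 (x) I + P1 (x) U, so the gate U
    # acts on the target only when the control qubit is |1>.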
def CNOT(self, control, target):
return self.CX(control, target)
def CY(self, control, target):
M = Matrix([[0, -1j], [1j, 0]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CZ(self, control, target):
M = Matrix([[1, 0], [0, -1]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CV(self, control, target, adjoint=False):
        M = Matrix([[1, -1j], [-1j, 1]], self.getCuda()) * ((1j + 1) / 2) # sqrt(X) or sqrt(NOT)
if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CS(self, control, target, adjoint=False):
M = Matrix([[1, 0], [0, 1j]], self.getCuda()) # sqrt(Z)
if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CT(self, control, target, adjoint=False):
M = Matrix([[1, 0], [0, (1 + 1j) / sqrt(2)]], self.getCuda()) # sqrt(S)
if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRX(self, control, target, theta):
M = Matrix([[cos(theta / 2), -1j * sin(theta / 2)],
[-1j * sin(theta / 2), cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRY(self, control, target, theta):
M = Matrix([[cos(theta / 2), -sin(theta / 2)],
[sin(theta / 2), cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRZ(self, control, target, phi):
M = Matrix([[exp(-1j * phi / 2), 0], [0, exp(1j * phi / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
# generic controlled gate
def CU(self, control, target, *argv): # U or theta, phi and _lambda
if len(argv) == 1:
M = Matrix(argv[0][0], self.getCuda())
else:
theta = argv[0]
phi = argv[1]
_lambda = argv[2]
M = Matrix(
[[exp(-1j * (phi + _lambda) / 2) * cos(theta / 2), -exp(-1j * (phi - _lambda) / 2) * sin(theta / 2)],
[exp(1j * (phi - _lambda) / 2) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]],
self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU3(self, control, target, theta, phi, _lambda):
M = Matrix([[cos(theta / 2), -exp(1j * _lambda) * sin(theta / 2)],
[exp(1j * phi) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU2(self, control, target, phi, _lambda):
M = Matrix([[1, -exp(1j * _lambda)], [exp(1j * phi), exp(1j * (phi + _lambda))]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU1(self, control, target, _lambda):
M = Matrix([[1, 0], [0, exp(1j * _lambda)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
    # swaps the states of two qubits
def SWAP(self, target1, target2):
list1, list2, list3, list4 = self.getOrdListSWAP(target1, target2)
operator = self.kronProduct(list1) + self.kronProduct(list2) + self.kronProduct(list3) + self.kronProduct(list4)
return operator
    # Three qubit gates, which build the corresponding operators
# input parameters: control1, control2, and target
# .......................................
def CCX(self, control1, control2, target):
Gate = Matrix([[0, 1], [1, 0]], self.getCuda()) - self.ID()
list1, list2 = self.getOrdListCtrl2Gate(control1, control2, target, Gate)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def Toffoli(self, control1, control2, target):
return self.CCX(control1, control2, target)
# it's a controlled SWAP
def Fredkin(self, control, target1, target2):
list1, list2, list3, list4, list5, list6 = self.getOrdListFredkin(control, target1, target2)
ID = self.kronProduct(list1)
P1_SWAP = self.kronProduct(list2) + self.kronProduct(list3) + self.kronProduct(list4) + self.kronProduct(list5)
P1_ID = self.kronProduct(list6)
operator = ID + (P1_SWAP-P1_ID)
return operator
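    # Editor's note (illustrative identity, not library code): the operator
    # above composes as F = I + P1 (x) (SWAP - I) = P0 (x) I + P1 (x) SWAP,
    # i.e. the two targets are swapped only when the control is |1>.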
# orders the matrices for the tensor product of 1 qubit operations
def getOrdListSimpleGate(self, target, Gate):
list = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == target:
list.append(Gate)
else:
list.append(Matrix([[1, 0], [0, 1]], self.getCuda()))
return list
# orders the matrices for the tensor product of 2 qubits operations
def getOrdListCtrlGate(self, control, target, Gate):
list1 = []
list2 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control:
list1.append(self.P0()) # |0><0|
list2.append(self.P1()) # |1><1|
elif i == target:
list1.append(self.ID())
list2.append(Gate)
else:
list1.append(self.ID())
list2.append(self.ID())
return list1, list2
# orders the matrices for the tensor product of 3 qubits operations
def getOrdListCtrl2Gate(self, control1, control2, target, Gate):
list1 = []
list2 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control1 or i == control2:
list1.append(self.ID())
list2.append(self.P1()) # |1><1|
elif i == target:
list1.append(self.ID())
list2.append(Gate)
else:
list1.append(self.ID())
list2.append(self.ID())
return list1, list2
# orders the matrices for the tensor product of the SWAP gate operation
def getOrdListSWAP(self, target1, target2):
list1 = []
list2 = []
list3 = []
list4 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == target1:
list1.append(self.P0()) # |0><0|
list2.append(self.L0()) # |0><1|
list3.append(self.L1()) # |1><0|
list4.append(self.P1()) # |1><1|
elif i == target2:
list1.append(self.P0()) # |0><0|
list2.append(self.L1()) # |1><0|
list3.append(self.L0()) # |0><1|
list4.append(self.P1()) # |1><1|
else:
list1.append(self.ID())
list2.append(self.ID())
list3.append(self.ID())
list4.append(self.ID())
return list1, list2, list3, list4
# orders the matrices for the tensor product of the Fredkin gate operation
def getOrdListFredkin(self, control, target1, target2):
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list6 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control:
list1.append(self.ID()) # ID
list2.append(self.P1()) # SWAP P0xP0
list3.append(self.P1()) # SWAP L0xL1
list4.append(self.P1()) # SWAP L1xL0
list5.append(self.P1()) # SWAP P1xP1
list6.append(self.P1()) # -ID
elif i == target1:
list1.append(self.ID())
list2.append(self.P0()) # |0><0|
list3.append(self.L0()) # |0><1|
list4.append(self.L1()) # |1><0|
list5.append(self.P1()) # |1><1|
list6.append(self.ID())
elif i == target2:
list1.append(self.ID())
list2.append(self.P0()) # |0><0|
list3.append(self.L1()) # |1><0|
list4.append(self.L0()) # |0><1|
list5.append(self.P1()) # |1><1|
list6.append(self.ID())
else:
list1.append(self.ID())
list2.append(self.ID())
list3.append(self.ID())
list4.append(self.ID())
list5.append(self.ID())
list6.append(self.ID())
return list1, list2, list3, list4, list5, list6
| 39.594527
| 120
| 0.540177
|
from cmath import *
from logicqubit.hilbert import *
class Gates(Hilbert):
def __init__(self, number_of_qubits=1):
self.__number_of_qubits = number_of_qubits
def ID(self):
M = Matrix([[1, 0], [0, 1]], self.getCuda())
return M
def P0(self):
        M = Matrix([[1, 0], [0, 0]], self.getCuda())
        return M
def P1(self):
        M = Matrix([[0, 0], [0, 1]], self.getCuda())
        return M
def L0(self):
        M = Matrix([[0, 1], [0, 0]], self.getCuda())
        return M
def L1(self):
        M = Matrix([[0, 0], [1, 0]], self.getCuda())
        return M
def X(self, target=1):
M = Matrix([[0, 1], [1, 0]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def Y(self, target=1):
M = Matrix([[0, -1j], [1j, 0]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def Z(self, target=1):
M = Matrix([[1, 0], [0, -1]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def V(self, target=1, adjoint=False):
        M = Matrix([[1, -1j], [-1j, 1]], self.getCuda()) * ((1j + 1) / 2)
        if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def S(self, target=1, adjoint=False):
        M = Matrix([[1, 0], [0, 1j]], self.getCuda())
        if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def T(self, target=1, adjoint=False):
        M = Matrix([[1, 0], [0, (1 + 1j) / sqrt(2)]], self.getCuda())
        if adjoint:
M = M.adjoint()
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def H(self, target=1):
M = Matrix([[1, 1], [1, -1]], self.getCuda()) * (1 / sqrt(2))
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
    def U(self, target, *argv):
        if len(argv) == 1:
M = Matrix(argv[0][0], self.getCuda())
else:
theta = argv[0]
phi = argv[1]
_lambda = argv[2]
M = Matrix(
[[exp(-1j * (phi + _lambda) / 2) * cos(theta / 2), -exp(-1j * (phi - _lambda) / 2) * sin(theta / 2)],
[exp(-1j * (phi - _lambda) / 2) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]],
self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U3(self, target, theta, phi, _lambda):
M = Matrix([[cos(theta / 2), -exp(1j * _lambda) * sin(theta / 2)],
[exp(1j * phi) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U2(self, target, phi, _lambda):
M = Matrix([[1, -exp(1j * _lambda)], [exp(1j * phi), exp(1j * (phi + _lambda))]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def U1(self, target, _lambda):
M = Matrix([[1, 0], [0, exp(1j * _lambda)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RX(self, target, theta):
M = Matrix([[cos(theta / 2), -1j * sin(theta / 2)],
[-1j * sin(theta / 2), cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RY(self, target, theta):
M = Matrix([[cos(theta / 2), -sin(theta / 2)],
[sin(theta / 2), cos(theta / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def RZ(self, target, phi):
M = Matrix([[exp(-1j * phi / 2), 0], [0, exp(1j * phi / 2)]], self.getCuda())
list = self.getOrdListSimpleGate(target, M)
operator = self.kronProduct(list)
return operator
def CX(self, control, target):
        M = Matrix([[0, 1], [1, 0]], self.getCuda())
        list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CNOT(self, control, target):
return self.CX(control, target)
def CY(self, control, target):
M = Matrix([[0, -1j], [1j, 0]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CZ(self, control, target):
M = Matrix([[1, 0], [0, -1]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CV(self, control, target, adjoint=False):
        M = Matrix([[1, -1j], [-1j, 1]], self.getCuda()) * ((1j + 1) / 2)
        if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CS(self, control, target, adjoint=False):
        M = Matrix([[1, 0], [0, 1j]], self.getCuda())
        if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CT(self, control, target, adjoint=False):
        M = Matrix([[1, 0], [0, (1 + 1j) / sqrt(2)]], self.getCuda())
        if adjoint:
M = M.adjoint()
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRX(self, control, target, theta):
M = Matrix([[cos(theta / 2), -1j * sin(theta / 2)],
[-1j * sin(theta / 2), cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRY(self, control, target, theta):
M = Matrix([[cos(theta / 2), -sin(theta / 2)],
[sin(theta / 2), cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CRZ(self, control, target, phi):
M = Matrix([[exp(-1j * phi / 2), 0], [0, exp(1j * phi / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
    def CU(self, control, target, *argv):
        if len(argv) == 1:
M = Matrix(argv[0][0], self.getCuda())
else:
theta = argv[0]
phi = argv[1]
_lambda = argv[2]
M = Matrix(
[[exp(-1j * (phi + _lambda) / 2) * cos(theta / 2), -exp(-1j * (phi - _lambda) / 2) * sin(theta / 2)],
[exp(1j * (phi - _lambda) / 2) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]],
self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU3(self, control, target, theta, phi, _lambda):
M = Matrix([[cos(theta / 2), -exp(1j * _lambda) * sin(theta / 2)],
[exp(1j * phi) * sin(theta / 2), exp(1j * (phi + _lambda)) * cos(theta / 2)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU2(self, control, target, phi, _lambda):
M = Matrix([[1, -exp(1j * _lambda)], [exp(1j * phi), exp(1j * (phi + _lambda))]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def CU1(self, control, target, _lambda):
M = Matrix([[1, 0], [0, exp(1j * _lambda)]], self.getCuda())
list1, list2 = self.getOrdListCtrlGate(control, target, M)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def SWAP(self, target1, target2):
list1, list2, list3, list4 = self.getOrdListSWAP(target1, target2)
operator = self.kronProduct(list1) + self.kronProduct(list2) + self.kronProduct(list3) + self.kronProduct(list4)
return operator
def CCX(self, control1, control2, target):
Gate = Matrix([[0, 1], [1, 0]], self.getCuda()) - self.ID()
list1, list2 = self.getOrdListCtrl2Gate(control1, control2, target, Gate)
operator = self.kronProduct(list1) + self.kronProduct(list2)
return operator
def Toffoli(self, control1, control2, target):
return self.CCX(control1, control2, target)
def Fredkin(self, control, target1, target2):
list1, list2, list3, list4, list5, list6 = self.getOrdListFredkin(control, target1, target2)
ID = self.kronProduct(list1)
P1_SWAP = self.kronProduct(list2) + self.kronProduct(list3) + self.kronProduct(list4) + self.kronProduct(list5)
P1_ID = self.kronProduct(list6)
operator = ID + (P1_SWAP-P1_ID)
return operator
# orders the matrices for the tensor product of 1 qubit operations
def getOrdListSimpleGate(self, target, Gate):
list = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == target:
list.append(Gate)
else:
list.append(Matrix([[1, 0], [0, 1]], self.getCuda()))
return list
# orders the matrices for the tensor product of 2 qubits operations
def getOrdListCtrlGate(self, control, target, Gate):
list1 = []
list2 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control:
list1.append(self.P0()) # |0><0|
list2.append(self.P1()) # |1><1|
elif i == target:
list1.append(self.ID())
list2.append(Gate)
else:
list1.append(self.ID())
list2.append(self.ID())
return list1, list2
# orders the matrices for the tensor product of 3 qubits operations
def getOrdListCtrl2Gate(self, control1, control2, target, Gate):
list1 = []
list2 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control1 or i == control2:
list1.append(self.ID())
list2.append(self.P1()) # |1><1|
elif i == target:
list1.append(self.ID())
list2.append(Gate)
else:
list1.append(self.ID())
list2.append(self.ID())
return list1, list2
# orders the matrices for the tensor product of the SWAP gate operation
def getOrdListSWAP(self, target1, target2):
list1 = []
list2 = []
list3 = []
list4 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == target1:
list1.append(self.P0()) # |0><0|
list2.append(self.L0()) # |0><1|
list3.append(self.L1()) # |1><0|
list4.append(self.P1()) # |1><1|
elif i == target2:
list1.append(self.P0()) # |0><0|
list2.append(self.L1()) # |1><0|
list3.append(self.L0()) # |0><1|
list4.append(self.P1()) # |1><1|
else:
list1.append(self.ID())
list2.append(self.ID())
list3.append(self.ID())
list4.append(self.ID())
return list1, list2, list3, list4
# orders the matrices for the tensor product of the Fredkin gate operation
def getOrdListFredkin(self, control, target1, target2):
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list6 = []
if self.isFirstLeft():
plist = range(1, self.__number_of_qubits + 1)
else:
plist = reversed(range(1, self.__number_of_qubits + 1))
for i in plist:
if i == control:
list1.append(self.ID()) # ID
list2.append(self.P1()) # SWAP P0xP0
list3.append(self.P1()) # SWAP L0xL1
list4.append(self.P1()) # SWAP L1xL0
list5.append(self.P1()) # SWAP P1xP1
list6.append(self.P1()) # -ID
elif i == target1:
list1.append(self.ID())
list2.append(self.P0()) # |0><0|
list3.append(self.L0()) # |0><1|
list4.append(self.L1()) # |1><0|
list5.append(self.P1()) # |1><1|
list6.append(self.ID())
elif i == target2:
list1.append(self.ID())
list2.append(self.P0()) # |0><0|
list3.append(self.L1()) # |1><0|
list4.append(self.L0()) # |0><1|
list5.append(self.P1()) # |1><1|
list6.append(self.ID())
else:
list1.append(self.ID())
list2.append(self.ID())
list3.append(self.ID())
list4.append(self.ID())
list5.append(self.ID())
list6.append(self.ID())
return list1, list2, list3, list4, list5, list6
| true
| true
|
f7097ce5e9c1d1df1ec6a26a98baacb0e39f483a
| 2,709
|
py
|
Python
|
plugins/module_utils/zpa_trusted_networks.py
|
willguibr/zpacloud_ansible
|
1d95e004ffccbcef787640ccea625bb051083414
|
[
"Apache-2.0"
] | 1
|
2022-02-23T08:26:45.000Z
|
2022-02-23T08:26:45.000Z
|
plugins/module_utils/zpa_trusted_networks.py
|
willguibr/zpacloud_ansible
|
1d95e004ffccbcef787640ccea625bb051083414
|
[
"Apache-2.0"
] | 2
|
2022-02-23T21:08:09.000Z
|
2022-03-01T16:45:29.000Z
|
plugins/module_utils/zpa_trusted_networks.py
|
willguibr/zpacloud-ansible
|
1d95e004ffccbcef787640ccea625bb051083414
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.willguibr.zpacloud.plugins.module_utils.zpa_client import (
ZPAClientHelper,
delete_none,
)
class TrustedNetworksService:
def __init__(self, module, customer_id):
self.module = module
self.customer_id = customer_id
self.rest = ZPAClientHelper(module)
def getByIDOrName(self, id, name):
network = None
if id is not None:
network = self.getByID(id)
if network is None and name is not None:
network = self.getByName(name)
return network
def getByID(self, id):
response = self.rest.get(
"/mgmtconfig/v1/admin/customers/%s/network/%s" % (self.customer_id, id)
)
status_code = response.status_code
if status_code != 200:
return None
return self.mapRespJSONToApp(response.json)
def getAll(self):
list = self.rest.get_paginated_data(
base_url="/mgmtconfig/v2/admin/customers/%s/network" % (self.customer_id),
data_key_name="list",
)
networks = []
for network in list:
networks.append(self.mapRespJSONToApp(network))
return networks
def getByName(self, name):
networks = self.getAll()
for network in networks:
if network.get("name") == name:
return network
return None
@delete_none
def mapRespJSONToApp(self, resp_json):
if resp_json is None:
return {}
return {
"creation_time": resp_json.get("creationTime"),
"domain": resp_json.get("domain"),
"id": resp_json.get("id"),
"master_customer_id": resp_json.get("masterCustomerId"),
"modified_by": resp_json.get("modifiedBy"),
"modified_time": resp_json.get("modifiedTime"),
"name": resp_json.get("name"),
"network_id": resp_json.get("networkId"),
"zscaler_cloud": resp_json.get("zscalerCloud"),
}
@delete_none
def mapAppToJSON(self, network):
if network is None:
return {}
return {
"creationTime": network.get("creation_time"),
"domain": network.get("domain"),
"id": network.get("id"),
"masterCustomerId": network.get("master_customer_id"),
"modifiedBy": network.get("modified_by"),
"modifiedTime": network.get("modified_time"),
"name": network.get("name"),
"networkId": network.get("network_id"),
"zscalerCloud": network.get("zscaler_cloud"),
}
| 33.036585
| 86
| 0.590993
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.willguibr.zpacloud.plugins.module_utils.zpa_client import (
ZPAClientHelper,
delete_none,
)
class TrustedNetworksService:
def __init__(self, module, customer_id):
self.module = module
self.customer_id = customer_id
self.rest = ZPAClientHelper(module)
def getByIDOrName(self, id, name):
network = None
if id is not None:
network = self.getByID(id)
if network is None and name is not None:
network = self.getByName(name)
return network
def getByID(self, id):
response = self.rest.get(
"/mgmtconfig/v1/admin/customers/%s/network/%s" % (self.customer_id, id)
)
status_code = response.status_code
if status_code != 200:
return None
return self.mapRespJSONToApp(response.json)
def getAll(self):
list = self.rest.get_paginated_data(
base_url="/mgmtconfig/v2/admin/customers/%s/network" % (self.customer_id),
data_key_name="list",
)
networks = []
for network in list:
networks.append(self.mapRespJSONToApp(network))
return networks
def getByName(self, name):
networks = self.getAll()
for network in networks:
if network.get("name") == name:
return network
return None
@delete_none
def mapRespJSONToApp(self, resp_json):
if resp_json is None:
return {}
return {
"creation_time": resp_json.get("creationTime"),
"domain": resp_json.get("domain"),
"id": resp_json.get("id"),
"master_customer_id": resp_json.get("masterCustomerId"),
"modified_by": resp_json.get("modifiedBy"),
"modified_time": resp_json.get("modifiedTime"),
"name": resp_json.get("name"),
"network_id": resp_json.get("networkId"),
"zscaler_cloud": resp_json.get("zscalerCloud"),
}
@delete_none
def mapAppToJSON(self, network):
if network is None:
return {}
return {
"creationTime": network.get("creation_time"),
"domain": network.get("domain"),
"id": network.get("id"),
"masterCustomerId": network.get("master_customer_id"),
"modifiedBy": network.get("modified_by"),
"modifiedTime": network.get("modified_time"),
"name": network.get("name"),
"networkId": network.get("network_id"),
"zscalerCloud": network.get("zscaler_cloud"),
}
| true
| true
|
f7097d15c9ecb42b5428f69937bba55aab9bc051
| 204
|
py
|
Python
|
showdown/admin.py
|
sunshinejen/boardgameshowdown-
|
5e9b9765aba1680464d0a0ccb591019f510ed284
|
[
"Apache-2.0"
] | null | null | null |
showdown/admin.py
|
sunshinejen/boardgameshowdown-
|
5e9b9765aba1680464d0a0ccb591019f510ed284
|
[
"Apache-2.0"
] | null | null | null |
showdown/admin.py
|
sunshinejen/boardgameshowdown-
|
5e9b9765aba1680464d0a0ccb591019f510ed284
|
[
"Apache-2.0"
] | 1
|
2019-10-16T05:57:13.000Z
|
2019-10-16T05:57:13.000Z
|
from django.contrib import admin
from .models import BoardGame, Participant, Event
# Register your models here.
admin.site.register(BoardGame)
admin.site.register(Participant)
admin.site.register(Event)
| 25.5
| 49
| 0.818627
|
from django.contrib import admin
from .models import BoardGame, Participant, Event
admin.site.register(BoardGame)
admin.site.register(Participant)
admin.site.register(Event)
| true
| true
|
f7097e8a7d68efba02074bc095cee9e2b8faf45a
| 6,536
|
py
|
Python
|
chb/simulation/SimSharedMemory.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/simulation/SimSharedMemory.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/simulation/SimSharedMemory.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Represents a shared memory segment created by shmget.
From: https://pubs.opengroup.org/onlinepubs/9699919799/
int shmget(key_t key, size_t size, int shmflg);
The shmget() function shall return the shared memory identifier associated with key.
A share memory identifier, associate data structure, and share memory segment of
at least size bytes are created for key if one of the following is true:
- The argument key is equal to IPC_PRIVATE ( (key_t) 0).
- The argument key does not already have a shared memory identifier associated with
it and (shmflg & IPC_CREAT) is non_zero (#define IPC_CREAT 0001000)
Upon successfull completion, shmget() shall return a non-negative integer, a shared-
memory identifier.
"""
from typing import Dict, List, Optional, TYPE_CHECKING
from chb.simulation.SimMemory import SimMemory
import chb.simulation.SimSymbolicValue as SSV
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
if TYPE_CHECKING:
from chb.simulation.SimulationState import SimulationState
class SimSharedMemory(SimMemory):
def __init__(
self,
simstate: "SimulationState",
shmid: int,
key: str, # hex value
buffersize: int = 4096) -> None:
SimMemory.__init__(self, simstate, True, "shared:" + str(shmid))
self._shmid = shmid
self._buffersize = buffersize
# segments may be mapped in multiple locations
self._baseoffsets: List[int] = []
@property
def simstate(self) -> "SimulationState":
return self._simstate
@property
def shmid(self) -> int:
return self._shmid
@property
def bigendian(self) -> bool:
return self.simstate.bigendian
@property
def has_offset(self) -> bool:
return len(self._baseoffsets) > 0
@property
def baseoffsets(self) -> List[int]:
return self._baseoffsets
def set_baseoffset(self, offset: int) -> None:
self._baseoffsets.append(offset)
@property
def buffersize(self) -> int:
return self._buffersize
def has_address(self, addr: int) -> bool:
for offset in self.baseoffsets:
if addr >= offset and addr < offset + self.buffersize:
return True
else:
return False
def initialize(self, iaddr: str):
addr = SSV.mk_global_address(0, "shared")
for i in range(0, self.buffersize):
SimMemory.set(self, iaddr, addr.add_offset(i), SV.simZerobyte)
def set(self,
iaddr: str,
address: SSV.SimAddress,
srcval: SV.SimValue) -> None:
for base in self.baseoffsets:
if (
address.offsetvalue >= base
and address.offsetvalue < base + self.buffersize):
address = address.add_offset(-base)
SimMemory.set(self, iaddr, address, srcval)
break
else:
raise SU.CHBSimError(
self.simstate, iaddr, "Invalid shared memory address: " + str(address))
def get(self,
iaddr: str,
address: SSV.SimAddress,
size: int) -> SV.SimValue:
try:
for base in self.baseoffsets:
if (
address.offsetvalue >= base
and address.offsetvalue < base + self.buffersize):
address = address.add_offset(-base)
try:
memval = SimMemory.get(self, iaddr, address, size)
except SU.CHBSimError:
memval = SV.mk_simvalue(0, size=size)
return memval
else:
raise SU.CHBSimError(
self.simstate,
iaddr,
"invalid shared memory address: " + str(address))
except SU.CHBSimError as e:
print("Error in shared memory: " + str(e))
name = (self.name
+ '['
+ str(address.offsetvalue)
+ ']'
+ ' (value not retrieved: '
+ str(e)
+ ')')
return SSV.SimSymbol(name)
def __str__(self) -> str:
lines: List[str] = []
if self.has_offset:
try:
for a in range(0, self.buffersize, 4):
if a in self._mem:
address = self.mk_address(a)
try:
charstring = self.char_string("", address, 4)
except UF.CHBError:
charstring = "?"
memval = SimMemory.get(self, "0", address, 4)
lines.append(str(hex(a)).rjust(12)
+ " " + str(a).rjust(12)
+ " " + str(memval)
+ " " + str(charstring))
except Exception:
pass
return "\n".join(lines)
| 36.926554
| 87
| 0.564259
|
from typing import Dict, List, Optional, TYPE_CHECKING
from chb.simulation.SimMemory import SimMemory
import chb.simulation.SimSymbolicValue as SSV
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
if TYPE_CHECKING:
from chb.simulation.SimulationState import SimulationState
class SimSharedMemory(SimMemory):
def __init__(
self,
simstate: "SimulationState",
shmid: int,
            key: str,
            buffersize: int = 4096) -> None:
SimMemory.__init__(self, simstate, True, "shared:" + str(shmid))
self._shmid = shmid
self._buffersize = buffersize
self._baseoffsets: List[int] = []
@property
def simstate(self) -> "SimulationState":
return self._simstate
@property
def shmid(self) -> int:
return self._shmid
@property
def bigendian(self) -> bool:
return self.simstate.bigendian
@property
def has_offset(self) -> bool:
return len(self._baseoffsets) > 0
@property
def baseoffsets(self) -> List[int]:
return self._baseoffsets
def set_baseoffset(self, offset: int) -> None:
self._baseoffsets.append(offset)
@property
def buffersize(self) -> int:
return self._buffersize
def has_address(self, addr: int) -> bool:
for offset in self.baseoffsets:
if addr >= offset and addr < offset + self.buffersize:
return True
else:
return False
def initialize(self, iaddr: str):
addr = SSV.mk_global_address(0, "shared")
for i in range(0, self.buffersize):
SimMemory.set(self, iaddr, addr.add_offset(i), SV.simZerobyte)
def set(self,
iaddr: str,
address: SSV.SimAddress,
srcval: SV.SimValue) -> None:
for base in self.baseoffsets:
if (
address.offsetvalue >= base
and address.offsetvalue < base + self.buffersize):
address = address.add_offset(-base)
SimMemory.set(self, iaddr, address, srcval)
break
else:
raise SU.CHBSimError(
self.simstate, iaddr, "Invalid shared memory address: " + str(address))
def get(self,
iaddr: str,
address: SSV.SimAddress,
size: int) -> SV.SimValue:
try:
for base in self.baseoffsets:
if (
address.offsetvalue >= base
and address.offsetvalue < base + self.buffersize):
address = address.add_offset(-base)
try:
memval = SimMemory.get(self, iaddr, address, size)
except SU.CHBSimError:
memval = SV.mk_simvalue(0, size=size)
return memval
else:
raise SU.CHBSimError(
self.simstate,
iaddr,
"invalid shared memory address: " + str(address))
except SU.CHBSimError as e:
print("Error in shared memory: " + str(e))
name = (self.name
+ '['
+ str(address.offsetvalue)
+ ']'
+ ' (value not retrieved: '
+ str(e)
+ ')')
return SSV.SimSymbol(name)
def __str__(self) -> str:
lines: List[str] = []
if self.has_offset:
try:
for a in range(0, self.buffersize, 4):
if a in self._mem:
address = self.mk_address(a)
try:
charstring = self.char_string("", address, 4)
except UF.CHBError:
charstring = "?"
memval = SimMemory.get(self, "0", address, 4)
lines.append(str(hex(a)).rjust(12)
+ " " + str(a).rjust(12)
+ " " + str(memval)
+ " " + str(charstring))
except Exception:
pass
return "\n".join(lines)
| true
| true
|
f7097ff6a7ec39265a96dc285dc2098161c3638d
| 7,274
|
py
|
Python
|
src/compas/datastructures/mesh/remesh.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/mesh/remesh.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/mesh/remesh.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.datastructures.mesh.smoothing import mesh_smooth_area
from compas.datastructures.mesh.operations import trimesh_collapse_edge
from compas.datastructures.mesh.operations import trimesh_swap_edge
from compas.datastructures.mesh.operations import trimesh_split_edge
__all__ = [
'trimesh_remesh',
]
def trimesh_remesh(mesh,
target,
kmax=100,
tol=0.1,
divergence=0.01,
verbose=False,
allow_boundary_split=False,
allow_boundary_swap=False,
allow_boundary_collapse=False,
smooth=True,
fixed=None,
callback=None,
callback_args=None):
"""Remesh until all edges have a specified target length.
Parameters
----------
mesh : Mesh
A triangle mesh.
target : float
The target length for the mesh edges.
kmax : int, optional [100]
The number of iterations.
tol : float, optional [0.1]
Length deviation tolerance.
    divergence : float, optional [0.01]
        Threshold on the relative change in vertex count; when the change
        falls below this value (after the start-up phase), iteration stops.
verbose : bool, optional [False]
Print feedback messages.
allow_boundary_split : bool, optional [False]
Allow boundary edges to be split.
allow_boundary_swap : bool, optional [False]
Allow boundary edges or edges connected to the boundary to be swapped.
allow_boundary_collapse : bool, optional [False]
Allow boundary edges or edges connected to the boundary to be collapsed.
smooth : bool, optional [True]
Apply smoothing at every iteration.
fixed : list, optional [None]
A list of vertices that have to stay fixed.
callback : callable, optional [None]
A user-defined function that is called after every iteration.
callback_args : list, optional [None]
A list of additional parameters to be passed to the callback function.
Returns
-------
None
Notes
-----
This algorithm not only changes the geometry of the mesh, but also its
topology as needed to achieve the specified target lengths.
Topological changes are made such that vertex valencies are well-balanced
and close to six. This involves three operations:
* split edges that are longer than a maximum length,
* collapse edges that are shorter than a minimum length,
* swap edges if this improves the valency error.
The minimum and maximum lengths are calculated based on a desired target
length.
For more info, see [1]_.
References
----------
.. [1] Botsch, M. & Kobbelt, L., 2004. *A remeshing approach to multiresolution modeling*.
Proceedings of the 2004 Eurographics/ACM SIGGRAPH symposium on Geometry processing - SGP '04, p.185.
Available at: http://portal.acm.org/citation.cfm?doid=1057432.1057457.
Examples
--------
>>>
"""
if verbose:
print(target)
lmin = (1 - tol) * (4.0 / 5.0) * target
lmax = (1 + tol) * (4.0 / 3.0) * target
edge_lengths = [mesh.edge_length(u, v) for u, v in mesh.edges()]
target_start = max(edge_lengths) / 2.0
fac = target_start / target
boundary = set(mesh.vertices_on_boundary())
fixed = fixed or []
fixed = set(fixed)
count = 0
kmax_start = kmax / 2.0
for k in range(kmax):
if k <= kmax_start:
scale = fac * (1.0 - k / kmax_start)
dlmin = lmin * scale
dlmax = lmax * scale
else:
dlmin = 0
dlmax = 0
if verbose:
print(k)
count += 1
if k % 20 == 0:
num_vertices_1 = mesh.number_of_vertices()
# split
if count == 1:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
if mesh.edge_length(u, v) <= lmax + dlmax:
continue
if verbose:
print('split edge: {0} - {1}'.format(u, v))
trimesh_split_edge(mesh, u, v, allow_boundary=allow_boundary_split)
visited.add(u)
visited.add(v)
# collapse
elif count == 2:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
if mesh.edge_length(u, v) >= lmin - dlmin:
continue
if verbose:
print('collapse edge: {0} - {1}'.format(u, v))
trimesh_collapse_edge(mesh, u, v, allow_boundary=allow_boundary_collapse, fixed=fixed)
visited.add(u)
visited.add(v)
visited.update(mesh.halfedge[u])
# swap
elif count == 3:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
f1 = mesh.halfedge[u][v]
f2 = mesh.halfedge[v][u]
if f1 is None or f2 is None:
continue
face1 = mesh.face[f1]
face2 = mesh.face[f2]
v1 = face1[face1.index(u) - 1]
v2 = face2[face2.index(v) - 1]
valency1 = mesh.vertex_degree(u)
valency2 = mesh.vertex_degree(v)
valency3 = mesh.vertex_degree(v1)
valency4 = mesh.vertex_degree(v2)
if u in boundary:
valency1 += 2
if v in boundary:
valency2 += 2
if v1 in boundary:
valency3 += 2
if v2 in boundary:
valency4 += 2
current_error = abs(valency1 - 6) + abs(valency2 - 6) + abs(valency3 - 6) + abs(valency4 - 6)
flipped_error = abs(valency1 - 7) + abs(valency2 - 7) + abs(valency3 - 5) + abs(valency4 - 5)
if current_error <= flipped_error:
continue
if verbose:
print('swap edge: {0} - {1}'.format(u, v))
trimesh_swap_edge(mesh, u, v, allow_boundary=allow_boundary_swap)
visited.add(u)
visited.add(v)
# count
else:
count = 0
if (k - 10) % 20 == 0:
num_vertices_2 = mesh.number_of_vertices()
if abs(1 - num_vertices_1 / num_vertices_2) < divergence and k > kmax_start:
break
# smoothen
if smooth:
if allow_boundary_split:
boundary = set(mesh.vertices_on_boundary())
mesh_smooth_area(mesh, fixed=fixed.union(boundary), kmax=1)
# callback
if callback:
callback(mesh, k, callback_args)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| 30.057851
| 111
| 0.532444
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.datastructures.mesh.smoothing import mesh_smooth_area
from compas.datastructures.mesh.operations import trimesh_collapse_edge
from compas.datastructures.mesh.operations import trimesh_swap_edge
from compas.datastructures.mesh.operations import trimesh_split_edge
__all__ = [
'trimesh_remesh',
]
def trimesh_remesh(mesh,
target,
kmax=100,
tol=0.1,
divergence=0.01,
verbose=False,
allow_boundary_split=False,
allow_boundary_swap=False,
allow_boundary_collapse=False,
smooth=True,
fixed=None,
callback=None,
callback_args=None):
if verbose:
print(target)
lmin = (1 - tol) * (4.0 / 5.0) * target
lmax = (1 + tol) * (4.0 / 3.0) * target
edge_lengths = [mesh.edge_length(u, v) for u, v in mesh.edges()]
target_start = max(edge_lengths) / 2.0
fac = target_start / target
boundary = set(mesh.vertices_on_boundary())
fixed = fixed or []
fixed = set(fixed)
count = 0
kmax_start = kmax / 2.0
for k in range(kmax):
if k <= kmax_start:
scale = fac * (1.0 - k / kmax_start)
dlmin = lmin * scale
dlmax = lmax * scale
else:
dlmin = 0
dlmax = 0
if verbose:
print(k)
count += 1
if k % 20 == 0:
num_vertices_1 = mesh.number_of_vertices()
if count == 1:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
if mesh.edge_length(u, v) <= lmax + dlmax:
continue
if verbose:
print('split edge: {0} - {1}'.format(u, v))
trimesh_split_edge(mesh, u, v, allow_boundary=allow_boundary_split)
visited.add(u)
visited.add(v)
elif count == 2:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
if mesh.edge_length(u, v) >= lmin - dlmin:
continue
if verbose:
print('collapse edge: {0} - {1}'.format(u, v))
trimesh_collapse_edge(mesh, u, v, allow_boundary=allow_boundary_collapse, fixed=fixed)
visited.add(u)
visited.add(v)
visited.update(mesh.halfedge[u])
elif count == 3:
visited = set()
for u, v in list(mesh.edges()):
if u in visited or v in visited:
continue
f1 = mesh.halfedge[u][v]
f2 = mesh.halfedge[v][u]
if f1 is None or f2 is None:
continue
face1 = mesh.face[f1]
face2 = mesh.face[f2]
v1 = face1[face1.index(u) - 1]
v2 = face2[face2.index(v) - 1]
valency1 = mesh.vertex_degree(u)
valency2 = mesh.vertex_degree(v)
valency3 = mesh.vertex_degree(v1)
valency4 = mesh.vertex_degree(v2)
if u in boundary:
valency1 += 2
if v in boundary:
valency2 += 2
if v1 in boundary:
valency3 += 2
if v2 in boundary:
valency4 += 2
current_error = abs(valency1 - 6) + abs(valency2 - 6) + abs(valency3 - 6) + abs(valency4 - 6)
flipped_error = abs(valency1 - 7) + abs(valency2 - 7) + abs(valency3 - 5) + abs(valency4 - 5)
if current_error <= flipped_error:
continue
if verbose:
print('swap edge: {0} - {1}'.format(u, v))
trimesh_swap_edge(mesh, u, v, allow_boundary=allow_boundary_swap)
visited.add(u)
visited.add(v)
else:
count = 0
if (k - 10) % 20 == 0:
num_vertices_2 = mesh.number_of_vertices()
if abs(1 - num_vertices_1 / num_vertices_2) < divergence and k > kmax_start:
break
if smooth:
if allow_boundary_split:
boundary = set(mesh.vertices_on_boundary())
mesh_smooth_area(mesh, fixed=fixed.union(boundary), kmax=1)
if callback:
callback(mesh, k, callback_args)
if __name__ == "__main__":
pass
| true
| true
|
f7098021d6e538e37a267ecd884e0a951ed9d92e
| 7,124
|
py
|
Python
|
tests/samsung_multiroom/service/tunein/test_player_tuneinplayer.py
|
krygal/samsung_multiroom
|
49172c4e54d092bfc6dab07245b526c442eb18bc
|
[
"MIT"
] | 6
|
2019-04-05T19:10:39.000Z
|
2021-11-23T17:26:49.000Z
|
tests/samsung_multiroom/service/tunein/test_player_tuneinplayer.py
|
krygal/samsung_multiroom
|
49172c4e54d092bfc6dab07245b526c442eb18bc
|
[
"MIT"
] | 3
|
2020-09-25T06:58:00.000Z
|
2021-12-13T19:57:50.000Z
|
tests/samsung_multiroom/service/tunein/test_player_tuneinplayer.py
|
krygal/samsung_multiroom
|
49172c4e54d092bfc6dab07245b526c442eb18bc
|
[
"MIT"
] | 4
|
2019-04-05T18:58:11.000Z
|
2021-07-22T19:54:56.000Z
|
import unittest
from unittest.mock import MagicMock
from samsung_multiroom.service import REPEAT_ALL
from samsung_multiroom.service import REPEAT_OFF
from samsung_multiroom.service.tunein import TuneInPlayer
def _get_player():
api = MagicMock()
api.get_preset_list.return_value = [
{
'kind': 'speaker',
'title': 'Radio 1',
'description': 'Radio 1 description',
'thumbnail': 'http://radio1.org/thumbnail.png',
'contentid': '0',
'mediaid': '1111',
},
{
'kind': 'speaker',
'title': 'Radio 2',
'description': 'Radio 2 description',
'thumbnail': 'http://radio2.org/thumbnail.png',
'contentid': '1',
'mediaid': '2222',
},
{
'kind': 'speaker',
'title': 'Radio 3',
'description': 'Radio 3 description',
'thumbnail': 'http://radio3.org/thumbnail.png',
'contentid': '2',
'mediaid': '3333',
},
{
'kind': 'my',
'title': 'Radio 4',
'description': 'Radio 4 description',
'thumbnail': 'http://radio4.org/thumbnail.png',
'contentid': '3',
'mediaid': '4444',
},
{
'kind': 'my',
'title': 'Radio 5',
'description': 'Radio 5 description',
'thumbnail': 'http://radio5.org/thumbnail.png',
'contentid': '4',
'mediaid': '5555',
},
]
api.get_radio_info.return_value = {
'cpname': 'TuneIn',
'root': 'Favorites',
'presetindex': '0',
'title': 'Radio 1',
'description': 'Radio 1 description',
'thumbnail': 'http://radio1.org/thumbnail.png',
'mediaid': '1111',
'allowfeedback': '0',
'timestamp': '2018-12-28T18:07:07Z',
'no_queue': '1',
'playstatus': 'play',
}
player = TuneInPlayer(api)
return (player, api)
class TestTuneInPlayer(unittest.TestCase):
def test_is_supported(self):
player, api = _get_player()
self.assertTrue(player.is_play_supported())
self.assertFalse(player.is_jump_supported())
self.assertTrue(player.is_resume_supported())
self.assertFalse(player.is_stop_supported())
self.assertTrue(player.is_pause_supported())
self.assertTrue(player.is_next_supported())
self.assertTrue(player.is_previous_supported())
self.assertFalse(player.is_repeat_supported())
self.assertFalse(player.is_shuffle_supported())
def test_play(self):
playlist = [
type('Item', (object, ), {
'object_id': '1',
'object_type': 'some_type',
'title': 'title 1',
}),
type('Item', (object, ), {
'object_id': '2',
'object_type': 'tunein_radio',
'title': 'radio 2',
}),
type('Item', (object, ), {
'object_id': '3',
'object_type': 'tunein_radio',
'title': 'radio 3',
}),
type('Item', (object, ), {
'object_id': '4',
'object_type': 'some_type2',
'title': 'title 4',
})
]
player, api = _get_player()
player.play(playlist)
api.set_play_select.assert_called_once_with('2')
def test_play_returns_false_for_unsupported_playlist(self):
playlist = [
type('Item', (object, ), {
'object_id': '1',
'object_type': 'some_type',
'title': 'title 1',
}),
type('Item', (object, ), {
'object_id': '4',
'object_type': 'some_type2',
'title': 'title 4',
})
]
player, api = _get_player()
self.assertFalse(player.play(playlist))
api.set_play_select.assert_not_called()
def test_jump(self):
player, api = _get_player()
player.jump(50)
api.set_search_time.assert_not_called()
def test_resume(self):
player, api = _get_player()
player.resume()
api.set_select_radio.assert_called_once()
@unittest.skip('Pending implementation')
def test_stop(self):
player, api = _get_player()
player.stop()
def test_pause(self):
player, api = _get_player()
player.pause()
api.set_playback_control.assert_called_once_with('pause')
@unittest.mock.patch('samsung_multiroom.api.api._get_callable_parameters')
def test_next(self, signature):
signature.return_value = ['start_index', 'list_count']
player, api = _get_player()
player.next()
api.get_preset_list.assert_called_once_with(start_index=0, list_count=30)
api.get_radio_info.assert_called_once()
api.set_play_preset.assert_called_once_with(1, 1)
api.set_select_radio.assert_called_once()
@unittest.mock.patch('samsung_multiroom.api.api._get_callable_parameters')
def test_previous(self, signature):
signature.return_value = ['start_index', 'list_count']
player, api = _get_player()
player.previous()
api.get_preset_list.assert_called_once_with(start_index=0, list_count=30)
api.get_radio_info.assert_called_once()
api.set_play_preset.assert_called_once_with(0, 4)
api.set_select_radio.assert_called_once()
def test_repeat(self):
player, api = _get_player()
player.repeat(REPEAT_ALL)
api.set_repeat_mode.assert_not_called()
def test_shuffle(self):
player, api = _get_player()
player.shuffle(True)
api.set_shuffle_mode.assert_not_called()
def test_get_repeat(self):
player, api = _get_player()
repeat = player.get_repeat()
self.assertEqual(repeat, REPEAT_OFF)
api.get_repeat_mode.assert_not_called()
def test_get_shuffle(self):
player, api = _get_player()
shuffle = player.get_shuffle()
self.assertFalse(shuffle)
api.get_repeat_mode.assert_not_called()
def test_get_current_track(self):
player, api = _get_player()
track = player.get_current_track()
api.get_radio_info.assert_called_once()
self.assertEqual(track.title, 'Radio 1 description')
self.assertEqual(track.artist, 'Radio 1')
self.assertEqual(track.album, None)
self.assertEqual(track.duration, None)
self.assertEqual(track.position, None)
self.assertEqual(track.thumbnail_url, 'http://radio1.org/thumbnail.png')
self.assertEqual(track.object_id, None)
self.assertEqual(track.object_type, 'tunein_radio')
def test_is_active(self):
player, api = _get_player()
self.assertTrue(player.is_active('wifi', 'cp'))
self.assertFalse(player.is_active('wifi', 'dlna'))
self.assertFalse(player.is_active('bt'))
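# ------------------------------------------------------------------------------
# Editor's hedged note -- not part of the original test file. The inline
# `type('Item', (object,), {...})` calls above build throwaway classes whose
# class attributes stand in for playlist-item fields. For these tests'
# purposes an instance-based stub via types.SimpleNamespace is equivalent and
# reads a little more clearly:
# ------------------------------------------------------------------------------
import types

def _stub_item(object_id, object_type, title):
    """Build a playlist-item stub exposing the attributes the player reads."""
    return types.SimpleNamespace(
        object_id=object_id, object_type=object_type, title=title)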
| 29.196721
| 81
| 0.570606
|
import unittest
from unittest.mock import MagicMock
from samsung_multiroom.service import REPEAT_ALL
from samsung_multiroom.service import REPEAT_OFF
from samsung_multiroom.service.tunein import TuneInPlayer
def _get_player():
api = MagicMock()
api.get_preset_list.return_value = [
{
'kind': 'speaker',
'title': 'Radio 1',
'description': 'Radio 1 description',
'thumbnail': 'http://radio1.org/thumbnail.png',
'contentid': '0',
'mediaid': '1111',
},
{
'kind': 'speaker',
'title': 'Radio 2',
'description': 'Radio 2 description',
'thumbnail': 'http://radio2.org/thumbnail.png',
'contentid': '1',
'mediaid': '2222',
},
{
'kind': 'speaker',
'title': 'Radio 3',
'description': 'Radio 3 description',
'thumbnail': 'http://radio3.org/thumbnail.png',
'contentid': '2',
'mediaid': '3333',
},
{
'kind': 'my',
'title': 'Radio 4',
'description': 'Radio 4 description',
'thumbnail': 'http://radio4.org/thumbnail.png',
'contentid': '3',
'mediaid': '4444',
},
{
'kind': 'my',
'title': 'Radio 5',
'description': 'Radio 5 description',
'thumbnail': 'http://radio5.org/thumbnail.png',
'contentid': '4',
'mediaid': '5555',
},
]
api.get_radio_info.return_value = {
'cpname': 'TuneIn',
'root': 'Favorites',
'presetindex': '0',
'title': 'Radio 1',
'description': 'Radio 1 description',
'thumbnail': 'http://radio1.org/thumbnail.png',
'mediaid': '1111',
'allowfeedback': '0',
'timestamp': '2018-12-28T18:07:07Z',
'no_queue': '1',
'playstatus': 'play',
}
player = TuneInPlayer(api)
return (player, api)
class TestTuneInPlayer(unittest.TestCase):
def test_is_supported(self):
player, api = _get_player()
self.assertTrue(player.is_play_supported())
self.assertFalse(player.is_jump_supported())
self.assertTrue(player.is_resume_supported())
self.assertFalse(player.is_stop_supported())
self.assertTrue(player.is_pause_supported())
self.assertTrue(player.is_next_supported())
self.assertTrue(player.is_previous_supported())
self.assertFalse(player.is_repeat_supported())
self.assertFalse(player.is_shuffle_supported())
def test_play(self):
playlist = [
type('Item', (object, ), {
'object_id': '1',
'object_type': 'some_type',
'title': 'title 1',
}),
type('Item', (object, ), {
'object_id': '2',
'object_type': 'tunein_radio',
'title': 'radio 2',
}),
type('Item', (object, ), {
'object_id': '3',
'object_type': 'tunein_radio',
'title': 'radio 3',
}),
type('Item', (object, ), {
'object_id': '4',
'object_type': 'some_type2',
'title': 'title 4',
})
]
player, api = _get_player()
player.play(playlist)
api.set_play_select.assert_called_once_with('2')
def test_play_returns_false_for_unsupported_playlist(self):
playlist = [
type('Item', (object, ), {
'object_id': '1',
'object_type': 'some_type',
'title': 'title 1',
}),
type('Item', (object, ), {
'object_id': '4',
'object_type': 'some_type2',
'title': 'title 4',
})
]
player, api = _get_player()
self.assertFalse(player.play(playlist))
api.set_play_select.assert_not_called()
def test_jump(self):
player, api = _get_player()
player.jump(50)
api.set_search_time.assert_not_called()
def test_resume(self):
player, api = _get_player()
player.resume()
api.set_select_radio.assert_called_once()
@unittest.skip('Pending implementation')
def test_stop(self):
player, api = _get_player()
player.stop()
def test_pause(self):
player, api = _get_player()
player.pause()
api.set_playback_control.assert_called_once_with('pause')
@unittest.mock.patch('samsung_multiroom.api.api._get_callable_parameters')
def test_next(self, signature):
signature.return_value = ['start_index', 'list_count']
player, api = _get_player()
player.next()
api.get_preset_list.assert_called_once_with(start_index=0, list_count=30)
api.get_radio_info.assert_called_once()
api.set_play_preset.assert_called_once_with(1, 1)
api.set_select_radio.assert_called_once()
@unittest.mock.patch('samsung_multiroom.api.api._get_callable_parameters')
def test_previous(self, signature):
signature.return_value = ['start_index', 'list_count']
player, api = _get_player()
player.previous()
api.get_preset_list.assert_called_once_with(start_index=0, list_count=30)
api.get_radio_info.assert_called_once()
api.set_play_preset.assert_called_once_with(0, 4)
api.set_select_radio.assert_called_once()
def test_repeat(self):
player, api = _get_player()
player.repeat(REPEAT_ALL)
api.set_repeat_mode.assert_not_called()
def test_shuffle(self):
player, api = _get_player()
player.shuffle(True)
api.set_shuffle_mode.assert_not_called()
def test_get_repeat(self):
player, api = _get_player()
repeat = player.get_repeat()
self.assertEqual(repeat, REPEAT_OFF)
api.get_repeat_mode.assert_not_called()
def test_get_shuffle(self):
player, api = _get_player()
shuffle = player.get_shuffle()
self.assertFalse(shuffle)
api.get_repeat_mode.assert_not_called()
def test_get_current_track(self):
player, api = _get_player()
track = player.get_current_track()
api.get_radio_info.assert_called_once()
self.assertEqual(track.title, 'Radio 1 description')
self.assertEqual(track.artist, 'Radio 1')
self.assertEqual(track.album, None)
self.assertEqual(track.duration, None)
self.assertEqual(track.position, None)
self.assertEqual(track.thumbnail_url, 'http://radio1.org/thumbnail.png')
self.assertEqual(track.object_id, None)
self.assertEqual(track.object_type, 'tunein_radio')
def test_is_active(self):
player, api = _get_player()
self.assertTrue(player.is_active('wifi', 'cp'))
self.assertFalse(player.is_active('wifi', 'dlna'))
self.assertFalse(player.is_active('bt'))
| true
| true
|
f709807e8a6d21ec7d63e906aec6a61cbd27f4f0
| 7,733
|
py
|
Python
|
src/egypt_model_test.py
|
DylanFouche/EGYPT
|
baa3d7cd1dc657a89a4e072b4d175e8255114961
|
[
"MIT"
] | 1
|
2021-03-14T22:44:09.000Z
|
2021-03-14T22:44:09.000Z
|
src/egypt_model_test.py
|
DylanFouche/EGYPT
|
baa3d7cd1dc657a89a4e072b4d175e8255114961
|
[
"MIT"
] | null | null | null |
src/egypt_model_test.py
|
DylanFouche/EGYPT
|
baa3d7cd1dc657a89a4e072b4d175e8255114961
|
[
"MIT"
] | null | null | null |
import egypt_model
import unittest
class TestAggregateMethods(unittest.TestCase):
def test_aggregates(self):
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 9 * 5 * 5)
self.assertEqual(egypt_model.compute_total_wealth(model), 9 * 5 * 1000)
self.assertEqual(egypt_model.compute_mean_population(model), 5 * 5)
self.assertEqual(egypt_model.compute_mean_wealth(model), 5 * 1000)
model = egypt_model.EgyptModel(31, 30, starting_settlements=0, starting_households=5, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 0)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 0)
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=0, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 0)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 0)
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=0, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 9 * 5 * 1000)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 5 * 1000)
class TestSettlementMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.settlement = self.model.settlements[0]
def test_settlement_workers(self):
self.assertEqual(self.settlement.workers(), 5*5)
for household in self.settlement.households:
household.workers += 1
self.assertEqual(self.settlement.workers(), 5*6)
self.settlement.households.remove(self.settlement.households[0])
self.assertEqual(self.settlement.workers(), 4*6)
def test_settlement_grain(self):
self.assertEqual(self.settlement.grain(), 5*1000)
for household in self.settlement.households:
household.grain += 1
self.assertEqual(self.settlement.grain(), 5*1001)
self.settlement.households.remove(self.settlement.households[0])
self.assertEqual(self.settlement.grain(), 4*1001)
class TestHouseholdMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.household = self.model.households[0]
self.assertEqual(self.household.grain, 1000)
def test_workers(self):
self.assertEqual(self.household.workers, 5)
self.assertEqual(self.household.workers_worked, 0)
def test_storage_loss(self):
grain = 1000
self.household.grain = grain
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
def test_consume_grain(self):
workers = 5
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION + 1
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, grain - workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION)
self.assertEqual(self.household.workers, workers)
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, 0)
self.assertEqual(self.household.workers, workers - 1)
workers = 5
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION - 1
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, 0)
self.assertEqual(self.household.workers, workers - 1)
def test_competency_increase(self):
self.household.competency = 0.5
self.model.annual_competency_increase = 5
self.assertEqual(self.household.competency, 0.5)
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.525)
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.55125)
self.model.annual_competency_increase = 0
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.55125)
def test_generation_changeover(self):
self.model.min_ambition = 0.2
self.model.min_competency = 0.5
self.household.generation_changeover_countdown = 3
self.household.competency = 0.8
self.household.ambition = 0.4
self.household.generation_changeover()
self.assertEqual(self.household.competency, 0.8)
self.assertEqual(self.household.ambition, 0.4)
self.household.generation_changeover()
self.assertEqual(self.household.competency, 0.8)
self.assertEqual(self.household.ambition, 0.4)
self.household.generation_changeover()
self.assertNotEqual(self.household.competency, 0.8)
        self.assertNotEqual(self.household.ambition, 0.4)
self.assertTrue(self.household.competency >= 0.5 and self.household.competency <= 1)
        self.assertTrue(self.household.ambition >= 0.2 and self.household.ambition <= 1)
class TestFieldMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.model.fallow_limit = 10
self.household = self.model.households[0]
self.field = egypt_model.FieldAgent(1, self.model, self.household)
self.household.fields.append(self.field)
self.model.fields.append(self.field)
        self.model.grid.position_agent(self.field, 0, 0)
self.assertEqual(self.field.unique_id, 1)
        self.assertEqual(self.field.years_fallowed, 0)
self.assertFalse(self.field.harvested)
def test_changeover(self):
for i in range(10):
self.field.harvested = True
self.field.changeover()
self.assertEqual(self.field.years_fallowed, 0)
self.assertEqual(self.household, self.field.household)
for i in range(9):
self.field.changeover()
self.assertEqual(self.field.years_fallowed, 9)
self.assertEqual(self.household, self.field.household)
self.field.changeover()
self.assertEqual(self.field.years_fallowed, self.model.fallow_limit)
self.assertTrue(self.field not in self.household.fields)
self.assertTrue(self.field not in self.model.fields)
if __name__ == '__main__':
unittest.main()
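# ------------------------------------------------------------------------------
# Editor's hedged note -- not part of the original tests. test_storage_loss
# exercises a compounding 10% annual loss, i.e. grain_n = grain_0 * 0.9 ** n.
# A standalone replay of the three steps the test asserts:
# ------------------------------------------------------------------------------
_grain = 1000.0
for _expected in (900.0, 810.0, 729.0):
    _grain -= _grain * 0.1   # the same incremental update the test tracks
    assert _grain == _expected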
| 43.201117
| 146
| 0.709298
|
import egypt_model
import unittest
class TestAggregateMethods(unittest.TestCase):
def test_aggregates(self):
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 9 * 5 * 5)
self.assertEqual(egypt_model.compute_total_wealth(model), 9 * 5 * 1000)
self.assertEqual(egypt_model.compute_mean_population(model), 5 * 5)
self.assertEqual(egypt_model.compute_mean_wealth(model), 5 * 1000)
model = egypt_model.EgyptModel(31, 30, starting_settlements=0, starting_households=5, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 0)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 0)
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=0, starting_household_size=5, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 0)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 0)
model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=0, starting_grain=1000)
self.assertEqual(egypt_model.compute_total_population(model), 0)
self.assertEqual(egypt_model.compute_total_wealth(model), 9 * 5 * 1000)
self.assertEqual(egypt_model.compute_mean_population(model), 0)
self.assertEqual(egypt_model.compute_mean_wealth(model), 5 * 1000)
class TestSettlementMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.settlement = self.model.settlements[0]
def test_settlement_workers(self):
self.assertEqual(self.settlement.workers(), 5*5)
for household in self.settlement.households:
household.workers += 1
self.assertEqual(self.settlement.workers(), 5*6)
self.settlement.households.remove(self.settlement.households[0])
self.assertEqual(self.settlement.workers(), 4*6)
def test_settlement_grain(self):
self.assertEqual(self.settlement.grain(), 5*1000)
for household in self.settlement.households:
household.grain += 1
self.assertEqual(self.settlement.grain(), 5*1001)
self.settlement.households.remove(self.settlement.households[0])
self.assertEqual(self.settlement.grain(), 4*1001)
class TestHouseholdMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.household = self.model.households[0]
self.assertEqual(self.household.grain, 1000)
def test_workers(self):
self.assertEqual(self.household.workers, 5)
self.assertEqual(self.household.workers_worked, 0)
def test_storage_loss(self):
grain = 1000
self.household.grain = grain
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
self.household.storage_loss()
grain -= grain * 0.1
self.assertEqual(self.household.grain, grain)
def test_consume_grain(self):
workers = 5
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION + 1
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, grain - workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION)
self.assertEqual(self.household.workers, workers)
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, 0)
self.assertEqual(self.household.workers, workers - 1)
workers = 5
grain = workers * egypt_model.ANNUAL_PER_PERSON_GRAIN_CONSUMPTION - 1
self.household.grain = grain
self.household.workers = workers
self.household.consume_grain()
self.assertEqual(self.household.grain, 0)
self.assertEqual(self.household.workers, workers - 1)
def test_competency_increase(self):
self.household.competency = 0.5
self.model.annual_competency_increase = 5
self.assertEqual(self.household.competency, 0.5)
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.525)
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.55125)
self.model.annual_competency_increase = 0
self.household.competency_increase()
self.assertEqual(self.household.competency, 0.55125)
def test_generation_changeover(self):
self.model.min_ambition = 0.2
self.model.min_competency = 0.5
self.household.generation_changeover_countdown = 3
self.household.competency = 0.8
self.household.ambition = 0.4
self.household.generation_changeover()
self.assertEqual(self.household.competency, 0.8)
self.assertEqual(self.household.ambition, 0.4)
self.household.generation_changeover()
self.assertEqual(self.household.competency, 0.8)
self.assertEqual(self.household.ambition, 0.4)
self.household.generation_changeover()
self.assertNotEqual(self.household.competency, 0.8)
        self.assertNotEqual(self.household.ambition, 0.4)
self.assertTrue(self.household.competency >= 0.5 and self.household.competency <= 1)
        self.assertTrue(self.household.ambition >= 0.2 and self.household.ambition <= 1)
class TestFieldMethods(unittest.TestCase):
def setUp(self):
self.model = egypt_model.EgyptModel(31, 30, starting_settlements=9, starting_households=5, starting_household_size=5, starting_grain=1000)
self.model.fallow_limit = 10
self.household = self.model.households[0]
self.field = egypt_model.FieldAgent(1, self.model, self.household)
self.household.fields.append(self.field)
self.model.fields.append(self.field)
        self.model.grid.position_agent(self.field, 0, 0)
self.assertEqual(self.field.unique_id, 1)
        self.assertEqual(self.field.years_fallowed, 0)
self.assertFalse(self.field.harvested)
def test_changeover(self):
for i in range(10):
self.field.harvested = True
self.field.changeover()
self.assertEqual(self.field.years_fallowed, 0)
self.assertEqual(self.household, self.field.household)
for i in range(9):
self.field.changeover()
self.assertEqual(self.field.years_fallowed, 9)
self.assertEqual(self.household, self.field.household)
self.field.changeover()
self.assertEqual(self.field.years_fallowed, self.model.fallow_limit)
self.assertTrue(self.field not in self.household.fields)
self.assertTrue(self.field not in self.model.fields)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f709816f89e0bcceb3dc86f45da921b110797cce
| 403
|
py
|
Python
|
lantz/drivers/sutter/__init__.py
|
noelniles/lantz
|
08b5d23674566e200e24d8baf13e454a0917cb58
|
[
"BSD-3-Clause"
] | 88
|
2015-02-10T09:49:53.000Z
|
2021-11-21T20:17:44.000Z
|
lantz/drivers/sutter/__init__.py
|
varses/awsch
|
6fbff4dd293d4110d002d477d2642a7e75cc9ace
|
[
"BSD-3-Clause"
] | 34
|
2015-02-08T05:35:21.000Z
|
2021-01-04T17:45:00.000Z
|
lantz/drivers/sutter/__init__.py
|
varses/awsch
|
6fbff4dd293d4110d002d477d2642a7e75cc9ace
|
[
"BSD-3-Clause"
] | 38
|
2015-02-11T07:25:52.000Z
|
2021-11-05T02:41:53.000Z
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.sutter
~~~~~~~~~~~~~~~~~~~~
:company: Sutter Instrument.
:description: Biomedical and scientific instrumentation.
:website: http://www.sutter.com/
---
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .lambda103 import Lambda103
__all__ = ['Lambda103', ]
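# ------------------------------------------------------------------------------
# Editor's hedged usage sketch -- not part of the original file. Downstream
# code imports the driver re-exported above via the package path, which is
# equivalent to importing it from the .lambda103 submodule directly:
#
#     from lantz.drivers.sutter import Lambda103
# ------------------------------------------------------------------------------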
| 21.210526
| 68
| 0.62531
|
from .lambda103 import Lambda103
__all__ = ['Lambda103', ]
| true
| true
|