| code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars) |
|---|---|---|
# Async code
# Async runs in the same thread
'''
Async uses coroutines, which run on the same thread,
driven by the "async" and "await" keywords.
'''
import logging
import multiprocessing
import threading
import time
import asyncio
import random
logging.basicConfig(format='%(asctime)s.%(msecs)03d - %(levelname)s - %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
# Functions
def display(msg):
    threadname = threading.current_thread().name
    procname = multiprocessing.current_process().name
    logging.info(f'{procname}/{threadname} : {msg}')

async def work(name):
    display(name + ' starting')
    # do something
    await asyncio.sleep(random.randint(1, 10))
    display(name + ' finished')

async def run_async(max):
    tasks = []
    for x in range(max):
        name = 'Item ' + str(x)
        tasks.append(asyncio.ensure_future(work(name)))
    await asyncio.gather(*tasks)

def main():
    display('Main started')
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run_async(50))
    # loop.run_forever()
    loop.close()
    display('Main Finished')

if __name__ == '__main__':
    main()
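
# A minimal alternative sketch (assuming Python 3.7+): asyncio.run() creates,
# runs, and closes the event loop in one call, replacing the manual
# get_event_loop() / run_until_complete() / close() sequence in main() above:
#
#     asyncio.run(run_async(50))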
|
[
"asyncio.gather",
"multiprocessing.current_process",
"asyncio.get_event_loop",
"logging.basicConfig",
"random.randint",
"logging.info",
"threading.current_thread"
] |
[((232, 366), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s.%(msecs)03d - %(levelname)s - %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s.%(msecs)03d - %(levelname)s - %(message)s', datefmt=\n '%H:%M:%S', level=logging.DEBUG)\n", (251, 366), False, 'import logging\n'), ((503, 551), 'logging.info', 'logging.info', (['f"""{procname}/{threadname} : {msg}"""'], {}), "(f'{procname}/{threadname} : {msg}')\n", (515, 551), False, 'import logging\n'), ((964, 988), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (986, 988), False, 'import asyncio\n'), ((409, 435), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (433, 435), False, 'import threading\n'), ((457, 490), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (488, 490), False, 'import multiprocessing\n'), ((883, 905), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (897, 905), False, 'import asyncio\n'), ((654, 675), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (668, 675), False, 'import random\n')]
|
# # simple script to batch convert collada to obj.
# # run as:
# # blender --background --python data_scripts/tpose_dae_to_obj.py
# # or:
# # ./run_data_preprocess.sh
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import shutil
import bpy
import mathutils
import math
import numpy as np
from data_scripts import config_data as cfg
def rotate_around_center(mat_rot, center):
    # NOTE: mathutils in Blender 2.8x+ uses the '@' operator for matrix
    # multiplication ('*' between matrices was removed), consistent with the
    # 2.8+ bpy API (select_set, view_layer) used below.
    return (mathutils.Matrix.Translation(center) @
            mat_rot @
            mathutils.Matrix.Translation(-center))
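
# Example usage (hypothetical values): rotate 90 degrees about Z around a
# pivot point instead of the origin:
#   rot = mathutils.Matrix.Rotation(math.radians(90.0), 4, 'Z')
#   m = rotate_around_center(rot, mathutils.Vector((1.0, 0.0, 0.0)))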
if __name__ == "__main__":
    OVERWRITE = False
    print()
    identities = cfg.identities + cfg.identities_augmented
    # identities = ["mannequin"]
    # for i, character in enumerate(cfg.identities_consistent):
    for i, character in enumerate(identities):
        is_dae = True
        dae_filename = f"{cfg.root_in}/data/mixamo/{character}/raw_dae/a_t_pose_000001.dae"
        if not os.path.isfile(dae_filename):
            # Maybe it's an .fbx
            dae_filename = f"{cfg.root_in}/data/mixamo/{character}/raw_dae/a_t_pose_000001.fbx"
            if not os.path.isfile(dae_filename):
                print()
                print()
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                print("Could not find", character)
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                print()
                print()
                continue
            is_dae = False
        # Create output folder
        obj_dir = f"{cfg.root_in}/data/mixamo/{character}/obj/a_t_pose"
        obj_filename = f"{obj_dir}/a_t_pose_000001.obj"
        if not os.path.isdir(obj_dir):
            os.makedirs(obj_dir)
        # Skip if it already exists
        if not OVERWRITE and os.path.isfile(obj_filename):
            print()
            print()
            print("##############################################################################")
            print("##############################################################################")
            print("Skipping", character)
            print("##############################################################################")
            print("##############################################################################")
            print()
            print()
            continue
        print()
        print()
        print("##############################################################################")
        print("##############################################################################")
        print(character)
        print("##############################################################################")
        print("##############################################################################")
        print()
        print()
        # Initialize blender (delete everything that is currently in the scene)
        bpy.ops.object.select_all(action='SELECT')
        bpy.ops.object.delete()
        # Import our sequence
        if is_dae:
            bpy.ops.wm.collada_import(filepath=dae_filename)
        else:
            bpy.ops.import_scene.fbx(filepath=dae_filename)
        ########################################################################
        C = bpy.context
        # Deselect all objects
        bpy.ops.object.select_all(action='DESELECT')
        # Select the armature
        C.scene.objects['Armature'].select_set(True)
        # Also activate it
        arm = C.window.scene.objects["Armature"]
        C.view_layer.objects.active = arm
        # Pose mode
        bpy.ops.object.mode_set(mode='POSE')
        for pbone in arm.pose.bones:
            if "RightUpLeg" in pbone.name:
                angle = 0.436332
            elif "LeftUpLeg" in pbone.name:
                angle = -0.436332
            else:
                continue
            print(pbone.name, angle)
            # pbone = arm.pose.bones["mixamorig_LeftUpLeg"]
            pbone.bone.select = True
            bpy.ops.transform.rotate(value=angle, orient_axis='Y', orient_type='GLOBAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='GLOBAL', constraint_axis=(False, True, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
            pbone.bone.select = False
        # Apply the rotation
        for o in C.scene.objects:
            o.select_set(True)
        bpy.ops.object.convert(target='MESH')
        # ########################################################################
        # Export scene
        bpy.ops.export_scene.obj(filepath=obj_filename, use_animation=False, use_materials=False)
|
[
"os.makedirs",
"mathutils.Matrix.Translation",
"os.path.isdir",
"os.path.dirname",
"bpy.ops.object.delete",
"os.path.isfile",
"bpy.ops.import_scene.fbx",
"bpy.ops.object.mode_set",
"bpy.ops.object.convert",
"bpy.ops.export_scene.obj",
"bpy.ops.transform.rotate",
"bpy.ops.wm.collada_import",
"bpy.ops.object.select_all"
] |
[((514, 551), 'mathutils.Matrix.Translation', 'mathutils.Matrix.Translation', (['(-center)'], {}), '(-center)\n', (542, 551), False, 'import mathutils\n'), ((3148, 3190), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (3173, 3190), False, 'import bpy\n'), ((3199, 3222), 'bpy.ops.object.delete', 'bpy.ops.object.delete', ([], {}), '()\n', (3220, 3222), False, 'import bpy\n'), ((3554, 3598), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (3579, 3598), False, 'import bpy\n'), ((3830, 3866), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""POSE"""'}), "(mode='POSE')\n", (3853, 3866), False, 'import bpy\n'), ((4764, 4801), 'bpy.ops.object.convert', 'bpy.ops.object.convert', ([], {'target': '"""MESH"""'}), "(target='MESH')\n", (4786, 4801), False, 'import bpy\n'), ((4918, 5011), 'bpy.ops.export_scene.obj', 'bpy.ops.export_scene.obj', ([], {'filepath': 'obj_filename', 'use_animation': '(False)', 'use_materials': '(False)'}), '(filepath=obj_filename, use_animation=False,\n use_materials=False)\n', (4942, 5011), False, 'import bpy\n'), ((228, 253), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (243, 253), False, 'import os, sys\n'), ((438, 474), 'mathutils.Matrix.Translation', 'mathutils.Matrix.Translation', (['center'], {}), '(center)\n', (466, 474), False, 'import mathutils\n'), ((958, 986), 'os.path.isfile', 'os.path.isfile', (['dae_filename'], {}), '(dae_filename)\n', (972, 986), False, 'import os, sys\n'), ((1878, 1900), 'os.path.isdir', 'os.path.isdir', (['obj_dir'], {}), '(obj_dir)\n', (1891, 1900), False, 'import os, sys\n'), ((1914, 1934), 'os.makedirs', 'os.makedirs', (['obj_dir'], {}), '(obj_dir)\n', (1925, 1934), False, 'import os, sys\n'), ((2013, 2041), 'os.path.isfile', 'os.path.isfile', (['obj_filename'], {}), '(obj_filename)\n', (2027, 2041), False, 'import os, sys\n'), ((3285, 3333), 'bpy.ops.wm.collada_import', 'bpy.ops.wm.collada_import', ([], {'filepath': 'dae_filename'}), '(filepath=dae_filename)\n', (3310, 3333), False, 'import bpy\n'), ((3360, 3407), 'bpy.ops.import_scene.fbx', 'bpy.ops.import_scene.fbx', ([], {'filepath': 'dae_filename'}), '(filepath=dae_filename)\n', (3384, 3407), False, 'import bpy\n'), ((4251, 4631), 'bpy.ops.transform.rotate', 'bpy.ops.transform.rotate', ([], {'value': 'angle', 'orient_axis': '"""Y"""', 'orient_type': '"""GLOBAL"""', 'orient_matrix': '((1, 0, 0), (0, 1, 0), (0, 0, 1))', 'orient_matrix_type': '"""GLOBAL"""', 'constraint_axis': '(False, True, False)', 'mirror': '(True)', 'use_proportional_edit': '(False)', 'proportional_edit_falloff': '"""SMOOTH"""', 'proportional_size': '(1)', 'use_proportional_connected': '(False)', 'use_proportional_projected': '(False)'}), "(value=angle, orient_axis='Y', orient_type='GLOBAL',\n orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type=\n 'GLOBAL', constraint_axis=(False, True, False), mirror=True,\n use_proportional_edit=False, proportional_edit_falloff='SMOOTH',\n proportional_size=1, use_proportional_connected=False,\n use_proportional_projected=False)\n", (4275, 4631), False, 'import bpy\n'), ((1150, 1178), 'os.path.isfile', 'os.path.isfile', (['dae_filename'], {}), '(dae_filename)\n', (1164, 1178), False, 'import os, sys\n')]
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
from sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart import \
    InstantiatedParetoFrontOptimalChart
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.execution_engine.data_manager import DataManager
import numpy as np
import pandas as pd
from climateeconomics.sos_processes.iam.witness.witness_optim_sub_process.usecase_witness_optim_sub import OPTIM_NAME, COUPLING_NAME, EXTRA_NAME
def post_processing_filters(execution_engine, namespace):
    filters = []
    chart_list = ['Temperature vs Welfare',
                  'CO2 Emissions vs Welfare', 'CO2 Emissions vs min(Utility)',
                  'CO2 tax per scenario', 'Temperature per scenario', 'Welfare per scenario',
                  'Utility per scenario', 'CO2 emissions per scenario', 'ppm(mean) vs Welfare',
                  'Total production per scenario', 'ppm per scenario', 'invest per scenario']
    scatter_scenario = 'optimization scenarios'
    namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
    scenario_key = execution_engine.dm.get_data_id(
        f'{namespace_w}.scenario_list')
    scenario_list = execution_engine.dm.data_dict[scenario_key][DataManager.VALUE]
    filters.append(ChartFilter('Charts', chart_list, chart_list, 'Charts'))
    filters.append(ChartFilter('Scenarios', scenario_list,
                               scenario_list, 'Scenarios'))
    return filters
def post_processings(execution_engine, namespace, filters):
    instanciated_charts = []
    scatter_scenario = 'optimization scenarios'
    namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
    scenario_key = execution_engine.dm.get_data_id(
        f'{namespace_w}.scenario_list')
    scenario_list = execution_engine.dm.data_dict[scenario_key][DataManager.VALUE]
    # Overload default value with chart filter
    if filters is not None:
        for chart_filter in filters:
            if chart_filter.filter_key == 'Charts':
                graphs_list = chart_filter.selected_values
            if chart_filter.filter_key == 'Scenarios':
                selected_scenarios = chart_filter.selected_values
    else:
        graphs_list = ['Temperature vs Welfare',
                       'CO2 Emissions vs Welfare', 'CO2 Emissions vs min(Utility)',
                       'CO2 tax per scenario', 'Temperature per scenario', 'Welfare per scenario',
                       'Utility per scenario', 'CO2 emissions per scenario', 'ppm(mean) vs Welfare',
                       'Total production per scenario', 'ppm per scenario', 'invest per scenario']
        selected_scenarios = scenario_list
    df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.year_start',
                f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.year_end', ]
    year_start_dict, year_end_dict = get_df_per_scenario_dict(
        execution_engine, df_paths, scenario_list)
    year_start, year_end = year_start_dict[scenario_list[0]], year_end_dict[scenario_list[0]]
    years = np.arange(year_start, year_end).tolist()
"""
-------------
-------------
PARETO OPTIMAL CHART
-------------
-------------
"""
    if 'Temperature vs Welfare' in graphs_list:
        chart_name = f'Temperature in {year_end} vs Welfare'
        x_axis_name = 'Temperature increase since industrial revolution in degree Celsius'
        y_axis_name = 'Welfare'
        df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Temperature_change.temperature_detail_df',
                    f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df'
                    ]
        (temperature_df_dict, utility_df_dict) = get_df_per_scenario_dict(
            execution_engine, df_paths, scenario_list)
        last_temperature_dict, welfare_dict = {}, {}
        for scenario in scenario_list:
            last_temperature_dict[scenario] = temperature_df_dict[scenario]['temp_atmo'][year_end]
            welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
        namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
        new_pareto_chart = get_chart_pareto_front(last_temperature_dict, welfare_dict, scenario_list,
                                                namespace_w, chart_name=chart_name,
                                                x_axis_name=x_axis_name, y_axis_name=y_axis_name)
        instanciated_charts.append(new_pareto_chart)
    if 'CO2 Emissions vs Welfare' in graphs_list:
        chart_name = 'Sum of CO2 emissions vs Welfare'
        x_axis_name = 'Summed CO2 emissions'
        y_axis_name = f'Welfare in {year_end}'
        df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carbon_emissions.CO2_emissions_detail_df',
                    f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df',
                    ]
        (co2_emissions_df_dict, utility_df_dict) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        summed_co2_emissions_dict, welfare_dict = {}, {}
        for scenario in scenario_list:
            summed_co2_emissions_dict[scenario] = co2_emissions_df_dict[scenario]['total_emissions'].sum()
            welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
        namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
        new_pareto_chart = get_chart_pareto_front(summed_co2_emissions_dict, welfare_dict, scenario_list,
                                                namespace_w, chart_name=chart_name,
                                                x_axis_name=x_axis_name, y_axis_name=y_axis_name)
        instanciated_charts.append(new_pareto_chart)
    if 'CO2 Emissions vs min(Utility)' in graphs_list:
        chart_name = 'CO2 Emissions vs minimum of Utility'
        x_axis_name = 'Summed CO2 emissions'
        y_axis_name = 'min( Utility )'
        df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carbon_emissions.CO2_emissions_detail_df',
                    f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df',
                    ]
        (co2_emissions_df_dict, utility_df_dict) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        summed_co2_emissions_dict, min_utility_dict = {}, {}
        for scenario in scenario_list:
            summed_co2_emissions_dict[scenario] = co2_emissions_df_dict[scenario]['total_emissions'].sum()
            min_utility_dict[scenario] = min(
                utility_df_dict[scenario]['discounted_utility'])
        namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
        new_pareto_chart = get_chart_pareto_front(summed_co2_emissions_dict, min_utility_dict, scenario_list,
                                                namespace_w, chart_name=chart_name,
                                                x_axis_name=x_axis_name, y_axis_name=y_axis_name)
        instanciated_charts.append(new_pareto_chart)
    if 'ppm(mean) vs Welfare' in graphs_list:
        chart_name = 'mean ppm vs Welfare'
        x_axis_name = 'Mean ppm'
        y_axis_name = f'Welfare in {year_end}'
        df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carboncycle.carboncycle_detail_df',
                    f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df',
                    ]
        (carboncycle_detail_df_dict, utility_df_dict) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        mean_co2_ppm_dict, welfare_dict = {}, {}
        for scenario in scenario_list:
            mean_co2_ppm_dict[scenario] = carboncycle_detail_df_dict[scenario]['ppm'].mean()
            welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
        namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
        new_pareto_chart = get_chart_pareto_front(mean_co2_ppm_dict, welfare_dict, scenario_list,
                                                namespace_w, chart_name=chart_name,
                                                x_axis_name=x_axis_name, y_axis_name=y_axis_name)
        instanciated_charts.append(new_pareto_chart)
"""
-------------
-------------
SCENARIO COMPARISON CHART
-------------
-------------
"""
    if 'CO2 tax per scenario' in graphs_list:
        chart_name = 'CO2 tax per scenario'
        x_axis_name = 'Years'
        y_axis_name = 'Price ($/tCO2)'
        df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.CO2_taxes', ]
        (co2_taxes_df_dict,) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        co2_tax_dict = {}
        for scenario in scenario_list:
            co2_tax_dict[scenario] = co2_taxes_df_dict[scenario]['CO2_tax'].values.tolist()
        new_chart = get_scenario_comparison_chart(years, co2_tax_dict,
                                                  chart_name=chart_name,
                                                  x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
        instanciated_charts.append(new_chart)
    if 'Temperature per scenario' in graphs_list:
        chart_name = 'Atmosphere temperature evolution per scenario'
        x_axis_name = 'Years'
        y_axis_name = 'Temperature (degrees Celsius above preindustrial)'
        df_paths = [
            f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Temperature_change.temperature_detail_df', ]
        (temperature_detail_df_dict,) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        temperature_dict = {}
        for scenario in scenario_list:
            temperature_dict[scenario] = temperature_detail_df_dict[scenario]['temp_atmo'].values.tolist()
        new_chart = get_scenario_comparison_chart(years, temperature_dict,
                                                  chart_name=chart_name,
                                                  x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
        instanciated_charts.append(new_chart)
    if 'Welfare per scenario' in graphs_list:
        chart_name = 'Welfare per scenario'
        y_axis_name = f'Welfare in {year_end}'
        df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df', ]
        (utility_df_dict,) = get_df_per_scenario_dict(execution_engine, df_paths)
        welfare_dict = {}
        for scenario in scenario_list:
            welfare_dict[scenario] = utility_df_dict[scenario]['welfare'][year_end]
        min_y = min(list(welfare_dict.values()))
        max_y = max(list(welfare_dict.values()))
        new_chart = TwoAxesInstanciatedChart('', y_axis_name,
                                         [], [min_y * 0.95, max_y * 1.05],
                                         chart_name)
        for scenario, welfare in welfare_dict.items():
            if scenario in selected_scenarios:
                serie = InstanciatedSeries(
                    [''],
                    [welfare], scenario, 'bar')
                new_chart.series.append(serie)
        instanciated_charts.append(new_chart)
    if 'Utility per scenario' in graphs_list:
        chart_name = 'Utility per scenario'
        x_axis_name = 'Years'
        y_axis_name = 'Discounted Utility (trill $)'
        df_paths = [f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.utility_df', ]
        (utility_df_dict,) = get_df_per_scenario_dict(execution_engine, df_paths)
        utility_dict = {}
        for scenario in scenario_list:
            utility_dict[scenario] = utility_df_dict[scenario]['discounted_utility'].values.tolist()
        new_chart = get_scenario_comparison_chart(years, utility_dict,
                                                  chart_name=chart_name,
                                                  x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
        instanciated_charts.append(new_chart)
    if 'CO2 emissions per scenario' in graphs_list:
        chart_name = 'CO2 emissions per scenario'
        x_axis_name = 'Years'
        y_axis_name = 'Carbon emissions (Gtc)'
        df_paths = [
            f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carbon_emissions.CO2_emissions_detail_df']
        (co2_emissions_df_dict,) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        co2_emissions_dict = {}
        for scenario in scenario_list:
            co2_emissions_dict[scenario] = co2_emissions_df_dict[scenario]['total_emissions'].values.tolist()
        new_chart = get_scenario_comparison_chart(years, co2_emissions_dict,
                                                  chart_name=chart_name,
                                                  x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
        instanciated_charts.append(new_chart)
    if 'ppm per scenario' in graphs_list:
        chart_name = 'Atmospheric concentrations parts per million per scenario'
        x_axis_name = 'Years'
        y_axis_name = 'Atmospheric concentrations parts per million'
        df_paths = [
            f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.Carboncycle.carboncycle_detail_df']
        (carboncycle_detail_df_dict,) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        co2_ppm_dict, welfare_dict = {}, {}
        for scenario in scenario_list:
            co2_ppm_dict[scenario] = carboncycle_detail_df_dict[scenario]['ppm'].values.tolist()
        new_chart = get_scenario_comparison_chart(years, co2_ppm_dict,
                                                  chart_name=chart_name,
                                                  x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
        # Rockstrom Limit
        ordonate_data = [450] * int(len(years) / 5)
        abscisse_data = np.linspace(
            year_start, year_end, int(len(years) / 5))
        new_series = InstanciatedSeries(
            abscisse_data.tolist(), ordonate_data, 'Rockstrom limit', 'scatter')
        note = {'Rockstrom limit': 'Scientific limit of the Earth'}
        new_chart.annotation_upper_left = note
        new_chart.series.append(new_series)
        instanciated_charts.append(new_chart)
    if 'Total production per scenario' in graphs_list:
        chart_name = 'Total production per scenario'
        x_axis_name = 'Years'
        y_axis_name = 'Total production'
        df_paths = [
            f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.EnergyMix.energy_production_detailed']
        (energy_production_detailed_df_dict,) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        energy_production_detailed_dict = {}
        for scenario in scenario_list:
            energy_production_detailed_dict[scenario] = energy_production_detailed_df_dict[
                scenario]['Total production (uncut)'].values.tolist()
        new_chart = get_scenario_comparison_chart(years, energy_production_detailed_dict,
                                                  chart_name=chart_name,
                                                  x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
        instanciated_charts.append(new_chart)
    if 'invest per scenario' in graphs_list:
        chart_name = 'investments per scenario'
        x_axis_name = 'Years'
        y_axis_name = 'total energy investment'
        # Get the total energy investment
        df_paths = [
            f'{OPTIM_NAME}.{COUPLING_NAME}.{EXTRA_NAME}.energy_investment']
        (energy_investment_df_dict,) = get_df_per_scenario_dict(
            execution_engine, df_paths)
        energy_investment_dict = {}
        for scenario in scenario_list:
            energy_investment_dict[scenario] = energy_investment_df_dict[
                scenario]['energy_investment'].values.tolist()
        new_chart = get_scenario_comparison_chart(years, energy_investment_dict,
                                                  chart_name=chart_name,
                                                  x_axis_name=x_axis_name, y_axis_name=y_axis_name, selected_scenarios=selected_scenarios)
        instanciated_charts.append(new_chart)
    return instanciated_charts
def get_scenario_comparison_chart(x_list, y_dict, chart_name, x_axis_name, y_axis_name, selected_scenarios):
    min_x = min(x_list)
    max_x = max(x_list)
    min_y = min([min(list(y)) for y in y_dict.values()])
    max_y = max([max(list(y)) for y in y_dict.values()])
    new_chart = TwoAxesInstanciatedChart(x_axis_name, y_axis_name,
                                     [min_x - 5, max_x + 5],
                                     [min_y - max_y * 0.05, max_y * 1.05],
                                     chart_name)
    for scenario, y_values in y_dict.items():
        if scenario in selected_scenarios:
            new_series = InstanciatedSeries(
                x_list, y_values, scenario, 'lines', True)
            new_chart.series.append(new_series)
    return new_chart
def get_chart_pareto_front(x_dict, y_dict, scenario_list, namespace_w, chart_name='Pareto Front',
                           x_axis_name='x', y_axis_name='y'):
    '''
    Function that, given two dictionaries and a scenario_list, returns a pareto front
    :params: x_dict, dict containing the data for the x axis of the pareto front per scenario
    :type: dict
    :params: y_dict, dict containing the data for the y axis of the pareto front per scenario
    :type: dict
    :params: scenario_list, list containing the name of the scenarios
    :type: list
    :params: namespace_w, namespace of scatter scenario
    :type: string
    :params: chart_name, name of the chart used as title
    :type: string
    :returns: new_pareto_chart, the chart object to be displayed
    :type: InstantiatedParetoFrontOptimalChart
    '''
    min_x = min(list(x_dict.values()))
    max_x = max(list(x_dict.values()))
    max_y = max(list(y_dict.values()))
    min_y = min(list(y_dict.values()))
    new_pareto_chart = InstantiatedParetoFrontOptimalChart(
        abscissa_axis_name=f'{x_axis_name}',
        primary_ordinate_axis_name=f'{y_axis_name}',
        abscissa_axis_range=[min_x - max_x * 0.05, max_x * 1.05],
        primary_ordinate_axis_range=[min_y - max_y * 0.03, max_y * 1.03],
        chart_name=chart_name)
    for scenario in scenario_list:
        new_serie = InstanciatedSeries([x_dict[scenario]],
                                       [y_dict[scenario]],
                                       scenario, 'scatter',
                                       custom_data=f'{namespace_w}.{scenario}')
        new_pareto_chart.add_serie(new_serie)
    # Calculating and adding the pareto front: scenarios are sorted by x value,
    # then a point is kept only if its y value does not drop below that of the
    # last kept point.
    sorted_x = sorted(x_dict.values())
    sorted_scenarios = []
    for val in sorted_x:
        for scen, x_val in x_dict.items():
            if x_val == val:
                sorted_scenarios.append(scen)
    sorted_list = sorted([[x_dict[scenario], y_dict[scenario]]
                          for scenario in sorted_scenarios])
    pareto_front = [sorted_list[0]]
    for pair in sorted_list[1:]:
        if pair[1] >= pareto_front[-1][1]:
            pareto_front.append(pair)
    pareto_front_serie = InstanciatedSeries(
        [pair[0] for pair in pareto_front], [pair[1] for pair in pareto_front], 'Pareto front', 'lines')
    new_pareto_chart.add_pareto_front_optimal(pareto_front_serie)
    return new_pareto_chart
def get_df_per_scenario_dict(execution_engine, df_paths, scenario_list=None):
    '''! Function to retrieve dataframes from all the scenarios given a specified path
    @param execution_engine: Execution_engine, object from which the data is gathered
    @param df_paths: list of string, containing the paths to access the df
    @return df_per_scenario_dict: list of dict, with {key = scenario_name: value = requested_dataframe}
    '''
    df_per_scenario_dicts = [{} for _ in df_paths]
    scatter_scenario = 'optimization scenarios'
    namespace_w = f'{execution_engine.study_name}.{scatter_scenario}'
    if not scenario_list:
        scenario_key = execution_engine.dm.get_data_id(
            f'{namespace_w}.scenario_list')
        scenario_list = execution_engine.dm.data_dict[scenario_key][DataManager.VALUE]
    for scenario in scenario_list:
        for i, df_path in enumerate(df_paths):
            df_per_scenario_dicts[i][scenario] = execution_engine.dm.get_value(
                f'{namespace_w}.{scenario}.{df_path}')
    return df_per_scenario_dicts
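
# A minimal usage sketch (hypothetical execution_engine and namespace objects,
# following the signatures defined above):
#   filters = post_processing_filters(execution_engine, namespace)
#   charts = post_processings(execution_engine, namespace, filters)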
|
[
"sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart.InstantiatedParetoFrontOptimalChart",
"sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter",
"numpy.arange",
"sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart",
"sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries"
] |
[((17569, 17697), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart', 'TwoAxesInstanciatedChart', (['x_axis_name', 'y_axis_name', '[min_x - 5, max_x + 5]', '[min_y - max_y * 0.05, max_y * 1.05]', 'chart_name'], {}), '(x_axis_name, y_axis_name, [min_x - 5, max_x + 5],\n [min_y - max_y * 0.05, max_y * 1.05], chart_name)\n', (17593, 17697), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((19108, 19384), 'sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart.InstantiatedParetoFrontOptimalChart', 'InstantiatedParetoFrontOptimalChart', ([], {'abscissa_axis_name': 'f"""{x_axis_name}"""', 'primary_ordinate_axis_name': 'f"""{y_axis_name}"""', 'abscissa_axis_range': '[min_x - max_x * 0.05, max_x * 1.05]', 'primary_ordinate_axis_range': '[min_y - max_y * 0.03, max_y * 1.03]', 'chart_name': 'chart_name'}), "(abscissa_axis_name=f'{x_axis_name}',\n primary_ordinate_axis_name=f'{y_axis_name}', abscissa_axis_range=[min_x -\n max_x * 0.05, max_x * 1.05], primary_ordinate_axis_range=[min_y - max_y *\n 0.03, max_y * 1.03], chart_name=chart_name)\n", (19143, 19384), False, 'from sos_trades_core.tools.post_processing.pareto_front_optimal_charts.instanciated_pareto_front_optimal_chart import InstantiatedParetoFrontOptimalChart\n'), ((20319, 20438), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (['[pair[0] for pair in pareto_front]', '[pair[1] for pair in pareto_front]', '"""Pareto front"""', '"""lines"""'], {}), "([pair[0] for pair in pareto_front], [pair[1] for pair in\n pareto_front], 'Pareto front', 'lines')\n", (20337, 20438), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((1986, 2041), 'sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter', 'ChartFilter', (['"""Charts"""', 'chart_list', 'chart_list', '"""Charts"""'], {}), "('Charts', chart_list, chart_list, 'Charts')\n", (1997, 2041), False, 'from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter\n'), ((2062, 2129), 'sos_trades_core.tools.post_processing.charts.chart_filter.ChartFilter', 'ChartFilter', (['"""Scenarios"""', 'scenario_list', 'scenario_list', '"""Scenarios"""'], {}), "('Scenarios', scenario_list, scenario_list, 'Scenarios')\n", (2073, 2129), False, 'from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter\n'), ((11511, 11602), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.TwoAxesInstanciatedChart', 'TwoAxesInstanciatedChart', (['""""""', 'y_axis_name', '[]', '[min_y * 0.95, max_y * 1.05]', 'chart_name'], {}), "('', y_axis_name, [], [min_y * 0.95, max_y * 1.05],\n chart_name)\n", (11535, 11602), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((19483, 19607), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (['[x_dict[scenario]]', '[y_dict[scenario]]', 'scenario', '"""scatter"""'], {'custom_data': 'f"""{namespace_w}.{scenario}"""'}), "([x_dict[scenario]], [y_dict[scenario]], scenario,\n 'scatter', custom_data=f'{namespace_w}.{scenario}')\n", (19501, 19607), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((3809, 3840), 'numpy.arange', 'np.arange', (['year_start', 'year_end'], {}), '(year_start, year_end)\n', (3818, 3840), True, 'import numpy as np\n'), ((17937, 17998), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (['x_list', 'y_values', 'scenario', '"""lines"""', '(True)'], {}), "(x_list, y_values, scenario, 'lines', True)\n", (17955, 17998), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n'), ((11866, 11918), 'sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart.InstanciatedSeries', 'InstanciatedSeries', (["['']", '[welfare]', 'scenario', '"""bar"""'], {}), "([''], [welfare], scenario, 'bar')\n", (11884, 11918), False, 'from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart\n')]
|
# Copyright (c) Facebook, Inc. All Rights Reserved
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ upstream/hubert/expert.py ]
# Synopsis [ the HuBERT wrapper ]
# Author [ <NAME> ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
from packaging import version
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
import fairseq
from ..interfaces import UpstreamBase
############
# CONSTANT #
############
SAMPLE_RATE = 16000
EXAMPLE_SEC = 5
###################
# UPSTREAM EXPERT #
###################
class UpstreamExpert(UpstreamBase):
    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        assert version.parse(fairseq.__version__) > version.parse(
            "0.10.2"
        ), "Please install the fairseq master branch."
        model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [ckpt]
        )
        self.model = model[0]
        self.task = task
        if len(self.hooks) == 0:
            module_name = "self.model.encoder.layers"
            for module_id in range(len(eval(module_name))):
                self.add_hook(
                    f"{module_name}[{module_id}]",
                    lambda input, output: input[0].transpose(0, 1),
                )
            self.add_hook("self.model.encoder", lambda input, output: output[0])

            def postprocess(xs):
                names, hiddens = zip(*xs)
                unpad_len = min([hidden.size(1) for hidden in hiddens])
                hiddens = [hidden[:, :unpad_len, :] for hidden in hiddens]
                return list(zip(names, hiddens))

            self.hook_postprocess = postprocess

    def get_downsample_rates(self, key: str) -> int:
        return 320

    def forward(self, wavs):
        if self.task.cfg.normalize:
            wavs = [F.layer_norm(wav, wav.shape) for wav in wavs]
        device = wavs[0].device
        wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device)
        wav_padding_mask = ~torch.lt(
            torch.arange(max(wav_lengths)).unsqueeze(0).to(device),
            wav_lengths.unsqueeze(1),
        )
        padded_wav = pad_sequence(wavs, batch_first=True)
        features, feat_padding_mask = self.model.extract_features(
            padded_wav,
            padding_mask=wav_padding_mask,
            mask=None,
        )
        # This forward function only does the model forward
        # The return dict is then handled by UpstreamBase's hooks
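
# A minimal usage sketch (hypothetical checkpoint path; wavs are 1-D float
# tensors sampled at SAMPLE_RATE):
#   expert = UpstreamExpert("path/to/hubert_checkpoint.pt")
#   wavs = [torch.randn(SAMPLE_RATE * EXAMPLE_SEC)]
#   expert(wavs)  # hidden states are collected by the hooks registered above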
|
[
"fairseq.checkpoint_utils.load_model_ensemble_and_task",
"torch.nn.utils.rnn.pad_sequence",
"packaging.version.parse",
"torch.nn.functional.layer_norm"
] |
[((1060, 1121), 'fairseq.checkpoint_utils.load_model_ensemble_and_task', 'fairseq.checkpoint_utils.load_model_ensemble_and_task', (['[ckpt]'], {}), '([ckpt])\n', (1113, 1121), False, 'import fairseq\n'), ((2407, 2443), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['wavs'], {'batch_first': '(True)'}), '(wavs, batch_first=True)\n', (2419, 2443), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((904, 938), 'packaging.version.parse', 'version.parse', (['fairseq.__version__'], {}), '(fairseq.__version__)\n', (917, 938), False, 'from packaging import version\n'), ((941, 964), 'packaging.version.parse', 'version.parse', (['"""0.10.2"""'], {}), "('0.10.2')\n", (954, 964), False, 'from packaging import version\n'), ((2075, 2103), 'torch.nn.functional.layer_norm', 'F.layer_norm', (['wav', 'wav.shape'], {}), '(wav, wav.shape)\n', (2087, 2103), True, 'import torch.nn.functional as F\n')]
|
import pruebas, prueba_simple
print(pruebas.Clase1_1().Clase1_2().firstn(5))
|
[
"pruebas.Clase1_1"
] |
[((37, 55), 'pruebas.Clase1_1', 'pruebas.Clase1_1', ([], {}), '()\n', (53, 55), False, 'import pruebas, prueba_simple\n')]
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
from matplotlib.pyplot import MultipleLocator
rc('mathtext', default='regular')
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.family'] = ['Times New Roman']
Name = ["REIN", "HEM", "Simple", "TAMA", "Ada-REIN", "OpIndex"]
x = ["30", "100", "300", "1k", "3k", "10k"]
HEM = [7.295973, 3.713577, 2.102555, 0.966343, 0.402103, 0.182992]
Simple = [18.883054, 12.119494, 6.735993, 5.690750, 6.650945, 15.245289]
Rein = [51.559147, 33.450221, 20.636215, 10.640785, 5.474277, 2.662649]
AdaREIN = [47.341016, 32.124697, 19.574878, 10.215780, 5.246019, 2.621369]
TAMA = [1.402612, 1.456788, 1.523113, 1.745815, 2.187047, 3.118311]
OpIndex = [1.018494, 1.065417, 1.129113, 1.413666, 2.117866, 5.711690]
Rein.reverse()
Simple.reverse()
AdaREIN.reverse()
HEM.reverse()
TAMA.reverse()
OpIndex.reverse()
lsize = 24
fig = plt.figure(figsize=(5, 4))
ax = fig.add_subplot(111)
ax.set_xlabel('Number of Attributes', fontsize=lsize)
ax.set_ylabel('Matching Time (ms)', fontsize=lsize)
# plt.xticks(range(0,10))
ax.plot(x, Rein, marker='v', color='r', label=Name[0])
ax.plot(x, HEM, marker='.', color='DODGERBLUE', label=Name[1])
# ax.plot(x, Simple, marker='D', color='deepskyblue', label=Name[2])
ax.plot(x, TAMA, marker='*', color='DarkCyan', label=Name[3])
ax.plot(x, AdaREIN, marker='x', color='DarkMagenta', label=Name[4])
ax.plot(x, OpIndex, marker='h', color='DimGray', label=Name[5]) # slategray
ax.legend(fontsize=11.5, ncol=2, loc='lower right')
ax.grid()
ax.set_xlim(0, 5)
ax.set_xticks([0, 1, 2, 3, 4, 5])
ax.set_xticklabels(x)
ax.set_yscale("log", base=4, subs=[2, 3])
# ax.set_ylim(0, 60)
ax.set_yticks([0.25, 1, 4, 16, 64])
ax.set_yticklabels(['0.25', '1', '4', '16', '64'])
# ax.set_yticks([0,2,8,32,128,256])
# ax.set_yticklabels(['-1', '0', '1'])
ax.set_zorder(0)
plt.tick_params(labelsize=22)
gcf = plt.gcf()
plt.show()
gcf.savefig('./exp9_d_VAS.pdf', format='pdf', bbox_inches='tight')
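
# Note: savefig is usually called before plt.show(), since show() blocks and,
# with some interactive backends, the canvas may be cleared once the window is
# closed; here the figure handle was captured with plt.gcf() beforehand.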
|
[
"matplotlib.rc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.gcf"
] |
[((142, 175), 'matplotlib.rc', 'rc', (['"""mathtext"""'], {'default': '"""regular"""'}), "('mathtext', default='regular')\n", (144, 175), False, 'from matplotlib import rc\n'), ((924, 950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (934, 950), True, 'import matplotlib.pyplot as plt\n'), ((1887, 1916), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(22)'}), '(labelsize=22)\n', (1902, 1916), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1932), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1930, 1932), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1941, 1943), True, 'import matplotlib.pyplot as plt\n')]
|
"""
======================
CSA Histogram Approach
======================
Static Chemical Shift Powder Pattern using a Histogram Approach
The equation for the static powder pattern for a simple chemical shift anisotropy interation is given by the following equation
.. math::
H = \\delta_{iso} + \\frac {1}{2} \\delta \left ( 3 \\cos^2 \\theta - 1 \\right ) - \\delta \\eta \\sin^2 \\theta \\cos 2 \\phi
There are a number of conventions for the assignment of :math:`\eta` and :math:`\delta`, we have used Haeberlen's convention.
if :math:`\sigma_{xx}`, :math:`\sigma_{yy}` and :math:`\sigma_{zz}` are the principal components of the chemical shielding tensor then they must have the following order.
.. math::
\\left | \\sigma_{zz} - \\sigma_{iso} \\right | \\ge \\left | \\sigma_{xx} - \\sigma_{iso} \\right | \\ge \\left | \\sigma_{yy} - \\sigma_{iso} \\right |
where
.. math::
\\sigma_{iso} = \\frac {1}{3} \\left ( \\sigma_{xx} + \\sigma_{yy} + \\sigma_{zz} \\right )
and then :math:`\delta` and :math:`\eta` ared defined as follows
.. math::
\\delta = \\sigma_{zz} - \sigma_{iso}
and
.. math::
\\eta = \\frac {\\sigma_{xx}-\\sigma_{yy}}{\\sigma_{zz}-\\sigma_{iso}}
References
~~~~~~~~~~
- <NAME>, In Advances in Magnetic Resonance; Suppl. 1; <NAME>, Ed.; Academic Press, New York, 1976.
"""
import numpy as np
from matplotlib import pyplot as plt
import sys
def return_Hammersley_points_array(l, n, p):
    """
    l is the power of x to go out to p^m
    n is the maximum number of points
    p is the order of the Hammersley point: 1, 2, 3, 4, ... etc.
    returns
    --------
    np.array of double
    """
    vvv = np.zeros(n)
    for m in range(n):
        m1 = 1 * m
        if p == 1:
            vvv[m] = m1 / n
        else:
            v = 0.0
            for j in range(l, -1, -1):
                num = m1 // p**j
                if num > 0:
                    m1 -= num * p**j
                    v += num / (p ** (j + 1))
            vvv[m] = v
    return vvv
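
# Example (p=2 gives the base-2 van der Corput sequence):
#   return_Hammersley_points_array(3, 8, 2)
#   -> [0.0, 0.5, 0.25, 0.75, 0.125, 0.625, 0.375, 0.875]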
def omega_cs(theta, phi, iso_cs=0.0, asymm_cs=100, eta_cs=1.0):
    return (iso_cs + 0.5 * asymm_cs * (3.0 * (np.cos(theta)**2) - 1.0
            - eta_cs * (np.sin(theta)**2) * np.cos(2.0 * phi))), np.sin(theta)
if __name__ == "__main__":
    # Define CSA powder pattern
    # Principal components of the chemical shift shielding tensor
    s_zz = -120.0
    s_yy = -50.0
    s_xx = 100.0
    # Check for Haeberlen's convention
    iso_cs = (s_zz + s_yy + s_xx) / 3.
    if abs(s_zz - iso_cs) >= abs(s_xx - iso_cs) and abs(s_xx - iso_cs) >= abs(s_yy - iso_cs):
        h_zz = s_zz
        h_yy = s_yy
        h_xx = s_xx
    elif abs(s_zz - iso_cs) < abs(s_xx - iso_cs) and abs(s_xx - iso_cs) >= abs(s_yy - iso_cs):
        h_zz = s_xx
        h_yy = s_yy
        h_xx = s_zz
    else:
        print("problem with assignment of cs tensors")
        sys.exit()
    asymm_cs = h_zz - iso_cs
    eta_cs = (h_xx - h_yy) / (h_zz - iso_cs)
    # Calculate Hammersley points and powder pattern
    N_particles = 2**17
    theta = return_Hammersley_points_array(22, N_particles, 2)
    phi = return_Hammersley_points_array(22, N_particles, 3)
    omega, solid_angle = omega_cs(theta * np.pi, 2 * np.pi * phi, eta_cs=eta_cs, iso_cs=iso_cs, asymm_cs=asymm_cs)
    # Plot powder pattern and use sin(theta) solid angle weighting
    plt.hist(omega, bins=200, weights=solid_angle, density=True)
    plt.xlim(250.0, -250.0)
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.yticks([])
    plt.xlabel('Hz', fontsize=14)
    ax.annotate(r'$\sigma_{xx}$',
                xy=(s_xx + 5, 0.0030), xycoords='data',
                xytext=(-50, 30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->"), fontsize=14)
    ax.annotate(r'$\sigma_{yy}$',
                xy=(s_yy + 5, 0.012), xycoords='data',
                xytext=(-50, 0), textcoords='offset points',
                arrowprops=dict(arrowstyle="->"), fontsize=14)
    ax.annotate(r'$\sigma_{zz}$',
                xy=(s_zz - 5, 0.0044), xycoords='data',
                xytext=(50, 30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->"), fontsize=14)
    plt.title(f"{N_particles} Hammersley Pts CSA Calculated Histogram", fontsize=14)
    plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.yticks",
"numpy.zeros",
"numpy.sin",
"numpy.cos",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"sys.exit"
] |
[((1718, 1729), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1726, 1729), True, 'import numpy as np\n'), ((3505, 3565), 'matplotlib.pyplot.hist', 'plt.hist', (['omega'], {'bins': '(200)', 'weights': 'solid_angle', 'density': '(True)'}), '(omega, bins=200, weights=solid_angle, density=True)\n', (3513, 3565), True, 'from matplotlib import pyplot as plt\n'), ((3573, 3596), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(250.0)', '(-250.0)'], {}), '(250.0, -250.0)\n', (3581, 3596), True, 'from matplotlib import pyplot as plt\n'), ((3606, 3615), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3613, 3615), True, 'from matplotlib import pyplot as plt\n'), ((3743, 3757), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3753, 3757), True, 'from matplotlib import pyplot as plt\n'), ((3762, 3791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hz"""'], {'fontsize': '(14)'}), "('Hz', fontsize=14)\n", (3772, 3791), True, 'from matplotlib import pyplot as plt\n'), ((4411, 4496), 'matplotlib.pyplot.title', 'plt.title', (['f"""{N_particles} Hammersley Pts CSA Calculated Histogram"""'], {'fontsize': '(14)'}), "(f'{N_particles} Hammersley Pts CSA Calculated Histogram', fontsize=14\n )\n", (4420, 4496), True, 'from matplotlib import pyplot as plt\n'), ((4502, 4512), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4510, 4512), True, 'from matplotlib import pyplot as plt\n'), ((2335, 2348), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2341, 2348), True, 'import numpy as np\n'), ((3011, 3021), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3019, 3021), False, 'import sys\n'), ((2312, 2329), 'numpy.cos', 'np.cos', (['(2.0 * phi)'], {}), '(2.0 * phi)\n', (2318, 2329), True, 'import numpy as np\n'), ((2261, 2274), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2267, 2274), True, 'import numpy as np\n'), ((2294, 2307), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2300, 2307), True, 'import numpy as np\n')]
|
import logging
from ipaddress import ip_address
from django.conf import settings
from django.db import models
from django.utils.module_loading import import_string
from swapper import get_model_name, load_model
from openwisp_utils.base import UUIDModel
logger = logging.getLogger(__name__)
trigger_device_checks_path = 'openwisp_monitoring.device.tasks.trigger_device_checks'
class AbstractDeviceNode(UUIDModel):
    node = models.OneToOneField(
        get_model_name('topology', 'Node'), on_delete=models.CASCADE
    )
    device = models.ForeignKey(
        get_model_name('config', 'Device'), on_delete=models.CASCADE
    )
    # relations will be auto-created only for these parsers
    ENABLED_PARSERS = {
        'netdiff.OpenvpnParser': {
            'auto_create': 'auto_create_openvpn',
            'link_down': 'link_down_openvpn',
            'link_up': 'link_up_openvpn',
        }
    }

    class Meta:
        abstract = True
        unique_together = ('node', 'device')

    @classmethod
    def auto_create(cls, node):
        """
        Attempts to perform automatic creation of DeviceNode objects.
        The right action to perform depends on the Topology used.
        """
        opts = cls.ENABLED_PARSERS.get(node.topology.parser)
        if opts:
            return getattr(cls, opts['auto_create'])(node)

    @classmethod
    def auto_create_openvpn(cls, node):
        """
        Implementation of the integration between
        controller and network-topology modules
        when using OpenVPN (using the common name)
        """
        common_name = node.properties.get('common_name')
        if not common_name:
            return
        Device = load_model('config', 'Device')
        device_filter = models.Q(config__vpnclient__cert__common_name=common_name)
        if node.organization_id:
            device_filter &= models.Q(organization_id=node.organization_id)
        device = (
            Device.objects.only(
                'id', 'name', 'last_ip', 'management_ip', 'organization_id'
            )
            .filter(device_filter)
            .first()
        )
        if not device:
            return
        device_node = cls(device=device, node=node)
        try:
            device_node.full_clean()
            device_node.save()
            # Update organization of the node. This is required
            # when topology is shared.
            if node.organization_id is None:
                node.organization_id = device.organization_id
                node.save(update_fields=['organization_id'])
        except Exception:
            logger.exception('Exception raised during auto_create_openvpn')
            return
        else:
            logger.info(f'DeviceNode relation created for {node.label} - {device.name}')
            return device_node

    def link_action(self, link, status):
        """
        Performs clean-up operations when a link goes up or down.
        The right action to perform depends on the Topology used.
        """
        opts = self.ENABLED_PARSERS.get(link.topology.parser)
        if opts:
            key = f'link_{status}'
            return getattr(self, opts[key])()

    def link_down_openvpn(self):
        """
        Link down action for OpenVPN
        """
        self.device.management_ip = None
        self.device.save()

    def link_up_openvpn(self):
        """
        Link up action for OpenVPN
        """
        addresses = self.node.addresses
        try:
            address = ip_address(addresses[1])
        except (IndexError, ValueError) as e:
            addresses = ', '.join(addresses)
            logger.warning(
                f'{e.__class__.__name__} raised while processing addresses: {addresses}'
            )
        else:
            self.device.management_ip = str(address)
            self.device.save()

    @classmethod
    def filter_by_link(cls, link):
        """
        Returns a queryset which looks for a DeviceNode which is related
        to the specified Link instance.
        """
        return cls.objects.filter(
            models.Q(node__source_link_set__pk=link.pk)
            | models.Q(node__target_link_set__pk=link.pk)
        ).select_related('device', 'node')

    @classmethod
    def trigger_device_updates(cls, link):
        """
        Used to refresh controller and monitoring information
        whenever the status of a link changes
        """
        if link.topology.parser not in cls.ENABLED_PARSERS:
            return
        for device_node in cls.filter_by_link(link):
            device_node.link_action(link, link.status)
            # triggers monitoring checks if OpenWISP Monitoring is enabled
            if 'openwisp_monitoring.device' in settings.INSTALLED_APPS:
                run_checks = import_string(trigger_device_checks_path)
                run_checks.delay(device_node.device.pk, recovery=link.status == 'up')
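
# A minimal concrete-model sketch (hypothetical app; in OpenWISP the concrete
# model is generated via swapper):
#   class DeviceNode(AbstractDeviceNode):
#       class Meta(AbstractDeviceNode.Meta):
#           abstract = False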
|
[
"django.utils.module_loading.import_string",
"swapper.get_model_name",
"django.db.models.Q",
"ipaddress.ip_address",
"swapper.load_model",
"logging.getLogger"
] |
[((265, 292), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (282, 292), False, 'import logging\n'), ((460, 494), 'swapper.get_model_name', 'get_model_name', (['"""topology"""', '"""Node"""'], {}), "('topology', 'Node')\n", (474, 494), False, 'from swapper import get_model_name, load_model\n'), ((567, 601), 'swapper.get_model_name', 'get_model_name', (['"""config"""', '"""Device"""'], {}), "('config', 'Device')\n", (581, 601), False, 'from swapper import get_model_name, load_model\n'), ((1694, 1724), 'swapper.load_model', 'load_model', (['"""config"""', '"""Device"""'], {}), "('config', 'Device')\n", (1704, 1724), False, 'from swapper import get_model_name, load_model\n'), ((1749, 1807), 'django.db.models.Q', 'models.Q', ([], {'config__vpnclient__cert__common_name': 'common_name'}), '(config__vpnclient__cert__common_name=common_name)\n', (1757, 1807), False, 'from django.db import models\n'), ((1870, 1916), 'django.db.models.Q', 'models.Q', ([], {'organization_id': 'node.organization_id'}), '(organization_id=node.organization_id)\n', (1878, 1916), False, 'from django.db import models\n'), ((3506, 3530), 'ipaddress.ip_address', 'ip_address', (['addresses[1]'], {}), '(addresses[1])\n', (3516, 3530), False, 'from ipaddress import ip_address\n'), ((4789, 4830), 'django.utils.module_loading.import_string', 'import_string', (['trigger_device_checks_path'], {}), '(trigger_device_checks_path)\n', (4802, 4830), False, 'from django.utils.module_loading import import_string\n'), ((4088, 4131), 'django.db.models.Q', 'models.Q', ([], {'node__source_link_set__pk': 'link.pk'}), '(node__source_link_set__pk=link.pk)\n', (4096, 4131), False, 'from django.db import models\n'), ((4146, 4189), 'django.db.models.Q', 'models.Q', ([], {'node__target_link_set__pk': 'link.pk'}), '(node__target_link_set__pk=link.pk)\n', (4154, 4189), False, 'from django.db import models\n')]
|
from diskord.ext import commands
import diskord
from utils.functions import *
class prefix_cmds(commands.Cog):
    def __init__(self, client):
        self.client = client

    @commands.command(name="bump", description="Command to bumps your server")
    async def _bump(self, ctx):
        # add_command_stats("bump")
        # await ctx.invoke(self.client.get_application_command("bump"))
        await ctx.reply("Bumpy is now using slash commands. If you cant use them reinvite Bumpy to your server using this link\n https://dsc.gg/bumpy")

def setup(client):
    client.add_cog(prefix_cmds(client))
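
# A minimal wiring sketch (hypothetical prefix, token, and extension path),
# assuming diskord's commands.Bot mirrors the discord.py extension API:
#   client = commands.Bot(command_prefix="!")
#   client.load_extension("cogs.prefix_cmds")
#   client.run("TOKEN")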
|
[
"diskord.ext.commands.command"
] |
[((188, 261), 'diskord.ext.commands.command', 'commands.command', ([], {'name': '"""bump"""', 'description': '"""Command to bumps your server"""'}), "(name='bump', description='Command to bumps your server')\n", (204, 261), False, 'from diskord.ext import commands\n')]
|
"""Rewrited Legacy Email backend.
Original Legacy Email backend docs at:
http://psa.matiasaguirre.net/docs/backends/email.html
"""
from social_core.backends.legacy import LegacyAuth
from social_core.exceptions import AuthMissingParameter
from psa.custom_django_storage import CustomCode
class EmailAuth(LegacyAuth):
"""Legacy EmailAuth backend
Improved auth_complete method to update data by email
from code object.
"""
name = 'email'
ID_KEY = 'email'
REQUIRES_EMAIL_VALIDATION = True
EXTRA_DATA = ['email']
def auth_complete(self, *args, **kwargs):
"""
Completes loging process, must return user instance.
"""
if self.ID_KEY not in self.data:
code = (self.strategy.request.GET.get('verification_code') or
self.strategy.request.POST.get('verification_code'))
code_object = CustomCode.objects.filter(code=code, verified=False).first()
if code_object:
email = code_object.email
self.data.update({'email': email})
if code_object.next_page:
self.data['next'] = code_object.next_page
self.strategy.session_set('next', code_object.next_page)
else:
raise AuthMissingParameter(self, self.ID_KEY)
kwargs.update({'response': self.data, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
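
# A minimal settings sketch (hypothetical module path) showing how such a
# backend is typically enabled in Django:
#   AUTHENTICATION_BACKENDS = (
#       'myapp.backends.EmailAuth',
#       'django.contrib.auth.backends.ModelBackend',
#   )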
|
[
"psa.custom_django_storage.CustomCode.objects.filter",
"social_core.exceptions.AuthMissingParameter"
] |
[((1293, 1332), 'social_core.exceptions.AuthMissingParameter', 'AuthMissingParameter', (['self', 'self.ID_KEY'], {}), '(self, self.ID_KEY)\n', (1313, 1332), False, 'from social_core.exceptions import AuthMissingParameter\n'), ((890, 942), 'psa.custom_django_storage.CustomCode.objects.filter', 'CustomCode.objects.filter', ([], {'code': 'code', 'verified': '(False)'}), '(code=code, verified=False)\n', (915, 942), False, 'from psa.custom_django_storage import CustomCode\n')]
|
import os
def read_ids(ids_file):
    file = open(ids_file, 'r', encoding='utf8')
    ids_list = list()
    for line in file.readlines():
        params = line.strip('\n').split('\t')
        assert len(params) == 2
        id = int(params[0])
        ids_list.append(id)
    return ids_list

def embedding2file(embeddings, embeddings_out_file):
    print("Embedding:", embeddings.shape)
    fw = open(embeddings_out_file, 'w', encoding='utf8')
    for i in range(embeddings.shape[0]):
        line = ''
        for j in range(embeddings.shape[1]):
            line = line + str(embeddings[i, j]) + '\t'
        fw.write(line.strip() + '\n')
    fw.close()

def print_time(t):
    print('time:{:.3f} s'.format(t))

def radio_2file(radio, folder):
    path = folder + str(radio).replace('.', '_')
    if not os.path.exists(path):
        os.makedirs(path)
    return path + '/'

def read_pairs(file_path):
    file = open(file_path, 'r', encoding='utf8')
    pairs = list()
    for line in file.readlines():
        params = line.strip('\n').split('\t')
        assert len(params) == 2
        pairs.append((params[0], params[1]))
    file.close()
    return pairs

def read_pair_ids(file_path):
    file = open(file_path, 'r', encoding='utf8')
    pairs = list()
    for line in file.readlines():
        params = line.strip('\n').split('\t')
        assert len(params) == 2
        pairs.append((int(params[0]), int(params[1])))
    file.close()
    return pairs

def pair_2set(pairs):
    s1, s2 = set(), set()
    for pair in pairs:
        s1.add(pair[0])
        s2.add(pair[1])
    return s1, s2

def read_triples_ids(file_path):
    triples = list()
    file = open(file_path, 'r', encoding='utf8')
    for line in file.readlines():
        params = line.strip('\n').split('\t')
        assert len(params) == 3
        h = int(params[0])
        r = int(params[1])
        t = int(params[2])
        triples.append((h, r, t))
    return triples

def read_ref(file_path):
    refs = list()
    reft = list()
    file = open(file_path, 'r', encoding='utf8')
    for line in file.readlines():
        params = line.strip('\n').split('\t')
        assert len(params) == 2
        e1 = int(params[0])
        e2 = int(params[1])
        refs.append(e1)
        reft.append(e2)
    assert len(refs) == len(reft)
    return refs, reft

def read_ents_by_order(ids_file):
    file = open(ids_file, 'r', encoding='utf8')
    ids_list = list()
    for line in file.readlines():
        params = line.strip('\n').split('\t')
        assert len(params) == 2
        ids_list.append(int(params[0]))
    return ids_list

def pair_2_rev_dict(pairs):
    d = dict()
    for pair in pairs:
        if pair[1] not in d:
            d[pair[1]] = pair[0]
        else:
            print("Error")
    return d

def pair_2int_set(pairs):
    s1, s2 = set(), set()
    for pair in pairs:
        s1.add(int(pair[0]))
        s2.add(int(pair[1]))
    return s1, s2

def div_list(ls, n):
    ls_len = len(ls)
    if n <= 0 or 0 == ls_len:
        return []
    if n > ls_len:
        return []
    elif n == ls_len:
        return [[i] for i in ls]
    else:
        j = ls_len // n
        k = ls_len % n
        ls_return = []
        for i in range(0, (n - 1) * j, j):
            ls_return.append(ls[i:i + j])
        ls_return.append(ls[(n - 1) * j:])
        return ls_return
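
# Example: div_list splits a list into n chunks, putting the remainder in the
# last chunk:
#   div_list([1, 2, 3, 4, 5], 2)  # -> [[1, 2], [3, 4, 5]]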
|
[
"os.path.exists",
"os.makedirs"
] |
[((812, 832), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (826, 832), False, 'import os\n'), ((842, 859), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (853, 859), False, 'import os\n')]
|
"""
A script to grid search all parameters provided in parameters.py
including both classifiers and regressors.
Note that the execution of this script may take hours to search the
best possible model parameters for various algorithms, feel free
to edit parameters.py on your need ( e.g remove some parameters for
faster search )
"""
import pickle
from emotion_recognition import EmotionRecognizer
from parameters import classification_grid_parameters, regression_grid_parameters
# emotion classes you want to perform grid search on
emotions = ['sad', 'neutral', 'happy']
# number of parallel jobs during the grid search
n_jobs = 4
best_estimators = []
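# Grid-search each classifier's parameter grid and keep its best estimator, params, and CV score.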
for model, params in classification_grid_parameters.items():
    if model.__class__.__name__ == "KNeighborsClassifier":
        # in case of a K-Nearest neighbors algorithm
        # set number of neighbors to the length of emotions
        params['n_neighbors'] = [len(emotions)]
    d = EmotionRecognizer(model, emotions=emotions)
    d.load_data()
    best_estimator, best_params, cv_best_score = d.grid_search(params=params, n_jobs=n_jobs)
    best_estimators.append((best_estimator, best_params, cv_best_score))
    print(f"{emotions} {best_estimator.__class__.__name__} achieved {cv_best_score:.3f} cross validation accuracy score!")
print(f"[+] Pickling best classifiers for {emotions}...")
pickle.dump(best_estimators, open(f"grid/best_classifiers.pickle", "wb"))
best_estimators = []
for model, params in regression_grid_parameters.items():
    if model.__class__.__name__ == "KNeighborsRegressor":
        # in case of a K-Nearest neighbors algorithm
        # set number of neighbors to the length of emotions
        params['n_neighbors'] = [len(emotions)]
    d = EmotionRecognizer(model, emotions=emotions, classification=False)
    d.load_data()
    best_estimator, best_params, cv_best_score = d.grid_search(params=params, n_jobs=n_jobs)
    best_estimators.append((best_estimator, best_params, cv_best_score))
    print(f"{emotions} {best_estimator.__class__.__name__} achieved {cv_best_score:.3f} cross validation MAE score!")
print(f"[+] Pickling best regressors for {emotions}...")
pickle.dump(best_estimators, open(f"grid/best_regressors.pickle", "wb"))
# Best for SVC: C=0.001, gamma=0.001, kernel='poly'
# Best for AdaBoostClassifier: {'algorithm': 'SAMME', 'learning_rate': 0.8, 'n_estimators': 60}
# Best for RandomForestClassifier: {'max_depth': 7, 'max_features': 0.5, 'min_samples_leaf': 1, 'min_samples_split': 2, 'n_estimators': 40}
# Best for GradientBoostingClassifier: {'learning_rate': 0.3, 'max_depth': 7, 'max_features': None, 'min_samples_leaf': 1, 'min_samples_split': 2, 'n_estimators': 70, 'subsample': 0.7}
# Best for DecisionTreeClassifier: {'criterion': 'entropy', 'max_depth': 7, 'max_features': None, 'min_samples_leaf': 1, 'min_samples_split': 2}
# Best for KNeighborsClassifier: {'n_neighbors': 5, 'p': 1, 'weights': 'distance'}
# Best for MLPClassifier: {'alpha': 0.005, 'batch_size': 256, 'hidden_layer_sizes': (300,), 'learning_rate': 'adaptive', 'max_iter': 500}
|
[
"parameters.classification_grid_parameters.items",
"parameters.regression_grid_parameters.items",
"emotion_recognition.EmotionRecognizer"
] |
[((701, 739), 'parameters.classification_grid_parameters.items', 'classification_grid_parameters.items', ([], {}), '()\n', (737, 739), False, 'from parameters import classification_grid_parameters, regression_grid_parameters\n'), ((1513, 1547), 'parameters.regression_grid_parameters.items', 'regression_grid_parameters.items', ([], {}), '()\n', (1545, 1547), False, 'from parameters import classification_grid_parameters, regression_grid_parameters\n'), ((974, 1017), 'emotion_recognition.EmotionRecognizer', 'EmotionRecognizer', (['model'], {'emotions': 'emotions'}), '(model, emotions=emotions)\n', (991, 1017), False, 'from emotion_recognition import EmotionRecognizer\n'), ((1781, 1846), 'emotion_recognition.EmotionRecognizer', 'EmotionRecognizer', (['model'], {'emotions': 'emotions', 'classification': '(False)'}), '(model, emotions=emotions, classification=False)\n', (1798, 1846), False, 'from emotion_recognition import EmotionRecognizer\n')]
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadFactOperator(BaseOperator):
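    # Creates the songplays fact table on Redshift and loads it from the staging tables.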
    ui_color = '#F98866'
    @apply_defaults
    def __init__(self,
                 redshift_conn_id,
                 queries,
                 *args, **kwargs):
        super(LoadFactOperator, self).__init__(*args, **kwargs)
        self.conn_id = redshift_conn_id
        self.queries = queries
    def execute(self, context):
        """Create the songplays fact table on Redshift and load it from the staging tables."""
        self.log.info('Starting LoadFactOperator for table songplays')
        # get a Redshift connection via the configured hook
        redshift_hook = PostgresHook(self.conn_id)
        # create the corresponding table on Redshift
        self.log.info(
            f'Run CREATE statement for songplays table from helper class')
        # the query is in the helper class
        query = 'songplay_table_create'
        sql_statement = getattr(self.queries, query)
        self.log.info(sql_statement)
        redshift_hook.run(sql_statement)
        # load fact table from staging
        self.log.info(f'Run INSERT statement for songplay from helper class')
        # the query is in the helper class
        query = 'songplay_table_insert'
        sql_statement = getattr(self.queries, query)
        self.log.info(sql_statement)
        redshift_hook.run(sql_statement)
|
[
"airflow.hooks.postgres_hook.PostgresHook"
] |
[((681, 707), 'airflow.hooks.postgres_hook.PostgresHook', 'PostgresHook', (['self.conn_id'], {}), '(self.conn_id)\n', (693, 707), False, 'from airflow.hooks.postgres_hook import PostgresHook\n')]
|
import os
import pandas as pd
import torch
from torch.utils.data import Dataset
DATA_DIR = os.path.join("..", "data")
class Digits(Dataset):
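    # Digit images loaded from a CSV whose "pixel*" columns are flattened 28x28 images.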
    def __init__(self, split: str):
        self._load_data(split)
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]
    def _load_data(self, split):
        data = pd.read_csv(os.path.join(DATA_DIR, f"{split}.csv"))
        if split == "train":
            self.labels = torch.from_numpy(data.label.values).type(torch.LongTensor)
        else:
            self.labels = torch.zeros(data.index.max() + 1).type(torch.LongTensor)
        self.images = torch.from_numpy(
            data[[c for c in data.columns if "pixel" in c]]
            .to_numpy()
            .reshape((-1, 28, 28))
        ).type(torch.FloatTensor)
|
[
"os.path.join",
"torch.from_numpy"
] |
[((93, 119), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (105, 119), False, 'import os\n'), ((414, 452), 'os.path.join', 'os.path.join', (['DATA_DIR', 'f"""{split}.csv"""'], {}), "(DATA_DIR, f'{split}.csv')\n", (426, 452), False, 'import os\n'), ((510, 545), 'torch.from_numpy', 'torch.from_numpy', (['data.label.values'], {}), '(data.label.values)\n', (526, 545), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
from threads.models import Thread
# Create your models here.
class Poll(models.Model):
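    # A poll question attached to a single forum thread.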
    question = models.TextField()
    thread = models.OneToOneField(Thread, null=True)
    def __unicode__(self):
        return self.question
class PollSubject(models.Model):
    name = models.CharField(max_length=255)
    poll = models.ForeignKey(Poll, related_name='subjects')
    def __unicode__(self):
        return self.name
class Vote(models.Model):
    poll = models.ForeignKey(Poll, related_name="votes")
    subject = models.ForeignKey(PollSubject, related_name="votes")
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='votes')
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.OneToOneField"
] |
[((191, 209), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (207, 209), False, 'from django.db import models\n'), ((223, 262), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Thread'], {'null': '(True)'}), '(Thread, null=True)\n', (243, 262), False, 'from django.db import models\n'), ((366, 398), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (382, 398), False, 'from django.db import models\n'), ((410, 458), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Poll'], {'related_name': '"""subjects"""'}), "(Poll, related_name='subjects')\n", (427, 458), False, 'from django.db import models\n'), ((551, 596), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Poll'], {'related_name': '"""votes"""'}), "(Poll, related_name='votes')\n", (568, 596), False, 'from django.db import models\n'), ((611, 663), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PollSubject'], {'related_name': '"""votes"""'}), "(PollSubject, related_name='votes')\n", (628, 663), False, 'from django.db import models\n'), ((675, 740), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'related_name': '"""votes"""'}), "(settings.AUTH_USER_MODEL, related_name='votes')\n", (692, 740), False, 'from django.db import models\n')]
|
import logging
from datetime import datetime, timedelta
import requests
from flask import current_app as app
from iso8601 import ParseError, parse_date
from structlog import wrap_logger
from frontstage.exceptions.exceptions import ApiError
logger = wrap_logger(logging.getLogger(__name__))
date_format = "%d %b %Y"
def get_collection_exercise(collection_exercise_id):
logger.info("Attempting to retrieve collection exercise", collection_exercise_id=collection_exercise_id)
url = f"{app.config['COLLECTION_EXERCISE_URL']}/collectionexercises/{collection_exercise_id}"
response = requests.get(url, auth=app.config["BASIC_AUTH"])
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Failed to retrieve collection exercise", collection_exercise_id=collection_exercise_id)
raise ApiError(logger, response)
logger.info("Successfully retrieved collection exercise", collection_exercise_id=collection_exercise_id)
collection_exercise = response.json()
if collection_exercise["events"]:
collection_exercise["events"] = convert_events_to_new_format(collection_exercise["events"])
return collection_exercise
def get_collection_exercise_events(collection_exercise_id):
logger.info("Attempting to retrieve collection exercise events", collection_exercise_id=collection_exercise_id)
url = f"{app.config['COLLECTION_EXERCISE_URL']}/collectionexercises/{collection_exercise_id}/events"
response = requests.get(url, auth=app.config["BASIC_AUTH"])
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Failed to retrieve collection exercise events", collection_exercise_id=collection_exercise_id)
raise ApiError(logger, response)
logger.info("Successfully retrieved collection exercise events", collection_exercise_id=collection_exercise_id)
return response.json()
def get_collection_exercises_for_survey(survey_id, collex_url, collex_auth, live_only=None):
logger.info("Retrieving collection exercises for survey", survey_id=survey_id)
if live_only is True:
url = f"{collex_url}/collectionexercises/survey/{survey_id}?liveOnly=true"
else:
url = f"{collex_url}/collectionexercises/survey/{survey_id}"
response = requests.get(url, auth=collex_auth)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Failed to retrieve collection exercises for survey", survey_id=survey_id)
raise ApiError(logger, response)
logger.info("Successfully retrieved collection exercises for survey", survey_id=survey_id)
collection_exercises = response.json()
for collection_exercise in collection_exercises:
if collection_exercise["events"]:
collection_exercise["events"] = convert_events_to_new_format(collection_exercise["events"])
return collection_exercises
def get_live_collection_exercises_for_survey(survey_id, collex_url, collex_auth):
return get_collection_exercises_for_survey(survey_id, collex_url, collex_auth, True)
def convert_events_to_new_format(events):
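    # Re-key the raw event list by tag and precompute display-ready date fields.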
    formatted_events = {}
    for event in events:
        try:
            date_time = parse_date(event["timestamp"])
        except ParseError:
            raise ParseError
        formatted_events[event["tag"]] = {
            "date": date_time.strftime(date_format),
            "month": date_time.strftime("%m"),
            "is_in_future": date_time > parse_date(datetime.now().isoformat()),
            "formatted_date": ordinal_date_formatter("{S} %B %Y", date_time),
            "due_time": due_date_converter(date_time),
        }
    return formatted_events
def suffix(day: int):
    """
    This function creates the ordinal suffix
    :param: day of the date time object
    :return: ordinal suffix
    """
    return "th" if 11 <= day <= 13 else {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")
def ordinal_date_formatter(date_format_required: str, date_to_be_formatted: datetime):
    """
    This function takes the required output format and date to be formatted and returns the ordinal date in required
    format.
    :param: date_format_required: output format in which date should be returned
    :param: date_to_be_formatted: the datetime object which needs ordinal date
    :return: formatted date
    """
    return date_to_be_formatted.strftime(date_format_required).replace(
        "{S}", str(date_to_be_formatted.day) + suffix(date_to_be_formatted.day)
    )
def due_date_converter(date: datetime) -> str:
    """
    This function provides the custom due date based on the difference between now and the date passed.
    The logic for the due date is based on the following.
    Due date is today - Due today
    Due date is tomorrow - Due tomorrow
    Due date is 2-28 days - Due in X days
    Due date is 29-59 days - Due in a month
    Due date is 60-89 days - Due in 2 months
    Due date is 90-119 days - Due in 3 months
    Due date is 120+ days - Due in over 3 months
    :param: date: the datetime date for which due date is to be evaluated.
    :return: due date
    """
    now = datetime.now()
    event_day = datetime(date.year, date.month, date.day)
    today = return_date_time(now)
    tomorrow = return_date_time(now + timedelta(days=1))
    day_after = return_date_time(now + timedelta(days=2))
    a_month = return_date_time(now + timedelta(days=29))
    two_months = return_date_time(now + timedelta(days=60))
    three_months = return_date_time(now + timedelta(days=90))
    four_months = return_date_time(now + timedelta(days=120))
    if event_day == today:
        return "Due today"
    if event_day == tomorrow:
        return "Due tomorrow"
    if day_after <= event_day < a_month:
        delta = date.replace(tzinfo=None) - now.replace(tzinfo=None)
        return f"Due in {delta.days} days"
    if a_month <= event_day < two_months:
        return "Due in a month"
    if two_months <= event_day < three_months:
        return "Due in 2 months"
    if three_months <= event_day < four_months:
        return "Due in 3 months"
    if four_months <= event_day:
        return "Due in over 3 months"
def return_date_time(timedelta_now: datetime) -> datetime:
    """
    This function is a part of the refactor code to return date time in the following format
    :timedelta_now: datetime with delta
    :return:datetime
    """
    return datetime(timedelta_now.year, timedelta_now.month, timedelta_now.day)
|
[
"frontstage.exceptions.exceptions.ApiError",
"iso8601.parse_date",
"datetime.datetime",
"datetime.timedelta",
"requests.get",
"datetime.datetime.now",
"logging.getLogger"
] |
[((264, 291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (281, 291), False, 'import logging\n'), ((597, 645), 'requests.get', 'requests.get', (['url'], {'auth': "app.config['BASIC_AUTH']"}), "(url, auth=app.config['BASIC_AUTH'])\n", (609, 645), False, 'import requests\n'), ((1507, 1555), 'requests.get', 'requests.get', (['url'], {'auth': "app.config['BASIC_AUTH']"}), "(url, auth=app.config['BASIC_AUTH'])\n", (1519, 1555), False, 'import requests\n'), ((2329, 2364), 'requests.get', 'requests.get', (['url'], {'auth': 'collex_auth'}), '(url, auth=collex_auth)\n', (2341, 2364), False, 'import requests\n'), ((5207, 5221), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5219, 5221), False, 'from datetime import datetime, timedelta\n'), ((5238, 5279), 'datetime.datetime', 'datetime', (['date.year', 'date.month', 'date.day'], {}), '(date.year, date.month, date.day)\n', (5246, 5279), False, 'from datetime import datetime, timedelta\n'), ((6486, 6554), 'datetime.datetime', 'datetime', (['timedelta_now.year', 'timedelta_now.month', 'timedelta_now.day'], {}), '(timedelta_now.year, timedelta_now.month, timedelta_now.day)\n', (6494, 6554), False, 'from datetime import datetime, timedelta\n'), ((858, 884), 'frontstage.exceptions.exceptions.ApiError', 'ApiError', (['logger', 'response'], {}), '(logger, response)\n', (866, 884), False, 'from frontstage.exceptions.exceptions import ApiError\n'), ((1775, 1801), 'frontstage.exceptions.exceptions.ApiError', 'ApiError', (['logger', 'response'], {}), '(logger, response)\n', (1783, 1801), False, 'from frontstage.exceptions.exceptions import ApiError\n'), ((2563, 2589), 'frontstage.exceptions.exceptions.ApiError', 'ApiError', (['logger', 'response'], {}), '(logger, response)\n', (2571, 2589), False, 'from frontstage.exceptions.exceptions import ApiError\n'), ((3267, 3297), 'iso8601.parse_date', 'parse_date', (["event['timestamp']"], {}), "(event['timestamp'])\n", (3277, 3297), False, 'from iso8601 import ParseError, parse_date\n'), ((5352, 5369), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5361, 5369), False, 'from datetime import datetime, timedelta\n'), ((5410, 5427), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (5419, 5427), False, 'from datetime import datetime, timedelta\n'), ((5466, 5484), 'datetime.timedelta', 'timedelta', ([], {'days': '(29)'}), '(days=29)\n', (5475, 5484), False, 'from datetime import datetime, timedelta\n'), ((5526, 5544), 'datetime.timedelta', 'timedelta', ([], {'days': '(60)'}), '(days=60)\n', (5535, 5544), False, 'from datetime import datetime, timedelta\n'), ((5588, 5606), 'datetime.timedelta', 'timedelta', ([], {'days': '(90)'}), '(days=90)\n', (5597, 5606), False, 'from datetime import datetime, timedelta\n'), ((5649, 5668), 'datetime.timedelta', 'timedelta', ([], {'days': '(120)'}), '(days=120)\n', (5658, 5668), False, 'from datetime import datetime, timedelta\n'), ((3549, 3563), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3561, 3563), False, 'from datetime import datetime, timedelta\n')]
|
import pandas as pd
class LoadCSV():
    '''Class for loading a CSV file into a Pandas Dataframe'''
    def __init__(self, data_path, encoding=None):
        '''
        Initialization Method
        :param data_path: local disk path where the data is located
        :param encoding: CSV encoding
        #Python standard encodings list. Source: https://docs.python.org/3/library/codecs.html#standard-encodings
        '''
        self.data_path = data_path
        self.encoding = encoding
        self.df = pd.DataFrame()
        super().__init__()
    def __call__(self):
        '''
        Perform data activity here
        :return: dataframe object
        '''
        applicable_encodings = []
        if self.encoding is None:
            applicable_encodings = self.detect_encoding(file_name=self.data_path)
        else:
            applicable_encodings.append(self.encoding)
        for l, l_encoding in enumerate(applicable_encodings):
            try:
                self.df = pd.read_csv(self.data_path, encoding=l_encoding)
                # if the encoding was successfully read, break from the applicable_encodings loop
                break
            except Exception as e:
                print(e)
        return self.df
    def detect_encoding(self, file_name):
        '''
        This function detects the Python standard encoding of an input file.
        :param file_name: Input file name in column format (.csv)
        :return: list of applicable encodings for this file
        '''
        # Python standard encodings list. Source: https://docs.python.org/3/library/codecs.html#standard-encodings
        encoding_list = ['utf-8', 'ISO-8859-1', 'utf_8_sig', 'ascii', 'latin-1', 'cp-424', 'big5', 'big5hkscs', 'cp037', 'cp273', 'cp424', 'cp437',
                         'cp500', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857', 'cp858', 'cp860', 'cp861',
                         'cp862', 'cp863', 'cp864', 'cp865', 'cp866', 'cp869', 'cp874', 'cp875', 'cp932', 'cp949', 'cp950', 'cp1006',
                         'cp1026', 'cp1125', 'cp1140', 'cp1250', 'cp1251', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
                         'cp1257', 'cp1257', 'cp65001', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr', 'gb2312', 'gbk',
                         'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2', 'iso2022_jp_2004', 'iso2022_jp_3',
                         'iso2022_jp_ext', 'iso2022_kr', 'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
                         'iso8859_7', 'iso8859_8', 'iso8859_9', 'iso8859_10', 'iso8859_11', 'iso8859_12', 'iso8859_13', 'iso8859_14',
                         'iso8859_15', 'iso8859_16', 'johab', 'koi8_r', 'koi8_t', 'koi8_u', 'kz1048', 'mac_cyrillic', 'mac_greek',
                         'mac_iceland', 'mac_latin2', 'mac_roman', 'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004',
                         'shift_jisx0213', 'utf_32', 'utf_32_be', 'utf_32_le', 'utf_16', 'utf_16_be', 'utf_16_le', 'utf_7']
        valid_encodings = []
        for i, l_encoding in enumerate(encoding_list):
            try:
                df = pd.read_csv(file_name, encoding=l_encoding)
                valid_encodings.append(l_encoding)
                break
            except Exception as e:
                print('detect_encoding(): Encoding ' + l_encoding + ' is not applicable')
        return valid_encodings
|
[
"pandas.DataFrame",
"pandas.read_csv"
] |
[((511, 525), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (523, 525), True, 'import pandas as pd\n'), ((991, 1039), 'pandas.read_csv', 'pd.read_csv', (['self.data_path'], {'encoding': 'l_encoding'}), '(self.data_path, encoding=l_encoding)\n', (1002, 1039), True, 'import pandas as pd\n'), ((3161, 3204), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'encoding': 'l_encoding'}), '(file_name, encoding=l_encoding)\n', (3172, 3204), True, 'import pandas as pd\n')]
|
"""
Exposes regular rest commands as services.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/hassio/
"""
import asyncio
import logging
import os
import aiohttp
from aiohttp import web
from aiohttp.web_exceptions import HTTPBadGateway
import async_timeout
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.components.http import HomeAssistantView
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
DOMAIN = 'hassio'
DEPENDENCIES = ['http']
_LOGGER = logging.getLogger(__name__)
LONG_TASK_TIMEOUT = 900
DEFAULT_TIMEOUT = 10
SERVICE_HOST_SHUTDOWN = 'host_shutdown'
SERVICE_HOST_REBOOT = 'host_reboot'
SERVICE_HOST_UPDATE = 'host_update'
SERVICE_HOMEASSISTANT_UPDATE = 'homeassistant_update'
SERVICE_SUPERVISOR_UPDATE = 'supervisor_update'
SERVICE_SUPERVISOR_RELOAD = 'supervisor_reload'
SERVICE_ADDON_INSTALL = 'addon_install'
SERVICE_ADDON_UNINSTALL = 'addon_uninstall'
SERVICE_ADDON_UPDATE = 'addon_update'
SERVICE_ADDON_START = 'addon_start'
SERVICE_ADDON_STOP = 'addon_stop'
ATTR_ADDON = 'addon'
ATTR_VERSION = 'version'
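# Voluptuous schemas that validate the payload accepted by each hassio service.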
SCHEMA_SERVICE_UPDATE = vol.Schema({
    vol.Optional(ATTR_VERSION): cv.string,
})
SCHEMA_SERVICE_ADDONS = vol.Schema({
    vol.Required(ATTR_ADDON): cv.slug,
})
SCHEMA_SERVICE_ADDONS_VERSION = SCHEMA_SERVICE_ADDONS.extend({
    vol.Optional(ATTR_VERSION): cv.string,
})
SERVICE_MAP = {
    SERVICE_HOST_SHUTDOWN: None,
    SERVICE_HOST_REBOOT: None,
    SERVICE_HOST_UPDATE: SCHEMA_SERVICE_UPDATE,
    SERVICE_HOMEASSISTANT_UPDATE: SCHEMA_SERVICE_UPDATE,
    SERVICE_SUPERVISOR_UPDATE: SCHEMA_SERVICE_UPDATE,
    SERVICE_SUPERVISOR_RELOAD: None,
    SERVICE_ADDON_INSTALL: SCHEMA_SERVICE_ADDONS_VERSION,
    SERVICE_ADDON_UNINSTALL: SCHEMA_SERVICE_ADDONS,
    SERVICE_ADDON_START: SCHEMA_SERVICE_ADDONS,
    SERVICE_ADDON_STOP: SCHEMA_SERVICE_ADDONS,
    SERVICE_ADDON_UPDATE: SCHEMA_SERVICE_ADDONS_VERSION,
}
@asyncio.coroutine
def async_setup(hass, config):
"""Setup the hassio component."""
try:
host = os.environ['HASSIO']
except KeyError:
_LOGGER.error("No HassIO supervisor detect!")
return False
websession = async_get_clientsession(hass)
hassio = HassIO(hass.loop, websession, host)
api_ok = yield from hassio.is_connected()
if not api_ok:
_LOGGER.error("Not connected with HassIO!")
return False
# register base api views
for base in ('host', 'homeassistant'):
hass.http.register_view(HassIOBaseView(hassio, base))
for base in ('supervisor', 'network'):
hass.http.register_view(HassIOBaseEditView(hassio, base))
# register view for addons
hass.http.register_view(HassIOAddonsView(hassio))
@asyncio.coroutine
def async_service_handler(service):
"""Handle HassIO service calls."""
addon = service.data.get(ATTR_ADDON)
if ATTR_VERSION in service.data:
version = {ATTR_VERSION: service.data[ATTR_VERSION]}
else:
version = None
# map to api call
if service.service == SERVICE_HOST_UPDATE:
yield from hassio.send_command(
"/host/update", payload=version)
elif service.service == SERVICE_HOST_REBOOT:
yield from hassio.send_command("/host/reboot")
elif service.service == SERVICE_HOST_SHUTDOWN:
yield from hassio.send_command("/host/shutdown")
elif service.service == SERVICE_SUPERVISOR_UPDATE:
yield from hassio.send_command(
"/supervisor/update", payload=version)
elif service.service == SERVICE_SUPERVISOR_RELOAD:
yield from hassio.send_command(
"/supervisor/reload", timeout=LONG_TASK_TIMEOUT)
elif service.service == SERVICE_HOMEASSISTANT_UPDATE:
yield from hassio.send_command(
"/homeassistant/update", payload=version,
timeout=LONG_TASK_TIMEOUT)
elif service.service == SERVICE_ADDON_INSTALL:
yield from hassio.send_command(
"/addons/{}/install".format(addon), payload=version,
timeout=LONG_TASK_TIMEOUT)
elif service.service == SERVICE_ADDON_UNINSTALL:
yield from hassio.send_command(
"/addons/{}/uninstall".format(addon))
elif service.service == SERVICE_ADDON_START:
yield from hassio.send_command("/addons/{}/start".format(addon))
elif service.service == SERVICE_ADDON_STOP:
yield from hassio.send_command(
"/addons/{}/stop".format(addon), timeout=LONG_TASK_TIMEOUT)
elif service.service == SERVICE_ADDON_UPDATE:
yield from hassio.send_command(
"/addons/{}/update".format(addon), payload=version,
timeout=LONG_TASK_TIMEOUT)
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
for service, schema in SERVICE_MAP.items():
hass.services.async_register(
DOMAIN, service, async_service_handler,
descriptions[DOMAIN][service], schema=schema)
return True
class HassIO(object):
"""Small API wrapper for HassIO."""
def __init__(self, loop, websession, ip):
"""Initialze HassIO api."""
self.loop = loop
self.websession = websession
self._ip = ip
def is_connected(self):
"""Return True if it connected to HassIO supervisor.
Return a coroutine.
"""
return self.send_command("/supervisor/ping")
@asyncio.coroutine
def send_command(self, cmd, payload=None, timeout=DEFAULT_TIMEOUT):
"""Send request to API."""
answer = yield from self.send_raw(
cmd, payload=payload, timeout=timeout
)
if answer and answer['result'] == 'ok':
return answer['data'] if answer['data'] else True
_LOGGER.error("%s return error %s.", cmd, answer['message'])
return False
@asyncio.coroutine
def send_raw(self, cmd, payload=None, timeout=DEFAULT_TIMEOUT):
"""Send raw request to API."""
try:
with async_timeout.timeout(timeout, loop=self.loop):
request = yield from self.websession.get(
"http://{}{}".format(self._ip, cmd),
timeout=None, json=payload
)
if request.status != 200:
_LOGGER.error("%s return code %d.", cmd, request.status)
return
return (yield from request.json())
except asyncio.TimeoutError:
_LOGGER.error("Timeout on api request %s.", cmd)
except aiohttp.ClientError:
_LOGGER.error("Client error on api request %s.", cmd)
class HassIOBaseView(HomeAssistantView):
"""HassIO view to handle base part."""
requires_auth = True
def __init__(self, hassio, base):
"""Initialize a hassio base view."""
self.hassio = hassio
self._url_info = "/{}/info".format(base)
self.url = "/api/hassio/{}".format(base)
self.name = "api:hassio:{}".format(base)
@asyncio.coroutine
def get(self, request):
"""Get base data."""
data = yield from self.hassio.send_command(self._url_info)
if not data:
raise HTTPBadGateway()
return web.json_response(data)
class HassIOBaseEditView(HassIOBaseView):
"""HassIO view to handle base with options support."""
def __init__(self, hassio, base):
"""Initialize a hassio base edit view."""
super().__init__(hassio, base)
self._url_options = "/{}/options".format(base)
@asyncio.coroutine
def post(self, request):
"""Set options on host."""
data = yield from request.json()
response = yield from self.hassio.send_raw(
self._url_options, payload=data)
if not response:
raise HTTPBadGateway()
return web.json_response(response)
class HassIOAddonsView(HomeAssistantView):
"""HassIO view to handle addons part."""
requires_auth = True
url = "/api/hassio/addons/{addon}"
name = "api:hassio:addons"
def __init__(self, hassio):
"""Initialize a hassio addon view."""
self.hassio = hassio
@asyncio.coroutine
def get(self, request, addon):
"""Get addon data."""
data = yield from self.hassio.send_command(
"/addons/{}/info".format(addon))
if not data:
raise HTTPBadGateway()
return web.json_response(data)
@asyncio.coroutine
def post(self, request, addon):
"""Set options on host."""
data = yield from request.json()
response = yield from self.hassio.send_raw(
"/addons/{}/options".format(addon), payload=data)
if not response:
raise HTTPBadGateway()
return web.json_response(response)
|
[
"voluptuous.Optional",
"aiohttp.web_exceptions.HTTPBadGateway",
"voluptuous.Required",
"os.path.dirname",
"aiohttp.web.json_response",
"async_timeout.timeout",
"logging.getLogger",
"homeassistant.helpers.aiohttp_client.async_get_clientsession"
] |
[((644, 671), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (661, 671), False, 'import logging\n'), ((2289, 2318), 'homeassistant.helpers.aiohttp_client.async_get_clientsession', 'async_get_clientsession', (['hass'], {}), '(hass)\n', (2312, 2318), False, 'from homeassistant.helpers.aiohttp_client import async_get_clientsession\n'), ((1266, 1292), 'voluptuous.Optional', 'vol.Optional', (['ATTR_VERSION'], {}), '(ATTR_VERSION)\n', (1278, 1292), True, 'import voluptuous as vol\n'), ((1350, 1374), 'voluptuous.Required', 'vol.Required', (['ATTR_ADDON'], {}), '(ATTR_ADDON)\n', (1362, 1374), True, 'import voluptuous as vol\n'), ((1456, 1482), 'voluptuous.Optional', 'vol.Optional', (['ATTR_VERSION'], {}), '(ATTR_VERSION)\n', (1468, 1482), True, 'import voluptuous as vol\n'), ((7558, 7581), 'aiohttp.web.json_response', 'web.json_response', (['data'], {}), '(data)\n', (7575, 7581), False, 'from aiohttp import web\n'), ((8170, 8197), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (8187, 8197), False, 'from aiohttp import web\n'), ((8749, 8772), 'aiohttp.web.json_response', 'web.json_response', (['data'], {}), '(data)\n', (8766, 8772), False, 'from aiohttp import web\n'), ((9099, 9126), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (9116, 9126), False, 'from aiohttp import web\n'), ((7526, 7542), 'aiohttp.web_exceptions.HTTPBadGateway', 'HTTPBadGateway', ([], {}), '()\n', (7540, 7542), False, 'from aiohttp.web_exceptions import HTTPBadGateway\n'), ((8138, 8154), 'aiohttp.web_exceptions.HTTPBadGateway', 'HTTPBadGateway', ([], {}), '()\n', (8152, 8154), False, 'from aiohttp.web_exceptions import HTTPBadGateway\n'), ((8717, 8733), 'aiohttp.web_exceptions.HTTPBadGateway', 'HTTPBadGateway', ([], {}), '()\n', (8731, 8733), False, 'from aiohttp.web_exceptions import HTTPBadGateway\n'), ((9067, 9083), 'aiohttp.web_exceptions.HTTPBadGateway', 'HTTPBadGateway', ([], {}), '()\n', (9081, 9083), False, 'from aiohttp.web_exceptions import HTTPBadGateway\n'), ((5067, 5092), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5082, 5092), False, 'import os\n'), ((6337, 6383), 'async_timeout.timeout', 'async_timeout.timeout', (['timeout'], {'loop': 'self.loop'}), '(timeout, loop=self.loop)\n', (6358, 6383), False, 'import async_timeout\n')]
|
# terraform-provider-vcloud-director
# Copyright (c) 2017 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
from setuptools import setup
setup(
    setup_requires=['pbr>=1.9', 'setuptools>=17.1'],
    pbr=True,
)
|
[
"setuptools.setup"
] |
[((163, 227), 'setuptools.setup', 'setup', ([], {'setup_requires': "['pbr>=1.9', 'setuptools>=17.1']", 'pbr': '(True)'}), "(setup_requires=['pbr>=1.9', 'setuptools>=17.1'], pbr=True)\n", (168, 227), False, 'from setuptools import setup\n')]
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE
"""
Source and Resource for plain file handle physical I/O.
"""
from __future__ import absolute_import
import os
import os.path
import uproot4.source.chunk
import uproot4.source.futures
class FileResource(uproot4.source.chunk.Resource):
"""
Resource wrapping a plain file handle.
"""
def __init__(self, file_path):
self._file_path = file_path
self._file = open(self._file_path, "rb")
@property
def file_path(self):
return self._file_path
@property
def file(self):
return self._file
@property
def closed(self):
return self._file.closed
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self._file.__exit__(exception_type, exception_value, traceback)
def get(self, start, stop):
self._file.seek(start)
return self._file.read(stop - start)
class FileSource(uproot4.source.chunk.MultithreadedSource):
"""
Source managing one synchronous or multiple asynchronous file handles as a
context manager.
"""
def __init__(self, file_path, **options):
"""
Args:
file_path (str): Path to the file.
num_workers (int): If 0, one synchronous ResourceExecutor is
created; if 1 or more, a collection of asynchronous
ThreadResourceExecutors are created.
"""
num_workers = options["num_workers"]
self._num_requests = 0
self._num_requested_chunks = 0
self._num_requested_bytes = 0
self._file_path = file_path
self._resource = FileResource(file_path)
self._num_bytes = os.path.getsize(self._file_path)
if num_workers == 0:
self._executor = uproot4.source.futures.ResourceExecutor(self._resource)
else:
self._executor = uproot4.source.futures.ThreadResourceExecutor(
[FileResource(file_path) for x in range(num_workers)]
)
@property
def num_bytes(self):
"""
The number of bytes in the file.
"""
return self._num_bytes
|
[
"os.path.getsize"
] |
[((1776, 1808), 'os.path.getsize', 'os.path.getsize', (['self._file_path'], {}), '(self._file_path)\n', (1791, 1808), False, 'import os\n')]
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output, _arg_split
@click.command('paste_content')
@click.argument("content", type=str)
@click.argument("history_id", type=str)
@pass_context
@custom_exception
@dict_output
def cli(ctx, content, history_id):
"""Upload a string to a new dataset in the history specified by ``history_id``.
Output:
"""
return ctx.gi.tools.paste_content(content, history_id)
|
[
"click.argument",
"click.command"
] |
[((135, 165), 'click.command', 'click.command', (['"""paste_content"""'], {}), "('paste_content')\n", (148, 165), False, 'import click\n'), ((167, 202), 'click.argument', 'click.argument', (['"""content"""'], {'type': 'str'}), "('content', type=str)\n", (181, 202), False, 'import click\n'), ((204, 242), 'click.argument', 'click.argument', (['"""history_id"""'], {'type': 'str'}), "('history_id', type=str)\n", (218, 242), False, 'import click\n')]
|
from gtts import gTTS
import os
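# Synthesize the English greeting to good.mp3, then play it with the mpg321 CLI player.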
tts = gTTS(text='Hello sir, how can I help you?', lang='en')
tts.save("good.mp3")
os.system("mpg321 good.mp3")
|
[
"gtts.gTTS",
"os.system"
] |
[((38, 92), 'gtts.gTTS', 'gTTS', ([], {'text': '"""Hello sir, how can I help you?"""', 'lang': '"""en"""'}), "(text='Hello sir, how can I help you?', lang='en')\n", (42, 92), False, 'from gtts import gTTS\n'), ((114, 142), 'os.system', 'os.system', (['"""mpg321 good.mp3"""'], {}), "('mpg321 good.mp3')\n", (123, 142), False, 'import os\n')]
|
"""Using as Instance properties"""
import weakref
class IntegerValue:
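    # Data descriptor storing one int per instance in a weak-key dictionary.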
    def __init__(self):
        self.values = weakref.WeakKeyDictionary()
    def __set__(self, instance, value):
        self.values[instance] = int(value)
    def __get__(self, instance, owner):
        if instance is None:
            return self
        else:
            return self.values.get(instance)
class Point:
    x = IntegerValue()
p = Point()
p.x = 100.1
print(p.x)
class IntegerValue2:
    """This solves the problem of the hash-ability of the class
    - However: when an instance is deleted, its id is still in the dictionary
    """
    def __init__(self):
        self.values = {}  # plain dict keyed by id(): int keys cannot be weakly referenced
    def __set__(self, instance, value):
        self.values[id(instance)] = int(value)
    def __get__(self, instance, owner):
        if instance is None:
            return self
        else:
            return self.values.get(id(instance))
class Point2:
    x = IntegerValue2()
p1 = Point2()
p1.x = 100.1
print(p1.x)
class IntegerValue3:
    """This solves the problem of the hash-ability of the class,
    and when an instance is deleted, its id is removed by a callback function
    """
    def __init__(self):
        self.values = {}
    def __set__(self, instance, value):
        self.values[id(instance)] = (weakref.ref(instance, self._remove_object),
                                      int(value))
    def __get__(self, instance, owner):
        if instance is None:
            return self
        else:
            return self.values[id(instance)][1]
    def _remove_object(self, weak_ref):
        print(f'remove dead entry for {weak_ref}')
        reverse_lookup = [key for key, value in self.values.items()
                          if value[0] is weak_ref]
        if reverse_lookup:
            key = reverse_lookup[0]
            del self.values[key]
class Point3:
    x = IntegerValue3()
p3 = Point3()
p3.x = 100.1
print(p3.x)
del p3
print(Point3.x.values)
class ValidString:
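    # Descriptor enforcing string length limits; stale entries are cleaned up via weakref callbacks.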
    def __init__(self, min_length=0, max_length=255):
        self.data = {}
        self._min_length = min_length
        self._max_length = max_length
    def __set__(self, instance, value):
        if not isinstance(value, str):
            raise ValueError('Value must be a string')
        if len(value) < self._min_length:
            raise ValueError(f'Value should be at least '
                             f'{self._min_length} chars')
        if len(value) > self._max_length:
            raise ValueError(f'Value cannot exceed {self._max_length} chars')
        self.data[id(instance)] = (weakref.ref(instance,
                                               self._finalize_instance),
                                   value)
    def __get__(self, instance, owner):
        if instance is None:
            return self
        else:
            value_tuple = self.data.get(id(instance))
            return value_tuple[1]
    def _finalize_instance(self, weak_ref):
        reverse_lookup = [key for key, value in self.data.items()
                          if value[0] is weak_ref]
        if reverse_lookup:
            key = reverse_lookup[0]
            del self.data[key]
class Person:
    first_name = ValidString(5, 255)
    last_name = ValidString(5, 255)
    def __eq__(self, other):
        return isinstance(other, Person) and self.first_name == other.first_name
p1 = Person()
p1.first_name = 'Tancdasc'
print(p1.first_name)
|
[
"weakref.ref",
"weakref.WeakKeyDictionary"
] |
[((118, 145), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (143, 145), False, 'import weakref\n'), ((661, 688), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (686, 688), False, 'import weakref\n'), ((1302, 1344), 'weakref.ref', 'weakref.ref', (['instance', 'self._remove_object'], {}), '(instance, self._remove_object)\n', (1313, 1344), False, 'import weakref\n'), ((2592, 2638), 'weakref.ref', 'weakref.ref', (['instance', 'self._finalize_instance'], {}), '(instance, self._finalize_instance)\n', (2603, 2638), False, 'import weakref\n')]
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Provide a pyqt widget for the FGDC component with a shortname matching this
file's name.
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
import os
import platform
import subprocess
from subprocess import Popen
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QMessageBox
from pymdwizard.core import utils
from pymdwizard.gui import wiz_widget
from pymdwizard.gui.ui_files import UI_jupyterstarter
class JupyterStarter(QDialog):
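    # Dialog that lets the user pick a working directory and conda kernel, then launches Jupyter.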
    def __init__(
        self,
        previous_dnames=[],
        last_kernel="",
        default_kernel="pymdwizard <<default>>",
        update_function=None,
        parent=None,
    ):
        super(self.__class__, self).__init__(parent=parent)
        self.build_ui()
        self.connect_events()
        self.kernels = {}
        self.previous_dnames = previous_dnames
        if previous_dnames:
            self.ui.dname.setCurrentText(previous_dnames[0])
        self.default_kernel = default_kernel
        self.populate_kernels()
        self.update_function = update_function
        for dname in self.previous_dnames:
            self.ui.dname.addItem(dname)
        if last_kernel and last_kernel in self.kernels:
            index = self.ui.kernel.findText(last_kernel)
            if index >= 0:
                self.ui.kernel.setCurrentIndex(index)
        utils.set_window_icon(self)
        self.setStyleSheet(wiz_widget.NORMAL_STYLE)
        try:
            import jupyterlab
            self.ui.usejupyterframe.setEnabled(True)
        except ImportError:
            self.ui.usejupyterframe.setEnabled(False)
    def build_ui(self):
        """
        Build and modify this widget's GUI
        Returns
        -------
        None
        """
        self.ui = UI_jupyterstarter.Ui_Form()
        self.ui.setupUi(self)
    def populate_kernels(self):
        self.ui.kernel.addItem("pymdwizard <<default>>")
        self.kernels["pymdwizard <<default>>"] = utils.get_install_dname("python")
        try:
            conda_exe = os.path.join(self.get_conda_root()[0], "Scripts", "conda.exe")
            if os.path.exists(conda_exe):
                kernels = subprocess.check_output([conda_exe, "env", "list"])
            else:
                kernels = subprocess.check_output(["conda", "env", "list"])
            for line in kernels.split(b"\n"):
                if line and not line.strip().startswith(b"#"):
                    try:
                        parts = line.split()
                        if parts[1] == b"*":
                            parts = [parts[0], parts[2]]
                        name, path = parts
                        self.ui.kernel.addItem(str(name)[2:-1])
                        self.kernels[str(name)[2:-1]] = str(path)
                    except (ValueError, IndexError):
                        pass
        except:
            pass
    def connect_events(self):
        """
        Connect the appropriate GUI components with the corresponding functions
        Returns
        -------
        None
        """
        self.ui.btn_browse.clicked.connect(self.browse)
        self.ui.btn_cancel.clicked.connect(self.close_form)
        self.ui.btn_launch.clicked.connect(self.launch)
    def browse(self):
        if not self.previous_dnames:
            dname = ""
        else:
            dname = self.previous_dnames[0]
        jupyter_dname = QFileDialog.getExistingDirectory(
            self, "Select Directory to launch Jupyter from", dname
        )
        if jupyter_dname:
            self.ui.dname.setCurrentText(jupyter_dname)
    def get_conda_root(self):
        try:
            from conda.core.envs_manager import list_all_known_prefixes
            prefixes = list_all_known_prefixes()
            return prefixes[0], os.path.join(prefixes[0], "envs")
        except:
            try:
                conda_info = subprocess.check_output(["conda", "info"]).decode("utf-8")
                info = {}
                for line in conda_info.split("\n")[1:]:
                    try:
                        key, value = line.strip().split(" : ")
                        info[key] = value
                    except ValueError:
                        pass
                envs_dname = info["envs directories"]
                try:
                    root_dname = (
                        info["root environment"].replace("(writable)", "").strip()
                    )
                except KeyError:
                    root_dname = (
                        info["base environment"].replace("(writable)", "").strip()
                    )
                return str(root_dname), str(envs_dname)
            except:
                return "", ""
    def launch(self):
        jupyter_dname = self.ui.dname.currentText()
        if not os.path.exists(jupyter_dname):
            msg = "The selected directory to launch Jupyter in does not exist."
            msg += "\nPlease check the location before launching Jupyter."
            QMessageBox.information(self, "Missing Directory", msg)
            return
        if self.ui.kernel.currentText() == self.default_kernel:
            # this is pymdwizard specific
            python_dir = utils.get_install_dname("python")
            if platform.system() == "Darwin":
                jupyterexe = os.path.join(python_dir, "jupyter")
                my_env = os.environ.copy()
                my_env["PYTHONPATH"] = os.path.join(python_dir, "python")
                p = Popen([jupyterexe, "notebook"], cwd=jupyter_dname)
            else:
                root_dir = utils.get_install_dname("root")
                jupyterexe = os.path.join(python_dir, "scripts", "jupyter.exe")
                my_env = os.environ.copy()
                my_env["PYTHONPATH"] = os.path.join(root_dir, "Python36_64")
                if self.ui.usejupyterlab.isChecked():
                    jupytertype = "lab"
                else:
                    jupytertype = "notebook"
                p = Popen([jupyterexe, jupytertype], cwd=jupyter_dname, env=my_env)
        else:
            root_dname, envs_dname = self.get_conda_root()
            activate_fname = os.path.join(root_dname, "Scripts", "activate.bat")
            jupyter_exe_name = os.path.join(
                envs_dname, self.ui.kernel.currentText(), "Scripts", "jupyter.exe"
            )
            if not os.path.exists(jupyter_exe_name):
                jupyter_exe_name = os.path.join(root_dname, "Scripts", "jupyter.exe")
            if self.ui.usejupyterlab.isChecked():
                jupytertype = "lab"
            else:
                jupytertype = "notebook"
            if self.ui.kernel.currentText() == "root":
                cmds = ['"{}"'.format(jupyter_exe_name), jupytertype]
            else:
                cmds = [
                    '"{}"'.format(activate_fname),
                    self.ui.kernel.currentText(),
                    "&&",
                    '"{}"'.format(jupyter_exe_name),
                    jupytertype,
                ]
            Popen(" ".join(cmds), cwd=jupyter_dname)
        msg = "Jupyter launching...\nJupyter will start momentarily in a new tab in your default internet browser."
        QMessageBox.information(self, "Launching Jupyter", msg)
        self.update_function(self.ui.kernel.currentText(), self.ui.dname.currentText())
        self.close()
    def close_form(self):
        self.parent = None
        self.deleteLater()
        self.close()
if __name__ == "__main__":
    utils.launch_widget(
        JupyterStarter, "JupyterStarter", previous_dnames=[r"c:\temp", r"c:\temp\junk"]
    )
|
[
"subprocess.Popen",
"pymdwizard.gui.ui_files.UI_jupyterstarter.Ui_Form",
"pymdwizard.core.utils.get_install_dname",
"PyQt5.QtWidgets.QFileDialog.getExistingDirectory",
"subprocess.check_output",
"os.environ.copy",
"os.path.exists",
"pymdwizard.core.utils.set_window_icon",
"conda.core.envs_manager.list_all_known_prefixes",
"pymdwizard.core.utils.launch_widget",
"platform.system",
"PyQt5.QtWidgets.QMessageBox.information",
"os.path.join"
] |
[((9744, 9850), 'pymdwizard.core.utils.launch_widget', 'utils.launch_widget', (['JupyterStarter', '"""JupyterStarter"""'], {'previous_dnames': "['c:\\\\temp', 'c:\\\\temp\\\\junk']"}), "(JupyterStarter, 'JupyterStarter', previous_dnames=[\n 'c:\\\\temp', 'c:\\\\temp\\\\junk'])\n", (9763, 9850), False, 'from pymdwizard.core import utils\n'), ((3557, 3584), 'pymdwizard.core.utils.set_window_icon', 'utils.set_window_icon', (['self'], {}), '(self)\n', (3578, 3584), False, 'from pymdwizard.core import utils\n'), ((3973, 4000), 'pymdwizard.gui.ui_files.UI_jupyterstarter.Ui_Form', 'UI_jupyterstarter.Ui_Form', ([], {}), '()\n', (3998, 4000), False, 'from pymdwizard.gui.ui_files import UI_jupyterstarter\n'), ((4170, 4203), 'pymdwizard.core.utils.get_install_dname', 'utils.get_install_dname', (['"""python"""'], {}), "('python')\n", (4193, 4203), False, 'from pymdwizard.core import utils\n'), ((5607, 5699), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QFileDialog.getExistingDirectory', (['self', '"""Select Directory to launch Jupyter from"""', 'dname'], {}), "(self,\n 'Select Directory to launch Jupyter from', dname)\n", (5639, 5699), False, 'from PyQt5.QtWidgets import QFileDialog\n'), ((9443, 9498), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Launching Jupyter"""', 'msg'], {}), "(self, 'Launching Jupyter', msg)\n", (9466, 9498), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((4320, 4345), 'os.path.exists', 'os.path.exists', (['conda_exe'], {}), '(conda_exe)\n', (4334, 4345), False, 'import os\n'), ((5940, 5965), 'conda.core.envs_manager.list_all_known_prefixes', 'list_all_known_prefixes', ([], {}), '()\n', (5963, 5965), False, 'from conda.core.envs_manager import list_all_known_prefixes\n'), ((7020, 7049), 'os.path.exists', 'os.path.exists', (['jupyter_dname'], {}), '(jupyter_dname)\n', (7034, 7049), False, 'import os\n'), ((7218, 7273), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Missing Directory"""', 'msg'], {}), "(self, 'Missing Directory', msg)\n", (7241, 7273), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((7426, 7459), 'pymdwizard.core.utils.get_install_dname', 'utils.get_install_dname', (['"""python"""'], {}), "('python')\n", (7449, 7459), False, 'from pymdwizard.core import utils\n'), ((8386, 8437), 'os.path.join', 'os.path.join', (['root_dname', '"""Scripts"""', '"""activate.bat"""'], {}), "(root_dname, 'Scripts', 'activate.bat')\n", (8398, 8437), False, 'import os\n'), ((4373, 4424), 'subprocess.check_output', 'subprocess.check_output', (["[conda_exe, 'env', 'list']"], {}), "([conda_exe, 'env', 'list'])\n", (4396, 4424), False, 'import subprocess\n'), ((4469, 4518), 'subprocess.check_output', 'subprocess.check_output', (["['conda', 'env', 'list']"], {}), "(['conda', 'env', 'list'])\n", (4492, 4518), False, 'import subprocess\n'), ((5998, 6031), 'os.path.join', 'os.path.join', (['prefixes[0]', '"""envs"""'], {}), "(prefixes[0], 'envs')\n", (6010, 6031), False, 'import os\n'), ((7475, 7492), 'platform.system', 'platform.system', ([], {}), '()\n', (7490, 7492), False, 'import platform\n'), ((7535, 7570), 'os.path.join', 'os.path.join', (['python_dir', '"""jupyter"""'], {}), "(python_dir, 'jupyter')\n", (7547, 7570), False, 'import os\n'), ((7596, 7613), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (7611, 7613), False, 'import os\n'), ((7653, 7687), 'os.path.join', 'os.path.join', (['python_dir', '"""python"""'], {}), "(python_dir, 'python')\n", (7665, 7687), False, 'import os\n'), ((7708, 7758), 'subprocess.Popen', 'Popen', (["[jupyterexe, 'notebook']"], {'cwd': 'jupyter_dname'}), "([jupyterexe, 'notebook'], cwd=jupyter_dname)\n", (7713, 7758), False, 'from subprocess import Popen\n'), ((7804, 7835), 'pymdwizard.core.utils.get_install_dname', 'utils.get_install_dname', (['"""root"""'], {}), "('root')\n", (7827, 7835), False, 'from pymdwizard.core import utils\n'), ((7866, 7916), 'os.path.join', 'os.path.join', (['python_dir', '"""scripts"""', '"""jupyter.exe"""'], {}), "(python_dir, 'scripts', 'jupyter.exe')\n", (7878, 7916), False, 'import os\n'), ((7942, 7959), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (7957, 7959), False, 'import os\n'), ((7999, 8036), 'os.path.join', 'os.path.join', (['root_dir', '"""Python36_64"""'], {}), "(root_dir, 'Python36_64')\n", (8011, 8036), False, 'import os\n'), ((8219, 8282), 'subprocess.Popen', 'Popen', (['[jupyterexe, jupytertype]'], {'cwd': 'jupyter_dname', 'env': 'my_env'}), '([jupyterexe, jupytertype], cwd=jupyter_dname, env=my_env)\n', (8224, 8282), False, 'from subprocess import Popen\n'), ((8599, 8631), 'os.path.exists', 'os.path.exists', (['jupyter_exe_name'], {}), '(jupyter_exe_name)\n', (8613, 8631), False, 'import os\n'), ((8668, 8718), 'os.path.join', 'os.path.join', (['root_dname', '"""Scripts"""', '"""jupyter.exe"""'], {}), "(root_dname, 'Scripts', 'jupyter.exe')\n", (8680, 8718), False, 'import os\n'), ((6095, 6137), 'subprocess.check_output', 'subprocess.check_output', (["['conda', 'info']"], {}), "(['conda', 'info'])\n", (6118, 6137), False, 'import subprocess\n')]
|
#!/usr/bin/env python3
from setuptools import setup
from sys import version_info
if version_info < (3, 5, 2):
    # 3.5.2 is when __aiter__ became a synchronous function
    raise SystemExit('Sorry! asks requires python 3.5.2 or later.')
setup(
    name='asks',
    description='asks - async http',
    long_description='asks is an async http lib for curio and trio',
    license='MIT',
    version='2.0.0',
    author='<NAME> - aka theelous3',
    url='https://github.com/theelous3/asks',
    packages=['asks'],
    install_requires=['h11', 'async_generator', 'multio>=0.2.3'],
    tests_require=['pytest', 'curio', 'trio'],
    classifiers=[
        'Programming Language :: Python :: 3',
        'Framework :: Trio',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
|
[
"setuptools.setup"
] |
[((241, 862), 'setuptools.setup', 'setup', ([], {'name': '"""asks"""', 'description': '"""asks - async http"""', 'long_description': '"""asks is an async http lib for curio and trio"""', 'license': '"""MIT"""', 'version': '"""2.0.0"""', 'author': '"""<NAME> - aka theelous3"""', 'url': '"""https://github.com/theelous3/asks"""', 'packages': "['asks']", 'install_requires': "['h11', 'async_generator', 'multio>=0.2.3']", 'tests_require': "['pytest', 'curio', 'trio']", 'classifiers': "['Programming Language :: Python :: 3', 'Framework :: Trio',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Internet :: WWW/HTTP']"}), "(name='asks', description='asks - async http', long_description=\n 'asks is an async http lib for curio and trio', license='MIT', version=\n '2.0.0', author='<NAME> - aka theelous3', url=\n 'https://github.com/theelous3/asks', packages=['asks'],\n install_requires=['h11', 'async_generator', 'multio>=0.2.3'],\n tests_require=['pytest', 'curio', 'trio'], classifiers=[\n 'Programming Language :: Python :: 3', 'Framework :: Trio',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Internet :: WWW/HTTP'])\n", (246, 862), False, 'from setuptools import setup\n')]
|
# Generated by Django 2.2.16 on 2020-09-12 22:38
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
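    # Initial schema: a Feedback table with an author FK, typed feedback choices, and source URL.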
    initial = True
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
    operations = [
        migrations.CreateModel(
            name="Feedback",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("text", models.CharField(default="null", max_length=2000)),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                (
                    "author",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "type",
                    models.PositiveIntegerField(
                        choices=[
                            (1, "Bug Report"),
                            (2, "Feature Request"),
                            (3, "General Feedback"),
                        ]
                    ),
                ),
                ("url", models.CharField(max_length=200)),
            ],
            options={"verbose_name": "Feedback", "verbose_name_plural": "Feedback"},
        )
    ]
|
[
"django.db.migrations.swappable_dependency",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((239, 296), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (270, 296), False, 'from django.db import migrations, models\n'), ((464, 557), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (480, 557), False, 'from django.db import migrations, models\n'), ((718, 767), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""null"""', 'max_length': '(2000)'}), "(default='null', max_length=2000)\n", (734, 767), False, 'from django.db import migrations, models\n'), ((801, 840), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (821, 840), False, 'from django.db import migrations, models\n'), ((911, 1018), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL)\n', (928, 1018), False, 'from django.db import migrations, models\n'), ((1196, 1305), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'choices': "[(1, 'Bug Report'), (2, 'Feature Request'), (3, 'General Feedback')]"}), "(choices=[(1, 'Bug Report'), (2,\n 'Feature Request'), (3, 'General Feedback')])\n", (1223, 1305), False, 'from django.db import migrations, models\n'), ((1503, 1535), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1519, 1535), False, 'from django.db import migrations, models\n')]
|
import asyncio
from kuknos_sdk import AiohttpClient, Server
HORIZON_URL = "https://horizon.kuknos.org"
async def payments():
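    # Stream payment records from Horizon as they arrive, starting at the current ledger cursor.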
    async with Server(HORIZON_URL, AiohttpClient()) as server:
        async for payment in server.payments().cursor(cursor="now").stream():
            print(f"Payment: {payment}")
async def effects():
    async with Server(HORIZON_URL, AiohttpClient()) as server:
        async for effect in server.effects().cursor(cursor="now").stream():
            print(f"Effect: {effect}")
async def operations():
    async with Server(HORIZON_URL, AiohttpClient()) as server:
        async for operation in server.operations().cursor(cursor="now").stream():
            print(f"Operation: {operation}")
async def transactions():
    async with Server(HORIZON_URL, AiohttpClient()) as server:
        async for transaction in server.transactions().cursor(cursor="now").stream():
            print(f"Transaction: {transaction}")
async def listen():
    await asyncio.gather(
        payments(),
        effects(),
        operations(),
        transactions()
    )
if __name__ == '__main__':
    asyncio.run(listen())
|
[
"kuknos_sdk.AiohttpClient"
] |
[((164, 179), 'kuknos_sdk.AiohttpClient', 'AiohttpClient', ([], {}), '()\n', (177, 179), False, 'from kuknos_sdk import AiohttpClient, Server\n'), ((369, 384), 'kuknos_sdk.AiohttpClient', 'AiohttpClient', ([], {}), '()\n', (382, 384), False, 'from kuknos_sdk import AiohttpClient, Server\n'), ((573, 588), 'kuknos_sdk.AiohttpClient', 'AiohttpClient', ([], {}), '()\n', (586, 588), False, 'from kuknos_sdk import AiohttpClient, Server\n'), ((791, 806), 'kuknos_sdk.AiohttpClient', 'AiohttpClient', ([], {}), '()\n', (804, 806), False, 'from kuknos_sdk import AiohttpClient, Server\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2006 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
since 1.9: mutagen.m4a is deprecated; use mutagen.mp4 instead.
since 1.31: mutagen.m4a will no longer work; any operation that could fail
will fail now.
"""
import warnings
from mutagen import FileType, Tags, StreamInfo
from ._util import DictProxy, MutagenError, loadfile
warnings.warn(
"mutagen.m4a is deprecated; use mutagen.mp4 instead.",
DeprecationWarning)
class error(MutagenError):
pass
class M4AMetadataError(error):
pass
class M4AStreamInfoError(error):
pass
class M4AMetadataValueError(error):
pass
__all__ = ['M4A', 'Open', 'delete', 'M4ACover']
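# Only M4ACover remains constructible; the load/save/delete paths below all
# raise error("deprecated") so that lingering users of this module fail loudly.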
class M4ACover(bytes):
FORMAT_JPEG = 0x0D
FORMAT_PNG = 0x0E
def __new__(cls, data, imageformat=None):
self = bytes.__new__(cls, data)
if imageformat is None:
imageformat = M4ACover.FORMAT_JPEG
self.imageformat = imageformat
return self
class M4ATags(DictProxy, Tags):
def load(self, atoms, fileobj):
raise error("deprecated")
def save(self, filename):
raise error("deprecated")
def delete(self, filename):
raise error("deprecated")
def pprint(self):
return u""
class M4AInfo(StreamInfo):
bitrate = 0
def __init__(self, atoms, fileobj):
raise error("deprecated")
def pprint(self):
return u""
class M4A(FileType):
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
@loadfile()
def load(self, filething):
raise error("deprecated")
def add_tags(self):
self.tags = M4ATags()
@staticmethod
def score(filename, fileobj, header):
return 0
Open = M4A
def delete(filename):
raise error("deprecated")
|
[
"warnings.warn"
] |
[((591, 683), 'warnings.warn', 'warnings.warn', (['"""mutagen.m4a is deprecated; use mutagen.mp4 instead."""', 'DeprecationWarning'], {}), "('mutagen.m4a is deprecated; use mutagen.mp4 instead.',\n DeprecationWarning)\n", (604, 683), False, 'import warnings\n')]
|
import sc2
from sc2 import Race
from sc2.player import Bot
from zerg.zerg_rush import ZergRushBot
def main():
portconfig = sc2.portconfig.Portconfig()
print(portconfig.as_json)
player_config = [
Bot(Race.Zerg, ZergRushBot()),
Bot(Race.Zerg, None)
]
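    # The second Bot slot has no AI attached: another process is expected to
    # join the hosted game using the port configuration printed above.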
for g in sc2.main._host_game_iter(
sc2.maps.get("Abyssal Reef LE"),
player_config,
realtime=False,
portconfig=portconfig
):
print(g)
if __name__ == "__main__":
main()
|
[
"sc2.player.Bot",
"zerg.zerg_rush.ZergRushBot",
"sc2.portconfig.Portconfig",
"sc2.maps.get"
] |
[((173, 200), 'sc2.portconfig.Portconfig', 'sc2.portconfig.Portconfig', ([], {}), '()\n', (198, 200), False, 'import sc2\n'), ((301, 321), 'sc2.player.Bot', 'Bot', (['Race.Zerg', 'None'], {}), '(Race.Zerg, None)\n', (304, 321), False, 'from sc2.player import Bot\n'), ((376, 407), 'sc2.maps.get', 'sc2.maps.get', (['"""Abyssal Reef LE"""'], {}), "('Abyssal Reef LE')\n", (388, 407), False, 'import sc2\n'), ((277, 290), 'zerg.zerg_rush.ZergRushBot', 'ZergRushBot', ([], {}), '()\n', (288, 290), False, 'from zerg.zerg_rush import ZergRushBot\n')]
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from atexit import register, unregister
from logging import getLogger
from threading import Lock
from typing import Optional, Sequence
# This kind of import is needed to avoid Sphinx errors.
import opentelemetry.sdk.metrics
from opentelemetry.metrics import Counter as APICounter
from opentelemetry.metrics import Histogram as APIHistogram
from opentelemetry.metrics import Meter as APIMeter
from opentelemetry.metrics import MeterProvider as APIMeterProvider
from opentelemetry.metrics import NoOpMeter
from opentelemetry.metrics import ObservableCounter as APIObservableCounter
from opentelemetry.metrics import ObservableGauge as APIObservableGauge
from opentelemetry.metrics import (
ObservableUpDownCounter as APIObservableUpDownCounter,
)
from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
from opentelemetry.sdk.metrics._internal.instrument import (
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk.metrics._internal.measurement_consumer import (
MeasurementConsumer,
SynchronousMeasurementConsumer,
)
from opentelemetry.sdk.metrics._internal.sdk_configuration import (
SdkConfiguration,
)
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
from opentelemetry.util._once import Once
from opentelemetry.util._time import _time_ns
_logger = getLogger(__name__)
class Meter(APIMeter):
"""See `opentelemetry.metrics.Meter`."""
def __init__(
self,
instrumentation_scope: InstrumentationScope,
measurement_consumer: MeasurementConsumer,
):
super().__init__(instrumentation_scope)
self._instrumentation_scope = instrumentation_scope
self._measurement_consumer = measurement_consumer
self._instrument_id_instrument = {}
self._instrument_id_instrument_lock = Lock()
def create_counter(self, name, unit="", description="") -> APICounter:
(
is_instrument_registered,
instrument_id,
) = self._is_instrument_registered(name, Counter, unit, description)
if is_instrument_registered:
# FIXME #2558 go through all views here and check if this
# instrument registration conflict can be fixed. If it can be, do
# not log the following warning.
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
APICounter.__name__,
unit,
description,
)
with self._instrument_id_instrument_lock:
return self._instrument_id_instrument[instrument_id]
instrument = Counter(
name,
self._instrumentation_scope,
self._measurement_consumer,
unit,
description,
)
with self._instrument_id_instrument_lock:
self._instrument_id_instrument[instrument_id] = instrument
return instrument
def create_up_down_counter(
self, name, unit="", description=""
) -> APIUpDownCounter:
(
is_instrument_registered,
instrument_id,
) = self._is_instrument_registered(
name, UpDownCounter, unit, description
)
if is_instrument_registered:
# FIXME #2558 go through all views here and check if this
# instrument registration conflict can be fixed. If it can be, do
# not log the following warning.
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
APIUpDownCounter.__name__,
unit,
description,
)
with self._instrument_id_instrument_lock:
return self._instrument_id_instrument[instrument_id]
instrument = UpDownCounter(
name,
self._instrumentation_scope,
self._measurement_consumer,
unit,
description,
)
with self._instrument_id_instrument_lock:
self._instrument_id_instrument[instrument_id] = instrument
return instrument
def create_observable_counter(
self, name, callbacks=None, unit="", description=""
) -> APIObservableCounter:
(
is_instrument_registered,
instrument_id,
) = self._is_instrument_registered(
name, ObservableCounter, unit, description
)
if is_instrument_registered:
# FIXME #2558 go through all views here and check if this
# instrument registration conflict can be fixed. If it can be, do
# not log the following warning.
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
APIObservableCounter.__name__,
unit,
description,
)
with self._instrument_id_instrument_lock:
return self._instrument_id_instrument[instrument_id]
instrument = ObservableCounter(
name,
self._instrumentation_scope,
self._measurement_consumer,
callbacks,
unit,
description,
)
self._measurement_consumer.register_asynchronous_instrument(instrument)
with self._instrument_id_instrument_lock:
self._instrument_id_instrument[instrument_id] = instrument
return instrument
def create_histogram(self, name, unit="", description="") -> APIHistogram:
(
is_instrument_registered,
instrument_id,
) = self._is_instrument_registered(name, Histogram, unit, description)
if is_instrument_registered:
# FIXME #2558 go through all views here and check if this
# instrument registration conflict can be fixed. If it can be, do
# not log the following warning.
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
APIHistogram.__name__,
unit,
description,
)
with self._instrument_id_instrument_lock:
return self._instrument_id_instrument[instrument_id]
instrument = Histogram(
name,
self._instrumentation_scope,
self._measurement_consumer,
unit,
description,
)
with self._instrument_id_instrument_lock:
self._instrument_id_instrument[instrument_id] = instrument
return instrument
def create_observable_gauge(
self, name, callbacks=None, unit="", description=""
) -> APIObservableGauge:
(
is_instrument_registered,
instrument_id,
) = self._is_instrument_registered(
name, ObservableGauge, unit, description
)
if is_instrument_registered:
# FIXME #2558 go through all views here and check if this
# instrument registration conflict can be fixed. If it can be, do
# not log the following warning.
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
APIObservableGauge.__name__,
unit,
description,
)
with self._instrument_id_instrument_lock:
return self._instrument_id_instrument[instrument_id]
instrument = ObservableGauge(
name,
self._instrumentation_scope,
self._measurement_consumer,
callbacks,
unit,
description,
)
self._measurement_consumer.register_asynchronous_instrument(instrument)
with self._instrument_id_instrument_lock:
self._instrument_id_instrument[instrument_id] = instrument
return instrument
def create_observable_up_down_counter(
self, name, callbacks=None, unit="", description=""
) -> APIObservableUpDownCounter:
(
is_instrument_registered,
instrument_id,
        ) = self._is_instrument_registered(
            name, ObservableUpDownCounter, unit, description
        )
if is_instrument_registered:
# FIXME #2558 go through all views here and check if this
# instrument registration conflict can be fixed. If it can be, do
# not log the following warning.
_logger.warning(
"An instrument with name %s, type %s, unit %s and "
"description %s has been created already.",
name,
APIObservableUpDownCounter.__name__,
unit,
description,
)
with self._instrument_id_instrument_lock:
return self._instrument_id_instrument[instrument_id]
instrument = ObservableUpDownCounter(
name,
self._instrumentation_scope,
self._measurement_consumer,
callbacks,
unit,
description,
)
self._measurement_consumer.register_asynchronous_instrument(instrument)
with self._instrument_id_instrument_lock:
self._instrument_id_instrument[instrument_id] = instrument
return instrument
class MeterProvider(APIMeterProvider):
r"""See `opentelemetry.metrics.MeterProvider`.
Args:
metric_readers: Register metric readers to collect metrics from the SDK
on demand. Each :class:`opentelemetry.sdk.metrics.export.MetricReader` is
completely independent and will collect separate streams of
metrics. TODO: reference ``PeriodicExportingMetricReader`` usage with push
exporters here.
resource: The resource representing what the metrics emitted from the SDK pertain to.
shutdown_on_exit: If true, registers an `atexit` handler to call
`MeterProvider.shutdown`
        views: The views to configure the metric output of the SDK
By default, instruments which do not match any :class:`opentelemetry.sdk.metrics.view.View` (or if no :class:`opentelemetry.sdk.metrics.view.View`\ s
are provided) will report metrics with the default aggregation for the
instrument's kind. To disable instruments by default, configure a match-all
:class:`opentelemetry.sdk.metrics.view.View` with `DropAggregation` and then create :class:`opentelemetry.sdk.metrics.view.View`\ s to re-enable
individual instruments:
.. code-block:: python
:caption: Disable default views
MeterProvider(
views=[
View(instrument_name="*", aggregation=DropAggregation()),
View(instrument_name="mycounter"),
],
# ...
)
"""
_all_metric_readers_lock = Lock()
_all_metric_readers = set()
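    # Class-level registry shared by every provider instance: registering the
    # same MetricReader with a second MeterProvider raises in __init__ below.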
def __init__(
self,
metric_readers: Sequence[
"opentelemetry.sdk.metrics.export.MetricReader"
] = (),
resource: Resource = Resource.create({}),
shutdown_on_exit: bool = True,
views: Sequence["opentelemetry.sdk.metrics.view.View"] = (),
):
self._lock = Lock()
self._meter_lock = Lock()
self._atexit_handler = None
self._sdk_config = SdkConfiguration(
resource=resource,
metric_readers=metric_readers,
views=views,
)
self._measurement_consumer = SynchronousMeasurementConsumer(
sdk_config=self._sdk_config
)
if shutdown_on_exit:
self._atexit_handler = register(self.shutdown)
self._meters = {}
for metric_reader in self._sdk_config.metric_readers:
with self._all_metric_readers_lock:
if metric_reader in self._all_metric_readers:
raise Exception(
f"MetricReader {metric_reader} has been registered "
"already in other MeterProvider instance"
)
self._all_metric_readers.add(metric_reader)
metric_reader._set_collect_callback(
self._measurement_consumer.collect
)
self._shutdown_once = Once()
self._shutdown = False
def force_flush(self, timeout_millis: float = 10_000) -> bool:
deadline_ns = _time_ns() + timeout_millis * 10**6
metric_reader_error = {}
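        # Collect from each reader against the shared deadline; individual
        # failures are gathered and re-raised together below.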
for metric_reader in self._sdk_config.metric_readers:
current_ts = _time_ns()
try:
if current_ts >= deadline_ns:
raise Exception("Timed out while flushing metric readers")
metric_reader.collect(
timeout_millis=(deadline_ns - current_ts) / 10**6
)
# pylint: disable=broad-except
except Exception as error:
metric_reader_error[metric_reader] = error
if metric_reader_error:
metric_reader_error_string = "\n".join(
[
f"{metric_reader.__class__.__name__}: {repr(error)}"
for metric_reader, error in metric_reader_error.items()
]
)
raise Exception(
"MeterProvider.force_flush failed because the following "
"metric readers failed during collect:\n"
f"{metric_reader_error_string}"
)
return True
def shutdown(self, timeout_millis: float = 30_000):
deadline_ns = _time_ns() + timeout_millis * 10**6
def _shutdown():
self._shutdown = True
did_shutdown = self._shutdown_once.do_once(_shutdown)
if not did_shutdown:
_logger.warning("shutdown can only be called once")
return
metric_reader_error = {}
for metric_reader in self._sdk_config.metric_readers:
current_ts = _time_ns()
try:
if current_ts >= deadline_ns:
raise Exception(
"Didn't get to execute, deadline already exceeded"
)
metric_reader.shutdown(
timeout_millis=(deadline_ns - current_ts) / 10**6
)
# pylint: disable=broad-except
except Exception as error:
metric_reader_error[metric_reader] = error
if self._atexit_handler is not None:
unregister(self._atexit_handler)
self._atexit_handler = None
if metric_reader_error:
metric_reader_error_string = "\n".join(
[
f"{metric_reader.__class__.__name__}: {repr(error)}"
for metric_reader, error in metric_reader_error.items()
]
)
raise Exception(
(
"MeterProvider.shutdown failed because the following "
"metric readers failed during shutdown:\n"
f"{metric_reader_error_string}"
)
)
def get_meter(
self,
name: str,
version: Optional[str] = None,
schema_url: Optional[str] = None,
) -> Meter:
if self._shutdown:
_logger.warning(
"A shutdown `MeterProvider` can not provide a `Meter`"
)
return NoOpMeter(name, version=version, schema_url=schema_url)
if not name:
_logger.warning("Meter name cannot be None or empty.")
return NoOpMeter(name, version=version, schema_url=schema_url)
info = InstrumentationScope(name, version, schema_url)
with self._meter_lock:
if not self._meters.get(info):
# FIXME #2558 pass SDKConfig object to meter so that the meter
# has access to views.
self._meters[info] = Meter(
info,
self._measurement_consumer,
)
return self._meters[info]
|
[
"opentelemetry.sdk.metrics._internal.sdk_configuration.SdkConfiguration",
"opentelemetry.sdk.util.instrumentation.InstrumentationScope",
"opentelemetry.sdk.metrics._internal.instrument.UpDownCounter",
"opentelemetry.sdk.metrics._internal.instrument.ObservableUpDownCounter",
"atexit.register",
"opentelemetry.util._once.Once",
"atexit.unregister",
"opentelemetry.metrics.NoOpMeter",
"opentelemetry.sdk.metrics._internal.instrument.Histogram",
"threading.Lock",
"opentelemetry.sdk.metrics._internal.instrument.ObservableGauge",
"opentelemetry.sdk.metrics._internal.instrument.ObservableCounter",
"opentelemetry.sdk.metrics._internal.measurement_consumer.SynchronousMeasurementConsumer",
"opentelemetry.sdk.metrics._internal.instrument.Counter",
"opentelemetry.sdk.resources.Resource.create",
"opentelemetry.util._time._time_ns",
"logging.getLogger"
] |
[((2032, 2051), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (2041, 2051), False, 'from logging import getLogger\n'), ((11936, 11942), 'threading.Lock', 'Lock', ([], {}), '()\n', (11940, 11944), False, 'from threading import Lock\n'), ((2522, 2528), 'threading.Lock', 'Lock', ([], {}), '()\n', (2526, 2528), False, 'from threading import Lock\n'), ((3415, 3508), 'opentelemetry.sdk.metrics._internal.instrument.Counter', 'Counter', (['name', 'self._instrumentation_scope', 'self._measurement_consumer', 'unit', 'description'], {}), '(name, self._instrumentation_scope, self._measurement_consumer, unit,\n description)\n', (3422, 3508), False, 'from opentelemetry.sdk.metrics._internal.instrument import Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter\n'), ((4676, 4775), 'opentelemetry.sdk.metrics._internal.instrument.UpDownCounter', 'UpDownCounter', (['name', 'self._instrumentation_scope', 'self._measurement_consumer', 'unit', 'description'], {}), '(name, self._instrumentation_scope, self._measurement_consumer,\n unit, description)\n', (4689, 4775), False, 'from opentelemetry.sdk.metrics._internal.instrument import Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter\n'), ((5974, 6089), 'opentelemetry.sdk.metrics._internal.instrument.ObservableCounter', 'ObservableCounter', (['name', 'self._instrumentation_scope', 'self._measurement_consumer', 'callbacks', 'unit', 'description'], {}), '(name, self._instrumentation_scope, self.\n _measurement_consumer, callbacks, unit, description)\n', (5991, 6089), False, 'from opentelemetry.sdk.metrics._internal.instrument import Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter\n'), ((7295, 7390), 'opentelemetry.sdk.metrics._internal.instrument.Histogram', 'Histogram', (['name', 'self._instrumentation_scope', 'self._measurement_consumer', 'unit', 'description'], {}), '(name, self._instrumentation_scope, self._measurement_consumer,\n unit, description)\n', (7304, 7390), False, 'from opentelemetry.sdk.metrics._internal.instrument import Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter\n'), ((8580, 8693), 'opentelemetry.sdk.metrics._internal.instrument.ObservableGauge', 'ObservableGauge', (['name', 'self._instrumentation_scope', 'self._measurement_consumer', 'callbacks', 'unit', 'description'], {}), '(name, self._instrumentation_scope, self.\n _measurement_consumer, callbacks, unit, description)\n', (8595, 8693), False, 'from opentelemetry.sdk.metrics._internal.instrument import Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter\n'), ((9972, 10093), 'opentelemetry.sdk.metrics._internal.instrument.ObservableUpDownCounter', 'ObservableUpDownCounter', (['name', 'self._instrumentation_scope', 'self._measurement_consumer', 'callbacks', 'unit', 'description'], {}), '(name, self._instrumentation_scope, self.\n _measurement_consumer, callbacks, unit, description)\n', (9995, 10093), False, 'from opentelemetry.sdk.metrics._internal.instrument import Counter, Histogram, ObservableCounter, ObservableGauge, ObservableUpDownCounter, UpDownCounter\n'), ((12147, 12166), 'opentelemetry.sdk.resources.Resource.create', 'Resource.create', (['{}'], {}), '({})\n', (12162, 12166), False, 'from opentelemetry.sdk.resources import Resource\n'), ((12304, 12310), 'threading.Lock', 'Lock', ([], {}), '()\n', (12308, 12312), False, 'from threading import Lock\n'), ((12338, 12344), 'threading.Lock', 'Lock', ([], {}), '()\n', (12342, 12344), False, 'from threading import Lock\n'), ((12408, 12487), 'opentelemetry.sdk.metrics._internal.sdk_configuration.SdkConfiguration', 'SdkConfiguration', ([], {'resource': 'resource', 'metric_readers': 'metric_readers', 'views': 'views'}), '(resource=resource, metric_readers=metric_readers, views=views)\n', (12424, 12487), False, 'from opentelemetry.sdk.metrics._internal.sdk_configuration import SdkConfiguration\n'), ((12572, 12631), 'opentelemetry.sdk.metrics._internal.measurement_consumer.SynchronousMeasurementConsumer', 'SynchronousMeasurementConsumer', ([], {'sdk_config': 'self._sdk_config'}), '(sdk_config=self._sdk_config)\n', (12602, 12631), False, 'from opentelemetry.sdk.metrics._internal.measurement_consumer import MeasurementConsumer, SynchronousMeasurementConsumer\n'), ((13353, 13359), 'opentelemetry.util._once.Once', 'Once', ([], {}), '()\n', (13357, 13359), False, 'from opentelemetry.util._once import Once\n'), ((16776, 16823), 'opentelemetry.sdk.util.instrumentation.InstrumentationScope', 'InstrumentationScope', (['name', 'version', 'schema_url'], {}), '(name, version, schema_url)\n', (16796, 16823), False, 'from opentelemetry.sdk.util.instrumentation import InstrumentationScope\n'), ((12719, 12742), 'atexit.register', 'register', (['self.shutdown'], {}), '(self.shutdown)\n', (12727, 12742), False, 'from atexit import register, unregister\n'), ((13481, 13491), 'opentelemetry.util._time._time_ns', '_time_ns', ([], {}), '()\n', (13489, 13491), False, 'from opentelemetry.util._time import _time_ns\n'), ((13639, 13649), 'opentelemetry.util._time._time_ns', '_time_ns', ([], {}), '()\n', (13647, 13649), False, 'from opentelemetry.util._time import _time_ns\n'), ((14670, 14680), 'opentelemetry.util._time._time_ns', '_time_ns', ([], {}), '()\n', (14678, 14680), False, 'from opentelemetry.util._time import _time_ns\n'), ((15064, 15074), 'opentelemetry.util._time._time_ns', '_time_ns', ([], {}), '()\n', (15072, 15074), False, 'from opentelemetry.util._time import _time_ns\n'), ((15601, 15633), 'atexit.unregister', 'unregister', (['self._atexit_handler'], {}), '(self._atexit_handler)\n', (15611, 15633), False, 'from atexit import register, unregister\n'), ((16540, 16595), 'opentelemetry.metrics.NoOpMeter', 'NoOpMeter', (['name'], {'version': 'version', 'schema_url': 'schema_url'}), '(name, version=version, schema_url=schema_url)\n', (16549, 16595), False, 'from opentelemetry.metrics import NoOpMeter\n'), ((16704, 16759), 'opentelemetry.metrics.NoOpMeter', 'NoOpMeter', (['name'], {'version': 'version', 'schema_url': 'schema_url'}), '(name, version=version, schema_url=schema_url)\n', (16713, 16759), False, 'from opentelemetry.metrics import NoOpMeter\n')]
|
from __future__ import absolute_import, unicode_literals
import csv
import datetime
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.encoding import smart_str
from django.utils.translation import ungettext
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages
from wagtail.wagtailcore.models import Page
from wagtail.wagtailforms.forms import SelectDateForm
from wagtail.wagtailforms.models import get_forms_for_user
def index(request):
form_pages = get_forms_for_user(request.user)
paginator, form_pages = paginate(request, form_pages)
return render(request, 'wagtailforms/index.html', {
'form_pages': form_pages,
})
def delete_submissions(request, page_id):
if not get_forms_for_user(request.user).filter(id=page_id).exists():
raise PermissionDenied
page = get_object_or_404(Page, id=page_id).specific
# Get submissions
submission_ids = request.GET.getlist('selected-submissions')
submissions = page.get_submission_class()._default_manager.filter(id__in=submission_ids)
if request.method == 'POST':
count = submissions.count()
submissions.delete()
messages.success(
request,
ungettext(
"One submission has been deleted.",
"%(count)d submissions have been deleted.",
count
) % {
'count': count,
}
)
return redirect('wagtailforms:list_submissions', page_id)
return render(request, 'wagtailforms/confirm_delete.html', {
'page': page,
'submissions': submissions,
})
def list_submissions(request, page_id):
if not get_forms_for_user(request.user).filter(id=page_id).exists():
raise PermissionDenied
form_page = get_object_or_404(Page, id=page_id).specific
form_submission_class = form_page.get_submission_class()
data_fields = form_page.get_data_fields()
submissions = form_submission_class.objects.filter(page=form_page).order_by('submit_time')
data_headings = [label for name, label in data_fields]
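    # Optional date-range filtering; an empty or invalid form leaves the queryset unfiltered.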
select_date_form = SelectDateForm(request.GET)
if select_date_form.is_valid():
date_from = select_date_form.cleaned_data.get('date_from')
date_to = select_date_form.cleaned_data.get('date_to')
        # careful: date_to is increased by one day because submit_time is a
        # datetime, so submissions made on the end date itself would otherwise be excluded
if date_to:
date_to += datetime.timedelta(days=1)
if date_from and date_to:
submissions = submissions.filter(submit_time__range=[date_from, date_to])
elif date_from and not date_to:
submissions = submissions.filter(submit_time__gte=date_from)
elif not date_from and date_to:
submissions = submissions.filter(submit_time__lte=date_to)
if request.GET.get('action') == 'CSV':
# return a CSV instead
response = HttpResponse(content_type='text/csv; charset=utf-8')
response['Content-Disposition'] = 'attachment;filename=export.csv'
# Prevents UnicodeEncodeError for labels with non-ansi symbols
data_headings = [smart_str(label) for label in data_headings]
writer = csv.writer(response)
writer.writerow(data_headings)
for s in submissions:
data_row = []
form_data = s.get_data()
for name, label in data_fields:
val = form_data.get(name)
if isinstance(val, list):
val = ', '.join(val)
data_row.append(smart_str(val))
writer.writerow(data_row)
return response
paginator, submissions = paginate(request, submissions)
data_rows = []
for s in submissions:
form_data = s.get_data()
data_row = []
for name, label in data_fields:
val = form_data.get(name)
if isinstance(val, list):
val = ', '.join(val)
data_row.append(val)
data_rows.append({
"model_id": s.id,
"fields": data_row
})
return render(request, 'wagtailforms/index_submissions.html', {
'form_page': form_page,
'select_date_form': select_date_form,
'submissions': submissions,
'data_headings': data_headings,
'data_rows': data_rows
})
|
[
"wagtail.utils.pagination.paginate",
"django.http.HttpResponse",
"csv.writer",
"django.shortcuts.redirect",
"django.utils.translation.ungettext",
"django.utils.encoding.smart_str",
"django.shortcuts.get_object_or_404",
"datetime.timedelta",
"wagtail.wagtailforms.models.get_forms_for_user",
"django.shortcuts.render",
"wagtail.wagtailforms.forms.SelectDateForm"
] |
[((616, 648), 'wagtail.wagtailforms.models.get_forms_for_user', 'get_forms_for_user', (['request.user'], {}), '(request.user)\n', (634, 648), False, 'from wagtail.wagtailforms.models import get_forms_for_user\n'), ((678, 707), 'wagtail.utils.pagination.paginate', 'paginate', (['request', 'form_pages'], {}), '(request, form_pages)\n', (686, 707), False, 'from wagtail.utils.pagination import paginate\n'), ((720, 790), 'django.shortcuts.render', 'render', (['request', '"""wagtailforms/index.html"""', "{'form_pages': form_pages}"], {}), "(request, 'wagtailforms/index.html', {'form_pages': form_pages})\n", (726, 790), False, 'from django.shortcuts import get_object_or_404, redirect, render\n'), ((1649, 1748), 'django.shortcuts.render', 'render', (['request', '"""wagtailforms/confirm_delete.html"""', "{'page': page, 'submissions': submissions}"], {}), "(request, 'wagtailforms/confirm_delete.html', {'page': page,\n 'submissions': submissions})\n", (1655, 1748), False, 'from django.shortcuts import get_object_or_404, redirect, render\n'), ((2263, 2290), 'wagtail.wagtailforms.forms.SelectDateForm', 'SelectDateForm', (['request.GET'], {}), '(request.GET)\n', (2277, 2290), False, 'from wagtail.wagtailforms.forms import SelectDateForm\n'), ((3842, 3872), 'wagtail.utils.pagination.paginate', 'paginate', (['request', 'submissions'], {}), '(request, submissions)\n', (3850, 3872), False, 'from wagtail.utils.pagination import paginate\n'), ((4271, 4481), 'django.shortcuts.render', 'render', (['request', '"""wagtailforms/index_submissions.html"""', "{'form_page': form_page, 'select_date_form': select_date_form, 'submissions':\n submissions, 'data_headings': data_headings, 'data_rows': data_rows}"], {}), "(request, 'wagtailforms/index_submissions.html', {'form_page':\n form_page, 'select_date_form': select_date_form, 'submissions':\n submissions, 'data_headings': data_headings, 'data_rows': data_rows})\n", (4277, 4481), False, 'from django.shortcuts import get_object_or_404, redirect, render\n'), ((966, 1001), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Page'], {'id': 'page_id'}), '(Page, id=page_id)\n', (983, 1001), False, 'from django.shortcuts import get_object_or_404, redirect, render\n'), ((1586, 1636), 'django.shortcuts.redirect', 'redirect', (['"""wagtailforms:list_submissions"""', 'page_id'], {}), "('wagtailforms:list_submissions', page_id)\n", (1594, 1636), False, 'from django.shortcuts import get_object_or_404, redirect, render\n'), ((1931, 1966), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Page'], {'id': 'page_id'}), '(Page, id=page_id)\n', (1948, 1966), False, 'from django.shortcuts import get_object_or_404, redirect, render\n'), ((3092, 3144), 'django.http.HttpResponse', 'HttpResponse', ([], {'content_type': '"""text/csv; charset=utf-8"""'}), "(content_type='text/csv; charset=utf-8')\n", (3104, 3144), False, 'from django.http import HttpResponse\n'), ((3380, 3400), 'csv.writer', 'csv.writer', (['response'], {}), '(response)\n', (3390, 3400), False, 'import csv\n'), ((2627, 2653), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2645, 2653), False, 'import datetime\n'), ((3317, 3333), 'django.utils.encoding.smart_str', 'smart_str', (['label'], {}), '(label)\n', (3326, 3333), False, 'from django.utils.encoding import smart_str\n'), ((1351, 1451), 'django.utils.translation.ungettext', 'ungettext', (['"""One submission has been deleted."""', '"""%(count)d submissions have been deleted."""', 'count'], {}), "('One submission has been deleted.',\n '%(count)d submissions have been deleted.', count)\n", (1360, 1451), False, 'from django.utils.translation import ungettext\n'), ((3734, 3748), 'django.utils.encoding.smart_str', 'smart_str', (['val'], {}), '(val)\n', (3743, 3748), False, 'from django.utils.encoding import smart_str\n'), ((861, 893), 'wagtail.wagtailforms.models.get_forms_for_user', 'get_forms_for_user', (['request.user'], {}), '(request.user)\n', (879, 893), False, 'from wagtail.wagtailforms.models import get_forms_for_user\n'), ((1821, 1853), 'wagtail.wagtailforms.models.get_forms_for_user', 'get_forms_for_user', (['request.user'], {}), '(request.user)\n', (1839, 1853), False, 'from wagtail.wagtailforms.models import get_forms_for_user\n')]
|
from flask import Flask, render_template, url_for, request, redirect, flash
from werkzeug.utils import secure_filename
from utils import classify_image
import os
from flask_wtf.csrf import CSRFProtect
from config import Config
app = Flask(__name__)
app.config.from_object(Config)
csrf = CSRFProtect(app)
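# The upload view below expects UPLOAD_FOLDER, API_KEY and CLASSIFIER_ID to be
# defined on the Config object.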
# On IBM Cloud (Cloud Foundry), get the port number from the VCAP_APP_PORT environment variable.
# When running this app on the local machine, default the port to 5000.
port = int(os.getenv("VCAP_APP_PORT", 5000))
@app.route('/', methods=["GET", "POST"])
def root():
if request.method == "GET":
return render_template("index.html")
else:
if "file" not in request.files:
flash("No file part")
return redirect(request.url)
f = request.files["file"]
if f.filename == "":
flash("No selected file")
return redirect(request.url)
if f:
filename = secure_filename(f.filename)
# before saving this file, delete the old files in the sub-folder
img_path = "image-file"
img_path_images = os.getcwd() + "/" + img_path
            test_file_names = [os.path.join(img_path_images, name) for name in os.listdir(img_path_images) if name.endswith(".jpg") or name.endswith(".png")]
for file_path in test_file_names:
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
f.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
api_key_user = app.config["API_KEY"]
classifier_id = app.config["CLASSIFIER_ID"]
results = classify_image(api_key_user, classifier_id)
return render_template("classfied_results.html", results=results)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=port)
|
[
"os.listdir",
"flask.flash",
"os.unlink",
"flask_wtf.csrf.CSRFProtect",
"flask.redirect",
"os.getcwd",
"flask.Flask",
"werkzeug.utils.secure_filename",
"utils.classify_image",
"os.path.isfile",
"flask.render_template",
"os.path.join",
"os.getenv"
] |
[((236, 251), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (241, 251), False, 'from flask import Flask, render_template, url_for, request, redirect, flash\n'), ((290, 306), 'flask_wtf.csrf.CSRFProtect', 'CSRFProtect', (['app'], {}), '(app)\n', (301, 306), False, 'from flask_wtf.csrf import CSRFProtect\n'), ((476, 508), 'os.getenv', 'os.getenv', (['"""VCAP_APP_PORT"""', '(5000)'], {}), "('VCAP_APP_PORT', 5000)\n", (485, 508), False, 'import os\n'), ((611, 640), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (626, 640), False, 'from flask import Flask, render_template, url_for, request, redirect, flash\n'), ((711, 732), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (716, 732), False, 'from flask import Flask, render_template, url_for, request, redirect, flash\n'), ((756, 777), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (764, 777), False, 'from flask import Flask, render_template, url_for, request, redirect, flash\n'), ((865, 890), 'flask.flash', 'flash', (['"""No selected file"""'], {}), "('No selected file')\n", (870, 890), False, 'from flask import Flask, render_template, url_for, request, redirect, flash\n'), ((914, 935), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (922, 935), False, 'from flask import Flask, render_template, url_for, request, redirect, flash\n'), ((981, 1008), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (996, 1008), False, 'from werkzeug.utils import secure_filename\n'), ((1871, 1914), 'utils.classify_image', 'classify_image', (['api_key_user', 'classifier_id'], {}), '(api_key_user, classifier_id)\n', (1885, 1914), False, 'from utils import classify_image\n'), ((1938, 1996), 'flask.render_template', 'render_template', (['"""classfied_results.html"""'], {'results': 'results'}), "('classfied_results.html', results=results)\n", (1953, 1996), False, 'from flask import Flask, render_template, url_for, request, redirect, flash\n'), ((1261, 1293), 'os.path.join', 'os.path.join', (['img_path_images', 'f'], {}), '(img_path_images, f)\n', (1273, 1293), False, 'import os\n'), ((1679, 1730), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (1691, 1730), False, 'import os\n'), ((1199, 1210), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1208, 1210), False, 'import os\n'), ((1303, 1330), 'os.listdir', 'os.listdir', (['img_path_images'], {}), '(img_path_images)\n', (1313, 1330), False, 'import os\n'), ((1487, 1512), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1501, 1512), False, 'import os\n'), ((1542, 1562), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (1551, 1562), False, 'import os\n')]
|
from .config import *
from contextlib import contextmanager
from functools import wraps
import json
import os
import pathlib
import random
import shutil
import string
import tempfile
import bpy
import io_scene_previz
from previz import PrevizProject
class ApiDecorators(object):
def __init__(
self,
api_token,
api_root,
team_id,
new_project_prefix = 'ut-',
new_scene_prefix='ut-'):
self.api_root = api_root
self.api_token = api_token
self.team_id = team_id
self.new_project_prefix = new_project_prefix
self.new_scene_prefix = new_scene_prefix
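    # The context managers below create a throwaway project/scene (using the
    # configured prefix) and delete it on exit, so tests leave no server state behind.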
@contextmanager
def project_context(self, title):
pp = PrevizProject(self.api_root, self.api_token)
p = pp.new_project(self.new_project_prefix+title, self.team_id)
try:
yield p
finally:
pp.project_id = p['id']
pp.delete_project()
@contextmanager
def scene_context(self, project_id, title):
pp = PrevizProject(self.api_root, self.api_token, project_id)
s = pp.new_scene(self.new_scene_prefix+title)
try:
yield s
finally:
pp.delete_scene(s['id'])
def credentials(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
func(api_root=self.api_root, api_token=self.api_token, *args, **kwargs)
return wrapper
def get_team_id(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
func(team_id=self.team_id, *args, **kwargs)
return wrapper
def project(self, project_id):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
p = PrevizProject(self.api_root, self.api_token, project_id)
project = p.project(include=['scenes'])
func(project=project, *args, **kwargs)
return wrapper
return decorator
def tempproject(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
with self.project_context(func.__qualname__) as project:
func(project=project, *args, **kwargs)
return wrapper
def scene(self, scene_id):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
project_id = kwargs['project']['id']
p = PrevizProject(self.api_root, self.api_token, project_id)
scene = p.scene(scene_id, include=[])
func(scene=scene, *args, **kwargs)
#p = PrevizProject(self.api_root, self.api_token, project_id)
#func(project=p.project(include=['scenes']), *args, **kwargs)
return wrapper
return decorator
def tempscene(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
project_id = kwargs['project']['id']
with self.scene_context(project_id, func.__qualname__) as scene:
func(scene=scene, *args, **kwargs)
return wrapper
class MakeTempDirectories(object):
def __init__(self, prefix):
self.tmpdir = pathlib.Path(tempfile.mkdtemp(prefix=prefix + '-'))
print('Made temporary directory {!r}'.format(str(self.tmpdir)))
def __del__(self):
if len(list(self.tmpdir.iterdir())) > 0:
print('Temporary directory {!r} is not empty: not removing it.'.format(str(self.tmpdir)))
return
self.tmpdir.rmdir()
print('Removed temporary directory {!r}'.format(str(self.tmpdir)))
def __call__(self, func):
tmpdir = self.tmpdir
@wraps(func)
def func_wrapper(*args, **kwargs):
d = tmpdir / func.__qualname__
d.mkdir()
kwargs['tmpdir'] = pathlib.Path(d)
func(*args, **kwargs)
shutil.rmtree(str(d))
return func_wrapper
def build_api_decorators():
return ApiDecorators(os.environ[ENV_PREVIZ_API_TOKEN],
os.environ[ENV_PREVIZ_API_ROOT],
os.environ[ENV_PREVIZ_TEAM_UUID])
def scene(name):
"""Decorator that loads a test .blend scene before a test"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
# Load .blend
scenepath = pathlib.Path(__file__).with_name(BLENDS_DIR_NAME) / name
bpy.ops.wm.open_mainfile(filepath=str(scenepath),
load_ui=False) # load_ui=True crashes blender in --background mode
# Set API token
prefs = bpy.context.preferences.addons[io_scene_previz.__name__].preferences
prefs.api_root = os.environ[ENV_PREVIZ_API_ROOT]
prefs.api_token = os.environ[ENV_PREVIZ_API_TOKEN]
# Run test
kwargs['scenepath'] = scenepath
return func(*args, **kwargs)
return func_wrapper
return decorator
def object_names(objects = None):
if objects is None:
objects = bpy.data.objects
return sorted([o.name for o in objects])
def load_three_js_json(path, strip_uuids=False):
def strip(value):
values = []
if type(value) is dict:
for key in ['geometry', 'uuid']:
if key in value.keys():
del value[key]
values = value.values()
if type(value) is list:
if len(value) > 0 and type(value[0]) is dict:
child = value[0]
if 'name' in child:
value.sort(key=lambda x: x['name'])
if 'data' in child:
value.sort(key=lambda x: x['data']['name'])
values = value
for value in values:
strip(value)
with path.open() as fp:
ret = json.load(fp)
if strip_uuids:
strip(ret)
return ret
def run_previz_exporter(
project_id=None,
scene_id=None,
debug_run_api_requests=True,
debug_tmpdir=None,
debug_cleanup=False,
debug_run_modal=False):
api_root, api_token = io_scene_previz.previz_preferences(bpy.context)
kwargs = {
'api_root': api_root,
'api_token': api_token,
'debug_run_api_requests': debug_run_api_requests,
'debug_cleanup': debug_cleanup,
'debug_run_modal': debug_run_modal,
'project_id': project_id,
'scene_id': scene_id
}
if debug_tmpdir is not None:
kwargs['debug_tmpdir'] = str(debug_tmpdir)
return bpy.ops.export_scene.previz(**kwargs)
def run_create_project(project_name):
api_root, api_token = io_scene_previz.previz_preferences(bpy.context)
bpy.ops.export_scene.previz_new_project(api_root=api_root, api_token=api_token, project_name=project_name)
return max(PrevizProject(api_root, api_token).projects(), key=lambda p: p['id'])['id']
def delete_project(project_id):
api_root, api_token = io_scene_previz.previz_preferences(bpy.context)
PrevizProject(api_root, api_token, project_id).delete_project()
def set_project_state(project_id, state):
api_root, api_token = io_scene_previz.previz_preferences(bpy.context)
PrevizProject(api_root, api_token, project_id).set_state(state)
def random_project_name():
token = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
return 'Test-Project-{}'.format(token)
|
[
"bpy.ops.export_scene.previz",
"json.load",
"previz.PrevizProject",
"random.choice",
"pathlib.Path",
"tempfile.mkdtemp",
"bpy.ops.export_scene.previz_new_project",
"functools.wraps",
"io_scene_previz.previz_preferences"
] |
[((6142, 6189), 'io_scene_previz.previz_preferences', 'io_scene_previz.previz_preferences', (['bpy.context'], {}), '(bpy.context)\n', (6176, 6189), False, 'import io_scene_previz\n'), ((6575, 6612), 'bpy.ops.export_scene.previz', 'bpy.ops.export_scene.previz', ([], {}), '(**kwargs)\n', (6602, 6612), False, 'import bpy\n'), ((6679, 6726), 'io_scene_previz.previz_preferences', 'io_scene_previz.previz_preferences', (['bpy.context'], {}), '(bpy.context)\n', (6713, 6726), False, 'import io_scene_previz\n'), ((6731, 6842), 'bpy.ops.export_scene.previz_new_project', 'bpy.ops.export_scene.previz_new_project', ([], {'api_root': 'api_root', 'api_token': 'api_token', 'project_name': 'project_name'}), '(api_root=api_root, api_token=\n api_token, project_name=project_name)\n', (6770, 6842), False, 'import bpy\n'), ((6990, 7037), 'io_scene_previz.previz_preferences', 'io_scene_previz.previz_preferences', (['bpy.context'], {}), '(bpy.context)\n', (7024, 7037), False, 'import io_scene_previz\n'), ((7176, 7223), 'io_scene_previz.previz_preferences', 'io_scene_previz.previz_preferences', (['bpy.context'], {}), '(bpy.context)\n', (7210, 7223), False, 'import io_scene_previz\n'), ((709, 753), 'previz.PrevizProject', 'PrevizProject', (['self.api_root', 'self.api_token'], {}), '(self.api_root, self.api_token)\n', (722, 753), False, 'from previz import PrevizProject\n'), ((1026, 1082), 'previz.PrevizProject', 'PrevizProject', (['self.api_root', 'self.api_token', 'project_id'], {}), '(self.api_root, self.api_token, project_id)\n', (1039, 1082), False, 'from previz import PrevizProject\n'), ((1267, 1278), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1272, 1278), False, 'from functools import wraps\n'), ((1467, 1478), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1472, 1478), False, 'from functools import wraps\n'), ((2011, 2022), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2016, 2022), False, 'from functools import wraps\n'), ((2820, 2831), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2825, 2831), False, 'from functools import wraps\n'), ((3652, 3663), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3657, 3663), False, 'from functools import wraps\n'), ((4239, 4250), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (4244, 4250), False, 'from functools import wraps\n'), ((5836, 5849), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (5845, 5849), False, 'import json\n'), ((1674, 1685), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1679, 1685), False, 'from functools import wraps\n'), ((2282, 2293), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2287, 2293), False, 'from functools import wraps\n'), ((3174, 3211), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': "(prefix + '-')"}), "(prefix=prefix + '-')\n", (3190, 3211), False, 'import tempfile\n'), ((3803, 3818), 'pathlib.Path', 'pathlib.Path', (['d'], {}), '(d)\n', (3815, 3818), False, 'import pathlib\n'), ((7042, 7088), 'previz.PrevizProject', 'PrevizProject', (['api_root', 'api_token', 'project_id'], {}), '(api_root, api_token, project_id)\n', (7055, 7088), False, 'from previz import PrevizProject\n'), ((7228, 7274), 'previz.PrevizProject', 'PrevizProject', (['api_root', 'api_token', 'project_id'], {}), '(api_root, api_token, project_id)\n', (7241, 7274), False, 'from previz import PrevizProject\n'), ((7341, 7378), 'random.choice', 'random.choice', (['string.ascii_lowercase'], {}), '(string.ascii_lowercase)\n', (7354, 7378), False, 'import random\n'), ((1748, 1804), 'previz.PrevizProject', 'PrevizProject', (['self.api_root', 'self.api_token', 'project_id'], {}), '(self.api_root, self.api_token, project_id)\n', (1761, 1804), False, 'from previz import PrevizProject\n'), ((2409, 2465), 'previz.PrevizProject', 'PrevizProject', (['self.api_root', 'self.api_token', 'project_id'], {}), '(self.api_root, self.api_token, project_id)\n', (2422, 2465), False, 'from previz import PrevizProject\n'), ((6854, 6888), 'previz.PrevizProject', 'PrevizProject', (['api_root', 'api_token'], {}), '(api_root, api_token)\n', (6867, 6888), False, 'from previz import PrevizProject\n'), ((4345, 4367), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (4357, 4367), False, 'import pathlib\n')]
|
import typing
import gettext
import numpy
from nion.data import Core
from nion.data import DataAndMetadata
from nion.swift.model import Symbolic
from nion.typeshed import API_1_0
_ = gettext.gettext
class AlignMultiDimensionalSequence:
label = _("Align multi-dimensional sequence")
inputs = {"si_sequence_data_item": {"label": _("Multi-dimensional sequence data item")},
"haadf_sequence_data_item": {"label": _("HAADF sequence data item")},
"align_index": {"label": _("Align to this slice")},
"align_region": {"label": _("Alignment bounds")},
}
outputs = {"aligned_haadf": {"label": _("Aligned HAADF sequence")},
"aligned_si": {"label": _("Aligned multi-dimensional sequence")}}
def __init__(self, computation, **kwargs):
self.computation = computation
def execute(self, si_sequence_data_item: API_1_0.DataItem, haadf_sequence_data_item: API_1_0.DataItem,
align_index: int, align_region: API_1_0.Graphic):
haadf_xdata = haadf_sequence_data_item.xdata
si_xdata = si_sequence_data_item.xdata
bounds = align_region.bounds
translations = Core.function_sequence_measure_relative_translation(haadf_xdata,
haadf_xdata[align_index],
10, True, bounds=bounds)
sequence_shape = haadf_sequence_data_item.xdata.sequence_dimension_shape
c = int(numpy.product(sequence_shape))
haadf_result_data = numpy.empty_like(haadf_xdata.data)
si_result_data = numpy.empty_like(si_xdata.data)
align_data_shape = haadf_sequence_data_item.xdata.datum_dimension_shape
align_axes_start_index = None
for i in range(len(si_xdata.data_shape) - 1):
if align_data_shape == si_xdata.data_shape[i:i+2]:
align_axes_start_index = i
break
else:
raise RuntimeError('Could not find axes that match the HAADF shape in SI data item.')
si_translation = [0.0] * (len(si_xdata.data_shape) - len(sequence_shape))
align_axes_start_index -= len(sequence_shape)
assert align_axes_start_index >= 0
for i in range(c):
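            # Shift each HAADF frame and its matching SI slice by the same measured translation.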
ii = numpy.unravel_index(i, sequence_shape)
current_xdata = DataAndMetadata.new_data_and_metadata(haadf_xdata.data[ii])
translation = translations.data[ii]
haadf_result_data[ii] = Core.function_shift(current_xdata, tuple(translation)).data
current_xdata = DataAndMetadata.new_data_and_metadata(si_xdata.data[ii])
si_translation[align_axes_start_index] = translation[0]
si_translation[align_axes_start_index+1] = translation[1]
si_result_data[ii] = Core.function_shift(current_xdata, tuple(si_translation)).data
self.__aligned_haadf_sequence = DataAndMetadata.new_data_and_metadata(haadf_result_data,
intensity_calibration=haadf_xdata.intensity_calibration,
dimensional_calibrations=haadf_xdata.dimensional_calibrations,
metadata=haadf_xdata.metadata,
data_descriptor=haadf_xdata.data_descriptor)
self.__aligned_si_sequence = DataAndMetadata.new_data_and_metadata(si_result_data,
intensity_calibration=si_xdata.intensity_calibration,
dimensional_calibrations=si_xdata.dimensional_calibrations,
metadata=si_xdata.metadata,
data_descriptor=si_xdata.data_descriptor)
def commit(self):
self.computation.set_referenced_xdata("aligned_haadf", self.__aligned_haadf_sequence)
self.computation.set_referenced_xdata("aligned_si", self.__aligned_si_sequence)
def align_multi_si(api: API_1_0.API, window: API_1_0.DocumentWindow):
selected_display_items = window._document_controller._get_two_data_sources()
error_msg = "Select a sequence of spectrum images and a sequence of scanned images in order to use this computation."
assert selected_display_items[0][0] is not None, error_msg
assert selected_display_items[1][0] is not None, error_msg
assert selected_display_items[0][0].data_item is not None, error_msg
assert selected_display_items[1][0].data_item is not None, error_msg
assert selected_display_items[0][0].data_item.is_sequence, error_msg
assert selected_display_items[1][0].data_item.is_sequence, error_msg
di_1 = selected_display_items[0][0].data_item
di_2 = selected_display_items[1][0].data_item
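    # Heuristic: score both selections against the expected HAADF "footprint"
    # (2 datum dims, a sequence, 0 collection dims, recorded by superscan); the
    # closer match becomes the HAADF sequence and the other the SI sequence.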
haadf_footprint = (2, True, 0, True)
    di_1_footprint = (di_1.datum_dimension_count, di_1.is_sequence, di_1.collection_dimension_count,
                      di_1.metadata.get("hardware_source", {}).get("hardware_source_id", "") == "superscan")
    di_2_footprint = (di_2.datum_dimension_count, di_2.is_sequence, di_2.collection_dimension_count,
                      di_2.metadata.get("hardware_source", {}).get("hardware_source_id", "") == "superscan")
di_1_points = 0
di_2_points = 0
for i in range(len(haadf_footprint)):
di_1_points -= abs(haadf_footprint[i] - di_1_footprint[i])
di_2_points -= abs(haadf_footprint[i] - di_2_footprint[i])
if di_1_points > di_2_points:
assert di_1_footprint[:-1] == haadf_footprint[:-1], error_msg
haadf_sequence_data_item = api._new_api_object(di_1)
si_sequence_data_item = api._new_api_object(di_2)
elif di_2_points > di_1_points:
assert di_2_footprint[:-1] == haadf_footprint[:-1], error_msg
haadf_sequence_data_item = api._new_api_object(di_2)
si_sequence_data_item = api._new_api_object(di_1)
else:
raise ValueError(error_msg)
align_region = None
for graphic in haadf_sequence_data_item.graphics:
if graphic.graphic_type == 'rect-graphic':
align_region = graphic
break
if align_region is None:
align_region = haadf_sequence_data_item.add_rectangle_region(0.5, 0.5, 0.75, 0.75)
align_region.label = 'Alignment bounds'
align_index = haadf_sequence_data_item.display._display.display_data_channel.sequence_index
aligned_haadf = api.library.create_data_item_from_data(numpy.zeros((1,1,1)), title="Aligned {}".format(haadf_sequence_data_item.title))
aligned_si = api.library.create_data_item_from_data(numpy.zeros((1,1,1)), title="Aligned {}".format(si_sequence_data_item.title))
inputs = {"si_sequence_data_item": si_sequence_data_item,
"haadf_sequence_data_item": haadf_sequence_data_item,
"align_index": align_index,
"align_region": align_region}
computation = api.library.create_computation("nion.align_multi_d_sequence",
inputs=inputs,
outputs={"aligned_haadf": aligned_haadf,
"aligned_si": aligned_si})
computation._computation.source = aligned_si._data_item
window.display_data_item(aligned_haadf)
window.display_data_item(aligned_si)
Symbolic.register_computation_type("nion.align_multi_d_sequence", AlignMultiDimensionalSequence)
class AlignSequenceMenuItemDelegate:
def __init__(self, api):
self.__api = api
self.menu_id = "processing_menu" # required, specify menu_id where this item will go
self.menu_name = _("Processing") # optional, specify default name if not a standard menu
self.menu_before_id = "window_menu" # optional, specify before menu_id if not a standard menu
self.menu_item_name = _("Align sequence of multi-dimensional data") # menu item name
def menu_item_execute(self, window):
align_multi_si(self.__api, window)
class AlignSequenceExtension:
# required for Swift to recognize this as an extension class.
extension_id = "nion.experimental.align_multi_d_sequence"
def __init__(self, api_broker):
# grab the api object.
api = api_broker.get_api(version="~1.0")
self.__align_sequence_menu_item_ref = api.create_menu_item(AlignSequenceMenuItemDelegate(api))
def close(self):
# close will be called when the extension is unloaded. in turn, close any references so they get closed. this
# is not strictly necessary since the references will be deleted naturally when this object is deleted.
self.__align_sequence_menu_item_ref.close()
self.__align_sequence_menu_item_ref = None
|
[
"nion.swift.model.Symbolic.register_computation_type",
"nion.data.Core.function_sequence_measure_relative_translation",
"nion.data.DataAndMetadata.new_data_and_metadata",
"numpy.zeros",
"numpy.empty_like",
"numpy.unravel_index",
"numpy.product"
] |
[((7815, 7915), 'nion.swift.model.Symbolic.register_computation_type', 'Symbolic.register_computation_type', (['"""nion.align_multi_d_sequence"""', 'AlignMultiDimensionalSequence'], {}), "('nion.align_multi_d_sequence',\n AlignMultiDimensionalSequence)\n", (7849, 7915), False, 'from nion.swift.model import Symbolic\n'), ((1187, 1306), 'nion.data.Core.function_sequence_measure_relative_translation', 'Core.function_sequence_measure_relative_translation', (['haadf_xdata', 'haadf_xdata[align_index]', '(10)', '(True)'], {'bounds': 'bounds'}), '(haadf_xdata,\n haadf_xdata[align_index], 10, True, bounds=bounds)\n', (1238, 1306), False, 'from nion.data import Core\n'), ((1609, 1643), 'numpy.empty_like', 'numpy.empty_like', (['haadf_xdata.data'], {}), '(haadf_xdata.data)\n', (1625, 1643), False, 'import numpy\n'), ((1669, 1700), 'numpy.empty_like', 'numpy.empty_like', (['si_xdata.data'], {}), '(si_xdata.data)\n', (1685, 1700), False, 'import numpy\n'), ((2970, 3235), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['haadf_result_data'], {'intensity_calibration': 'haadf_xdata.intensity_calibration', 'dimensional_calibrations': 'haadf_xdata.dimensional_calibrations', 'metadata': 'haadf_xdata.metadata', 'data_descriptor': 'haadf_xdata.data_descriptor'}), '(haadf_result_data,\n intensity_calibration=haadf_xdata.intensity_calibration,\n dimensional_calibrations=haadf_xdata.dimensional_calibrations, metadata\n =haadf_xdata.metadata, data_descriptor=haadf_xdata.data_descriptor)\n', (3007, 3235), False, 'from nion.data import DataAndMetadata\n'), ((3572, 3824), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['si_result_data'], {'intensity_calibration': 'si_xdata.intensity_calibration', 'dimensional_calibrations': 'si_xdata.dimensional_calibrations', 'metadata': 'si_xdata.metadata', 'data_descriptor': 'si_xdata.data_descriptor'}), '(si_result_data, intensity_calibration\n =si_xdata.intensity_calibration, dimensional_calibrations=si_xdata.\n dimensional_calibrations, metadata=si_xdata.metadata, data_descriptor=\n si_xdata.data_descriptor)\n', (3609, 3824), False, 'from nion.data import DataAndMetadata\n'), ((6899, 6921), 'numpy.zeros', 'numpy.zeros', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (6910, 6921), False, 'import numpy\n'), ((7036, 7058), 'numpy.zeros', 'numpy.zeros', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (7047, 7058), False, 'import numpy\n'), ((1550, 1579), 'numpy.product', 'numpy.product', (['sequence_shape'], {}), '(sequence_shape)\n', (1563, 1579), False, 'import numpy\n'), ((2339, 2377), 'numpy.unravel_index', 'numpy.unravel_index', (['i', 'sequence_shape'], {}), '(i, sequence_shape)\n', (2358, 2377), False, 'import numpy\n'), ((2406, 2465), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['haadf_xdata.data[ii]'], {}), '(haadf_xdata.data[ii])\n', (2443, 2465), False, 'from nion.data import DataAndMetadata\n'), ((2638, 2694), 'nion.data.DataAndMetadata.new_data_and_metadata', 'DataAndMetadata.new_data_and_metadata', (['si_xdata.data[ii]'], {}), '(si_xdata.data[ii])\n', (2675, 2694), False, 'from nion.data import DataAndMetadata\n')]
|
import sys, re, requests
from bs4 import BeautifulSoup as BS
url = f'https://cctld.uz/whois/?domain={sys.argv[1]}&zone=uz'
r = requests.get(url)
html = BS(r.content, 'html.parser')
if len(html.select('.table-responsive'))>0:
for el in html.select('tbody > tr:nth-child(5) > td:nth-child(2)'):
date = '-'.join(el.text[1:11].split('.')[::-1])
        # sanity check: re.search returns None on no match, so .group(0) raises
        # AttributeError if the reordered string is not date-shaped (note the
        # pattern checks only one digit for the day)
        re.search(r'[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9]', date).group(0)
print(date, end='')
else:
print('0', end='')
|
[
"bs4.BeautifulSoup",
"re.search",
"requests.get"
] |
[((129, 146), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (141, 146), False, 'import sys, re, requests\n'), ((154, 182), 'bs4.BeautifulSoup', 'BS', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (156, 182), True, 'from bs4 import BeautifulSoup as BS\n'), ((364, 420), 're.search', 're.search', (['"""[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9]"""', 'date'], {}), "('[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9]', date)\n", (373, 420), False, 'import sys, re, requests\n')]
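A note on the row above: the scraper flips the site's DD.MM.YYYY date into ISO order. A minimal sketch of just that reordering step in isolation, with a hypothetical input value and a stricter validation pattern than the original (which checks only one day digit):

# Sketch of the date-reordering idiom; `raw` is a hypothetical cell value.
import re
raw = '01.02.2020'                     # DD.MM.YYYY as scraped
date = '-'.join(raw.split('.')[::-1])  # -> '2020-02-01'
assert re.fullmatch(r'\d{4}-\d{2}-\d{2}', date)  # stricter check (assumption)
print(date)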
|
import unittest
from katas.kyu_7.genetic_algorithm_series_1_generate import generate
class GenerateTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(len(generate(16)), 16)
def test_equals_2(self):
self.assertEqual(len(generate(32)), 32)
def test_equals_3(self):
self.assertEqual(len(generate(64)), 64)
|
[
"katas.kyu_7.genetic_algorithm_series_1_generate.generate"
] |
[((187, 199), 'katas.kyu_7.genetic_algorithm_series_1_generate.generate', 'generate', (['(16)'], {}), '(16)\n', (195, 199), False, 'from katas.kyu_7.genetic_algorithm_series_1_generate import generate\n'), ((265, 277), 'katas.kyu_7.genetic_algorithm_series_1_generate.generate', 'generate', (['(32)'], {}), '(32)\n', (273, 277), False, 'from katas.kyu_7.genetic_algorithm_series_1_generate import generate\n'), ((343, 355), 'katas.kyu_7.genetic_algorithm_series_1_generate.generate', 'generate', (['(64)'], {}), '(64)\n', (351, 355), False, 'from katas.kyu_7.genetic_algorithm_series_1_generate import generate\n')]
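The tests above constrain only the chromosome length, never its contents. A sketch of a generate() that would pass them (an assumption for illustration, not necessarily the kata's actual solution):

# Hypothetical generate(): a random bit string of the requested length.
import random
def generate(length):
    return ''.join(random.choice('01') for _ in range(length))
assert len(generate(16)) == 16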
|
# Copyright (c) 2018-2019 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
ILRMA
=====
Blind Source Separation using Independent Low-Rank Matrix Analysis (ILRMA).
"""
import numpy as np
from .common import projection_back
def ilrma(
X,
n_src=None,
n_iter=20,
proj_back=False,
W0=None,
n_components=2,
return_filters=0,
callback=None,
):
"""
Implementation of ILRMA algorithm without partitioning function for BSS presented in
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, *Determined blind
source separation unifying independent vector analysis and nonnegative matrix
factorization,* IEEE/ACM Trans. ASLP, vol. 24, no. 9, pp. 1626-1641, Sept. 2016
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME> *Determined
Blind Source Separation with Independent Low-Rank Matrix Analysis,* in
Audio Source Separation, <NAME>, 2018, pp. 125-156.
Parameters
----------
X: ndarray (nframes, nfrequencies, nchannels)
        STFT representation of the observed signal
    n_src: int, optional
        The number of sources or independent components
n_iter: int, optional
The number of iterations (default 20)
proj_back: bool, optional
        Scaling on first mic by back projection (default False)
W0: ndarray (nfrequencies, nchannels, nchannels), optional
Initial value for demixing matrix
n_components: int
Number of components in the non-negative spectrum
return_filters: bool
If true, the function will return the demixing matrix too
callback: func
A callback function called every 10 iterations, allows to monitor convergence
Returns
-------
Returns an (nframes, nfrequencies, nsources) array. Also returns
the demixing matrix W (nfrequencies, nchannels, nsources)
    if ``return_filters`` keyword is True.
"""
n_frames, n_freq, n_chan = X.shape
# default to determined case
if n_src is None:
n_src = X.shape[2]
# Only supports determined case
assert n_chan == n_src, "There should be as many microphones as sources"
# initialize the demixing matrices
# The demixing matrix has the following dimensions (nfrequencies, nchannels, nsources),
if W0 is None:
W = np.array([np.eye(n_chan, n_src) for f in range(n_freq)], dtype=X.dtype)
else:
W = W0.copy()
# initialize the nonnegative matrixes with random values
T = np.array(0.1 + 0.9 * np.random.rand(n_src, n_freq, n_components))
V = np.array(0.1 + 0.9 * np.random.rand(n_src, n_frames, n_components))
R = np.zeros((n_src, n_freq, n_frames))
I = np.eye(n_src, n_src)
U = np.zeros((n_freq, n_src, n_chan, n_chan), dtype=X.dtype)
product = np.zeros((n_freq, n_chan, n_chan), dtype=X.dtype)
lambda_aux = np.zeros(n_src)
eps = 1e-15
eyes = np.tile(np.eye(n_chan, n_chan), (n_freq, 1, 1))
# Things are more efficient when the frequencies are over the first axis
Y = np.zeros((n_freq, n_src, n_frames), dtype=X.dtype)
X_original = X
X = X.transpose([1, 2, 0]).copy()
np.matmul(T, V.swapaxes(1, 2), out=R)
# Compute the demixed output
def demix(Y, X, W):
Y[:, :, :] = np.matmul(W, X)
demix(Y, X, W)
# P.shape == R.shape == (n_src, n_freq, n_frames)
P = np.power(abs(Y.transpose([1, 0, 2])), 2.0)
iR = 1 / R
for epoch in range(n_iter):
if callback is not None and epoch % 10 == 0:
Y_t = Y.transpose([2, 0, 1])
if proj_back:
z = projection_back(Y_t, X_original[:, :, 0])
callback(Y_t * np.conj(z[None, :, :]))
else:
callback(Y_t)
# simple loop as a start
for s in range(n_src):
## NMF
######
T[s, :, :] *= np.sqrt(
np.dot(P[s, :, :] * iR[s, :, :] ** 2, V[s, :, :])
/ np.dot(iR[s, :, :], V[s, :, :])
)
T[T < eps] = eps
R[s, :, :] = np.dot(T[s, :, :], V[s, :, :].T)
iR[s, :, :] = 1 / R[s, :, :]
V[s, :, :] *= np.sqrt(
np.dot(P[s, :, :].T * iR[s, :, :].T ** 2, T[s, :, :])
/ np.dot(iR[s, :, :].T, T[s, :, :])
)
V[V < eps] = eps
R[s, :, :] = np.dot(T[s, :, :], V[s, :, :].T)
iR[s, :, :] = 1 / R[s, :, :]
## IVA
######
# Compute Auxiliary Variable
# shape: (n_freq, n_chan, n_chan)
C = np.matmul((X * iR[s, :, None, :]), np.conj(X.swapaxes(1, 2)) / n_frames)
WV = np.matmul(W, C)
W[:, s, :] = np.conj(np.linalg.solve(WV, eyes[:, :, s]))
# normalize
denom = np.matmul(
np.matmul(W[:, None, s, :], C[:, :, :]), np.conj(W[:, s, :, None])
)
W[:, s, :] /= np.sqrt(denom[:, :, 0])
demix(Y, X, W)
np.power(abs(Y.transpose([1, 0, 2])), 2.0, out=P)
for s in range(n_src):
lambda_aux[s] = 1 / np.sqrt(np.mean(P[s, :, :]))
W[:, :, s] *= lambda_aux[s]
P[s, :, :] *= lambda_aux[s] ** 2
R[s, :, :] *= lambda_aux[s] ** 2
T[s, :, :] *= lambda_aux[s] ** 2
Y = Y.transpose([2, 0, 1]).copy()
if proj_back:
z = projection_back(Y, X_original[:, :, 0])
Y *= np.conj(z[None, :, :])
if return_filters:
return Y, W
else:
return Y
|
[
"numpy.conj",
"numpy.random.rand",
"numpy.zeros",
"numpy.mean",
"numpy.matmul",
"numpy.dot",
"numpy.eye",
"numpy.linalg.solve",
"numpy.sqrt"
] |
[((3600, 3635), 'numpy.zeros', 'np.zeros', (['(n_src, n_freq, n_frames)'], {}), '((n_src, n_freq, n_frames))\n', (3608, 3635), True, 'import numpy as np\n'), ((3644, 3664), 'numpy.eye', 'np.eye', (['n_src', 'n_src'], {}), '(n_src, n_src)\n', (3650, 3664), True, 'import numpy as np\n'), ((3673, 3729), 'numpy.zeros', 'np.zeros', (['(n_freq, n_src, n_chan, n_chan)'], {'dtype': 'X.dtype'}), '((n_freq, n_src, n_chan, n_chan), dtype=X.dtype)\n', (3681, 3729), True, 'import numpy as np\n'), ((3744, 3793), 'numpy.zeros', 'np.zeros', (['(n_freq, n_chan, n_chan)'], {'dtype': 'X.dtype'}), '((n_freq, n_chan, n_chan), dtype=X.dtype)\n', (3752, 3793), True, 'import numpy as np\n'), ((3811, 3826), 'numpy.zeros', 'np.zeros', (['n_src'], {}), '(n_src)\n', (3819, 3826), True, 'import numpy as np\n'), ((3988, 4038), 'numpy.zeros', 'np.zeros', (['(n_freq, n_src, n_frames)'], {'dtype': 'X.dtype'}), '((n_freq, n_src, n_frames), dtype=X.dtype)\n', (3996, 4038), True, 'import numpy as np\n'), ((3862, 3884), 'numpy.eye', 'np.eye', (['n_chan', 'n_chan'], {}), '(n_chan, n_chan)\n', (3868, 3884), True, 'import numpy as np\n'), ((4218, 4233), 'numpy.matmul', 'np.matmul', (['W', 'X'], {}), '(W, X)\n', (4227, 4233), True, 'import numpy as np\n'), ((6388, 6410), 'numpy.conj', 'np.conj', (['z[None, :, :]'], {}), '(z[None, :, :])\n', (6395, 6410), True, 'import numpy as np\n'), ((5017, 5049), 'numpy.dot', 'np.dot', (['T[s, :, :]', 'V[s, :, :].T'], {}), '(T[s, :, :], V[s, :, :].T)\n', (5023, 5049), True, 'import numpy as np\n'), ((5318, 5350), 'numpy.dot', 'np.dot', (['T[s, :, :]', 'V[s, :, :].T'], {}), '(T[s, :, :], V[s, :, :].T)\n', (5324, 5350), True, 'import numpy as np\n'), ((5626, 5641), 'numpy.matmul', 'np.matmul', (['W', 'C'], {}), '(W, C)\n', (5635, 5641), True, 'import numpy as np\n'), ((5890, 5913), 'numpy.sqrt', 'np.sqrt', (['denom[:, :, 0]'], {}), '(denom[:, :, 0])\n', (5897, 5913), True, 'import numpy as np\n'), ((3286, 3307), 'numpy.eye', 'np.eye', (['n_chan', 'n_src'], {}), '(n_chan, n_src)\n', (3292, 3307), True, 'import numpy as np\n'), ((3471, 3514), 'numpy.random.rand', 'np.random.rand', (['n_src', 'n_freq', 'n_components'], {}), '(n_src, n_freq, n_components)\n', (3485, 3514), True, 'import numpy as np\n'), ((3545, 3590), 'numpy.random.rand', 'np.random.rand', (['n_src', 'n_frames', 'n_components'], {}), '(n_src, n_frames, n_components)\n', (3559, 3590), True, 'import numpy as np\n'), ((5675, 5709), 'numpy.linalg.solve', 'np.linalg.solve', (['WV', 'eyes[:, :, s]'], {}), '(WV, eyes[:, :, s])\n', (5690, 5709), True, 'import numpy as np\n'), ((5783, 5822), 'numpy.matmul', 'np.matmul', (['W[:, None, s, :]', 'C[:, :, :]'], {}), '(W[:, None, s, :], C[:, :, :])\n', (5792, 5822), True, 'import numpy as np\n'), ((5824, 5849), 'numpy.conj', 'np.conj', (['W[:, s, :, None]'], {}), '(W[:, s, :, None])\n', (5831, 5849), True, 'import numpy as np\n'), ((4848, 4897), 'numpy.dot', 'np.dot', (['(P[s, :, :] * iR[s, :, :] ** 2)', 'V[s, :, :]'], {}), '(P[s, :, :] * iR[s, :, :] ** 2, V[s, :, :])\n', (4854, 4897), True, 'import numpy as np\n'), ((4916, 4947), 'numpy.dot', 'np.dot', (['iR[s, :, :]', 'V[s, :, :]'], {}), '(iR[s, :, :], V[s, :, :])\n', (4922, 4947), True, 'import numpy as np\n'), ((5143, 5196), 'numpy.dot', 'np.dot', (['(P[s, :, :].T * iR[s, :, :].T ** 2)', 'T[s, :, :]'], {}), '(P[s, :, :].T * iR[s, :, :].T ** 2, T[s, :, :])\n', (5149, 5196), True, 'import numpy as np\n'), ((5215, 5248), 'numpy.dot', 'np.dot', (['iR[s, :, :].T', 'T[s, :, :]'], {}), '(iR[s, :, :].T, T[s, :, :])\n', (5221, 5248), True, 'import numpy as np\n'), ((6068, 6087), 'numpy.mean', 'np.mean', (['P[s, :, :]'], {}), '(P[s, :, :])\n', (6075, 6087), True, 'import numpy as np\n'), ((4621, 4643), 'numpy.conj', 'np.conj', (['z[None, :, :]'], {}), '(z[None, :, :])\n', (4628, 4643), True, 'import numpy as np\n')]
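The multiplicative updates in the loop above read more clearly as equations. With P = |Y|^2 and R = T V^T, the pair of np.dot calls updating T implements (per source s, frequency f, component k; the V update is symmetric):

\[ t_{fk} \leftarrow t_{fk} \sqrt{ \frac{\sum_n p_{fn}\, r_{fn}^{-2}\, v_{nk}}{\sum_n r_{fn}^{-1}\, v_{nk}} } \]

and the IVA half-step is the standard iterative-projection update on the weighted covariance \( C_s = \frac{1}{N}\sum_n r_{s,fn}^{-1}\, x_{fn} x_{fn}^{\mathsf{H}} \):

\[ w_s \leftarrow (W C_s)^{-1} e_s, \qquad w_s \leftarrow \frac{w_s}{\sqrt{w_s^{\mathsf{H}} C_s\, w_s}} \]

which is exactly the np.linalg.solve line followed by the division by np.sqrt(denom).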
|
# coding: utf8
"""Securely hash and check passwords using PBKDF2.
Use random salts to protect against rainbow tables, many iterations against
brute-force, and constant-time comparison against timing attacks.
Keep parameters to the algorithm together with the hash so that we can
change the parameters and keep older hashes working.
See more details at http://exyr.org/2011/hashing-passwords/
Author: <NAME>
License: BSD
"""
import hashlib
from os import urandom
from base64 import b64encode, b64decode
from itertools import izip
# From https://github.com/mitsuhiko/python-pbkdf2
from .pbkdf2 import pbkdf2_bin
# Parameters to PBKDF2. Only affect new passwords.
SALT_LENGTH = 12
KEY_LENGTH = 24
HASH_FUNCTION = 'sha256' # Must be in hashlib.
# Linear to the hashing time. Adjust to be high but take a reasonable
# amount of time on your server. Measure with:
# python -m timeit -s 'import passwords as p' 'p.make_hash("something")'
COST_FACTOR = 10000
def make_hash(password):
"""Generate a random salt and return a new hash for the password."""
if isinstance(password, unicode):
password = password.encode('utf-8')
salt = b64encode(urandom(SALT_LENGTH))
return 'PBKDF2${}${}${}${}'.format(
HASH_FUNCTION,
COST_FACTOR,
salt,
b64encode(pbkdf2_bin(password, salt, COST_FACTOR, KEY_LENGTH,
getattr(hashlib, HASH_FUNCTION))))
def check_hash(password, hash_):
"""Check a password against an existing hash."""
if isinstance(password, unicode):
password = password.encode('utf-8')
algorithm, hash_function, cost_factor, salt, hash_a = hash_.split('$')
assert algorithm == 'PBKDF2'
hash_a = b64decode(hash_a)
hash_b = pbkdf2_bin(password, salt, int(cost_factor), len(hash_a),
getattr(hashlib, hash_function))
assert len(hash_a) == len(hash_b) # we requested this from pbkdf2_bin()
# Same as "return hash_a == hash_b" but takes a constant time.
# See http://carlos.bueno.org/2011/10/timing.html
diff = 0
for char_a, char_b in izip(hash_a, hash_b):
diff |= ord(char_a) ^ ord(char_b)
return diff == 0
|
[
"os.urandom",
"itertools.izip",
"base64.b64decode"
] |
[((1707, 1724), 'base64.b64decode', 'b64decode', (['hash_a'], {}), '(hash_a)\n', (1716, 1724), False, 'from base64 import b64encode, b64decode\n'), ((2090, 2110), 'itertools.izip', 'izip', (['hash_a', 'hash_b'], {}), '(hash_a, hash_b)\n', (2094, 2110), False, 'from itertools import izip\n'), ((1162, 1182), 'os.urandom', 'urandom', (['SALT_LENGTH'], {}), '(SALT_LENGTH)\n', (1169, 1182), False, 'from os import urandom\n')]
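A round trip through the two functions above (Python 2, matching the unicode/izip usage; the password literal is only an example):

# Because every parameter is serialized into the stored string
# (PBKDF2$sha256$10000$<salt>$<hash>), COST_FACTOR can be raised later
# without invalidating hashes created with the old value.
h = make_hash(u'correct horse battery staple')
assert check_hash(u'correct horse battery staple', h)
assert not check_hash(u'wrong guess', h)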
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import threading
import time
import zmq
from collections import deque, defaultdict
import parl
import sys
from parl.utils import to_str, to_byte, logger, get_ip_address
from parl.remote import remote_constants
from parl.remote.job_center import JobCenter
from parl.remote.cluster_monitor import ClusterMonitor
from parl.remote.grpc_heartbeat import HeartbeatClientThread
import cloudpickle
import time
from parl.remote.utils import has_module
class Master(object):
"""Base class for a master node, the control center for our cluster, which provides connections to workers and clients.
There is only one master node in each cluster, and it is responsible for
receiving jobs from the clients and allocating computation resources to
run the jobs.
To start a master node, we use the following xparl command line api:
.. code-block:: python
xparl start --port localhost:1234
At the same time, a local worker will be started and connect to the
master node.
Attributes:
job_center (JobCenter): A thread-safe data structure that stores the job address of vacant cpus.
client_socket (zmq.Context.socket): A socket that receives submitted
job from the client, and later sends
job_address back to the client.
master_ip(str): The ip address of the master node.
cpu_num(int): The number of available CPUs in the cluster.
worker_num(int): The number of workers connected to this cluster.
cluster_monitor(dict): A dict to record worker status and client status.
client_hostname(dict): A dict to store hostname for each client address.
Args:
port: The ip port that the master node binds to.
"""
def __init__(self, port, monitor_port=None):
self.ctx = zmq.Context()
self.master_ip = get_ip_address()
self.all_client_heartbeat_threads = []
self.all_worker_heartbeat_threads = []
self.monitor_url = "http://{}:{}".format(self.master_ip, monitor_port)
logger.set_dir(
os.path.expanduser('~/.parl_data/master/{}_{}'.format(
self.master_ip, port)))
self.client_socket = self.ctx.socket(zmq.REP)
self.client_socket.bind("tcp://*:{}".format(port))
self.client_socket.linger = 0
self.port = port
self.job_center = JobCenter(self.master_ip)
self.cluster_monitor = ClusterMonitor()
self.master_is_alive = True
self.client_hostname = defaultdict(int)
def _get_status(self):
return self.cluster_monitor.get_status()
def _print_workers(self):
"""Display `worker_pool` infomation."""
logger.info(
"Master connects to {} workers and have {} vacant CPUs.\n".format(
self.worker_num, self.cpu_num))
@property
def cpu_num(self):
return self.job_center.cpu_num
@property
def worker_num(self):
return self.job_center.worker_num
def _receive_message(self):
"""Master node will receive various types of message: (1) worker
connection; (2) worker update; (3) client connection; (4) job
        submission; (5) reset job.
"""
message = self.client_socket.recv_multipart()
tag = message[0]
# a new worker connects to the master
if tag == remote_constants.WORKER_CONNECT_TAG:
self.client_socket.send_multipart([remote_constants.NORMAL_TAG])
elif tag == remote_constants.MONITOR_TAG:
status = self._get_status()
self.client_socket.send_multipart(
[remote_constants.NORMAL_TAG, status])
# `xparl status` command line API
elif tag == remote_constants.STATUS_TAG:
status_info = self.cluster_monitor.get_status_info()
self.client_socket.send_multipart(
[remote_constants.NORMAL_TAG,
to_byte(status_info)])
elif tag == remote_constants.WORKER_INITIALIZED_TAG:
initialized_worker = cloudpickle.loads(message[1])
worker_address = initialized_worker.worker_address
self.job_center.add_worker(initialized_worker)
hostname = self.job_center.get_hostname(worker_address)
self.cluster_monitor.add_worker_status(worker_address, hostname)
logger.info("A new worker {} is added, ".format(worker_address) +
"the cluster has {} CPUs.\n".format(self.cpu_num))
def heartbeat_exit_callback_func(worker_address):
self.job_center.drop_worker(worker_address)
self.cluster_monitor.drop_worker_status(worker_address)
logger.warning("\n[Master] Cannot connect to the worker " +
"{}. ".format(worker_address) +
"Worker_pool will drop this worker.")
self._print_workers()
logger.warning("Exit worker monitor from master.")
            # a thread for sending heartbeat signals to the worker
thread = HeartbeatClientThread(
worker_address,
heartbeat_exit_callback_func=heartbeat_exit_callback_func,
exit_func_args=(worker_address, ))
self.all_worker_heartbeat_threads.append(thread)
thread.setDaemon(True)
thread.start()
self.client_socket.send_multipart([remote_constants.NORMAL_TAG])
# a client connects to the master
elif tag == remote_constants.CLIENT_CONNECT_TAG:
# `client_heartbeat_address` is the
# `reply_master_heartbeat_address` of the client
client_heartbeat_address = to_str(message[1])
client_hostname = to_str(message[2])
client_id = to_str(message[3])
self.client_hostname[client_heartbeat_address] = client_hostname
logger.info(
"Client {} is connected.".format(client_heartbeat_address))
def heartbeat_exit_callback_func(client_heartbeat_address):
self.cluster_monitor.drop_client_status(
client_heartbeat_address)
logger.warning("[Master] cannot connect to the client " +
"{}. ".format(client_heartbeat_address) +
"Please check if it is still alive.")
logger.info(
"Master connects to {} workers and have {} vacant CPUs.\n".
format(self.worker_num, self.cpu_num))
# a thread for sending heartbeat signals to the client
thread = HeartbeatClientThread(
client_heartbeat_address,
heartbeat_exit_callback_func=heartbeat_exit_callback_func,
exit_func_args=(client_heartbeat_address, ))
self.all_client_heartbeat_threads.append(thread)
thread.setDaemon(True)
thread.start()
log_monitor_address = "{}/logs?client_id={}".format(
self.monitor_url, client_id)
self.client_socket.send_multipart(
[remote_constants.NORMAL_TAG,
to_byte(log_monitor_address)])
elif tag == remote_constants.CHECK_VERSION_TAG:
self.client_socket.send_multipart([
remote_constants.NORMAL_TAG,
to_byte(parl.__version__),
to_byte(str(sys.version_info.major)),
to_byte(str(sys.version_info.minor)),
to_byte(str(has_module('pyarrow')))
])
# a client submits a job to the master
elif tag == remote_constants.CLIENT_SUBMIT_TAG:
# check available CPU resources
if self.cpu_num:
logger.info("Submitting job...")
job = self.job_center.request_job()
self.client_socket.send_multipart([
remote_constants.NORMAL_TAG,
to_byte(job.job_address),
to_byte(job.client_heartbeat_address),
to_byte(job.ping_heartbeat_address),
])
client_id = to_str(message[2])
job_info = {job.job_id: job.log_server_address}
self.cluster_monitor.add_client_job(client_id, job_info)
self._print_workers()
else:
self.client_socket.send_multipart([remote_constants.CPU_TAG])
# a worker updates
elif tag == remote_constants.NEW_JOB_TAG:
initialized_job = cloudpickle.loads(message[1])
last_job_address = to_str(message[2])
self.client_socket.send_multipart([remote_constants.NORMAL_TAG])
self.job_center.update_job(last_job_address, initialized_job,
initialized_job.worker_address)
logger.info("A worker updated. cpu_num:{}".format(self.cpu_num))
self._print_workers()
# client update status periodically
elif tag == remote_constants.CLIENT_STATUS_UPDATE_TAG:
client_heartbeat_address = to_str(message[1])
client_status = cloudpickle.loads(message[2])
client_status['client_hostname'] = self.client_hostname[
client_heartbeat_address]
self.cluster_monitor.update_client_status(client_heartbeat_address,
client_status)
self.client_socket.send_multipart([remote_constants.NORMAL_TAG])
# worker update status periodically
elif tag == remote_constants.WORKER_STATUS_UPDATE_TAG:
worker_address = to_str(message[1])
worker_status = cloudpickle.loads(message[2])
vacant_cpus = self.job_center.get_vacant_cpu(worker_address)
total_cpus = self.job_center.get_total_cpu(worker_address)
self.cluster_monitor.update_worker_status(
worker_status, worker_address, vacant_cpus, total_cpus)
self.client_socket.send_multipart([remote_constants.NORMAL_TAG])
# check before start a worker
elif tag == remote_constants.NORMAL_TAG:
self.client_socket.send_multipart([remote_constants.NORMAL_TAG])
else:
raise NotImplementedError()
def exit(self):
""" Close the master.
"""
self.master_is_alive = False
for thread in self.all_client_heartbeat_threads:
if thread.is_alive():
thread.exit()
for thread in self.all_worker_heartbeat_threads:
if thread.is_alive():
thread.exit()
def run(self):
"""An infinite loop waiting for messages from the workers and
clients.
Master node will receive four types of messages:
1. A new worker connects to the master node.
2. A connected worker sending new job address after it kills an old
job.
3. A new client connects to the master node.
4. A connected client submits a job after a remote object is created.
"""
self.client_socket.linger = 0
self.client_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
while self.master_is_alive:
try:
self._receive_message()
pass
except zmq.error.Again as e:
#detect whether `self.master_is_alive` is True periodically
pass
logger.warning("[Master] Exit master.")
|
[
"parl.utils.logger.warning",
"parl.remote.grpc_heartbeat.HeartbeatClientThread",
"parl.utils.logger.info",
"cloudpickle.loads",
"parl.remote.cluster_monitor.ClusterMonitor",
"parl.remote.job_center.JobCenter",
"collections.defaultdict",
"parl.utils.to_byte",
"parl.utils.to_str",
"parl.utils.get_ip_address",
"parl.remote.utils.has_module",
"zmq.Context"
] |
[((2496, 2509), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (2507, 2509), False, 'import zmq\n'), ((2535, 2551), 'parl.utils.get_ip_address', 'get_ip_address', ([], {}), '()\n', (2549, 2551), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((3059, 3084), 'parl.remote.job_center.JobCenter', 'JobCenter', (['self.master_ip'], {}), '(self.master_ip)\n', (3068, 3084), False, 'from parl.remote.job_center import JobCenter\n'), ((3116, 3132), 'parl.remote.cluster_monitor.ClusterMonitor', 'ClusterMonitor', ([], {}), '()\n', (3130, 3132), False, 'from parl.remote.cluster_monitor import ClusterMonitor\n'), ((3200, 3216), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3211, 3216), False, 'from collections import deque, defaultdict\n'), ((12261, 12300), 'parl.utils.logger.warning', 'logger.warning', (['"""[Master] Exit master."""'], {}), "('[Master] Exit master.')\n", (12275, 12300), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((4740, 4769), 'cloudpickle.loads', 'cloudpickle.loads', (['message[1]'], {}), '(message[1])\n', (4757, 4769), False, 'import cloudpickle\n'), ((5787, 5922), 'parl.remote.grpc_heartbeat.HeartbeatClientThread', 'HeartbeatClientThread', (['worker_address'], {'heartbeat_exit_callback_func': 'heartbeat_exit_callback_func', 'exit_func_args': '(worker_address,)'}), '(worker_address, heartbeat_exit_callback_func=\n heartbeat_exit_callback_func, exit_func_args=(worker_address,))\n', (5808, 5922), False, 'from parl.remote.grpc_heartbeat import HeartbeatClientThread\n'), ((4622, 4642), 'parl.utils.to_byte', 'to_byte', (['status_info'], {}), '(status_info)\n', (4629, 4642), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((5647, 5697), 'parl.utils.logger.warning', 'logger.warning', (['"""Exit worker monitor from master."""'], {}), "('Exit worker monitor from master.')\n", (5661, 5697), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((6423, 6441), 'parl.utils.to_str', 'to_str', (['message[1]'], {}), '(message[1])\n', (6429, 6441), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((6472, 6490), 'parl.utils.to_str', 'to_str', (['message[2]'], {}), '(message[2])\n', (6478, 6490), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((6515, 6533), 'parl.utils.to_str', 'to_str', (['message[3]'], {}), '(message[3])\n', (6521, 6533), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((7361, 7519), 'parl.remote.grpc_heartbeat.HeartbeatClientThread', 'HeartbeatClientThread', (['client_heartbeat_address'], {'heartbeat_exit_callback_func': 'heartbeat_exit_callback_func', 'exit_func_args': '(client_heartbeat_address,)'}), '(client_heartbeat_address,\n heartbeat_exit_callback_func=heartbeat_exit_callback_func,\n exit_func_args=(client_heartbeat_address,))\n', (7382, 7519), False, 'from parl.remote.grpc_heartbeat import HeartbeatClientThread\n'), ((7906, 7934), 'parl.utils.to_byte', 'to_byte', (['log_monitor_address'], {}), '(log_monitor_address)\n', (7913, 7934), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((8103, 8128), 'parl.utils.to_byte', 'to_byte', (['parl.__version__'], {}), '(parl.__version__)\n', (8110, 8128), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((8498, 8530), 'parl.utils.logger.info', 'logger.info', (['"""Submitting job..."""'], {}), "('Submitting job...')\n", (8509, 8530), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((8893, 8911), 'parl.utils.to_str', 'to_str', (['message[2]'], {}), '(message[2])\n', (8899, 8911), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((9291, 9320), 'cloudpickle.loads', 'cloudpickle.loads', (['message[1]'], {}), '(message[1])\n', (9308, 9320), False, 'import cloudpickle\n'), ((9352, 9370), 'parl.utils.to_str', 'to_str', (['message[2]'], {}), '(message[2])\n', (9358, 9370), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((9853, 9871), 'parl.utils.to_str', 'to_str', (['message[1]'], {}), '(message[1])\n', (9859, 9871), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((9900, 9929), 'cloudpickle.loads', 'cloudpickle.loads', (['message[2]'], {}), '(message[2])\n', (9917, 9929), False, 'import cloudpickle\n'), ((8266, 8287), 'parl.remote.utils.has_module', 'has_module', (['"""pyarrow"""'], {}), "('pyarrow')\n", (8276, 8287), False, 'from parl.remote.utils import has_module\n'), ((8704, 8728), 'parl.utils.to_byte', 'to_byte', (['job.job_address'], {}), '(job.job_address)\n', (8711, 8728), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((8750, 8787), 'parl.utils.to_byte', 'to_byte', (['job.client_heartbeat_address'], {}), '(job.client_heartbeat_address)\n', (8757, 8787), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((8809, 8844), 'parl.utils.to_byte', 'to_byte', (['job.ping_heartbeat_address'], {}), '(job.ping_heartbeat_address)\n', (8816, 8844), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((10405, 10423), 'parl.utils.to_str', 'to_str', (['message[1]'], {}), '(message[1])\n', (10411, 10423), False, 'from parl.utils import to_str, to_byte, logger, get_ip_address\n'), ((10452, 10481), 'cloudpickle.loads', 'cloudpickle.loads', (['message[2]'], {}), '(message[2])\n', (10469, 10481), False, 'import cloudpickle\n')]
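Every exchange with the master above is a ZeroMQ REQ/REP multipart message whose first frame is a tag from remote_constants. A generic client-side sketch of that handshake (the address is hypothetical, and this illustrates the framing only, not PARL's actual client code):

# Hypothetical peer sending the tag-first multipart request that
# _receive_message() dispatches on.
import zmq
from parl.remote import remote_constants

ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
sock.connect('tcp://127.0.0.1:1234')  # hypothetical master address
sock.send_multipart([remote_constants.WORKER_CONNECT_TAG])
reply = sock.recv_multipart()
assert reply[0] == remote_constants.NORMAL_TAG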
|
#!/usr/bin/env python
# coding=utf-8
import ansible.playbook
from ansible import callbacks
from ansible import utils
import json
stats = callbacks.AggregateStats()
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
runner_cb = callbacks.PlaybookRunnerCallbacks(stats,verbose=utils.VERBOSITY)
res=ansible.playbook.PlayBook(
playbook='replset_test.yml',
stats=stats,
callbacks=playbook_cb,
runner_callbacks=runner_cb
).run()
data = json.dumps(res,indent=4)
|
[
"ansible.callbacks.AggregateStats",
"ansible.callbacks.PlaybookRunnerCallbacks",
"ansible.callbacks.PlaybookCallbacks",
"json.dumps"
] |
[((175, 201), 'ansible.callbacks.AggregateStats', 'callbacks.AggregateStats', ([], {}), '()\n', (199, 201), False, 'from ansible import callbacks\n'), ((216, 268), 'ansible.callbacks.PlaybookCallbacks', 'callbacks.PlaybookCallbacks', ([], {'verbose': 'utils.VERBOSITY'}), '(verbose=utils.VERBOSITY)\n', (243, 268), False, 'from ansible import callbacks\n'), ((281, 346), 'ansible.callbacks.PlaybookRunnerCallbacks', 'callbacks.PlaybookRunnerCallbacks', (['stats'], {'verbose': 'utils.VERBOSITY'}), '(stats, verbose=utils.VERBOSITY)\n', (314, 346), False, 'from ansible import callbacks\n'), ((536, 561), 'json.dumps', 'json.dumps', (['res'], {'indent': '(4)'}), '(res, indent=4)\n', (546, 561), False, 'import json\n')]
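With the ansible 1.x Python API used above, PlayBook.run() returns per-host aggregate stats, so `data` serializes to something like the following (host name and counts hypothetical):

# {
#     "db-node-1": {
#         "ok": 12, "changed": 3, "unreachable": 0,
#         "failures": 0, "skipped": 1
#     }
# }
print(data)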
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Slit devices in AMOR"""
from numpy import arctan, radians, tan
from nicos.core import Attach, HasPrecision, Override, Param, Readable, \
dictwith, oneof, status
from nicos.core.utils import multiStatus
from nicos.devices.generic.slit import Slit, SlitAxis
from nicos_sinq.amor.devices.logical_motor import AmorLogicalMotor, \
InterfaceLogicalMotorHandler
class SlitOpening(HasPrecision, SlitAxis):
"""Device to control the slit opening/height.
    Motor dXt moves the slit's top slab, in turn changing the
slit opening. Motor dXb changes the position of the whole slit
moving it up or down (X is the slit number).
This device reads the current opening using the motor dXt and
changes the opening using combination of the motors dXt and dXb
such that the center remains aligned.
"""
parameter_overrides = {
'unit': Override(mandatory=False, default='mm'),
'fmtstr': Override(userparam=False),
'maxage': Override(userparam=False),
'pollinterval': Override(userparam=False),
'warnlimits': Override(userparam=False),
'precision': Override(userparam=False, default=0.01),
'target': Override(volatile=True)
}
status_to_msg = {
status.ERROR: 'Error in %s',
status.BUSY: 'Moving: %s ...',
status.WARN: 'Warning in %s',
status.NOTREACHED: '%s did not reach target!',
status.UNKNOWN: 'Unknown status in %s!',
status.OK: 'Ready.'
}
def doReadTarget(self):
# Do not allow None as target
target = self._getFromCache('target', self.doRead)
return target if target is not None else self.doRead(0)
def _convertRead(self, positions):
return positions[3]
def _convertStart(self, target, current):
current_opening = current[3]
current_bottom = current[2]
new_bottom = current_bottom + 0.5 * (current_opening - target)
return current[0], current[1], new_bottom, target
def doStatus(self, maxage=0):
# Check for error and warning in the dependent devices
st_devs = multiStatus(self._adevs, maxage)
devs = [dname for dname, d in self._adevs.items()
if d.status()[0] == st_devs[0]]
if st_devs[0] in self.status_to_msg:
msg = self.status_to_msg[st_devs[0]]
if '%' in msg:
msg = msg % ', '.join(devs)
return st_devs[0], msg
return st_devs
def read_divergence(xs, slit):
left, _, bottom, top = slit
s = arctan(top/xs)
d = arctan(bottom/xs)
return s+d, 2*arctan(left/xs), (s-d)/2
def read_beam_shaping(slit):
left, right, bottom, top = slit
return top+bottom, right+left, (top-bottom)/2
class AmorSlitHandler(InterfaceLogicalMotorHandler):
attached_devices = {
'xs': Attach('Sample x position', Readable, missingok=True,
optional=True),
'mu': Attach('Sample omega', Readable, missingok=True,
optional=True),
'nu': Attach('Sample omega', Readable, missingok=True,
optional=True),
'ltz': Attach('Sample x position', Readable, missingok=True,
optional=True),
'xd2': Attach('Sample x position', Readable, missingok=True,
optional=True),
'xl': Attach('Deflector x position', Readable, missingok=True,
optional=True),
'mu_offset': Attach('Sample x position', Readable, missingok=True,
optional=True),
'kappa': Attach('Inclination of the beam after the Selene guide',
Readable, missingok=True, optional=True),
'soz_ideal': Attach('Ideal sample omega', Readable, missingok=True,
optional=True),
'xd3': Attach('', Readable, missingok=True, optional=True),
'slit1': Attach('slit 1', Slit, missingok=True, optional=True),
'slit2': Attach('slit 2', Slit, missingok=True, optional=True),
'slit2z': Attach('Z motor for slit 2', Readable, missingok=True,
optional=True),
'slit3': Attach('slit 3', Slit, missingok=True, optional=True),
'slit3z': Attach('Z motor for slit 3', Readable, missingok=True,
optional=True),
}
def doPreinit(self, mode):
self._status_devs = ['slit1', 'slit2', 'slit2z', 'slit3', 'slit3z']
InterfaceLogicalMotorHandler.doPreinit(self, mode)
self.valuetype = dictwith(div=float, did=float, dih=float)
def doRead(self, maxage=0):
result = {}
if self._is_active('diaphragm1'):
v, h, d = read_divergence(self._read_dev('xs'),
self._read_dev('slit1'))
result.update({'div': v, 'dih': h, 'did': d})
if self._is_active('diaphragm2'):
v, h, d = read_beam_shaping(self._read_dev('slit2'))
result.update({'d2v': v, 'd2h': h, 'd2d': d})
if self._is_active('diaphragm3'):
v, h, d = read_beam_shaping(self._read_dev('slit3'))
result.update({'d3v': v, 'd3h': h, 'd3d': d})
return result
def _get_move_list(self, targets):
positions = []
if self._is_active('diaphragm1'):
xs = self._read_dev('xs')
div = targets.get('div') or self._read_dev('div')
did = targets.get('did') or self._read_dev('did')
dih = targets.get('dih') or self._read_dev('dih')
top = xs * tan(radians(div / 2 + did))
bottom = xs * tan(radians(div / 2 - did))
horizontal = xs * tan(radians(dih / 2))
positions.extend([(self._get_dev('slit1'),
(top, bottom, horizontal, horizontal))
])
if self._is_active('diaphragm2'):
v = targets.get('d2v')
d = targets.get('d2d')
h = targets.get('d2h')
ltz = self._read_dev('ltz')
xd2 = self._read_dev('xd2')
xl = self._read_dev('xl')
mu_offset = self._read_dev('mu_offset')
kappa = self._read_dev('kappa')
if self._is_active('deflector'):
z = ltz - (xd2 - xl) * tan(radians(self._read_dev('mu') +
mu_offset))
else:
z = xd2 * tan(radians(kappa))
top = 0.5 * (v + d)
bottom = 0.5 * (v - d)
horizontal = 0.5 * h
positions.extend([(self._get_dev('slit2z'), z),
(self._get_dev('slit2'),
(top, bottom, horizontal, horizontal))
])
if self._is_active('diaphragm3'):
soz_ideal = self._read_dev('soz_ideal')
xd3 = self._read_dev('xd3')
nu = self._read_dev('nu')
xs = self._read_dev('xs')
kappa = self._read_dev('kappa')
v = targets.get('d3v')
d = targets.get('d3d')
h = targets.get('d3h')
z = soz_ideal + (xd3 - xs) * tan(radians(nu + kappa))
top = 0.5 * (v + d)
bottom = 0.5 * (v - d)
horizontal = 0.5 * h
positions.extend([(self._get_dev('slit2z'), z),
(self._get_dev('slit2'),
(top, bottom, horizontal, horizontal))
])
return positions
motortypes = ['div', 'dih', 'did', 'd2v', 'd2h', 'd2d', 'd3v', 'd3h', 'd3d']
class AmorSlitLogicalMotor(AmorLogicalMotor):
""" Class to represent the logical slit motors in AMOR.
"""
parameters = {
'motortype': Param('Type of motor %s' % ','.join(motortypes),
type=oneof(*motortypes), mandatory=True),
}
parameter_overrides = {
'unit': Override(mandatory=False, default='degree'),
'target': Override(volatile=True),
'abslimits': Override(mandatory=False, default=(-3.0, 3.0)),
'userlimits': Override(mandatory=False, default=(-3.0, 3.0))
}
attached_devices = {
'controller': Attach('Controller for the logical motors',
AmorSlitHandler)
}
def doRead(self, maxage=0):
return self._attached_controller.doRead(maxage)
|
[
"nicos.core.Attach",
"numpy.radians",
"nicos_sinq.amor.devices.logical_motor.InterfaceLogicalMotorHandler.doPreinit",
"nicos.core.dictwith",
"nicos.core.Override",
"numpy.arctan",
"nicos.core.utils.multiStatus",
"nicos.core.oneof"
] |
[((3633, 3649), 'numpy.arctan', 'arctan', (['(top / xs)'], {}), '(top / xs)\n', (3639, 3649), False, 'from numpy import arctan, radians, tan\n'), ((3656, 3675), 'numpy.arctan', 'arctan', (['(bottom / xs)'], {}), '(bottom / xs)\n', (3662, 3675), False, 'from numpy import arctan, radians, tan\n'), ((1957, 1996), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '"""mm"""'}), "(mandatory=False, default='mm')\n", (1965, 1996), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2016, 2041), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2024, 2041), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2061, 2086), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2069, 2086), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2112, 2137), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2120, 2137), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2161, 2186), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2169, 2186), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2209, 2248), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)', 'default': '(0.01)'}), '(userparam=False, default=0.01)\n', (2217, 2248), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((2268, 2291), 'nicos.core.Override', 'Override', ([], {'volatile': '(True)'}), '(volatile=True)\n', (2276, 2291), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((3196, 3228), 'nicos.core.utils.multiStatus', 'multiStatus', (['self._adevs', 'maxage'], {}), '(self._adevs, maxage)\n', (3207, 3228), False, 'from nicos.core.utils import multiStatus\n'), ((3930, 3998), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (3936, 3998), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4036, 4099), 'nicos.core.Attach', 'Attach', (['"""Sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample omega', Readable, missingok=True, optional=True)\n", (4042, 4099), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4136, 4199), 'nicos.core.Attach', 'Attach', (['"""Sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample omega', Readable, missingok=True, optional=True)\n", (4142, 4199), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4237, 4305), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4243, 4305), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4344, 4412), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4350, 4412), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4450, 4521), 'nicos.core.Attach', 'Attach', (['"""Deflector x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Deflector x position', Readable, missingok=True, optional=True)\n", (4456, 4521), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4565, 4633), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4571, 4633), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4679, 4780), 'nicos.core.Attach', 'Attach', (['"""Inclination of the beam after the Selene guide"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Inclination of the beam after the Selene guide', Readable,\n missingok=True, optional=True)\n", (4685, 4780), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4823, 4892), 'nicos.core.Attach', 'Attach', (['"""Ideal sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Ideal sample omega', Readable, missingok=True, optional=True)\n", (4829, 4892), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((4931, 4982), 'nicos.core.Attach', 'Attach', (['""""""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('', Readable, missingok=True, optional=True)\n", (4937, 4982), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5001, 5054), 'nicos.core.Attach', 'Attach', (['"""slit 1"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 1', Slit, missingok=True, optional=True)\n", (5007, 5054), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5073, 5126), 'nicos.core.Attach', 'Attach', (['"""slit 2"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 2', Slit, missingok=True, optional=True)\n", (5079, 5126), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5146, 5215), 'nicos.core.Attach', 'Attach', (['"""Z motor for slit 2"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Z motor for slit 2', Readable, missingok=True, optional=True)\n", (5152, 5215), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5259, 5312), 'nicos.core.Attach', 'Attach', (['"""slit 3"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 3', Slit, missingok=True, optional=True)\n", (5265, 5312), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5332, 5401), 'nicos.core.Attach', 'Attach', (['"""Z motor for slit 3"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Z motor for slit 3', Readable, missingok=True, optional=True)\n", (5338, 5401), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((5554, 5604), 'nicos_sinq.amor.devices.logical_motor.InterfaceLogicalMotorHandler.doPreinit', 'InterfaceLogicalMotorHandler.doPreinit', (['self', 'mode'], {}), '(self, mode)\n', (5592, 5604), False, 'from nicos_sinq.amor.devices.logical_motor import AmorLogicalMotor, InterfaceLogicalMotorHandler\n'), ((5630, 5671), 'nicos.core.dictwith', 'dictwith', ([], {'div': 'float', 'did': 'float', 'dih': 'float'}), '(div=float, did=float, dih=float)\n', (5638, 5671), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9044, 9087), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '"""degree"""'}), "(mandatory=False, default='degree')\n", (9052, 9087), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9107, 9130), 'nicos.core.Override', 'Override', ([], {'volatile': '(True)'}), '(volatile=True)\n', (9115, 9130), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9153, 9199), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '(-3.0, 3.0)'}), '(mandatory=False, default=(-3.0, 3.0))\n', (9161, 9199), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9223, 9269), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '(-3.0, 3.0)'}), '(mandatory=False, default=(-3.0, 3.0))\n', (9231, 9269), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((9324, 9384), 'nicos.core.Attach', 'Attach', (['"""Controller for the logical motors"""', 'AmorSlitHandler'], {}), "('Controller for the logical motors', AmorSlitHandler)\n", (9330, 9384), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((3692, 3709), 'numpy.arctan', 'arctan', (['(left / xs)'], {}), '(left / xs)\n', (3698, 3709), False, 'from numpy import arctan, radians, tan\n'), ((8956, 8974), 'nicos.core.oneof', 'oneof', (['*motortypes'], {}), '(*motortypes)\n', (8961, 8974), False, 'from nicos.core import Attach, HasPrecision, Override, Param, Readable, dictwith, oneof, status\n'), ((6656, 6678), 'numpy.radians', 'radians', (['(div / 2 + did)'], {}), '(div / 2 + did)\n', (6663, 6678), False, 'from numpy import arctan, radians, tan\n'), ((6710, 6732), 'numpy.radians', 'radians', (['(div / 2 - did)'], {}), '(div / 2 - did)\n', (6717, 6732), False, 'from numpy import arctan, radians, tan\n'), ((6768, 6784), 'numpy.radians', 'radians', (['(dih / 2)'], {}), '(dih / 2)\n', (6775, 6784), False, 'from numpy import arctan, radians, tan\n'), ((7536, 7550), 'numpy.radians', 'radians', (['kappa'], {}), '(kappa)\n', (7543, 7550), False, 'from numpy import arctan, radians, tan\n'), ((8275, 8294), 'numpy.radians', 'radians', (['(nu + kappa)'], {}), '(nu + kappa)\n', (8282, 8294), False, 'from numpy import arctan, radians, tan\n')]
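The trigonometry in read_divergence and _get_move_list above, written out (with x_s the distance used for the opening angles and t, b, l the top, bottom and left openings):

\[ \mathrm{div} = \arctan\frac{t}{x_s} + \arctan\frac{b}{x_s}, \qquad \mathrm{dih} = 2\arctan\frac{l}{x_s}, \qquad \mathrm{did} = \tfrac{1}{2}\Big(\arctan\frac{t}{x_s} - \arctan\frac{b}{x_s}\Big) \]

_get_move_list inverts these for diaphragm 1: \( t = x_s\tan(\mathrm{div}/2 + \mathrm{did}) \), \( b = x_s\tan(\mathrm{div}/2 - \mathrm{did}) \), \( l = x_s\tan(\mathrm{dih}/2) \).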
|
from typing import Dict, List, Optional
from pydantic import BaseModel
from xpresso import App, FromJson, Path
class Item(BaseModel):
name: str
price: float
tax: Optional[float] = None
async def create_receipt(items: FromJson[List[Item]]) -> Dict[str, float]:
return {item.name: item.price + (item.tax or 0) for item in items}
app = App(
routes=[
Path(
"/items/",
post=create_receipt,
)
]
)
|
[
"xpresso.Path"
] |
[((383, 419), 'xpresso.Path', 'Path', (['"""/items/"""'], {'post': 'create_receipt'}), "('/items/', post=create_receipt)\n", (387, 419), False, 'from xpresso import App, FromJson, Path\n')]
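The handler above just folds tax into price per item; posting a body like this to /items/ (values illustrative) returns the dict shown in the comment:

# Illustrative request body for POST /items/:
items = [
    {'name': 'pen', 'price': 1.5, 'tax': 0.1},
    {'name': 'pad', 'price': 3.0},   # tax omitted -> None -> treated as 0
]
# Expected JSON response: {"pen": 1.6, "pad": 3.0}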
|
"""
Created by: <NAME>
Email: <EMAIL>
"""
import turtle
import math
def golden_spiral1():
    # fibonacci
a = 0
b = 1
# go up
turtle.left(90)
while b < 610:
b += a
a = b - a
# create 90 degrees circle with radius b
turtle.circle(b, extent = 90)
turtle.done()
def golden_spiral2():
    # fibonacci
a = 0
b = 1
# go up
turtle.left(90)
while b < 610:
b += a
a = b - a
cir = 2 * b * math.pi / 4
num_steps = 10
if cir > 50:
num_steps = 100
elif cir > 10:
num_steps = 50
step = cir / num_steps
angle = 90 / num_steps
i = 0
while i < num_steps:
turtle.left(angle)
turtle.forward(step)
i += 1
turtle.done()
|
[
"turtle.circle",
"turtle.left",
"turtle.done",
"turtle.forward"
] |
[((146, 161), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (157, 161), False, 'import turtle\n'), ((308, 321), 'turtle.done', 'turtle.done', ([], {}), '()\n', (319, 321), False, 'import turtle\n'), ((399, 414), 'turtle.left', 'turtle.left', (['(90)'], {}), '(90)\n', (410, 414), False, 'import turtle\n'), ((821, 834), 'turtle.done', 'turtle.done', ([], {}), '()\n', (832, 834), False, 'import turtle\n'), ((273, 300), 'turtle.circle', 'turtle.circle', (['b'], {'extent': '(90)'}), '(b, extent=90)\n', (286, 300), False, 'import turtle\n'), ((745, 763), 'turtle.left', 'turtle.left', (['angle'], {}), '(angle)\n', (756, 763), False, 'import turtle\n'), ((776, 796), 'turtle.forward', 'turtle.forward', (['step'], {}), '(step)\n', (790, 796), False, 'import turtle\n')]
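Why quarter circles with Fibonacci radii look golden: the in-place pair b += a; a = b - a steps (a, b) from (F(n-1), F(n)) to (F(n), F(n+1)), and the ratio of consecutive radii tends to the golden ratio,

\[ \lim_{n \to \infty} \frac{F_{n+1}}{F_n} = \varphi = \frac{1 + \sqrt{5}}{2} \approx 1.618 \]

so each successive 90-degree arc scales by roughly \(\varphi\), which is the defining self-similarity of a golden spiral.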
|
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import ddt
from hypothesis import given
from hypothesis import strategies
import mock
import six
from poppy.manager.default.ssl_certificate import \
DefaultSSLCertificateController
from tests.functional.transport.pecan import base
@ddt.ddt
class SSLCertificatebyStatusTest(base.FunctionalTest):
@given(strategies.text())
def test_get_certificate_status_invalid_queryparam(self, status):
# invalid status field
try:
# NOTE(TheSriram): Py3k Hack
if six.PY3 and type(status) == str:
status = status.encode('utf-8')
url = '/v1.0/admin/certificates?status={0}'.format(status)
else:
url = '/v1.0/admin/certificates?status=%s' \
% status.decode('utf-8')
except (UnicodeDecodeError, UnicodeEncodeError):
pass
else:
response = self.app.get(url,
headers={'Content-Type':
'application/json',
'X-Project-ID':
str(uuid.uuid4())},
expect_errors=True)
self.assertEqual(response.status_code, 400)
@ddt.data(u'create_in_progress', u'deployed', u'failed', u'cancelled')
def test_get_service_status_valid_queryparam(self, status):
# valid status
with mock.patch.object(DefaultSSLCertificateController,
'get_certs_by_status'):
response = self.app.get('/v1.0/admin/certificates'
'?status={0}'.format(status),
headers={'Content-Type':
'application/json',
'X-Project-ID':
str(uuid.uuid4())})
self.assertEqual(response.status_code, 200)
|
[
"uuid.uuid4",
"mock.patch.object",
"ddt.data",
"hypothesis.strategies.text"
] |
[((1885, 1954), 'ddt.data', 'ddt.data', (['u"""create_in_progress"""', 'u"""deployed"""', 'u"""failed"""', 'u"""cancelled"""'], {}), "(u'create_in_progress', u'deployed', u'failed', u'cancelled')\n", (1893, 1954), False, 'import ddt\n'), ((913, 930), 'hypothesis.strategies.text', 'strategies.text', ([], {}), '()\n', (928, 930), False, 'from hypothesis import strategies\n'), ((2055, 2128), 'mock.patch.object', 'mock.patch.object', (['DefaultSSLCertificateController', '"""get_certs_by_status"""'], {}), "(DefaultSSLCertificateController, 'get_certs_by_status')\n", (2072, 2128), False, 'import mock\n'), ((1750, 1762), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1760, 1762), False, 'import uuid\n'), ((2526, 2538), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2536, 2538), False, 'import uuid\n')]
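The @given(strategies.text()) decorator above is what drives the fuzzing; the same pattern in isolation (a standalone sketch, unrelated to poppy's internals):

# Minimal hypothesis sketch: the test body runs many times with
# generated text values instead of one hand-picked status string.
from hypothesis import given, strategies

@given(strategies.text())
def test_url_formatting_never_raises(status):
    url = '/v1.0/admin/certificates?status={0}'.format(status)
    assert url.startswith('/v1.0/admin/certificates?status=')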
|
#!/usr/bin/env python2.7
"""
Module to interact with Xpedite device driver to program H/W performance counters
This module provides logic to
1. Detect and interact with Xpedite device driver
2. Build and serialize requests from a list of pmc events
Author: <NAME>, <NAME>
"""
import os
import struct
import logging
from xpedite.pmu.request import (
PmuRequestFactory, GenericPmuRequest, OffcorePmuRequest,
FixedPmuRequest, RequestSorter
)
from xpedite.pmu.event import EventState
LOGGER = logging.getLogger(__name__)
PMU_CTRL_DEVICE = '/dev/xpedite'
def canUsePMC():
"""Checks status of Xpedite device driver"""
return os.path.exists(PMU_CTRL_DEVICE) and os.access(PMU_CTRL_DEVICE, os.W_OK)
class PMUCtrl(object):
"""Interface to program pmu events with Xpedite device driver"""
def __init__(self, eventsDb):
self.device = None
self.eventsDb = eventsDb
def __enter__(self):
if not canUsePMC():
import socket
hostname = socket.gethostname()
raise Exception('PMC not enabled - run "xpedite pmc --enable" to load kernel module at host {}'.format(hostname))
self.device = open(PMU_CTRL_DEVICE, 'w')
return self
def __exit__(self, *args):
if self.device:
self.device.close()
@staticmethod
def buildRequestGroup(cpu, eventState):
"""
Builds a group of fixed, generic and offcore requests
:param cpu: Id of the target cpu core
:param eventState: Collection of pmu request to be processed
"""
request = struct.pack(
'=BBBB', cpu, len(eventState.fixedRequests), len(eventState.genericRequests), len(eventState.offcoreRequests)
)
for event in eventState.fixedRequests:
request += event.buildMask()
for _ in range(len(eventState.fixedRequests), 3):
request += FixedPmuRequest.defaultMask()
for event in eventState.genericRequests:
request += event.buildMask()
for _ in range(len(eventState.genericRequests), 8):
request += GenericPmuRequest.defaultMask()
for event in eventState.offcoreRequests:
request += event.buildMask()
for _ in range(len(eventState.offcoreRequests), 2):
request += OffcorePmuRequest.defaultMask()
return request
def resolveEvents(self, cpuSet, events):
"""
Resolves and build pmu requests for a list of events
:param cpuSet: A set of cpu cores to enable pmu
:param events: A list of pmu events to be resolved
"""
if len(events) > 11:
raise Exception('PMUCtrl - cannot enable more than 11 events - requested {}'.format(len(events)))
requestFactory = PmuRequestFactory(self.eventsDb)
eventState = EventState(cpuSet)
for event in events:
requests = requestFactory.buildRequests(event)
for request in requests:
if isinstance(request, GenericPmuRequest):
eventState.addGenericPmuRequest(request)
elif isinstance(request, OffcorePmuRequest):
eventState.addOffcorePmuRequest(request)
elif isinstance(request, FixedPmuRequest):
eventState.addFixedPmuRequest(request)
else:
raise Exception('PMUCtrl request - invalid event type {}'.format(type(event)))
return eventState
@staticmethod
def allocateEvents(eventState):
"""
Allocates registers for pmu events, while obeying constraints
:param eventState: A collection of resolved pmu events
"""
if eventState.genericRequests:
sortedRequests = RequestSorter.sort(eventState.genericRequests)
if sortedRequests and len(sortedRequests) == len(eventState.genericRequests):
eventState.genericRequests = sortedRequests
else:
pmcStr = '\n\t\t'.join((str(request) for request in eventState.genericRequests))
report = RequestSorter.reportConstraints(eventState.genericRequests)
errMsg = """Failed to program selected counters
--> chosen pmc - \n\t\t{}
--> reordered pmc - {}
The following constraints prevent all selected counter from being used simultaneously
{}""".format(pmcStr, sortedRequests, report)
raise Exception(errMsg)
def enable(self, cpuSet, events):
"""
Enables pmu events in a set of target cpus
:param cpuSet: A set of cpu cores to enable pmu
:param events: A list of pmu events to be enabled
"""
if not self.device:
raise Exception('PMUCtrl - xpedite device not enabled - use "with PMUCtrl() as pmuCtrl:" to init device')
eventState = self.resolveEvents(cpuSet, events)
self.allocateEvents(eventState)
for cpu in cpuSet:
requestGroup = self.buildRequestGroup(cpu, eventState)
LOGGER.debug(
'sending request (%d bytes) to xpedite ko [%s]',
len(requestGroup), ':'.join('{:02x}'.format(ord(request)) for request in requestGroup)
)
self.device.write(requestGroup)
self.device.flush()
return eventState
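# Hypothetical usage sketch (cpu ids and the event list are illustrative
# placeholders; real event objects are resolved through the eventsDb):
#
#   with PMUCtrl(eventsDb) as pmuCtrl:
#     eventState = pmuCtrl.enable(cpuSet=[0, 1], events=selectedEvents)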
|
[
"xpedite.pmu.request.PmuRequestFactory",
"xpedite.pmu.event.EventState",
"xpedite.pmu.request.RequestSorter.sort",
"xpedite.pmu.request.OffcorePmuRequest.defaultMask",
"xpedite.pmu.request.GenericPmuRequest.defaultMask",
"os.path.exists",
"socket.gethostname",
"xpedite.pmu.request.FixedPmuRequest.defaultMask",
"xpedite.pmu.request.RequestSorter.reportConstraints",
"os.access",
"logging.getLogger"
] |
[((598, 625), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (615, 625), False, 'import logging\n'), ((734, 765), 'os.path.exists', 'os.path.exists', (['PMU_CTRL_DEVICE'], {}), '(PMU_CTRL_DEVICE)\n', (748, 765), False, 'import os\n'), ((770, 805), 'os.access', 'os.access', (['PMU_CTRL_DEVICE', 'os.W_OK'], {}), '(PMU_CTRL_DEVICE, os.W_OK)\n', (779, 805), False, 'import os\n'), ((2683, 2715), 'xpedite.pmu.request.PmuRequestFactory', 'PmuRequestFactory', (['self.eventsDb'], {}), '(self.eventsDb)\n', (2700, 2715), False, 'from xpedite.pmu.request import PmuRequestFactory, GenericPmuRequest, OffcorePmuRequest, FixedPmuRequest, RequestSorter\n'), ((2733, 2751), 'xpedite.pmu.event.EventState', 'EventState', (['cpuSet'], {}), '(cpuSet)\n', (2743, 2751), False, 'from xpedite.pmu.event import EventState\n'), ((1067, 1087), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1085, 1087), False, 'import socket\n'), ((1885, 1914), 'xpedite.pmu.request.FixedPmuRequest.defaultMask', 'FixedPmuRequest.defaultMask', ([], {}), '()\n', (1912, 1914), False, 'from xpedite.pmu.request import PmuRequestFactory, GenericPmuRequest, OffcorePmuRequest, FixedPmuRequest, RequestSorter\n'), ((2069, 2100), 'xpedite.pmu.request.GenericPmuRequest.defaultMask', 'GenericPmuRequest.defaultMask', ([], {}), '()\n', (2098, 2100), False, 'from xpedite.pmu.request import PmuRequestFactory, GenericPmuRequest, OffcorePmuRequest, FixedPmuRequest, RequestSorter\n'), ((2255, 2286), 'xpedite.pmu.request.OffcorePmuRequest.defaultMask', 'OffcorePmuRequest.defaultMask', ([], {}), '()\n', (2284, 2286), False, 'from xpedite.pmu.request import PmuRequestFactory, GenericPmuRequest, OffcorePmuRequest, FixedPmuRequest, RequestSorter\n'), ((3544, 3590), 'xpedite.pmu.request.RequestSorter.sort', 'RequestSorter.sort', (['eventState.genericRequests'], {}), '(eventState.genericRequests)\n', (3562, 3590), False, 'from xpedite.pmu.request import PmuRequestFactory, GenericPmuRequest, OffcorePmuRequest, FixedPmuRequest, RequestSorter\n'), ((3845, 3904), 'xpedite.pmu.request.RequestSorter.reportConstraints', 'RequestSorter.reportConstraints', (['eventState.genericRequests'], {}), '(eventState.genericRequests)\n', (3876, 3904), False, 'from xpedite.pmu.request import PmuRequestFactory, GenericPmuRequest, OffcorePmuRequest, FixedPmuRequest, RequestSorter\n')]
|
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for a stream of logs output by a locally running emulator."""
import subprocess
from typing import List
from android_env.components import log_stream
_LOGCAT_COMMAND = ['logcat', '-v', 'epoch']
class AdbLogStream(log_stream.LogStream):
"""Manages adb logcat process for a locally running emulator."""
def __init__(self, adb_command_prefix: List[str], *args, **kwargs):
super().__init__(*args, **kwargs)
self._adb_command_prefix = adb_command_prefix
def _get_stream_output(self):
cmd = self._adb_command_prefix + _LOGCAT_COMMAND + self._filters
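    # bufsize=1 + universal_newlines=True keep the pipe line-buffered in text
    # mode, so callers can iterate over logcat lines as they are emitted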
self._adb_subprocess = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
universal_newlines=True)
return self._adb_subprocess.stdout
def stop_stream(self):
self._adb_subprocess.kill()
|
[
"subprocess.Popen"
] |
[((1216, 1327), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'bufsize': '(1)', 'universal_newlines': '(True)'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n bufsize=1, universal_newlines=True)\n', (1232, 1327), False, 'import subprocess\n')]
|
import json
import sys
import os
from pprint import pprint
from cassandra.cluster import Cluster
from cassandra.policies import DCAwareRoundRobinPolicy
cluster = Cluster(load_balancing_policy=DCAwareRoundRobinPolicy())
session = cluster.connect()
session.execute('drop keyspace if exists de')
session.execute(
"""
CREATE KEYSPACE IF NOT EXISTS de WITH REPLICATION = {
'class' : 'SimpleStrategy',
'replication_factor' : 1
}
"""
)
path = './workshop_dataset1/'
session.set_keyspace('de')
# session.execute('use de')
session.execute('drop table if exists Q1')
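# Q1 is partitioned by hashtag so the per-hashtag count query below touches a
# single partition; location and tid are clustering columns in descending order.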
session.execute(
"""
create table Q1(
tid text,
location text,
hashtag text,
primary key(hashtag,location,tid)
)with clustering order by(location desc, tid desc);
"""
)
for filename in os.listdir(path):
print(filename)
data = json.load(open(path+filename))
    for x in data:  # data[x]["hashtags"] may be None (None is not iterable)
if(data[x]["hashtags"] and data[x]["location"]):
for y in data[x]["hashtags"]:
if(y):
print(y)
session.execute(
"""
insert into Q1(tid,location,hashtag)
values (%(tid)s,%(location)s,%(hashtag)s)
""",
{
'tid':data[x]["tid"],
'location':data[x]["location"],
'hashtag':y,
}
)
hashtag1 = input("Question 1 : Enter hashtag name::\n")
list_of_rows = session.execute('select count(*) as count,hashtag,location from Q1 where hashtag=\''+hashtag1+'\' group by hashtag,location allow filtering')
#print(type(list_of_rows))
file1=open("file1.csv","w");
for row in list_of_rows:
x=str(row.hashtag)+";"+str(row.location)+";"+str(row.count)+"\n"
print(str(row.hashtag)+"\t\t\t"+str(row.location)+"\t\t\t"+str(row.count))
file1.write(x)
|
[
"cassandra.policies.DCAwareRoundRobinPolicy",
"os.listdir"
] |
[((789, 805), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (799, 805), False, 'import os\n'), ((192, 217), 'cassandra.policies.DCAwareRoundRobinPolicy', 'DCAwareRoundRobinPolicy', ([], {}), '()\n', (215, 217), False, 'from cassandra.policies import DCAwareRoundRobinPolicy\n')]
|
import base64
import datetime
import json
import logging
import os
import time
import traceback
from urllib.parse import urlparse, quote
from boto3.dynamodb.types import TypeDeserializer
from algoliasearch.search_client import SearchClient
# Logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if os.getenv('DEBUG', default = 1) else logging.INFO)
logger.info("Update Algolia")
# Algolia
ALGOLIA_FIELDS = ''
try:
fields_var = os.getenv('ALGOLIA_FIELDS')
if fields_var:
ALGOLIA_FIELDS = json.loads(fields_var)
logger.debug('Fields: %s', ALGOLIA_FIELDS)
except ValueError:
    raise ValueError('ALGOLIA_FIELDS must be valid JSON: an object with EITHER field: "include" OR "exclude".')
if ALGOLIA_FIELDS and ('include' in ALGOLIA_FIELDS and 'exclude' in ALGOLIA_FIELDS):
raise ValueError('If you specify fields, it must be an object with EITHER field: "include" OR "exclude".')
if ALGOLIA_FIELDS and ('include' not in ALGOLIA_FIELDS and 'exclude' not in ALGOLIA_FIELDS):
raise ValueError('If you specify fields, it must be an object with EITHER field: "include" OR "exclude".')
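# Illustrative value (an assumption, not taken from any deployment):
#   ALGOLIA_FIELDS='{"include": ["title", "author"]}'  # index only these attributes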
ALGOLIA_SETTINGS = ''
try:
settings_var = os.getenv('ALGOLIA_SETTINGS')
if settings_var:
ALGOLIA_SETTINGS = json.loads(settings_var)
logger.debug('Settings: %s', ALGOLIA_SETTINGS)
except ValueError:
    raise ValueError('ALGOLIA_SETTINGS must be valid JSON: at least an object of settings.')
ALGOLIA_PROJECT_ID = os.getenv('ALGOLIA_PROJECT_ID', default = '')
if ALGOLIA_PROJECT_ID == '':
raise ValueError('You need to provide ALGOLIA_PROJECT_ID in order to guarantee uniqueness.')
ALGOLIA_APP_ID = os.getenv('ALGOLIA_APP_ID', default = '')
ALGOLIA_API_KEY = os.getenv('ALGOLIA_API_KEY', default = '')
if ALGOLIA_APP_ID == '' or ALGOLIA_API_KEY == '':
raise ValueError('You need to provide ALGOLIA_APP_ID and ALGOLIA_API_KEY env variables.')
client = SearchClient.create(ALGOLIA_APP_ID, ALGOLIA_API_KEY)
# custom encoder changes
# - sets to lists
class DDBTypesEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
# Subclass of boto's TypeDeserializer for DynamoDB to adjust for DynamoDB Stream format.
class StreamTypeDeserializer(TypeDeserializer):
def _deserialize_n(self, value):
#return float(value)
return value
def _deserialize_b(self, value):
return value # Already in Base64
# Extracts the DynamoDB table from an ARN
# ex: arn:aws:dynamodb:eu-west-1:123456789012:table/table-name/stream/2015-11-13T09:23:17.104 should return 'table-name'
def get_table_name_from_arn(arn):
return arn.split(':')[5].split('/')[1]
# Configure Index Settings
# https://www.algolia.com/doc/api-reference/settings-api-parameters/
def set_index_settings(index_name, opts):
if 'settings' not in opts:
return
logger.debug('Configuring Index: %s', opts)
settings = opts['settings']
opts.pop('settings')
index = client.init_index(index_name)
index.set_settings(settings, opts).wait()
# Configure Indexed Fields
# https://www.algolia.com/doc/guides/sending-and-managing-data/prepare-your-data/how-to/reducing-object-size/
def set_indexed_fields(all_fields):
fields = {}
if not ALGOLIA_FIELDS:
logger.debug('Indexed Fields: %s', all_fields)
return all_fields
if 'include' in ALGOLIA_FIELDS:
key_set = set(ALGOLIA_FIELDS['include']) & set(all_fields.keys())
fields = { key: all_fields[key] for key in key_set }
elif 'exclude' in ALGOLIA_FIELDS:
key_set = set(all_fields.keys()) - set(ALGOLIA_FIELDS['exclude'])
fields = {key: all_fields[key] for key in key_set}
logger.debug('Indexed Fields: %s', fields)
return fields
# ObjectID = PrimaryKey(:SortKey)
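# e.g. keys {'pk': 'user#1', 'sk': '2021'} -> 'user#1:2021' (illustrative key
# names; values are joined with ':' in DynamoDB key order)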
def generateObjectID(keys):
objectID = ''
for key in keys:
objectID += keys[key] + ':'
return objectID[:-1]
# Lamba Handler
def _lambda_handler(event, context):
logger.debug('Event: %s', event)
records = event['Records']
now = datetime.datetime.utcnow()
ddb_deserializer = StreamTypeDeserializer()
operations = []
cnt_insert = cnt_modify = cnt_remove = 0
index_name = ''
# Process each record
for record in records:
# Handle both native DynamoDB Streams or Streams data from Kinesis (for manual replay)
logger.debug('Record: %s', record)
if record.get('eventSource') == 'aws:dynamodb':
ddb = record['dynamodb']
ddb_table_name = get_table_name_from_arn(record['eventSourceARN'])
doc_seq = ddb['SequenceNumber']
elif record.get('eventSource') == 'aws:kinesis':
ddb = json.loads(base64.b64decode(record['kinesis']['data']))
ddb_table_name = ddb['SourceTable']
doc_seq = record['kinesis']['sequenceNumber']
else:
logger.error('Ignoring non-DynamoDB event sources: %s',
record.get('eventSource'))
continue
# Compute DynamoDB table, type and index for item
doc_table = ddb_table_name.lower()
doc_table_parts = doc_table.split('-')
doc_index_name = doc_table_parts[0] if len(doc_table_parts) > 0 else doc_table
doc_index_suffix = "-" + doc_table_parts[-1] if len(doc_table_parts) > 1 else ''
index_name = ALGOLIA_PROJECT_ID + '-' + doc_index_name + doc_index_suffix
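        # e.g. a table named 'Movies-prod' yields index '<project_id>-movies-prod'
        # (illustrative name; first and last dash-separated parts, lowercased)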
# Dispatch according to event TYPE
event_name = record['eventName'].upper() # INSERT, MODIFY, REMOVE
logger.debug('doc_table=%s, event_name=%s, seq=%s', doc_table, event_name, doc_seq)
# Treat events from a Kinesis stream as INSERTs
if event_name == 'AWS:KINESIS:RECORD':
event_name = 'INSERT'
# Ensure stream has required info
is_ddb_insert_or_update = (event_name == 'INSERT') or (event_name == 'MODIFY')
is_ddb_delete = event_name == 'REMOVE'
image_name = 'NewImage' if is_ddb_insert_or_update else 'Keys'
if image_name not in ddb:
logger.warning(
'Cannot process stream if it does not contain ' + image_name)
continue
logger.debug(image_name + ': %s', ddb[image_name])
# Deserialize DynamoDB type to Python types
doc_keys = ddb_deserializer.deserialize({'M': record['dynamodb']['Keys']})
doc_fields = ddb_deserializer.deserialize({'M': ddb[image_name]})
logger.debug('All Fields: %s', doc_fields)
doc_fields = set_indexed_fields(doc_fields)
# Update counters
if event_name == 'INSERT':
cnt_insert += 1
elif event_name == 'MODIFY':
cnt_modify += 1
elif event_name == 'REMOVE':
cnt_remove += 1
        else:
            logger.warning('Unsupported event_name: %s', event_name)
            continue  # skip the record instead of reusing a stale `operation`
# Format as Algolia record
# [https://www.algolia.com/doc/guides/sending-and-managing-data/prepare-your-data/in-depth/what-is-in-a-record/]
doc_fields['objectID'] = generateObjectID(doc_keys)
# DynamoDB INSERT or MODIFY
if is_ddb_insert_or_update:
operation = {
'action': 'updateObject',
'indexName': index_name,
'body': doc_fields
}
# DynamoDB REMOVE
elif is_ddb_delete:
operation = {
'action': 'deleteObject',
'indexName': index_name,
'body': doc_fields
}
# Save operation
logger.debug('%s', operation)
operations.append(operation);
# Update Index Settings
set_index_settings(index_name, ALGOLIA_SETTINGS)
# Execute Batch Operations
# [https://www.algolia.com/doc/api-reference/api-methods/batch/]
    logger.info('Posting to Algolia: inserts=%s updates=%s deletes=%s, total operations=%s', cnt_insert, cnt_modify, cnt_remove, len(operations))
client.multiple_batch(operations).wait()
# Global lambda handler - catches all exceptions to avoid dead letter in the DynamoDB Stream
def lambda_handler(event, context):
try:
return _lambda_handler(event, context)
except Exception:
logger.error(traceback.format_exc())
|
[
"json.loads",
"base64.b64decode",
"datetime.datetime.utcnow",
"json.JSONEncoder.default",
"traceback.format_exc",
"algoliasearch.search_client.SearchClient.create",
"os.getenv",
"logging.getLogger"
] |
[((259, 278), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (276, 278), False, 'import logging\n'), ((1440, 1483), 'os.getenv', 'os.getenv', (['"""ALGOLIA_PROJECT_ID"""'], {'default': '""""""'}), "('ALGOLIA_PROJECT_ID', default='')\n", (1449, 1483), False, 'import os\n'), ((1630, 1669), 'os.getenv', 'os.getenv', (['"""ALGOLIA_APP_ID"""'], {'default': '""""""'}), "('ALGOLIA_APP_ID', default='')\n", (1639, 1669), False, 'import os\n'), ((1690, 1730), 'os.getenv', 'os.getenv', (['"""ALGOLIA_API_KEY"""'], {'default': '""""""'}), "('ALGOLIA_API_KEY', default='')\n", (1699, 1730), False, 'import os\n'), ((1887, 1939), 'algoliasearch.search_client.SearchClient.create', 'SearchClient.create', (['ALGOLIA_APP_ID', 'ALGOLIA_API_KEY'], {}), '(ALGOLIA_APP_ID, ALGOLIA_API_KEY)\n', (1906, 1939), False, 'from algoliasearch.search_client import SearchClient\n'), ((446, 473), 'os.getenv', 'os.getenv', (['"""ALGOLIA_FIELDS"""'], {}), "('ALGOLIA_FIELDS')\n", (455, 473), False, 'import os\n'), ((1158, 1187), 'os.getenv', 'os.getenv', (['"""ALGOLIA_SETTINGS"""'], {}), "('ALGOLIA_SETTINGS')\n", (1167, 1187), False, 'import os\n'), ((4096, 4122), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4120, 4122), False, 'import datetime\n'), ((312, 341), 'os.getenv', 'os.getenv', (['"""DEBUG"""'], {'default': '(1)'}), "('DEBUG', default=1)\n", (321, 341), False, 'import os\n'), ((518, 540), 'json.loads', 'json.loads', (['fields_var'], {}), '(fields_var)\n', (528, 540), False, 'import json\n'), ((1236, 1260), 'json.loads', 'json.loads', (['settings_var'], {}), '(settings_var)\n', (1246, 1260), False, 'import json\n'), ((2130, 2165), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (2154, 2165), False, 'import json\n'), ((8307, 8329), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8327, 8329), False, 'import traceback\n'), ((4752, 4795), 'base64.b64decode', 'base64.b64decode', (["record['kinesis']['data']"], {}), "(record['kinesis']['data'])\n", (4768, 4795), False, 'import base64\n')]
|
import os
import numpy as np
import codecs
import pandas as pd
import json
from glob import glob
import cv2
import shutil
from sklearn.model_selection import train_test_split
# 1. annotation (label) file path
csv_file = "annotations.csv"
saved_path = "./VOCdevkit/VOC2007/" #保存路径
image_save_path = "./JPEGImages/"
image_raw_parh = "./images/"
# 2. create the required folders
if not os.path.exists(saved_path + "Annotations"):
os.makedirs(saved_path + "Annotations")
if not os.path.exists(saved_path + "JPEGImages/"):
os.makedirs(saved_path + "JPEGImages/")
if not os.path.exists(saved_path + "ImageSets/Main/"):
os.makedirs(saved_path + "ImageSets/Main/")
# 3. read the files to be processed
total_csv_annotations = {}
annotations = pd.read_csv(csv_file,header=None).values
for annotation in annotations:
key = annotation[0].split(os.sep)[-1]
value = np.array([annotation[1:]])
if key in total_csv_annotations.keys():
total_csv_annotations[key] = np.concatenate((total_csv_annotations[key],value),axis=0)
else:
total_csv_annotations[key] = value
# 4. read the annotation info and write it to xml
for filename,label in total_csv_annotations.items():
#embed()
print(filename)
if filename == 'image':
continue
filename = filename.split(".png")[0]
print(filename)
height, width, channels = cv2.imread(image_raw_parh + filename + '.jpg').shape
#embed()
with codecs.open(saved_path + "Annotations/"+filename +".xml","w","utf-8") as xml:
xml.write('<annotation>\n')
xml.write('\t<folder>' + 'BCDD' + '</folder>\n')
xml.write('\t<filename>' + filename +'.jpg' + '</filename>\n')
xml.write('\t<source>\n')
xml.write('\t\t<database>BLOOD CELL DETECTION DATASET</database>\n')
xml.write('\t\t<annotation>UAV AutoLanding</annotation>\n')
xml.write('\t\t<image>' + 'BCDD_' + filename + '</image>\n')
xml.write('\t\t<flickrid>NULL</flickrid>\n')
xml.write('\t</source>\n')
xml.write('\t<owner>\n')
xml.write('\t\t<flickrid>NULL</flickrid>\n')
xml.write('\t\t<name>Alex</name>\n')
xml.write('\t</owner>\n')
xml.write('\t<size>\n')
xml.write('\t\t<width>'+ str(width) + '</width>\n')
xml.write('\t\t<height>'+ str(height) + '</height>\n')
xml.write('\t\t<depth>' + str(channels) + '</depth>\n')
xml.write('\t</size>\n')
xml.write('\t\t<segmented>0</segmented>\n')
if isinstance(label,float):
            ## blank (no annotations for this image)
xml.write('</annotation>')
continue
for label_detail in label:
labels = label_detail
#embed()
xmin = int(float(labels[0]))
ymin = int(float(labels[1]))
xmax = int(float(labels[2]))
ymax = int(float(labels[3]))
label_ = labels[-1]
if xmax <= xmin:
pass
elif ymax <= ymin:
pass
else:
xml.write('\t<object>\n')
xml.write('\t\t<name>'+label_+'</name>\n')
xml.write('\t\t<pose>Unspecified</pose>\n')
xml.write('\t\t<truncated>0</truncated>\n')
xml.write('\t\t<difficult>0</difficult>\n')
xml.write('\t\t<bndbox>\n')
xml.write('\t\t\t<xmin>' + str(xmin) + '</xmin>\n')
xml.write('\t\t\t<ymin>' + str(ymin) + '</ymin>\n')
xml.write('\t\t\t<xmax>' + str(xmax) + '</xmax>\n')
xml.write('\t\t\t<ymax>' + str(ymax) + '</ymax>\n')
xml.write('\t\t</bndbox>\n')
xml.write('\t</object>\n')
print(filename,xmin,ymin,xmax,ymax,labels)
xml.write('</annotation>')
#6.split files for txt
txtsavepath = saved_path + "ImageSets/Main/"
ftrainval = open(txtsavepath+'/trainval.txt', 'w')
ftest = open(txtsavepath+'/test.txt', 'w')
ftrain = open(txtsavepath+'/train.txt', 'w')
fval = open(txtsavepath+'/val.txt', 'w')
total_files = glob(saved_path+"./Annotations/*.xml")
total_files = [i.split("\\")[-1].split(".xml")[0] for i in total_files]
#test_filepath = ""
for file in total_files:
ftrainval.write(file + "\n")
# move images to voc JPEGImages folder
for image in glob(image_raw_parh+"/*.jpg"):
shutil.copy(image,saved_path+image_save_path)
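# 80/20 train/val split; the fixed random_state keeps the shuffle reproducible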
train_files,val_files = train_test_split(total_files,test_size=0.2,random_state=42)
for file in train_files:
ftrain.write(file + "\n")
#val
for file in val_files:
fval.write(file + "\n")
ftrainval.close()
ftrain.close()
fval.close()
#ftest.close()
|
[
"os.makedirs",
"numpy.concatenate",
"codecs.open",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"os.path.exists",
"cv2.imread",
"numpy.array",
"glob.glob",
"shutil.copy"
] |
[((3978, 4018), 'glob.glob', 'glob', (["(saved_path + './Annotations/*.xml')"], {}), "(saved_path + './Annotations/*.xml')\n", (3982, 4018), False, 'from glob import glob\n'), ((4220, 4251), 'glob.glob', 'glob', (["(image_raw_parh + '/*.jpg')"], {}), "(image_raw_parh + '/*.jpg')\n", (4224, 4251), False, 'from glob import glob\n'), ((4326, 4387), 'sklearn.model_selection.train_test_split', 'train_test_split', (['total_files'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(total_files, test_size=0.2, random_state=42)\n', (4342, 4387), False, 'from sklearn.model_selection import train_test_split\n'), ((352, 394), 'os.path.exists', 'os.path.exists', (["(saved_path + 'Annotations')"], {}), "(saved_path + 'Annotations')\n", (366, 394), False, 'import os\n'), ((400, 439), 'os.makedirs', 'os.makedirs', (["(saved_path + 'Annotations')"], {}), "(saved_path + 'Annotations')\n", (411, 439), False, 'import os\n'), ((447, 489), 'os.path.exists', 'os.path.exists', (["(saved_path + 'JPEGImages/')"], {}), "(saved_path + 'JPEGImages/')\n", (461, 489), False, 'import os\n'), ((495, 534), 'os.makedirs', 'os.makedirs', (["(saved_path + 'JPEGImages/')"], {}), "(saved_path + 'JPEGImages/')\n", (506, 534), False, 'import os\n'), ((542, 588), 'os.path.exists', 'os.path.exists', (["(saved_path + 'ImageSets/Main/')"], {}), "(saved_path + 'ImageSets/Main/')\n", (556, 588), False, 'import os\n'), ((594, 637), 'os.makedirs', 'os.makedirs', (["(saved_path + 'ImageSets/Main/')"], {}), "(saved_path + 'ImageSets/Main/')\n", (605, 637), False, 'import os\n'), ((695, 729), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'header': 'None'}), '(csv_file, header=None)\n', (706, 729), True, 'import pandas as pd\n'), ((821, 847), 'numpy.array', 'np.array', (['[annotation[1:]]'], {}), '([annotation[1:]])\n', (829, 847), True, 'import numpy as np\n'), ((4255, 4303), 'shutil.copy', 'shutil.copy', (['image', '(saved_path + image_save_path)'], {}), '(image, saved_path + image_save_path)\n', (4266, 4303), False, 'import shutil\n'), ((929, 988), 'numpy.concatenate', 'np.concatenate', (['(total_csv_annotations[key], value)'], {'axis': '(0)'}), '((total_csv_annotations[key], value), axis=0)\n', (943, 988), True, 'import numpy as np\n'), ((1285, 1331), 'cv2.imread', 'cv2.imread', (["(image_raw_parh + filename + '.jpg')"], {}), "(image_raw_parh + filename + '.jpg')\n", (1295, 1331), False, 'import cv2\n'), ((1360, 1434), 'codecs.open', 'codecs.open', (["(saved_path + 'Annotations/' + filename + '.xml')", '"""w"""', '"""utf-8"""'], {}), "(saved_path + 'Annotations/' + filename + '.xml', 'w', 'utf-8')\n", (1371, 1434), False, 'import codecs\n')]
|
from tests.cli.commands.helpers import WORKSPACE_ROOT, run
class TestPipenvE2E:
@staticmethod
def should_manage_pipenv_projects():
# WHEN I create two pipenv projects
projects = ["library-one", "library-two"]
for project in projects:
run(["workspace", "new", "--type", "pipenv", f"libs/{project}"])
# THEN they should both exist
assert set(run(["workspace", "list", "--output", "names"]).text.splitlines()) == {"library-one", "library-two"}
# AND I should be able to run commands in them
assert set(run(["workspace", "run", "-c", "pwd"]).stdout.splitlines()) == {
str(WORKSPACE_ROOT / f"libs/{project}") for project in projects
}
# AND GIVEN one depends on the other
with open(WORKSPACE_ROOT / "libs/library-two/setup.py", "w", encoding="utf-8") as file:
file.write(
"""from setuptools import setup, find_packages
setup(
name="library-two",
version="0.1.0",
packages=find_packages(),
install_requires=[],
)
"""
)
run(["pipenv", "install", "--editable", "../library-two"], cwd="libs/library-one")
# WHEN I check dependees
result = run(["workspace", "dependees", "library-two"])
# THEN the correct dependees are identified
assert set(result.text.splitlines()) == {"library-one", "library-two"}
# AND I can sync dependencies
run(["workspace", "sync", "library-one"])
|
[
"tests.cli.commands.helpers.run"
] |
[((1092, 1179), 'tests.cli.commands.helpers.run', 'run', (["['pipenv', 'install', '--editable', '../library-two']"], {'cwd': '"""libs/library-one"""'}), "(['pipenv', 'install', '--editable', '../library-two'], cwd=\n 'libs/library-one')\n", (1095, 1179), False, 'from tests.cli.commands.helpers import WORKSPACE_ROOT, run\n'), ((1225, 1271), 'tests.cli.commands.helpers.run', 'run', (["['workspace', 'dependees', 'library-two']"], {}), "(['workspace', 'dependees', 'library-two'])\n", (1228, 1271), False, 'from tests.cli.commands.helpers import WORKSPACE_ROOT, run\n'), ((1449, 1490), 'tests.cli.commands.helpers.run', 'run', (["['workspace', 'sync', 'library-one']"], {}), "(['workspace', 'sync', 'library-one'])\n", (1452, 1490), False, 'from tests.cli.commands.helpers import WORKSPACE_ROOT, run\n'), ((280, 344), 'tests.cli.commands.helpers.run', 'run', (["['workspace', 'new', '--type', 'pipenv', f'libs/{project}']"], {}), "(['workspace', 'new', '--type', 'pipenv', f'libs/{project}'])\n", (283, 344), False, 'from tests.cli.commands.helpers import WORKSPACE_ROOT, run\n'), ((402, 449), 'tests.cli.commands.helpers.run', 'run', (["['workspace', 'list', '--output', 'names']"], {}), "(['workspace', 'list', '--output', 'names'])\n", (405, 449), False, 'from tests.cli.commands.helpers import WORKSPACE_ROOT, run\n'), ((577, 615), 'tests.cli.commands.helpers.run', 'run', (["['workspace', 'run', '-c', 'pwd']"], {}), "(['workspace', 'run', '-c', 'pwd'])\n", (580, 615), False, 'from tests.cli.commands.helpers import WORKSPACE_ROOT, run\n')]
|
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from torch import Tensor
import numpy as np
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask
from mmdet.ops import ModulatedDeformConvPack
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob, build_norm_layer
import math
INF = 1e8
def get_mask_sample_region(gt_bb, mask_center, strides, num_points_per, gt_xs, gt_ys, radius=1.0):
# This function checks if a feature pixel is near the center of an instance
# returns true or false for every pixel and object size 204600 * 8
center_y = mask_center[..., 0]
center_x = mask_center[..., 1]
center_gt = gt_bb.new_zeros(gt_bb.shape)
# no gt
if center_x[..., 0].sum() == 0:
return gt_xs.new_zeros(gt_xs.shape, dtype=torch.uint8)
beg = 0
for level, n_p in enumerate(num_points_per):
end = beg + n_p # setting where to stop for each head
stride = strides[level] * radius
xmin = center_x[beg:end] - stride
ymin = center_y[beg:end] - stride
xmax = center_x[beg:end] + stride
ymax = center_y[beg:end] + stride
# limit sample region in gt
center_gt[beg:end, :, 0] = torch.where(xmin > gt_bb[beg:end, :, 0], xmin, gt_bb[beg:end, :, 0])
center_gt[beg:end, :, 1] = torch.where(ymin > gt_bb[beg:end, :, 1], ymin, gt_bb[beg:end, :, 1])
center_gt[beg:end, :, 2] = torch.where(xmax > gt_bb[beg:end, :, 2], gt_bb[beg:end, :, 2], xmax)
center_gt[beg:end, :, 3] = torch.where(ymax > gt_bb[beg:end, :, 3], gt_bb[beg:end, :, 3], ymax)
beg = end
left = gt_xs - center_gt[..., 0]
right = center_gt[..., 2] - gt_xs
top = gt_ys - center_gt[..., 1]
bottom = center_gt[..., 3] - gt_ys
center_bbox = torch.stack((left, top, right, bottom), -1)
    inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0  # inside the bbox iff left/top/right/bottom are all > 0
return inside_gt_bbox_mask
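# Samples the instance contour in polar form: for each of n evenly spaced angle
# buckets around (c_x, c_y), keep the farthest contour point; empty buckets fall
# back to neighbouring angles (up to +/-3 degrees) or a small epsilon.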
def get_polar_coordinates(c_x, c_y, pos_mask_contour, n=72):
if len(pos_mask_contour.shape) == 2:
ct = pos_mask_contour
else:
ct = pos_mask_contour[:, 0, :]
x = ct[:, 0] - c_x
y = ct[:, 1] - c_y
angle = torch.atan2(x, y) * 180 / np.pi
angle[angle < 0] += 360
angle = angle.int()
dist = torch.sqrt(x ** 2 + y ** 2)
angle, idx = torch.sort(angle)
dist = dist[idx]
interval = 360 // n
new_coordinate = {}
for i in range(0, 360, interval):
if i in angle:
d = dist[angle == i].max()
new_coordinate[i] = d
elif i + 1 in angle:
d = dist[angle == i + 1].max()
new_coordinate[i] = d
elif i - 1 in angle:
d = dist[angle == i - 1].max()
new_coordinate[i] = d
elif i + 2 in angle:
d = dist[angle == i + 2].max()
new_coordinate[i] = d
elif i - 2 in angle:
d = dist[angle == i - 2].max()
new_coordinate[i] = d
elif i + 3 in angle:
d = dist[angle == i + 3].max()
new_coordinate[i] = d
elif i - 3 in angle:
d = dist[angle == i - 3].max()
new_coordinate[i] = d
distances = torch.zeros(n)
for a in range(0, 360, interval):
if a in new_coordinate.keys():
distances[a // interval] = new_coordinate[a]
else:
new_coordinate[a] = torch.tensor(1e-6)
distances[a // interval] = 1e-6
return distances, new_coordinate
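# Polar centerness = sqrt(min(d) / max(d)) over the sampled contour distances:
# close to 1 for locations near the instance centre, small for off-centre ones
# (optionally normalised by a per-instance maximum centerness).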
def polar_centerness_target(pos_mask_targets, max_centerness=None):
# only calculate pos centerness targets, otherwise there may be nan
centerness_targets = torch.sqrt(pos_mask_targets.min() / pos_mask_targets.max())
if max_centerness:
centerness_targets /= max_centerness
return centerness_targets.clamp_max(1.0)
def get_points_single(featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
@HEADS.register_module
class FourierNetHead(nn.Module):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)),
use_dcn=False,
mask_nms=False,
bbox_from_mask=False,
center_sample=True,
use_mask_center=True,
radius=1.5,
loss_cls=None,
loss_bbox=None,
loss_mask=None,
loss_on_coe=False,
loss_centerness=None,
conv_cfg=None,
norm_cfg=None,
contour_points=360,
use_fourier=False,
num_coe=36,
visulize_coe=36,
centerness_factor=0.5,
normalized_centerness=False):
super(FourierNetHead, self).__init__()
self.use_fourier = use_fourier
self.contour_points = contour_points
self.num_coe = num_coe
self.visulize_coe = visulize_coe
self.interval = 360 // self.contour_points
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_mask = build_loss(loss_mask)
self.loss_on_coe = loss_on_coe
self.loss_centerness = build_loss(loss_centerness)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.use_dcn = use_dcn
self.mask_nms = mask_nms
self.bbox_from_mask = bbox_from_mask
self.vis_num = 1000
self.count = 0
self.center_sample = center_sample
self.use_mask_center = use_mask_center
self.radius = radius
self.centerness_factor = centerness_factor
self.normalized_centerness = normalized_centerness
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
if not self.bbox_from_mask:
self.reg_convs = nn.ModuleList()
self.mask_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if not self.use_dcn:
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
if not self.bbox_from_mask:
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.mask_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
else:
self.cls_convs.append(
ModulatedDeformConvPack(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
dilation=1,
deformable_groups=1,
))
if self.norm_cfg:
self.cls_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])
self.cls_convs.append(nn.ReLU(inplace=True))
if not self.bbox_from_mask:
self.reg_convs.append(
ModulatedDeformConvPack(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
dilation=1,
deformable_groups=1,
))
if self.norm_cfg:
self.reg_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])
self.reg_convs.append(nn.ReLU(inplace=True))
self.mask_convs.append(
ModulatedDeformConvPack(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
dilation=1,
deformable_groups=1,
))
if self.norm_cfg:
self.mask_convs.append(build_norm_layer(self.norm_cfg, self.feat_channels)[1])
self.mask_convs.append(nn.ReLU(inplace=True))
self.polar_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.polar_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
if self.use_fourier:
self.polar_mask = nn.Conv2d(self.feat_channels, self.num_coe * 2, 3, padding=1)
else:
self.polar_mask = nn.Conv2d(self.feat_channels, self.contour_points, 3, padding=1)
self.polar_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales_bbox = nn.ModuleList([Scale(1.0) for _ in self.strides])
self.scales_mask = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
if not self.use_dcn:
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
if not self.bbox_from_mask:
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
for m in self.mask_convs:
normal_init(m.conv, std=0.01)
else:
pass
bias_cls = bias_init_with_prob(0.01)
normal_init(self.polar_cls, std=0.01, bias=bias_cls)
normal_init(self.polar_reg, std=0.01)
normal_init(self.polar_mask, std=0.01)
normal_init(self.polar_centerness, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales_bbox, self.scales_mask)
def forward_single(self, x, scale_bbox, scale_mask):
cls_feat = x
reg_feat = x
mask_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.polar_cls(cls_feat)
for mask_layer in self.mask_convs:
mask_feat = mask_layer(mask_feat)
if self.use_fourier:
mask_pred = self.polar_mask(mask_feat)
mask_pred = scale_mask(mask_pred)
else:
mask_pred = scale_mask(self.polar_mask(mask_feat)).float().exp()
centerness = self.polar_centerness(cls_feat)
if not self.bbox_from_mask:
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale_bbox(self.polar_reg(reg_feat)).float().exp()
else:
bbox_pred = mask_pred[:, :4, :, :]
return cls_score, bbox_pred, centerness, mask_pred
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'mask_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
mask_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_masks=None,
gt_bboxes_ignore=None,
gt_centers=None,
gt_max_centerness=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses) == len(mask_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
self.num_points_per_level = [i.size()[0] for i in all_level_points]
labels, bbox_targets, mask_targets, centerness_targets = self.polar_target(all_level_points, gt_labels,
gt_bboxes, gt_masks, gt_centers,
gt_max_centerness)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
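        # With use_fourier the head predicts complex Fourier coefficients;
        # torch.irfft below zero-pads them to contour_points frequencies and
        # reconstructs per-angle contour distances before the mask loss.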
if self.use_fourier:
if self.loss_on_coe:
flatten_mask_preds = [
mask_pred.permute(0, 2, 3, 1).reshape(-1, self.num_coe, 2)
for mask_pred in mask_preds
]
else:
flatten_mask_preds = []
flatten_bbox_preds = []
for mask_pred, points in zip(mask_preds, all_level_points):
mask_pred = mask_pred.permute(0, 2, 3, 1).reshape(-1, self.num_coe, 2)
if self.bbox_from_mask:
xy, m = self.distance2mask(points.repeat(num_imgs, 1), mask_pred, train=True)
b = torch.stack([xy[:, 0].min(1)[0],
xy[:, 1].min(1)[0],
xy[:, 0].max(1)[0],
xy[:, 1].max(1)[0]], -1)
flatten_bbox_preds.append(b)
flatten_mask_preds.append(m)
else:
m = torch.irfft(torch.cat([mask_pred, torch.zeros(mask_pred.shape[0],
self.contour_points - self.num_coe, 2).to(
"cuda")], 1), 1, True, False).float().exp()
flatten_mask_preds.append(m)
else:
flatten_mask_preds = [
mask_pred.permute(0, 2, 3, 1).reshape(-1, self.contour_points)
for mask_pred in mask_preds
]
if not self.bbox_from_mask:
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores) # [num_pixel, 80]
flatten_bbox_preds = torch.cat(flatten_bbox_preds) # [num_pixel, 4]
flatten_mask_preds = torch.cat(flatten_mask_preds) # [num_pixel, n]
flatten_centerness = torch.cat(flatten_centerness) # [num_pixel]
flatten_labels = torch.cat(labels).long() # [num_pixel]
flatten_centerness_targets = torch.cat(centerness_targets)
flatten_bbox_targets = torch.cat(bbox_targets) # [num_pixel, 4]
flatten_mask_targets = torch.cat(mask_targets) # [num_pixel, n]
flatten_points = torch.cat([points.repeat(num_imgs, 1)
for points in all_level_points]) # [num_pixel,2]
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
pos_mask_preds = flatten_mask_preds[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_mask_targets = flatten_mask_targets[pos_inds]
pos_centerness_targets = flatten_centerness_targets[pos_inds]
pos_points = flatten_points[pos_inds]
if self.bbox_from_mask:
pos_decoded_bbox_preds = pos_bbox_preds
else:
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
if self.loss_on_coe:
pos_mask_targets = torch.rfft(pos_mask_targets, 1, True, False)
pos_mask_targets = pos_mask_targets[..., :self.num_coe, :]
loss_mask = self.loss_mask(pos_mask_preds,
pos_mask_targets)
else:
loss_mask = self.loss_mask(pos_mask_preds,
pos_mask_targets,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum()
)
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_mask = pos_mask_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_mask=loss_mask,
loss_centerness=loss_centerness)
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def polar_target(self, points, labels_list, bbox_list, mask_list, centers_list, centerness_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list, mask_targets_list, centerness_targets_list = multi_apply(
self.polar_target_single,
bbox_list,
mask_list,
labels_list,
centers_list,
centerness_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
centerness_targets_list = [
centerness_targets.split(num_points, 0)
for centerness_targets in centerness_targets_list
]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
mask_targets_list = [
mask_targets.split(num_points, 0)
for mask_targets in mask_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_centerness_targets = []
concat_lvl_bbox_targets = []
concat_lvl_mask_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_centerness_targets.append(
torch.cat([centerness[i] for centerness in centerness_targets_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
concat_lvl_mask_targets.append(
torch.cat(
[mask_targets[i] for mask_targets in mask_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets, concat_lvl_mask_targets, concat_lvl_centerness_targets
def polar_target_single(self, gt_bboxes, gt_masks, gt_labels, mask_centers, gt_max_centerness, points,
regress_ranges):
# Sum of all points ever
num_points = points.size(0)
# Number of ground truth objects
num_gts = gt_labels.size(0)
        if num_gts == 0:
            # return empty targets for all four outputs so the multi_apply
            # unpacking in polar_target() does not fail on images without GT
            return gt_labels.new_zeros(num_points), \
                   gt_bboxes.new_zeros((num_points, 4)), \
                   gt_bboxes.new_zeros((num_points, self.contour_points)), \
                   gt_bboxes.new_zeros(num_points)
# Area of all bounding boxes
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * \
(gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
areas = areas[None].repeat(num_points, 1) # Make a copy for all points
# Make a copy for each object (adds a dimension equal to num of ground truth bboxes)
regress_ranges = regress_ranges[:, None, :].expand(num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) # Make a copy for all points
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts) # Make a copy for each object
ys = ys[:, None].expand(num_points, num_gts) # Make a copy for each object
# The pixel distance between all object bounding boxes and all points in feature map
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# make centerness regression targets
mask_centers = mask_centers[None].expand(num_points, num_gts, 2)
if self.center_sample:
if self.use_mask_center:
inside_gt_bbox_mask = get_mask_sample_region(gt_bboxes,
mask_centers,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = self.get_sample_region(gt_bboxes,
self.strides,
self.num_points_per_level,
xs,
ys,
radius=self.radius)
else:
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
# returns the maximum vector in the bounding box targets
max_regress_distance = bbox_targets.max(-1)[0]
# check if it is in regress range
inside_regress_range = (max_regress_distance >= regress_ranges[..., 0]) & \
(max_regress_distance <= regress_ranges[..., 1])
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
# set the ground truth labels
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
# get the indexes of features which have objects
pos_inds = labels.nonzero().reshape(-1)
mask_targets = torch.zeros(num_points, self.contour_points, device=bbox_targets.device).float()
centerness_target = torch.zeros(num_points, device=bbox_targets.device).float()
pos_mask_ids = min_area_inds[pos_inds]
for p, i in zip(pos_inds, pos_mask_ids):
x, y = points[p]
pos_mask_contour = gt_masks[i]
dists, _ = get_polar_coordinates(x, y, pos_mask_contour, self.contour_points)
mask_targets[p] = dists
if self.normalized_centerness:
centerness_target[p] = polar_centerness_target(dists, gt_max_centerness[i])
else:
centerness_target[p] = polar_centerness_target(dists)
return labels, bbox_targets, mask_targets, centerness_target
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
mask_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
mask_pred_list = [
mask_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list,
bbox_pred_list,
mask_pred_list,
centerness_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mask_preds,
centernesses,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
mlvl_masks = []
mlvl_centerness = []
for cls_score, bbox_pred, mask_pred, centerness, points in zip(
cls_scores, bbox_preds, mask_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
if self.use_fourier:
mask_pred = mask_pred.permute(1, 2, 0).reshape(-1, self.num_coe * 2)
else:
mask_pred = mask_pred.permute(1, 2, 0).reshape(-1, self.contour_points)
nms_pre = cfg.get('nms_pre', -1)
if 0 < nms_pre < scores.shape[0]:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
mask_pred = mask_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
if not self.bbox_from_mask:
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
# masks, _ = self.distance2mask(points, mask_pred, bbox=bboxes)
masks, _ = self.distance2mask(points, mask_pred, max_shape=img_shape)
else:
masks, _ = self.distance2mask(points, mask_pred, max_shape=img_shape)
bboxes = torch.stack([masks[:, 0].min(1)[0],
masks[:, 1].min(1)[0],
masks[:, 0].max(1)[0],
masks[:, 1].max(1)[0]], -1)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_masks.append(masks)
mlvl_bboxes = torch.cat(mlvl_bboxes)
mlvl_masks = torch.cat(mlvl_masks)
        if rescale:
            _mlvl_bboxes = mlvl_bboxes / mlvl_bboxes.new_tensor(scale_factor)
            try:
                # TODO:change cuda
                scale_factor = torch.tensor(scale_factor)[:2].cuda().unsqueeze(1).repeat(1, self.contour_points)
                _mlvl_masks = mlvl_masks / scale_factor
            except (RuntimeError, TypeError, NameError, IndexError):
                _mlvl_masks = mlvl_masks / mlvl_masks.new_tensor(scale_factor)
        else:
            # keep _mlvl_bboxes/_mlvl_masks defined when rescaling is disabled,
            # since the NMS branches below use them unconditionally
            _mlvl_bboxes = mlvl_bboxes
            _mlvl_masks = mlvl_masks
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
if self.mask_nms:
'''1 mask->min_bbox->nms, performance same to origin box'''
_mlvl_bboxes = torch.stack([_mlvl_masks[:, 0].min(1)[0],
_mlvl_masks[:, 1].min(1)[0],
_mlvl_masks[:, 0].max(1)[0],
_mlvl_masks[:, 1].max(1)[0]], -1)
det_bboxes, det_labels, det_masks = multiclass_nms_with_mask(
_mlvl_bboxes,
mlvl_scores,
_mlvl_masks,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness + self.centerness_factor)
else:
'''2 origin bbox->nms, performance same to mask->min_bbox'''
det_bboxes, det_labels, det_masks = multiclass_nms_with_mask(
_mlvl_bboxes,
mlvl_scores,
_mlvl_masks,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness + self.centerness_factor)
return det_bboxes, det_labels, det_masks
# test
def distance2mask(self, points, distances, max_shape=None, train=False, bbox=None):
"""Decode distance prediction to mask points
Args:
points (Tensor): Shape (n, 2), [x, y].
distances (Tensor): Distances from the given point to edge of contour.
max_shape (tuple): Shape of the image.
train (bool): set true in training mode
bbox (bool): clamp mask predictions which are outside the predicted bbox
Returns:
Tensor: Decoded masks.
"""
if self.use_fourier:
if train:
distances = torch.irfft(
torch.cat([distances, torch.zeros(distances.shape[0], self.contour_points - self.num_coe, 2,
device=points.device)], 1), 1, True, False).float().exp()
else:
distances = distances.reshape(-1, self.num_coe, 2)
distances = torch.irfft(
torch.cat([distances[..., :self.visulize_coe, :],
torch.zeros(distances.shape[0], self.contour_points - self.visulize_coe, 2,
device=points.device)], 1), 1, True, False).float().exp()
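        # angles are measured from the +y axis, mirroring atan2(x, y) in
        # get_polar_coordinates, hence x uses sin and y uses cos below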
angles = torch.range(0, 359, self.interval, device=points.device) / 180 * math.pi
num_points = points.shape[0]
points = points[:, :, None].repeat(1, 1, angles.shape[0])
c_x, c_y = points[:, 0], points[:, 1]
sin = torch.sin(angles)
cos = torch.cos(angles)
sin = sin[None, :].repeat(num_points, 1)
cos = cos[None, :].repeat(num_points, 1)
x = distances * sin + c_x
y = distances * cos + c_y
if max_shape is not None:
x = x.clamp(min=0, max=max_shape[1] - 1)
y = y.clamp(min=0, max=max_shape[0] - 1)
if bbox is not None:
x = torch.max(torch.min(x, bbox[:, 2].unsqueeze(1)), bbox[:, 0].unsqueeze(1))
y = torch.max(torch.min(y, bbox[:, 3].unsqueeze(1)), bbox[:, 1].unsqueeze(1))
res = torch.cat([x[:, None, :], y[:, None, :]], dim=1)
return res, distances
|
[
"torch.sqrt",
"torch.cat",
"torch.cos",
"mmdet.core.distance2bbox",
"torch.arange",
"torch.range",
"mmdet.core.multiclass_nms_with_mask",
"mmcv.cnn.normal_init",
"torch.zeros",
"torch.atan2",
"mmdet.core.multi_apply",
"mmdet.ops.ModulatedDeformConvPack",
"torch.where",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.sort",
"torch.nn.ReLU",
"torch.stack",
"mmdet.core.force_fp32",
"torch.meshgrid",
"torch.rfft",
"torch.sin",
"torch.tensor"
] |
[((1861, 1904), 'torch.stack', 'torch.stack', (['(left, top, right, bottom)', '(-1)'], {}), '((left, top, right, bottom), -1)\n', (1872, 1904), False, 'import torch\n'), ((2346, 2373), 'torch.sqrt', 'torch.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2356, 2373), False, 'import torch\n'), ((2391, 2408), 'torch.sort', 'torch.sort', (['angle'], {}), '(angle)\n', (2401, 2408), False, 'import torch\n'), ((3266, 3280), 'torch.zeros', 'torch.zeros', (['n'], {}), '(n)\n', (3277, 3280), False, 'import torch\n'), ((4003, 4066), 'torch.arange', 'torch.arange', (['(0)', '(w * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, w * stride, stride, dtype=dtype, device=device)\n', (4015, 4066), False, 'import torch\n'), ((4090, 4153), 'torch.arange', 'torch.arange', (['(0)', '(h * stride)', 'stride'], {'dtype': 'dtype', 'device': 'device'}), '(0, h * stride, stride, dtype=dtype, device=device)\n', (4102, 4153), False, 'import torch\n'), ((4174, 4206), 'torch.meshgrid', 'torch.meshgrid', (['y_range', 'x_range'], {}), '(y_range, x_range)\n', (4188, 4206), False, 'import torch\n'), ((12444, 12523), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds', 'mask_preds', 'centernesses')"}), "(apply_to=('cls_scores', 'bbox_preds', 'mask_preds', 'centernesses'))\n", (12454, 12523), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((26270, 26335), 'mmdet.core.force_fp32', 'force_fp32', ([], {'apply_to': "('cls_scores', 'bbox_preds', 'centernesses')"}), "(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n", (26280, 26335), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((1293, 1361), 'torch.where', 'torch.where', (['(xmin > gt_bb[beg:end, :, 0])', 'xmin', 'gt_bb[beg:end, :, 0]'], {}), '(xmin > gt_bb[beg:end, :, 0], xmin, gt_bb[beg:end, :, 0])\n', (1304, 1361), False, 'import torch\n'), ((1397, 1465), 'torch.where', 'torch.where', (['(ymin > gt_bb[beg:end, :, 1])', 'ymin', 'gt_bb[beg:end, :, 1]'], {}), '(ymin > gt_bb[beg:end, :, 1], ymin, gt_bb[beg:end, :, 1])\n', (1408, 1465), False, 'import torch\n'), ((1501, 1569), 'torch.where', 'torch.where', (['(xmax > gt_bb[beg:end, :, 2])', 'gt_bb[beg:end, :, 2]', 'xmax'], {}), '(xmax > gt_bb[beg:end, :, 2], gt_bb[beg:end, :, 2], xmax)\n', (1512, 1569), False, 'import torch\n'), ((1605, 1673), 'torch.where', 'torch.where', (['(ymax > gt_bb[beg:end, :, 3])', 'gt_bb[beg:end, :, 3]', 'ymax'], {}), '(ymax > gt_bb[beg:end, :, 3], gt_bb[beg:end, :, 3], ymax)\n', (1616, 1673), False, 'import torch\n'), ((6680, 6695), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6693, 6695), True, 'import torch.nn as nn\n'), ((6803, 6818), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6816, 6818), True, 'import torch.nn as nn\n'), ((10012, 10078), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', 'self.cls_out_channels', '(3)'], {'padding': '(1)'}), '(self.feat_channels, self.cls_out_channels, 3, padding=1)\n', (10021, 10078), True, 'import torch.nn as nn\n'), ((10117, 10163), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(4)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 4, 3, padding=1)\n', (10126, 10163), True, 'import torch.nn as nn\n'), ((10426, 10472), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(1)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, 1, 3, padding=1)\n', (10435, 10472), True, 'import torch.nn as nn\n'), ((11069, 11121), 'mmcv.cnn.normal_init', 'normal_init', (['self.polar_cls'], {'std': '(0.01)', 'bias': 'bias_cls'}), '(self.polar_cls, std=0.01, bias=bias_cls)\n', (11080, 11121), False, 'from mmcv.cnn import normal_init\n'), ((11130, 11167), 'mmcv.cnn.normal_init', 'normal_init', (['self.polar_reg'], {'std': '(0.01)'}), '(self.polar_reg, std=0.01)\n', (11141, 11167), False, 'from mmcv.cnn import normal_init\n'), ((11176, 11214), 'mmcv.cnn.normal_init', 'normal_init', (['self.polar_mask'], {'std': '(0.01)'}), '(self.polar_mask, std=0.01)\n', (11187, 11214), False, 'from mmcv.cnn import normal_init\n'), ((11223, 11267), 'mmcv.cnn.normal_init', 'normal_init', (['self.polar_centerness'], {'std': '(0.01)'}), '(self.polar_centerness, std=0.01)\n', (11234, 11267), False, 'from mmcv.cnn import normal_init\n'), ((11314, 11389), 'mmdet.core.multi_apply', 'multi_apply', (['self.forward_single', 'feats', 'self.scales_bbox', 'self.scales_mask'], {}), '(self.forward_single, feats, self.scales_bbox, self.scales_mask)\n', (11325, 11389), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((15748, 15777), 'torch.cat', 'torch.cat', (['flatten_cls_scores'], {}), '(flatten_cls_scores)\n', (15757, 15777), False, 'import torch\n'), ((15826, 15855), 'torch.cat', 'torch.cat', (['flatten_bbox_preds'], {}), '(flatten_bbox_preds)\n', (15835, 15855), False, 'import torch\n'), ((15903, 15932), 'torch.cat', 'torch.cat', (['flatten_mask_preds'], {}), '(flatten_mask_preds)\n', (15912, 15932), False, 'import torch\n'), ((15980, 16009), 'torch.cat', 'torch.cat', (['flatten_centerness'], {}), '(flatten_centerness)\n', (15989, 16009), False, 'import torch\n'), ((16128, 16157), 'torch.cat', 'torch.cat', (['centerness_targets'], {}), '(centerness_targets)\n', (16137, 16157), False, 'import torch\n'), ((16189, 16212), 'torch.cat', 'torch.cat', (['bbox_targets'], {}), '(bbox_targets)\n', (16198, 16212), False, 'import torch\n'), ((16262, 16285), 'torch.cat', 'torch.cat', (['mask_targets'], {}), '(mask_targets)\n', (16271, 16285), False, 'import torch\n'), ((19965, 20006), 'torch.cat', 'torch.cat', (['expanded_regress_ranges'], {'dim': '(0)'}), '(expanded_regress_ranges, dim=0)\n', (19974, 20006), False, 'import torch\n'), ((20031, 20055), 'torch.cat', 'torch.cat', (['points'], {'dim': '(0)'}), '(points, dim=0)\n', (20040, 20055), False, 'import torch\n'), ((20193, 20365), 'mmdet.core.multi_apply', 'multi_apply', (['self.polar_target_single', 'bbox_list', 'mask_list', 'labels_list', 'centers_list', 'centerness_list'], {'points': 'concat_points', 'regress_ranges': 'concat_regress_ranges'}), '(self.polar_target_single, bbox_list, mask_list, labels_list,\n centers_list, centerness_list, points=concat_points, regress_ranges=\n concat_regress_ranges)\n', (20204, 20365), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((23360, 23403), 'torch.stack', 'torch.stack', (['(left, top, right, bottom)', '(-1)'], {}), '((left, top, right, bottom), -1)\n', (23371, 23403), False, 'import torch\n'), ((30516, 30538), 'torch.cat', 'torch.cat', (['mlvl_bboxes'], {}), '(mlvl_bboxes)\n', (30525, 30538), False, 'import torch\n'), ((30560, 30581), 'torch.cat', 'torch.cat', (['mlvl_masks'], {}), '(mlvl_masks)\n', (30569, 30581), False, 'import torch\n'), ((31072, 31094), 'torch.cat', 'torch.cat', (['mlvl_scores'], {}), '(mlvl_scores)\n', (31081, 31094), False, 'import torch\n'), ((31182, 31222), 'torch.cat', 'torch.cat', (['[padding, mlvl_scores]'], {'dim': '(1)'}), '([padding, mlvl_scores], dim=1)\n', (31191, 31222), False, 'import torch\n'), ((31249, 31275), 'torch.cat', 'torch.cat', (['mlvl_centerness'], {}), '(mlvl_centerness)\n', (31258, 31275), False, 'import torch\n'), ((33972, 33989), 'torch.sin', 'torch.sin', (['angles'], {}), '(angles)\n', (33981, 33989), False, 'import torch\n'), ((34004, 34021), 'torch.cos', 'torch.cos', (['angles'], {}), '(angles)\n', (34013, 34021), False, 'import torch\n'), ((34554, 34602), 'torch.cat', 'torch.cat', (['[x[:, None, :], y[:, None, :]]'], {'dim': '(1)'}), '([x[:, None, :], y[:, None, :]], dim=1)\n', (34563, 34602), False, 'import torch\n'), ((2251, 2268), 'torch.atan2', 'torch.atan2', (['x', 'y'], {}), '(x, y)\n', (2262, 2268), False, 'import torch\n'), ((3462, 3481), 'torch.tensor', 'torch.tensor', (['(1e-06)'], {}), '(1e-06)\n', (3474, 3481), False, 'import torch\n'), ((6761, 6776), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6774, 6776), True, 'import torch.nn as nn\n'), ((10223, 10284), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', '(self.num_coe * 2)', '(3)'], {'padding': '(1)'}), '(self.feat_channels, self.num_coe * 2, 3, padding=1)\n', (10232, 10284), True, 'import torch.nn as nn\n'), ((10329, 10393), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.feat_channels', 'self.contour_points', '(3)'], {'padding': '(1)'}), '(self.feat_channels, self.contour_points, 3, padding=1)\n', (10338, 10393), True, 'import torch.nn as nn\n'), ((17365, 17408), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', 'pos_bbox_targets'], {}), '(pos_points, pos_bbox_targets)\n', (17378, 17408), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((31704, 31874), 'mmdet.core.multiclass_nms_with_mask', 'multiclass_nms_with_mask', (['_mlvl_bboxes', 'mlvl_scores', '_mlvl_masks', 'cfg.score_thr', 'cfg.nms', 'cfg.max_per_img'], {'score_factors': '(mlvl_centerness + self.centerness_factor)'}), '(_mlvl_bboxes, mlvl_scores, _mlvl_masks, cfg.\n score_thr, cfg.nms, cfg.max_per_img, score_factors=mlvl_centerness +\n self.centerness_factor)\n', (31728, 31874), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((32115, 32285), 'mmdet.core.multiclass_nms_with_mask', 'multiclass_nms_with_mask', (['_mlvl_bboxes', 'mlvl_scores', '_mlvl_masks', 'cfg.score_thr', 'cfg.nms', 'cfg.max_per_img'], {'score_factors': '(mlvl_centerness + self.centerness_factor)'}), '(_mlvl_bboxes, mlvl_scores, _mlvl_masks, cfg.\n score_thr, cfg.nms, cfg.max_per_img, score_factors=mlvl_centerness +\n self.centerness_factor)\n', (32139, 32285), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((10739, 10768), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (10750, 10768), False, 'from mmcv.cnn import normal_init\n'), ((10954, 10983), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (10965, 10983), False, 'from mmcv.cnn import normal_init\n'), ((16051, 16068), 'torch.cat', 'torch.cat', (['labels'], {}), '(labels)\n', (16060, 16068), False, 'import torch\n'), ((17284, 17325), 'mmdet.core.distance2bbox', 'distance2bbox', (['pos_points', 'pos_bbox_preds'], {}), '(pos_points, pos_bbox_preds)\n', (17297, 17325), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((17801, 17845), 'torch.rfft', 'torch.rfft', (['pos_mask_targets', '(1)', '(True)', '(False)'], {}), '(pos_mask_targets, 1, True, False)\n', (17811, 17845), False, 'import torch\n'), ((21334, 21382), 'torch.cat', 'torch.cat', (['[labels[i] for labels in labels_list]'], {}), '([labels[i] for labels in labels_list])\n', (21343, 21382), False, 'import torch\n'), ((21450, 21518), 'torch.cat', 'torch.cat', (['[centerness[i] for centerness in centerness_targets_list]'], {}), '([centerness[i] for centerness in centerness_targets_list])\n', (21459, 21518), False, 'import torch\n'), ((21580, 21646), 'torch.cat', 'torch.cat', (['[bbox_targets[i] for bbox_targets in bbox_targets_list]'], {}), '([bbox_targets[i] for bbox_targets in bbox_targets_list])\n', (21589, 21646), False, 'import torch\n'), ((21729, 21795), 'torch.cat', 'torch.cat', (['[mask_targets[i] for mask_targets in mask_targets_list]'], {}), '([mask_targets[i] for mask_targets in mask_targets_list])\n', (21738, 21795), False, 'import torch\n'), ((25508, 25580), 'torch.zeros', 'torch.zeros', (['num_points', 'self.contour_points'], {'device': 'bbox_targets.device'}), '(num_points, self.contour_points, device=bbox_targets.device)\n', (25519, 25580), False, 'import torch\n'), ((25617, 25668), 'torch.zeros', 'torch.zeros', (['num_points'], {'device': 'bbox_targets.device'}), '(num_points, device=bbox_targets.device)\n', (25628, 25668), False, 'import torch\n'), ((29757, 29810), 'mmdet.core.distance2bbox', 'distance2bbox', (['points', 'bbox_pred'], {'max_shape': 'img_shape'}), '(points, bbox_pred, max_shape=img_shape)\n', (29770, 29810), False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms_with_mask\n'), ((33735, 33791), 'torch.range', 'torch.range', (['(0)', '(359)', 'self.interval'], {'device': 'points.device'}), '(0, 359, self.interval, device=points.device)\n', (33746, 33791), False, 'import torch\n'), ((8294, 8403), 'mmdet.ops.ModulatedDeformConvPack', 'ModulatedDeformConvPack', (['chn', 'self.feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': '(1)'}), '(chn, self.feat_channels, 3, stride=1, padding=1,\n dilation=1, deformable_groups=1)\n', (8317, 8403), False, 'from mmdet.ops import ModulatedDeformConvPack\n'), ((8762, 8783), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8769, 8783), True, 'import torch.nn as nn\n'), ((9493, 9602), 'mmdet.ops.ModulatedDeformConvPack', 'ModulatedDeformConvPack', (['chn', 'self.feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': '(1)'}), '(chn, self.feat_channels, 3, stride=1, padding=1,\n dilation=1, deformable_groups=1)\n', (9516, 9602), False, 'from mmdet.ops import ModulatedDeformConvPack\n'), ((9963, 9984), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9970, 9984), True, 'import torch.nn as nn\n'), ((10870, 10899), 'mmcv.cnn.normal_init', 'normal_init', (['m.conv'], {'std': '(0.01)'}), '(m.conv, std=0.01)\n', (10881, 10899), False, 'from mmcv.cnn import normal_init\n'), ((8897, 9006), 'mmdet.ops.ModulatedDeformConvPack', 'ModulatedDeformConvPack', (['chn', 'self.feat_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': '(1)'}), '(chn, self.feat_channels, 3, stride=1, padding=1,\n dilation=1, deformable_groups=1)\n', (8920, 9006), False, 'from mmdet.ops import ModulatedDeformConvPack\n'), ((9409, 9430), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9416, 9430), True, 'import torch.nn as nn\n'), ((30763, 30789), 'torch.tensor', 'torch.tensor', (['scale_factor'], 
{}), '(scale_factor)\n', (30775, 30789), False, 'import torch\n'), ((33129, 33225), 'torch.zeros', 'torch.zeros', (['distances.shape[0]', '(self.contour_points - self.num_coe)', '(2)'], {'device': 'points.device'}), '(distances.shape[0], self.contour_points - self.num_coe, 2,\n device=points.device)\n', (33140, 33225), False, 'import torch\n'), ((33540, 33641), 'torch.zeros', 'torch.zeros', (['distances.shape[0]', '(self.contour_points - self.visulize_coe)', '(2)'], {'device': 'points.device'}), '(distances.shape[0], self.contour_points - self.visulize_coe, 2,\n device=points.device)\n', (33551, 33641), False, 'import torch\n'), ((15067, 15137), 'torch.zeros', 'torch.zeros', (['mask_pred.shape[0]', '(self.contour_points - self.num_coe)', '(2)'], {}), '(mask_pred.shape[0], self.contour_points - self.num_coe, 2)\n', (15078, 15137), False, 'import torch\n')]
|
from __future__ import division
import numpy as np
from rwt import dwt, idwt, dwtaxis, idwtaxis
from rwt.wavelets import waveletCoeffs
from rwt.utilities import softThreshold, hardThreshold
import matplotlib.pyplot as plt
from scipy.misc import lena
import os
IMAGES_BASE = 'images'
def savefig(title):
plt.savefig(os.path.join(IMAGES_BASE, title))
def tutorial1():
img = lena()
cl, ch, rl, rh = waveletCoeffs('db1')
img_wavelet, L = dwt(img, cl, ch, L=1)
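    # dwt returns the transformed image together with the number of
    # decomposition levels actually applied (L); here L=1 requests one level.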
plt.figure()
plt.gray()
plt.imshow(img_wavelet)
savefig('lena_dwt.png')
if __name__ == '__main__':
tutorial1()
plt.show()
|
[
"scipy.misc.lena",
"matplotlib.pyplot.gray",
"matplotlib.pyplot.show",
"rwt.wavelets.waveletCoeffs",
"matplotlib.pyplot.imshow",
"rwt.dwt",
"matplotlib.pyplot.figure",
"os.path.join"
] |
[((400, 406), 'scipy.misc.lena', 'lena', ([], {}), '()\n', (404, 406), False, 'from scipy.misc import lena\n'), ((435, 455), 'rwt.wavelets.waveletCoeffs', 'waveletCoeffs', (['"""db1"""'], {}), "('db1')\n", (448, 455), False, 'from rwt.wavelets import waveletCoeffs\n'), ((478, 499), 'rwt.dwt', 'dwt', (['img', 'cl', 'ch'], {'L': '(1)'}), '(img, cl, ch, L=1)\n', (481, 499), False, 'from rwt import dwt, idwt, dwtaxis, idwtaxis\n'), ((511, 523), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (521, 523), True, 'import matplotlib.pyplot as plt\n'), ((529, 539), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (537, 539), True, 'import matplotlib.pyplot as plt\n'), ((545, 568), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_wavelet'], {}), '(img_wavelet)\n', (555, 568), True, 'import matplotlib.pyplot as plt\n'), ((664, 674), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (672, 674), True, 'import matplotlib.pyplot as plt\n'), ((333, 365), 'os.path.join', 'os.path.join', (['IMAGES_BASE', 'title'], {}), '(IMAGES_BASE, title)\n', (345, 365), False, 'import os\n')]
|
# Copyright (C) 2019 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: <NAME>, <NAME>
from mocasin.common.trace import (
DataflowTrace,
ComputeSegment,
ReadTokenSegment,
WriteTokenSegment,
)
class TgffTrace(DataflowTrace):
"""Represents the behavior of an SDF3 application
See `~DataflowTrace`.
Args:
processor_list (list of TgffProcessor): a list of all processors the
trace should be generated for
tgff_graph (TgffGraph): The tgff graph for which traces should be
generated
repetitions (int): number of iterations in which the complete graph is
executed
"""
def __init__(self, processor_list, tgff_graph, repetitions):
self._processor_list = processor_list
self._repetitions = repetitions
self._tgff_graph = tgff_graph
def get_trace(self, process):
"""Get the trace for a specific task in the TGFF graph
Args:
process (str): Name of the task to get a trace for
Yields:
ComputeSegment, ReadTokenSegment, or WriteTokenSegment: The next
segment in the process trace
"""
task_name = process
if task_name not in self._tgff_graph.tasks:
raise RuntimeError(f"Unknown task! ({process})")
# prepare a dict of computation cycles for all processor types
processor_cycles = {}
for processor in self._processor_list.values():
processor_cycles[processor.type] = processor.get_operation(
self._tgff_graph.tasks[task_name]
)
# iterate over all repetitions
for _ in range(0, self._repetitions):
# First, the task reads from all input channels
for channel_name, properties in self._tgff_graph.channels.items():
# properties[1] is the name of the channel's sink task
# FIXME: This mechanism should be simplified or the variable
# named property
if task_name == properties[1]:
yield ReadTokenSegment(channel=channel_name, num_tokens=1)
# Then, it computes
yield ComputeSegment(processor_cycles)
# Finally, it writes to all output channels
for channel_name, properties in self._tgff_graph.channels.items():
# properties[0] is the name of the channel's source task
# FIXME: This mechanism should be simplified or the variable
# named property
if task_name == properties[0]:
yield WriteTokenSegment(channel=channel_name, num_tokens=1)
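# Usage sketch (hypothetical objects; processor list and graph come from the
# mocasin TGFF frontend, and "task_0" is an illustrative task name):
#   trace = TgffTrace(processor_list, tgff_graph, repetitions=2)
#   for segment in trace.get_trace("task_0"):
#       ...  # ReadTokenSegment / ComputeSegment / WriteTokenSegment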
|
[
"mocasin.common.trace.WriteTokenSegment",
"mocasin.common.trace.ComputeSegment",
"mocasin.common.trace.ReadTokenSegment"
] |
[((2220, 2252), 'mocasin.common.trace.ComputeSegment', 'ComputeSegment', (['processor_cycles'], {}), '(processor_cycles)\n', (2234, 2252), False, 'from mocasin.common.trace import DataflowTrace, ComputeSegment, ReadTokenSegment, WriteTokenSegment\n'), ((2116, 2168), 'mocasin.common.trace.ReadTokenSegment', 'ReadTokenSegment', ([], {'channel': 'channel_name', 'num_tokens': '(1)'}), '(channel=channel_name, num_tokens=1)\n', (2132, 2168), False, 'from mocasin.common.trace import DataflowTrace, ComputeSegment, ReadTokenSegment, WriteTokenSegment\n'), ((2645, 2698), 'mocasin.common.trace.WriteTokenSegment', 'WriteTokenSegment', ([], {'channel': 'channel_name', 'num_tokens': '(1)'}), '(channel=channel_name, num_tokens=1)\n', (2662, 2698), False, 'from mocasin.common.trace import DataflowTrace, ComputeSegment, ReadTokenSegment, WriteTokenSegment\n')]
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from fnmatch import fnmatchcase
import sys
from . import Command
from .. import console
from ..console import log
from ..results import iter_results
from . import util
class Rm(Command):
@classmethod
def setup_arguments(cls, subparsers):
parser = subparsers.add_parser(
"rm", help="Remove results from the database",
description="""
Removes entries from the results database.
""")
parser.add_argument(
'patterns', nargs='+',
help="""Pattern(s) to match, each of the form X=Y. X may
be one of "benchmark", "commit_hash", "python" or any of
the machine or environment params. Y is a case-sensitive
glob pattern.""")
parser.add_argument(
"-y", action="store_true",
help="""Don't prompt for confirmation.""")
parser.set_defaults(func=cls.run_from_args)
return parser
@classmethod
def run_from_conf_args(cls, conf, args):
return cls.run(conf, args.patterns, args.y)
@classmethod
def run(cls, conf, patterns, y=True):
global_patterns = {}
single_benchmark = None
files_to_remove = set()
count = 0
for pattern in patterns:
parts = pattern.split('=', 1)
if len(parts) != 2:
raise util.UserError("Invalid pattern '{0}'".format(pattern))
if parts[0] == 'benchmark':
if single_benchmark is not None:
raise util.UserError("'benchmark' appears more than once")
single_benchmark = parts[1]
else:
if parts[0] in global_patterns:
raise util.UserError(
"'{0}' appears more than once".format(parts[0]))
global_patterns[parts[0]] = parts[1]
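        # Scan every stored result; a file matches only if all global
        # patterns match (the 'benchmark' glob is applied per benchmark below).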
for result in iter_results(conf.results_dir):
found = True
for key, val in six.iteritems(global_patterns):
if key == 'commit_hash':
if not util.hash_equal(result.commit_hash, val):
found = False
break
elif key == 'python':
if not fnmatchcase(result.env.python, val):
found = False
break
else:
if not fnmatchcase(result.params.get(key), val):
found = False
break
if not found:
continue
if single_benchmark is not None:
found = False
for benchmark in list(result.get_all_result_keys()):
if fnmatchcase(benchmark, single_benchmark):
count += 1
files_to_remove.add(result)
result.remove_result(benchmark)
else:
files_to_remove.add(result)
if single_benchmark is not None:
log.info("Removing {0} benchmarks in {1} files".format(
count, len(files_to_remove)))
else:
log.info("Removing {0} files".format(len(files_to_remove)))
if not y:
do = console.get_answer_default("Perform operations", "n")
if len(do) and do.lower()[0] != 'y':
sys.exit(0)
if single_benchmark is not None:
for result in files_to_remove:
result.save(conf.results_dir)
else:
for result in files_to_remove:
result.rm(conf.results_dir)
|
[
"six.iteritems",
"sys.exit",
"fnmatch.fnmatchcase"
] |
[((2189, 2219), 'six.iteritems', 'six.iteritems', (['global_patterns'], {}), '(global_patterns)\n', (2202, 2219), False, 'import six\n'), ((3592, 3603), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3600, 3603), False, 'import sys\n'), ((2948, 2988), 'fnmatch.fnmatchcase', 'fnmatchcase', (['benchmark', 'single_benchmark'], {}), '(benchmark, single_benchmark)\n', (2959, 2988), False, 'from fnmatch import fnmatchcase\n'), ((2464, 2499), 'fnmatch.fnmatchcase', 'fnmatchcase', (['result.env.python', 'val'], {}), '(result.env.python, val)\n', (2475, 2499), False, 'from fnmatch import fnmatchcase\n')]
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
import argparse
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
def RF_Classifier(X, y, indep=None, fold=5, n_trees=100, out='RF_output'):
"""
Parameters:
----------
:param X: 2-D ndarray
:param y: 1-D ndarray
:param indep: 2-D ndarray, the first column is labels and the rest are feature values
:param fold: int, default 5
:param n_trees: int, number of trees, default: 5
:param out:
:return:
info: str, the model parameters
cross-validation result: list with element is ndarray
independent result: ndarray, the first column is labels and the rest are prediction scores.
"""
    classes = sorted(list(set(y)))
    prediction_result_cv = []
    prediction_result_ind = np.array([])
    # guard against indep=None (the default) before touching its shape
    if indep is not None and indep.shape[0] != 0:
        prediction_result_ind = np.zeros((len(indep), len(classes) + 1))
        prediction_result_ind[:, 0] = indep[:, 0]
folds = StratifiedKFold(fold).split(X, y)
for i, (trained, valided) in enumerate(folds):
train_y, train_X = y[trained], X[trained]
valid_y, valid_X = y[valided], X[valided]
model = RandomForestClassifier(n_estimators=n_trees, bootstrap=False)
rfc = model.fit(train_X, train_y)
scores = rfc.predict_proba(valid_X)
tmp_result = np.zeros((len(valid_y), len(classes) + 1))
tmp_result[:, 0], tmp_result[:, 1:] = valid_y, scores
prediction_result_cv.append(tmp_result)
        # accumulate independent-set predictions across folds
        if indep is not None and indep.shape[0] != 0:
            prediction_result_ind[:, 1:] += rfc.predict_proba(indep[:, 1:])
    if indep is not None and indep.shape[0] != 0:
        prediction_result_ind[:, 1:] /= fold
header = 'n_trees: %d' % n_trees
return header, prediction_result_cv, prediction_result_ind
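# Usage sketch (hypothetical arrays; pass None or an empty (0, n+1) array as
# `indep` to skip independent testing):
#   X, y = np.random.rand(100, 20), np.random.randint(0, 2, 100)
#   header, cv_res, ind_res = RF_Classifier(X, y, indep=None, fold=5)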
|
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"sklearn.model_selection.StratifiedKFold"
] |
[((1023, 1035), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1031, 1035), True, 'import numpy as np\n'), ((1410, 1471), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_trees', 'bootstrap': '(False)'}), '(n_estimators=n_trees, bootstrap=False)\n', (1432, 1471), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1205, 1226), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['fold'], {}), '(fold)\n', (1220, 1226), False, 'from sklearn.model_selection import StratifiedKFold\n')]
|
import pytest
from gpmap import GenotypePhenotypeMap
import numpy as np
@pytest.fixture(scope="module")
def gpvolve_gpm():
wildtype = "AAA"
genotypes = [
"AAA",
"AAB",
"ABA",
"BAA",
"ABB",
"BAB",
"BBA",
"BBB"
]
mutations = {
0: ["A", "B"],
1: ["A", "B"],
2: ["A", "B"],
}
phenotypes = np.random.rand(len(genotypes))
gpm = GenotypePhenotypeMap(wildtype=wildtype,
genotype=genotypes,
phenotype=phenotypes)
return gpm
@pytest.fixture(scope="module")
def number_data():
return {"max_float":np.finfo(float).max,
"tiny_float":np.finfo(float).tiny,
"max_int":np.iinfo(int).max}
@pytest.fixture(scope="module")
def pop_gen_scenarios():
scenarios = []
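    # Build the Cartesian grid: fitness values spanning 10^-10..10^10 crossed
    # with population sizes 10^0..10^9.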
for f1 in 10**np.arange(-10,11,1,dtype=float):
for f2 in 10**np.arange(-10,11,1,dtype=float):
for pop in 10**np.arange(0,10,dtype=int):
scenarios.append((f1,f2,pop))
return scenarios
|
[
"gpmap.GenotypePhenotypeMap",
"pytest.fixture",
"numpy.iinfo",
"numpy.finfo",
"numpy.arange"
] |
[((75, 105), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (89, 105), False, 'import pytest\n'), ((605, 635), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (619, 635), False, 'import pytest\n'), ((791, 821), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (805, 821), False, 'import pytest\n'), ((443, 529), 'gpmap.GenotypePhenotypeMap', 'GenotypePhenotypeMap', ([], {'wildtype': 'wildtype', 'genotype': 'genotypes', 'phenotype': 'phenotypes'}), '(wildtype=wildtype, genotype=genotypes, phenotype=\n phenotypes)\n', (463, 529), False, 'from gpmap import GenotypePhenotypeMap\n'), ((885, 919), 'numpy.arange', 'np.arange', (['(-10)', '(11)', '(1)'], {'dtype': 'float'}), '(-10, 11, 1, dtype=float)\n', (894, 919), True, 'import numpy as np\n'), ((680, 695), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (688, 695), True, 'import numpy as np\n'), ((726, 741), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (734, 741), True, 'import numpy as np\n'), ((770, 783), 'numpy.iinfo', 'np.iinfo', (['int'], {}), '(int)\n', (778, 783), True, 'import numpy as np\n'), ((940, 974), 'numpy.arange', 'np.arange', (['(-10)', '(11)', '(1)'], {'dtype': 'float'}), '(-10, 11, 1, dtype=float)\n', (949, 974), True, 'import numpy as np\n'), ((1000, 1027), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {'dtype': 'int'}), '(0, 10, dtype=int)\n', (1009, 1027), True, 'import numpy as np\n')]
|
import os
import cv2
import torch
import time
import ujson as json
import numpy as np
from tqdm import tqdm
from torch.cuda.amp import autocast, GradScaler
from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, \
deeplabv1_resnet50, deeplabv1_resnet101, enet_
from utils.datasets import StandardLaneDetectionDataset
from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose
from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix
def erfnet_tusimple(num_classes, scnn=False, pretrained_weights='erfnet_encoder_pretrained.pth.tar'):
# Define ERFNet for TuSimple (With only ImageNet pretraining)
return erfnet_resnet(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.3, dropout_2=0.3, flattened_size=4400, scnn=scnn)
def erfnet_culane(num_classes, scnn=False, pretrained_weights='erfnet_encoder_pretrained.pth.tar'):
# Define ERFNet for CULane (With only ImageNet pretraining)
return erfnet_resnet(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.1, dropout_2=0.1, flattened_size=4500, scnn=scnn)
def vgg16_tusimple(num_classes, scnn=False, pretrained_weights='pytorch-pretrained'):
# Define Vgg16 for Tusimple (With only ImageNet pretraining)
return deeplabv1_vgg16(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.1, flattened_size=6160, scnn=scnn)
def vgg16_culane(num_classes, scnn=False, pretrained_weights='pytorch-pretrained'):
# Define Vgg16 for CULane (With only ImageNet pretraining)
return deeplabv1_vgg16(pretrained_weights=pretrained_weights, num_classes=num_classes, num_lanes=num_classes - 1,
dropout_1=0.1, flattened_size=4500, scnn=scnn)
def resnet_tusimple(num_classes, backbone_name='resnet18', scnn=False):
# Define ResNets for Tusimple (With only ImageNet pretraining)
model_map = {
'resnet18': deeplabv1_resnet18,
'resnet34': deeplabv1_resnet34,
'resnet50': deeplabv1_resnet50,
'resnet101': deeplabv1_resnet101,
}
return model_map[backbone_name](pretrained=False, num_classes=num_classes, num_lanes=num_classes - 1,
channel_reduce=128, flattened_size=6160, scnn=scnn)
def resnet_culane(num_classes, backbone_name='resnet18', scnn=False):
# Define ResNets for CULane (With only ImageNet pretraining)
model_map = {
'resnet18': deeplabv1_resnet18,
'resnet34': deeplabv1_resnet34,
'resnet50': deeplabv1_resnet50,
'resnet101': deeplabv1_resnet101,
}
return model_map[backbone_name](pretrained=False, num_classes=num_classes, num_lanes=num_classes - 1,
channel_reduce=128, flattened_size=4500, scnn=scnn)
def enet_tusimple(num_classes, encoder_only, continue_from):
return enet_(num_classes=num_classes, num_lanes=num_classes - 1, dropout_1=0.01, dropout_2=0.1, flattened_size=4400,
encoder_only=encoder_only, pretrained_weights=continue_from if not encoder_only else None)
def enet_culane(num_classes, encoder_only, continue_from):
return enet_(num_classes=num_classes, num_lanes=num_classes - 1, dropout_1=0.01, dropout_2=0.1, flattened_size=4500,
encoder_only=encoder_only, pretrained_weights=continue_from if not encoder_only else None)
def init(batch_size, state, input_sizes, dataset, mean, std, base, workers=10):
# Return data_loaders
# depending on whether the state is
# 0: training
# 1: fast validation by mean IoU (validation set)
# 2: just testing (test set)
# 3: just testing (validation set)
# Transformations
# ! Can't use torchvision.Transforms.Compose
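    # (the custom Compose here applies each transform to image and label jointly)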
transforms_test = Compose(
[Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
ToTensor(),
Normalize(mean=mean, std=std)])
transforms_train = Compose(
[Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
RandomRotation(degrees=3),
ToTensor(),
Normalize(mean=mean, std=std)])
if state == 0:
data_set = StandardLaneDetectionDataset(root=base, image_set='train', transforms=transforms_train,
data_set=dataset)
data_loader = torch.utils.data.DataLoader(dataset=data_set, batch_size=batch_size,
num_workers=workers, shuffle=True)
validation_set = StandardLaneDetectionDataset(root=base, image_set='val',
transforms=transforms_test, data_set=dataset)
validation_loader = torch.utils.data.DataLoader(dataset=validation_set, batch_size=batch_size * 4,
num_workers=workers, shuffle=False)
return data_loader, validation_loader
elif state == 1 or state == 2 or state == 3:
image_sets = ['valfast', 'test', 'val']
data_set = StandardLaneDetectionDataset(root=base, image_set=image_sets[state - 1],
transforms=transforms_test, data_set=dataset)
data_loader = torch.utils.data.DataLoader(dataset=data_set, batch_size=batch_size,
num_workers=workers, shuffle=False)
return data_loader
else:
raise ValueError
def train_schedule(writer, loader, validation_loader, val_num_steps, device, criterion, net, optimizer, lr_scheduler,
num_epochs, is_mixed_precision, input_sizes, exp_name, num_classes):
# Should be the same as segmentation, given customized loss classes
net.train()
epoch = 0
running_loss = 0.0
loss_num_steps = int(len(loader) / 10) if len(loader) > 10 else 1
if is_mixed_precision:
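        # GradScaler scales the loss so fp16 gradients produced under
        # autocast do not underflow.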
scaler = GradScaler()
# Training
best_validation = 0
while epoch < num_epochs:
net.train()
time_now = time.time()
for i, data in enumerate(loader, 0):
inputs, labels, lane_existence = data
inputs, labels, lane_existence = inputs.to(device), labels.to(device), lane_existence.to(device)
optimizer.zero_grad()
with autocast(is_mixed_precision):
# To support intermediate losses for SAD
loss = criterion(inputs, labels, lane_existence, net, input_sizes[0])
if is_mixed_precision:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
lr_scheduler.step()
running_loss += loss.item()
current_step_num = int(epoch * len(loader) + i + 1)
# Record losses
if current_step_num % loss_num_steps == (loss_num_steps - 1):
print('[%d, %d] loss: %.4f' % (epoch + 1, i + 1, running_loss / loss_num_steps))
writer.add_scalar('training loss',
running_loss / loss_num_steps,
current_step_num)
running_loss = 0.0
# Record checkpoints
if validation_loader is not None:
if current_step_num % val_num_steps == (val_num_steps - 1) or \
current_step_num == num_epochs * len(loader):
# save_checkpoint(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,
# filename=exp_name + '_' + str(current_step_num) + '.pt')
test_pixel_accuracy, test_mIoU = fast_evaluate(loader=validation_loader, device=device, net=net,
num_classes=num_classes, output_size=input_sizes[0],
is_mixed_precision=is_mixed_precision)
writer.add_scalar('test pixel accuracy',
test_pixel_accuracy,
current_step_num)
writer.add_scalar('test mIoU',
test_mIoU,
current_step_num)
net.train()
# Record best model (straight to disk)
if test_mIoU > best_validation:
best_validation = test_mIoU
save_checkpoint(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,
filename=exp_name + '.pt')
epoch += 1
print('Epoch time: %.2fs' % (time.time() - time_now))
# For no-evaluation mode
if validation_loader is None:
save_checkpoint(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler, filename=exp_name + '.pt')
def fast_evaluate(net, device, loader, is_mixed_precision, output_size, num_classes):
# Fast evaluation (e.g. on the validation set) by pixel-wise mean IoU
net.eval()
conf_mat = ConfusionMatrix(num_classes)
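    # Accumulate one confusion matrix over the whole loader, then derive
    # global accuracy, per-class accuracy and per-class IoU from it.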
with torch.no_grad():
for image, target in tqdm(loader):
image, target = image.to(device), target.to(device)
with autocast(is_mixed_precision):
output = net(image)['out']
output = torch.nn.functional.interpolate(output, size=output_size, mode='bilinear', align_corners=True)
conf_mat.update(target.flatten(), output.argmax(1).flatten())
acc_global, acc, iu = conf_mat.compute()
print((
'global correct: {:.2f}\n'
'average row correct: {}\n'
'IoU: {}\n'
'mean IoU: {:.2f}'
).format(
acc_global.item() * 100,
['{:.2f}'.format(i) for i in (acc * 100).tolist()],
['{:.2f}'.format(i) for i in (iu * 100).tolist()],
iu.mean().item() * 100))
return acc_global.item() * 100, iu.mean().item() * 100
# Adapted from harryhan618/SCNN_Pytorch
def test_one_set(net, device, loader, is_mixed_precision, input_sizes, gap, ppl, thresh, dataset):
# Predict on 1 data_loader and save predictions for the official script
all_lanes = []
net.eval()
with torch.no_grad():
for images, filenames in tqdm(loader):
images = images.to(device)
with autocast(is_mixed_precision):
outputs = net(images)
prob_map = torch.nn.functional.interpolate(outputs['out'], size=input_sizes[0], mode='bilinear',
align_corners=True).softmax(dim=1)
existence = (outputs['lane'].sigmoid() > 0.5)
if dataset == 'tusimple': # At most 5 lanes
indices = (existence.sum(dim=1, keepdim=True) > 5).expand_as(existence) * \
(existence == existence.min(dim=1, keepdim=True).values)
existence[indices] = 0
# To CPU
prob_map = prob_map.cpu().numpy()
existence = existence.cpu().numpy()
# Get coordinates for lanes
for j in range(existence.shape[0]):
lane_coordinates = prob_to_lines(prob_map[j], existence[j], resize_shape=input_sizes[1],
gap=gap, ppl=ppl, thresh=thresh, dataset=dataset)
if dataset == 'culane':
# Save each lane to disk
dir_name = filenames[j][:filenames[j].rfind('/')]
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(filenames[j], "w") as f:
for lane in lane_coordinates:
if lane: # No printing for []
for (x, y) in lane:
print("{} {}".format(x, y), end=" ", file=f)
print(file=f)
elif dataset == 'tusimple':
# Save lanes to a single file
formatted = {
"h_samples": [160 + y * 10 for y in range(ppl)],
"lanes": lane_coordinates,
"run_time": 0,
"raw_file": filenames[j]
}
all_lanes.append(json.dumps(formatted))
else:
raise ValueError
if dataset == 'tusimple':
with open('./output/tusimple_pred.json', 'w') as f:
for lane in all_lanes:
print(lane, end="\n", file=f)
# Adapted from harryhan618/SCNN_Pytorch
# Note: tensor indices start at 0, while annotation coordinates start at 1
def get_lane(prob_map, gap, ppl, thresh, resize_shape=None, dataset='culane'):
"""
Arguments:
----------
prob_map: prob map for single lane, np array size (h, w)
resize_shape: reshape size target, (H, W)
Return:
----------
coords: x coords bottom up every gap px, 0 for non-exist, in resized shape
"""
if resize_shape is None:
resize_shape = prob_map.shape
h, w = prob_map.shape
H, W = resize_shape
coords = np.zeros(ppl)
for i in range(ppl):
if dataset == 'tusimple': # Annotation start at 10 pixel away from bottom
y = int(h - (ppl - i) * gap / H * h)
elif dataset == 'culane': # Annotation start at bottom
y = int(h - i * gap / H * h - 1) # Same as original SCNN code
else:
raise ValueError
if y < 0:
break
line = prob_map[y, :]
        idx = np.argmax(line)
        if line[idx] > thresh:
            coords[i] = int(idx / w * W)
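    # Require at least two valid sample points; otherwise discard the lane.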
if (coords > 0).sum() < 2:
coords = np.zeros(ppl)
return coords
# Adapted from harryhan618/SCNN_Pytorch
def prob_to_lines(seg_pred, exist, resize_shape=None, smooth=True, gap=20, ppl=None, thresh=0.3, dataset='culane'):
"""
Arguments:
----------
seg_pred: np.array size (num_classes, h, w)
resize_shape: reshape size target, (H, W)
exist: list of existence, e.g. [0, 1, 1, 0]
smooth: whether to smooth the probability or not
gap: y pixel gap for sampling
ppl: how many points for one lane
thresh: probability threshold
Return:
----------
coordinates: [x, y] list of lanes, e.g.: [ [[9, 569], [50, 549]] ,[[630, 569], [647, 549]] ]
"""
if resize_shape is None:
resize_shape = seg_pred.shape[1:] # seg_pred (num_classes, h, w)
_, h, w = seg_pred.shape
H, W = resize_shape
coordinates = []
if ppl is None:
ppl = round(H / 2 / gap)
for i in range(1, seg_pred.shape[0]):
prob_map = seg_pred[i, :, :]
if exist[i - 1]:
if smooth:
prob_map = cv2.blur(prob_map, (9, 9), borderType=cv2.BORDER_REPLICATE)
coords = get_lane(prob_map, gap, ppl, thresh, resize_shape, dataset=dataset)
if coords.sum() == 0:
continue
if dataset == 'tusimple': # Invalid sample points need to be included as negative value, e.g. -2
coordinates.append([coords[j] if coords[j] > 0 else -2 for j in range(ppl)])
elif dataset == 'culane':
coordinates.append([[coords[j], H - j * gap - 1] for j in range(ppl) if coords[j] > 0])
else:
raise ValueError
return coordinates
def build_lane_detection_model(args, num_classes):
scnn = True if args.method == 'scnn' else False
if args.dataset == 'tusimple' and args.backbone == 'erfnet':
net = erfnet_tusimple(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'culane' and args.backbone == 'erfnet':
net = erfnet_culane(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'culane' and args.backbone == 'vgg16':
net = vgg16_culane(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'tusimple' and args.backbone == 'vgg16':
net = vgg16_tusimple(num_classes=num_classes, scnn=scnn)
elif args.dataset == 'tusimple' and 'resnet' in args.backbone:
net = resnet_tusimple(num_classes=num_classes, scnn=scnn, backbone_name=args.backbone)
elif args.dataset == 'culane' and 'resnet' in args.backbone:
net = resnet_culane(num_classes=num_classes, scnn=scnn, backbone_name=args.backbone)
elif args.dataset == 'tusimple' and args.backbone == 'enet':
net = enet_tusimple(num_classes=num_classes, encoder_only=args.encoder_only,
continue_from=args.continue_from)
elif args.dataset == 'culane' and args.backbone == 'enet':
net = enet_culane(num_classes=num_classes, encoder_only=args.encoder_only,
continue_from=args.continue_from)
elif args.method == 'lstr':
pass
else:
raise ValueError
return net
|
[
"numpy.argmax",
"torchvision_models.segmentation.deeplabv1_vgg16",
"utils.all_utils_semseg.save_checkpoint",
"torch.no_grad",
"torchvision_models.segmentation.erfnet_resnet",
"torch.cuda.amp.autocast",
"torch.utils.data.DataLoader",
"os.path.exists",
"utils.all_utils_semseg.ConfusionMatrix",
"ujson.dumps",
"tqdm.tqdm",
"transforms.Resize",
"torchvision_models.segmentation.enet_",
"torch.cuda.amp.GradScaler",
"transforms.RandomRotation",
"utils.datasets.StandardLaneDetectionDataset",
"os.makedirs",
"numpy.zeros",
"cv2.blur",
"time.time",
"torch.nn.functional.interpolate",
"transforms.ToTensor",
"transforms.Normalize"
] |
[((706, 881), 'torchvision_models.segmentation.erfnet_resnet', 'erfnet_resnet', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.3)', 'dropout_2': '(0.3)', 'flattened_size': '(4400)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.3, dropout_2=0.3,\n flattened_size=4400, scnn=scnn)\n', (719, 881), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((1075, 1250), 'torchvision_models.segmentation.erfnet_resnet', 'erfnet_resnet', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.1)', 'dropout_2': '(0.1)', 'flattened_size': '(4500)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.1, dropout_2=0.1,\n flattened_size=4500, scnn=scnn)\n', (1088, 1250), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((1431, 1594), 'torchvision_models.segmentation.deeplabv1_vgg16', 'deeplabv1_vgg16', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.1)', 'flattened_size': '(6160)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.1, flattened_size=\n 6160, scnn=scnn)\n', (1446, 1594), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((1772, 1935), 'torchvision_models.segmentation.deeplabv1_vgg16', 'deeplabv1_vgg16', ([], {'pretrained_weights': 'pretrained_weights', 'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.1)', 'flattened_size': '(4500)', 'scnn': 'scnn'}), '(pretrained_weights=pretrained_weights, num_classes=\n num_classes, num_lanes=num_classes - 1, dropout_1=0.1, flattened_size=\n 4500, scnn=scnn)\n', (1787, 1935), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((3066, 3274), 'torchvision_models.segmentation.enet_', 'enet_', ([], {'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.01)', 'dropout_2': '(0.1)', 'flattened_size': '(4400)', 'encoder_only': 'encoder_only', 'pretrained_weights': '(continue_from if not encoder_only else None)'}), '(num_classes=num_classes, num_lanes=num_classes - 1, dropout_1=0.01,\n dropout_2=0.1, flattened_size=4400, encoder_only=encoder_only,\n pretrained_weights=continue_from if not encoder_only else None)\n', (3071, 3274), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((3357, 3565), 'torchvision_models.segmentation.enet_', 'enet_', ([], {'num_classes': 'num_classes', 'num_lanes': '(num_classes - 1)', 'dropout_1': '(0.01)', 'dropout_2': '(0.1)', 'flattened_size': '(4500)', 'encoder_only': 'encoder_only', 'pretrained_weights': '(continue_from if not encoder_only else None)'}), '(num_classes=num_classes, 
num_lanes=num_classes - 1, dropout_1=0.01,\n dropout_2=0.1, flattened_size=4500, encoder_only=encoder_only,\n pretrained_weights=continue_from if not encoder_only else None)\n', (3362, 3565), False, 'from torchvision_models.segmentation import erfnet_resnet, deeplabv1_vgg16, deeplabv1_resnet18, deeplabv1_resnet34, deeplabv1_resnet50, deeplabv1_resnet101, enet_\n'), ((9311, 9339), 'utils.all_utils_semseg.ConfusionMatrix', 'ConfusionMatrix', (['num_classes'], {}), '(num_classes)\n', (9326, 9339), False, 'from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix\n'), ((13473, 13486), 'numpy.zeros', 'np.zeros', (['ppl'], {}), '(ppl)\n', (13481, 13486), True, 'import numpy as np\n'), ((4343, 4453), 'utils.datasets.StandardLaneDetectionDataset', 'StandardLaneDetectionDataset', ([], {'root': 'base', 'image_set': '"""train"""', 'transforms': 'transforms_train', 'data_set': 'dataset'}), "(root=base, image_set='train', transforms=\n transforms_train, data_set=dataset)\n", (4371, 4453), False, 'from utils.datasets import StandardLaneDetectionDataset\n'), ((4519, 4626), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'data_set', 'batch_size': 'batch_size', 'num_workers': 'workers', 'shuffle': '(True)'}), '(dataset=data_set, batch_size=batch_size,\n num_workers=workers, shuffle=True)\n', (4546, 4626), False, 'import torch\n'), ((4698, 4805), 'utils.datasets.StandardLaneDetectionDataset', 'StandardLaneDetectionDataset', ([], {'root': 'base', 'image_set': '"""val"""', 'transforms': 'transforms_test', 'data_set': 'dataset'}), "(root=base, image_set='val', transforms=\n transforms_test, data_set=dataset)\n", (4726, 4805), False, 'from utils.datasets import StandardLaneDetectionDataset\n'), ((4883, 5001), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'validation_set', 'batch_size': '(batch_size * 4)', 'num_workers': 'workers', 'shuffle': '(False)'}), '(dataset=validation_set, batch_size=batch_size *\n 4, num_workers=workers, shuffle=False)\n', (4910, 5001), False, 'import torch\n'), ((6070, 6082), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {}), '()\n', (6080, 6082), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((6192, 6203), 'time.time', 'time.time', ([], {}), '()\n', (6201, 6203), False, 'import time\n'), ((9019, 9122), 'utils.all_utils_semseg.save_checkpoint', 'save_checkpoint', ([], {'net': 'net', 'optimizer': 'optimizer', 'lr_scheduler': 'lr_scheduler', 'filename': "(exp_name + '.pt')"}), "(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,\n filename=exp_name + '.pt')\n", (9034, 9122), False, 'from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix\n'), ((9349, 9364), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9362, 9364), False, 'import torch\n'), ((9395, 9407), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (9399, 9407), False, 'from tqdm import tqdm\n'), ((10457, 10472), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10470, 10472), False, 'import torch\n'), ((10507, 10519), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (10511, 10519), False, 'from tqdm import tqdm\n'), ((13905, 13920), 'numpy.argmax', 'np.argmax', (['line'], {}), '(line)\n', (13914, 13920), True, 'import numpy as np\n'), ((14039, 14052), 'numpy.zeros', 'np.zeros', (['ppl'], {}), '(ppl)\n', (14047, 14052), True, 'import numpy as np\n'), ((3979, 4039), 'transforms.Resize', 'Resize', ([], {'size_image': 'input_sizes[0]', 'size_label': 'input_sizes[0]'}), '(size_image=input_sizes[0], 
size_label=input_sizes[0])\n', (3985, 4039), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4050, 4060), 'transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4058, 4060), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4071, 4100), 'transforms.Normalize', 'Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (4080, 4100), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4144, 4204), 'transforms.Resize', 'Resize', ([], {'size_image': 'input_sizes[0]', 'size_label': 'input_sizes[0]'}), '(size_image=input_sizes[0], size_label=input_sizes[0])\n', (4150, 4204), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4215, 4240), 'transforms.RandomRotation', 'RandomRotation', ([], {'degrees': '(3)'}), '(degrees=3)\n', (4229, 4240), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4251, 4261), 'transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (4259, 4261), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((4272, 4301), 'transforms.Normalize', 'Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (4281, 4301), False, 'from transforms import ToTensor, Normalize, Resize, RandomRotation, Compose\n'), ((5217, 5339), 'utils.datasets.StandardLaneDetectionDataset', 'StandardLaneDetectionDataset', ([], {'root': 'base', 'image_set': 'image_sets[state - 1]', 'transforms': 'transforms_test', 'data_set': 'dataset'}), '(root=base, image_set=image_sets[state - 1],\n transforms=transforms_test, data_set=dataset)\n', (5245, 5339), False, 'from utils.datasets import StandardLaneDetectionDataset\n'), ((5406, 5514), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'data_set', 'batch_size': 'batch_size', 'num_workers': 'workers', 'shuffle': '(False)'}), '(dataset=data_set, batch_size=batch_size,\n num_workers=workers, shuffle=False)\n', (5433, 5514), False, 'import torch\n'), ((6460, 6488), 'torch.cuda.amp.autocast', 'autocast', (['is_mixed_precision'], {}), '(is_mixed_precision)\n', (6468, 6488), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((9490, 9518), 'torch.cuda.amp.autocast', 'autocast', (['is_mixed_precision'], {}), '(is_mixed_precision)\n', (9498, 9518), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((9588, 9686), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['output'], {'size': 'output_size', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(output, size=output_size, mode='bilinear',\n align_corners=True)\n", (9619, 9686), False, 'import torch\n'), ((10578, 10606), 'torch.cuda.amp.autocast', 'autocast', (['is_mixed_precision'], {}), '(is_mixed_precision)\n', (10586, 10606), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((15178, 15237), 'cv2.blur', 'cv2.blur', (['prob_map', '(9, 9)'], {'borderType': 'cv2.BORDER_REPLICATE'}), '(prob_map, (9, 9), borderType=cv2.BORDER_REPLICATE)\n', (15186, 15237), False, 'import cv2\n'), ((8922, 8933), 'time.time', 'time.time', ([], {}), '()\n', (8931, 8933), False, 'import time\n'), ((8725, 8828), 'utils.all_utils_semseg.save_checkpoint', 'save_checkpoint', ([], {'net': 'net', 'optimizer': 'optimizer', 'lr_scheduler': 'lr_scheduler', 'filename': "(exp_name + '.pt')"}), "(net=net, optimizer=optimizer, lr_scheduler=lr_scheduler,\n filename=exp_name + '.pt')\n", (8740, 8828), 
False, 'from utils.all_utils_semseg import save_checkpoint, ConfusionMatrix\n'), ((10673, 10783), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (["outputs['out']"], {'size': 'input_sizes[0]', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(outputs['out'], size=input_sizes[0], mode=\n 'bilinear', align_corners=True)\n", (10704, 10783), False, 'import torch\n'), ((11794, 11818), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (11808, 11818), False, 'import os\n'), ((11844, 11865), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (11855, 11865), False, 'import os\n'), ((12612, 12633), 'ujson.dumps', 'json.dumps', (['formatted'], {}), '(formatted)\n', (12622, 12633), True, 'import ujson as json\n')]
|
import os.path as osp
from cytokit.ops.op import CytokitOp, get_tf_config
from cytokit.miq import prediction
from cytokit import data as cytokit_data
from cytokit import io as cytokit_io
import tensorflow as tf
import numpy as np
import logging
DEFAULT_PATCH_SIZE = 84
DEFAULT_N_CLASSES = 11
logger = logging.getLogger(__name__)
class CytokitFocalPlaneSelector(CytokitOp):
"""Best focal plan selection operation
Args:
config: Cytokit configuration
patch_size: size of patches within image to estimate quality for; defaults to 84, same as default
in originating classifier project
n_classes: number of different quality strata to predict logits for; defaults to 11, same as default
in originating classifier project
save_tile: Indicates whether or not best-focus tiles (with single z-plane) should be saved; default false
Note:
See https://github.com/google/microscopeimagequality for more details on the classifier used by this operation
"""
def __init__(self, config, patch_size=DEFAULT_PATCH_SIZE, n_classes=DEFAULT_N_CLASSES, save_tile=False):
super().__init__(config)
self.mqiest = None
self.graph = None
params = config.best_focus_params
self.patch_size = params.get('patch_size', patch_size)
self.n_classes = params.get('n_classes', n_classes)
self.focus_cycle, self.focus_channel = config.get_channel_coordinates(params['channel'])
self.save_tile = params.get('save_tile', save_tile)
def initialize(self):
model_path = cytokit_data.initialize_best_focus_model()
self.graph = tf.Graph()
self.mqiest = prediction.ImageQualityClassifier(
model_path, self.patch_size, self.n_classes,
graph=self.graph, session_config=get_tf_config(self)
)
return self
def shutdown(self):
self.mqiest._sess.close()
return self
def _run(self, tile, **kwargs):
# Subset to 3D stack based on reference cycle and channel
# * tile should have shape (cycles, z, channel, height, width)
img = tile[self.focus_cycle, :, self.focus_channel, :, :]
nz = img.shape[0]
scores = []
classes = []
for iz in range(nz):
pred = self.mqiest.predict(img[iz])
# Append n_classes length array of class probabilities ordered from 0 to n_classes
# where 0 is the best possible quality and n_classes the worst
scores.append(pred.probabilities)
classes.append(np.argmax(pred.probabilities))
# Calculate scores as probability weighted sum of (reversed) class indexes, giving one score per z-plane
scores = np.dot(np.array(scores), np.arange(self.n_classes)[::-1])
assert len(scores) == nz, \
'Expecting {} scores but only {} were found (scores = {})'.format(nz, len(scores), scores)
# Reverse class designations
classes = self.n_classes - np.array(classes) - 1
# Determine best z plane as index with highest score
best_z = np.argmax(scores)
# Record and log classification information
self.record({'scores': scores, 'classes': classes, 'best_z': best_z})
logger.debug('Best focal plane: z = {} (score: {})'.format(best_z, scores.max()))
# Subset tile to best focal plane
best_focus_tile = tile[:, [best_z], :, :, :]
# Return best focus tile and other context
return best_focus_tile, best_z, scores
def save(self, tile_indices, output_dir, data):
region_index, tile_index, tx, ty = tile_indices
best_focus_tile, best_z, scores = data
path = cytokit_io.get_best_focus_img_path(region_index, tx, ty, best_z)
if self.save_tile:
cytokit_io.save_tile(osp.join(output_dir, path), best_focus_tile, config=self.config)
return [path]
|
[
"numpy.argmax",
"cytokit.data.initialize_best_focus_model",
"cytokit.io.get_best_focus_img_path",
"numpy.array",
"numpy.arange",
"tensorflow.Graph",
"cytokit.ops.op.get_tf_config",
"os.path.join",
"logging.getLogger"
] |
[((302, 329), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (319, 329), False, 'import logging\n'), ((1594, 1636), 'cytokit.data.initialize_best_focus_model', 'cytokit_data.initialize_best_focus_model', ([], {}), '()\n', (1634, 1636), True, 'from cytokit import data as cytokit_data\n'), ((1658, 1668), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1666, 1668), True, 'import tensorflow as tf\n'), ((3118, 3135), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3127, 3135), True, 'import numpy as np\n'), ((3723, 3787), 'cytokit.io.get_best_focus_img_path', 'cytokit_io.get_best_focus_img_path', (['region_index', 'tx', 'ty', 'best_z'], {}), '(region_index, tx, ty, best_z)\n', (3757, 3787), True, 'from cytokit import io as cytokit_io\n'), ((2754, 2770), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2762, 2770), True, 'import numpy as np\n'), ((1828, 1847), 'cytokit.ops.op.get_tf_config', 'get_tf_config', (['self'], {}), '(self)\n', (1841, 1847), False, 'from cytokit.ops.op import CytokitOp, get_tf_config\n'), ((2585, 2614), 'numpy.argmax', 'np.argmax', (['pred.probabilities'], {}), '(pred.probabilities)\n', (2594, 2614), True, 'import numpy as np\n'), ((2772, 2797), 'numpy.arange', 'np.arange', (['self.n_classes'], {}), '(self.n_classes)\n', (2781, 2797), True, 'import numpy as np\n'), ((3017, 3034), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (3025, 3034), True, 'import numpy as np\n'), ((3848, 3874), 'os.path.join', 'osp.join', (['output_dir', 'path'], {}), '(output_dir, path)\n', (3856, 3874), True, 'import os.path as osp\n')]
|
"""ops.syncretism.io model"""
__docformat__ = "numpy"
import configparser
import logging
from typing import Tuple
import pandas as pd
import requests
import yfinance as yf
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.options import yfinance_model
logger = logging.getLogger(__name__)
accepted_orders = [
"e_desc",
"e_asc",
"iv_desc",
"iv_asc",
"md_desc",
"md_asc",
"lp_desc",
"lp_asc",
"oi_asc",
"oi_desc",
"v_desc",
"v_asc",
]
@log_start_end(log=logger)
def get_historical_greeks(
ticker: str, expiry: str, chain_id: str, strike: float, put: bool
) -> pd.DataFrame:
"""Get histoical option greeks
Parameters
----------
ticker: str
Stock ticker
expiry: str
Option expiration date
chain_id: str
        OCC option symbol; overrides the other inputs when supplied
strike: float
Strike price to look for
put: bool
Is this a put option?
Returns
-------
df: pd.DataFrame
Dataframe containing historical greeks
"""
if not chain_id:
options = yfinance_model.get_option_chain(ticker, expiry)
if put:
options = options.puts
else:
options = options.calls
chain_id = options.loc[options.strike == strike, "contractSymbol"].values[0]
r = requests.get(f"https://api.syncretism.io/ops/historical/{chain_id}")
if r.status_code != 200:
console.print("Error in request.")
return pd.DataFrame()
history = r.json()
iv, delta, gamma, theta, rho, vega, premium, price, time = (
[],
[],
[],
[],
[],
[],
[],
[],
[],
)
for entry in history:
time.append(pd.to_datetime(entry["timestamp"], unit="s"))
iv.append(entry["impliedVolatility"])
gamma.append(entry["gamma"])
delta.append(entry["delta"])
theta.append(entry["theta"])
rho.append(entry["rho"])
vega.append(entry["vega"])
premium.append(entry["premium"])
price.append(entry["regularMarketPrice"])
data = {
"iv": iv,
"gamma": gamma,
"delta": delta,
"theta": theta,
"rho": rho,
"vega": vega,
"premium": premium,
"price": price,
}
df = pd.DataFrame(data, index=time)
return df
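# Usage sketch (hypothetical ticker/expiry; hits the live syncretism API):
#   greeks = get_historical_greeks("AAPL", "2023-01-20", chain_id="",
#                                  strike=150.0, put=False)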
@log_start_end(log=logger)
def get_screener_output(preset: str, presets_path: str) -> Tuple[pd.DataFrame, str]:
"""Screen options based on preset filters
Parameters
----------
preset: str
Preset file to screen for
presets_path: str
Path to preset folder
Returns
-------
pd.DataFrame:
DataFrame with screener data, or empty if errors
str:
String containing error message if supplied
"""
d_cols = {
"contractSymbol": "CS",
"symbol": "S",
"optType": "T",
"strike": "Str",
"expiration": "Exp ∨",
"impliedVolatility": "IV",
"lastPrice": "LP",
"bid": "B",
"ask": "A",
"volume": "V",
"openInterest": "OI",
"yield": "Y",
"monthlyyield": "MY",
"regularMarketPrice": "SMP",
"regularMarketDayLow": "SMDL",
"regularMarketDayHigh": "SMDH",
"lastTradeDate": "LU",
"lastCrawl": "LC",
"inTheMoney": "ITM",
"pChange": "PC",
"priceToBook": "PB",
}
preset_filter = configparser.RawConfigParser()
preset_filter.optionxform = str # type: ignore
preset_filter.read(presets_path + preset + ".ini")
d_filters = {k: v for k, v in dict(preset_filter["FILTER"]).items() if v}
s_filters = str(d_filters)
s_filters = (
s_filters.replace(": '", ": ")
.replace("',", ",")
.replace("'}", "}")
.replace("'", '"')
)
for order in accepted_orders:
s_filters = s_filters.replace(f" {order}", f' "{order}"')
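    # The replace chain above rewrites the configparser dict repr into the
    # JSON request body the syncretism API expects.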
errors = check_presets(d_filters)
if errors:
return pd.DataFrame(), errors
link = "https://api.syncretism.io/ops"
res = requests.get(
link, headers={"Content-type": "application/json"}, data=s_filters
)
# pylint:disable=no-else-return
if res.status_code == 200:
df_res = pd.DataFrame(res.json())
if df_res.empty:
return df_res, f"No options data found for preset: {preset}"
df_res = df_res.rename(columns=d_cols)[list(d_cols.values())[:17]]
df_res["Exp ∨"] = df_res["Exp ∨"].apply(
lambda x: pd.to_datetime(x, unit="s").strftime("%m-%d-%y")
)
df_res["LU"] = df_res["LU"].apply(
lambda x: pd.to_datetime(x, unit="s").strftime("%m-%d-%y")
)
df_res["Y"] = df_res["Y"].round(3)
df_res["MY"] = df_res["MY"].round(3)
return df_res, ""
else:
return pd.DataFrame(), f"Request Error: {res.status_code}"
# pylint: disable=eval-used
@log_start_end(log=logger)
def check_presets(preset_dict: dict) -> str:
"""Checks option screener preset values
Parameters
----------
preset_dict: dict
Defined presets from configparser
Returns
-------
error: str
String of all errors accumulated
"""
float_list = [
"min-iv",
"max-iv",
"min-oi",
"max-oi",
"min-strike",
"max-strike",
"min-volume",
"max-volume",
"min-voi",
"max-voi",
"min-diff",
"max-diff",
"min-ask-bid",
"max-ask-bid",
"min-exp",
"max-exp",
"min-price",
"max-price",
"min-price-20d",
"max-price-20d",
"min-volume-20d",
"max-volume-20d",
"min-iv-20d",
"max-iv-20d",
"min-delta-20d",
"max-delta-20d",
"min-gamma-20d",
"max-gamma-20d",
"min-theta-20d",
"max-theta-20d",
"min-vega-20d",
"max-vega-20d",
"min-rho-20d",
"max-rho-20d",
"min-price-100d",
"max-price-100d",
"min-volume-100d",
"max-volume-100d",
"min-iv-100d",
"max-iv-100d",
"min-delta-100d",
"max-delta-100d",
"min-gamma-100d",
"max-gamma-100d",
"min-theta-100d",
"max-theta-100d",
"min-vega-100d",
"max-vega-100d",
"min-rho-100d",
"max-rho-100d",
"min-sto",
"max-sto",
"min-yield",
"max-yield",
"min-myield",
"max-myield",
"min-delta",
"max-delta",
"min-gamma",
"max-gamma",
"min-theta",
"max-theta",
"min-vega",
"max-vega",
"min-cap",
"max-cap",
]
bool_list = ["active", "stock", "etf", "puts", "calls", "itm", "otm", "exclude"]
error = ""
for key, value in preset_dict.items():
if key in float_list:
try:
float(value)
if value.startswith("."):
error += f"{key} : {value} needs to be formatted with leading 0\n"
except Exception:
error += f"{key} : {value}, should be float\n"
elif key in bool_list:
if value not in ["true", "false"]:
error += f"{key} : {value}, Should be [true/false]\n"
elif key == "tickers":
for ticker in value.split(","):
try:
if yf.Ticker(eval(ticker)).info["regularMarketPrice"] is None:
error += f"{key} : {ticker} not found on yfinance"
except NameError:
error += f"{key} : {value}, {ticker} failed"
elif key == "limit":
try:
int(value)
except Exception:
error += f"{key} : {value} , should be integer\n"
elif key == "order-by":
if value.replace('"', "") not in accepted_orders:
error += f"{key} : {value} not accepted ordering\n"
if error:
logging.exception(error)
return error
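# Illustrative inputs for the checks above (not taken from any shipped preset):
#   check_presets({"min-iv": ".5"})  -> "min-iv : .5 needs to be formatted with leading 0\n"
#   check_presets({"puts": "yes"})   -> "puts : yes, Should be [true/false]\n"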
|
[
"pandas.DataFrame",
"logging.exception",
"configparser.RawConfigParser",
"logging.getLogger",
"gamestonk_terminal.stocks.options.yfinance_model.get_option_chain",
"pandas.to_datetime",
"requests.get",
"gamestonk_terminal.rich_config.console.print",
"gamestonk_terminal.decorators.log_start_end"
] |
[((353, 380), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (370, 380), False, 'import logging\n'), ((577, 602), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (590, 602), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((2466, 2491), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (2479, 2491), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((5066, 5091), 'gamestonk_terminal.decorators.log_start_end', 'log_start_end', ([], {'log': 'logger'}), '(log=logger)\n', (5079, 5091), False, 'from gamestonk_terminal.decorators import log_start_end\n'), ((1419, 1487), 'requests.get', 'requests.get', (['f"""https://api.syncretism.io/ops/historical/{chain_id}"""'], {}), "(f'https://api.syncretism.io/ops/historical/{chain_id}')\n", (1431, 1487), False, 'import requests\n'), ((2418, 2448), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'time'}), '(data, index=time)\n', (2430, 2448), True, 'import pandas as pd\n'), ((3565, 3595), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (3593, 3595), False, 'import configparser\n'), ((4207, 4292), 'requests.get', 'requests.get', (['link'], {'headers': "{'Content-type': 'application/json'}", 'data': 's_filters'}), "(link, headers={'Content-type': 'application/json'}, data=s_filters\n )\n", (4219, 4292), False, 'import requests\n'), ((1174, 1221), 'gamestonk_terminal.stocks.options.yfinance_model.get_option_chain', 'yfinance_model.get_option_chain', (['ticker', 'expiry'], {}), '(ticker, expiry)\n', (1205, 1221), False, 'from gamestonk_terminal.stocks.options import yfinance_model\n'), ((1526, 1560), 'gamestonk_terminal.rich_config.console.print', 'console.print', (['"""Error in request."""'], {}), "('Error in request.')\n", (1539, 1560), False, 'from gamestonk_terminal.rich_config import console\n'), ((1576, 1590), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1588, 1590), True, 'import pandas as pd\n'), ((8178, 8202), 'logging.exception', 'logging.exception', (['error'], {}), '(error)\n', (8195, 8202), False, 'import logging\n'), ((1842, 1886), 'pandas.to_datetime', 'pd.to_datetime', (["entry['timestamp']"], {'unit': '"""s"""'}), "(entry['timestamp'], unit='s')\n", (1856, 1886), True, 'import pandas as pd\n'), ((4129, 4143), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4141, 4143), True, 'import pandas as pd\n'), ((4981, 4995), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4993, 4995), True, 'import pandas as pd\n'), ((4658, 4685), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'unit': '"""s"""'}), "(x, unit='s')\n", (4672, 4685), True, 'import pandas as pd\n'), ((4782, 4809), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'unit': '"""s"""'}), "(x, unit='s')\n", (4796, 4809), True, 'import pandas as pd\n')]
|
from django.conf.urls import url
from . import views
app_name = 'code'
urlpatterns = [
    url(r'^(?P<pk>[0-9]+)/(?P<question>[0-9]+)/$', views.submission, name='submission'),
]
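# With app_name = 'code', the route above can be reversed as, e.g.
# reverse('code:submission', args=[5, 3]); the resulting URL prefix depends on
# how this URLconf is included in the project's root urls.py (an assumption here).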
|
[
"django.conf.urls.url"
] |
[((94, 181), 'django.conf.urls.url', 'url', (['"""^(?P<pk>[0-9]+)/(?P<question>[0-9]+)/$"""', 'views.submission'], {'name': '"""submission"""'}), "('^(?P<pk>[0-9]+)/(?P<question>[0-9]+)/$', views.submission, name=\n 'submission')\n", (97, 181), False, 'from django.conf.urls import url\n')]
|
import torch
import numpy as np
class Uniform_Buffer:
def __init__(self, buffer_size, state_shape, action_shape, device, mix=1):
self._n = 0
self._p = 0
self.mix = mix
self.buffer_size = buffer_size
self.total_size = mix * buffer_size
self.states = torch.empty(
(self.total_size, *state_shape), dtype=torch.float, device=device)
self.actions = torch.empty(
(self.total_size, *action_shape), dtype=torch.float, device=device)
self.rewards = torch.empty(
(self.total_size, 1), dtype=torch.float, device=device)
self.dones = torch.empty(
(self.total_size, 1), dtype=torch.float, device=device)
self.log_pis = torch.empty(
(self.total_size, 1), dtype=torch.float, device=device)
self.next_states = torch.empty(
(self.total_size, *state_shape), dtype=torch.float, device=device)
def append(self, state, action, reward, done, log_pi, next_state):
self.states[self._p].copy_(torch.from_numpy(state))
self.actions[self._p].copy_(torch.from_numpy(action))
self.rewards[self._p] = float(reward)
self.dones[self._p] = float(done)
self.log_pis[self._p] = float(log_pi)
self.next_states[self._p].copy_(torch.from_numpy(next_state))
self._p = (self._p + 1) % self.total_size
self._n = min(self._n + 1, self.total_size)
def get(self):
assert self._p % self.buffer_size == 0
start = (self._p - self.buffer_size) % self.total_size
idxes = slice(start, start + self.buffer_size)
return (
self.states[idxes],
self.actions[idxes],
self.rewards[idxes],
self.dones[idxes],
self.log_pis[idxes],
self.next_states[idxes]
)
def sample(self, batch_size):
assert self._p % self.buffer_size == 0
idxes = np.random.randint(low=0, high=self._n, size=batch_size)
return (
self.states[idxes],
self.actions[idxes],
self.rewards[idxes],
self.dones[idxes],
self.log_pis[idxes],
self.next_states[idxes]
)
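# Minimal usage sketch; the buffer size, shapes and mix below are illustrative
# assumptions, not values prescribed by this module.
if __name__ == "__main__":
    buf = Uniform_Buffer(buffer_size=4, state_shape=(3,), action_shape=(1,),
                         device=torch.device("cpu"), mix=2)
    s = np.zeros(3, dtype=np.float32)
    a = np.zeros(1, dtype=np.float32)
    for _ in range(8):  # one full pass over the mixed buffer (mix * buffer_size)
        buf.append(s, a, reward=0.0, done=False, log_pi=0.0, next_state=s)
    states, actions, rewards, dones, log_pis, next_states = buf.sample(batch_size=2)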
|
[
"numpy.random.randint",
"torch.empty",
"torch.from_numpy"
] |
[((305, 383), 'torch.empty', 'torch.empty', (['(self.total_size, *state_shape)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, *state_shape), dtype=torch.float, device=device)\n', (316, 383), False, 'import torch\n'), ((420, 499), 'torch.empty', 'torch.empty', (['(self.total_size, *action_shape)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, *action_shape), dtype=torch.float, device=device)\n', (431, 499), False, 'import torch\n'), ((536, 603), 'torch.empty', 'torch.empty', (['(self.total_size, 1)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, 1), dtype=torch.float, device=device)\n', (547, 603), False, 'import torch\n'), ((638, 705), 'torch.empty', 'torch.empty', (['(self.total_size, 1)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, 1), dtype=torch.float, device=device)\n', (649, 705), False, 'import torch\n'), ((742, 809), 'torch.empty', 'torch.empty', (['(self.total_size, 1)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, 1), dtype=torch.float, device=device)\n', (753, 809), False, 'import torch\n'), ((850, 928), 'torch.empty', 'torch.empty', (['(self.total_size, *state_shape)'], {'dtype': 'torch.float', 'device': 'device'}), '((self.total_size, *state_shape), dtype=torch.float, device=device)\n', (861, 928), False, 'import torch\n'), ((1951, 2006), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self._n', 'size': 'batch_size'}), '(low=0, high=self._n, size=batch_size)\n', (1968, 2006), True, 'import numpy as np\n'), ((1049, 1072), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (1065, 1072), False, 'import torch\n'), ((1110, 1134), 'torch.from_numpy', 'torch.from_numpy', (['action'], {}), '(action)\n', (1126, 1134), False, 'import torch\n'), ((1310, 1338), 'torch.from_numpy', 'torch.from_numpy', (['next_state'], {}), '(next_state)\n', (1326, 1338), False, 'import torch\n')]
|
"""
Linear State Space Assembler
"""
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.linear.utils.ss_interface as ss_interface
import sharpy.utils.settings as settings
import sharpy.utils.h5utils as h5
import warnings
@solver
class LinearAssembler(BaseSolver):
r"""
Warnings:
Under development - please advise of new features and bugs!
Creates a workspace containing the different linear elements of the state-space.
The user specifies which elements to build sequentially via the ``linear_system`` setting.
The most common uses will be:
* Aerodynamic: :class:`sharpy.linear.assembler.LinearUVLM` solver
* Structural: :class:`sharpy.linear.assembler.LinearBeam` solver
* Aeroelastic: :class:`sharpy.linear.assembler.LinearAeroelastic` solver
The solver enables to load a user specific assembly of a state-space by means of the ``LinearCustom`` block.
    See the classes in :mod:`sharpy.linear.assembler` for a detailed description of each of the state-space assemblies.
Upon assembly of the linear system, the data structure ``data.linear`` will be created. The :class:`.Linear`
contains the state-space as an attribute. This state space will be the one employed by postprocessors.
Important: running the linear routines requires information on the tangent mass, stiffness and gyroscopic
structural matrices therefore the solver :class:`solvers.modal.Modal` must have been run prior to linearisation.
In addition, if the problem includes rigid body velocities, at least one
timestep of :class:`solvers.DynamicCoupled` must have run such that the rigid body velocity is included.
Example:
The typical ``flow`` setting used prior to using this solver for an aeroelastic simulation with rigid body dynamics
will be similar to:
>>> flow = ['BeamLoader',
>>> 'AerogridLoader',
>>> 'StaticTrim',
>>> 'DynamicCoupled', # a single time step will suffice
>>> 'Modal',
>>> 'LinearAssembler']
"""
solver_id = 'LinearAssembler'
solver_classification = 'Linear'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['linear_system'] = 'str'
settings_default['linear_system'] = None
settings_description['linear_system'] = 'Name of chosen state space assembly type'
settings_types['linear_system_settings'] = 'dict'
settings_default['linear_system_settings'] = dict()
settings_description['linear_system_settings'] = 'Settings for the desired state space assembler'
settings_types['linearisation_tstep'] = 'int'
settings_default['linearisation_tstep'] = -1
settings_description['linearisation_tstep'] = 'Chosen linearisation time step number from available time steps'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.settings = dict()
self.data = None
def initialise(self, data, custom_settings=None):
self.data = data
if custom_settings:
self.data.settings[self.solver_id] = custom_settings
self.settings = self.data.settings[self.solver_id]
else:
self.settings = data.settings[self.solver_id]
settings.to_custom_types(self.settings, self.settings_types, self.settings_default)
# Get consistent linearisation timestep
ii_step = self.settings['linearisation_tstep']
if type(ii_step) != int:
ii_step = self.settings['linearisation_tstep'].value
tsstruct0 = data.structure.timestep_info[ii_step]
tsaero0 = data.aero.timestep_info[ii_step]
# Create data.linear
self.data.linear = Linear(tsaero0, tsstruct0)
# Load available systems
import sharpy.linear.assembler
# Load roms
import sharpy.rom
lsys = ss_interface.initialise_system(self.settings['linear_system'])
lsys.initialise(data)
self.data.linear.linear_system = lsys
def run(self):
self.data.linear.ss = self.data.linear.linear_system.assemble()
return self.data
class Linear(object):
"""
This is the class responsible for the transfer of information and can be accessed as ``data.linear``. It stores
as class attributes the following classes that describe the linearised problem.
Attributes:
ss (sharpy.linear.src.libss.ss): State-space system
linear_system (sharpy.linear.utils.ss_interface.BaseElement): Assemble system properties
tsaero0 (sharpy.utils.datastructures.AeroTimeStepInfo): Linearisation aerodynamic timestep
tsstruct0 (sharpy.utils.datastructures.StructTimeStepInfo): Linearisation structural timestep
timestep_info (list): Linear time steps
"""
def __init__(self, tsaero0, tsstruct0):
self.linear_system = None
self.ss = None
self.tsaero0 = tsaero0
self.tsstruct0 = tsstruct0
self.timestep_info = []
self.uvlm = None
self.beam = None
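# Sketch of downstream access (assumed usage, mirroring the docstring above):
# once LinearAssembler.run() has executed, postprocessors can read the
# assembled system via e.g. data.linear.ss.A and data.linear.ss.dt.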
if __name__ == "__main__":
print('Testing the assembly of the pendulum system')
test = 'aeroelastic'
if test == 'beam':
data = h5.readh5('/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/flexible_beam_static.data.h5').data
beam_settings = {'modal_projection': False,
'inout_coords': 'nodes',
'discrete_time': True,
'newmark_damp': 0.15*1,
'discr_method': 'newmark',
'dt': 0.001,
'proj_modes': 'undamped',
'use_euler': True,
'num_modes': 13,
'remove_dofs': ['V'],
'gravity': 'on'}
custom_settings = {'linearisation_tstep': -1,
'flow': ['LinearBeam'],
'LinearBeam': beam_settings}
linear_space = LinearAssembler()
linear_space.initialise(data, custom_settings)
data = linear_space.run()
# import sharpy.solvers.lindynamicsim as lindynsim
# linear_sim = lindynsim.LinearDynamicSimulation()
# linear_sim.initialise(data)
import numpy as np
eigs = np.linalg.eig(data.linear.ss.A)
eigs_ct = np.log(eigs[0]) / data.linear.ss.dt
order = np.argsort(eigs_ct.real)[::-1]
eigs_ct = eigs_ct[order]
print('End')
elif test == 'uvlm':
data = h5.readh5('/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/sears_uinf0050_AR100_M8N12Ms10_KR15_sp0.data.h5').data
uvlm_settings = {'dt': 0.001,
'integr_order': 2,
'density': 1.225,
'remove_predictor': False,
'use_sparse': False,
'ScalingDict': {'length': 1.,
'speed': 1.,
'density': 1.},
'remove_inputs': ['u_gust']}
custom_settings = {'linearisation_tstep': -1,
'flow': ['LinearUVLM'],
'LinearUVLM': uvlm_settings}
linear_space = LinearAssembler()
linear_space.initialise(data, custom_settings)
data = linear_space.run()
elif test=='aeroelastic':
data = h5.readh5('/home/ng213/sharpy_cases/ToSORT_FlyingWings/01_RichardsBFF/cases/horten/horten.data.h5').data
custom_settings = {'flow': ['LinearAeroelastic'],
'LinearAeroelastic': {
'beam_settings': {'modal_projection': False,
'inout_coords': 'nodes',
'discrete_time': True,
'newmark_damp': 0.5,
'discr_method': 'newmark',
'dt': 0.001,
'proj_modes': 'undamped',
'use_euler': 'off',
'num_modes': 40,
'print_info': 'on',
'gravity': 'on',
'remove_dofs': []},
'aero_settings': {'dt': 0.001,
'integr_order': 2,
'density': 1.225*0.0000000001,
'remove_predictor': False,
'use_sparse': True,
'rigid_body_motion': True,
'use_euler': False,
'remove_inputs': ['u_gust']},
'rigid_body_motion': True}}
linear_space = LinearAssembler()
linear_space.initialise(data, custom_settings)
data = linear_space.run()
print('End')
|
[
"numpy.log",
"sharpy.utils.settings.SettingsTable",
"sharpy.utils.settings.to_custom_types",
"numpy.linalg.eig",
"sharpy.linear.utils.ss_interface.initialise_system",
"numpy.argsort",
"sharpy.utils.h5utils.readh5"
] |
[((2890, 2914), 'sharpy.utils.settings.SettingsTable', 'settings.SettingsTable', ([], {}), '()\n', (2912, 2914), True, 'import sharpy.utils.settings as settings\n'), ((3441, 3529), 'sharpy.utils.settings.to_custom_types', 'settings.to_custom_types', (['self.settings', 'self.settings_types', 'self.settings_default'], {}), '(self.settings, self.settings_types, self.\n settings_default)\n', (3465, 3529), True, 'import sharpy.utils.settings as settings\n'), ((4057, 4119), 'sharpy.linear.utils.ss_interface.initialise_system', 'ss_interface.initialise_system', (["self.settings['linear_system']"], {}), "(self.settings['linear_system'])\n", (4087, 4119), True, 'import sharpy.linear.utils.ss_interface as ss_interface\n'), ((6488, 6519), 'numpy.linalg.eig', 'np.linalg.eig', (['data.linear.ss.A'], {}), '(data.linear.ss.A)\n', (6501, 6519), True, 'import numpy as np\n'), ((5377, 5483), 'sharpy.utils.h5utils.readh5', 'h5.readh5', (['"""/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/flexible_beam_static.data.h5"""'], {}), "(\n '/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/flexible_beam_static.data.h5'\n )\n", (5386, 5483), True, 'import sharpy.utils.h5utils as h5\n'), ((6538, 6553), 'numpy.log', 'np.log', (['eigs[0]'], {}), '(eigs[0])\n', (6544, 6553), True, 'import numpy as np\n'), ((6590, 6614), 'numpy.argsort', 'np.argsort', (['eigs_ct.real'], {}), '(eigs_ct.real)\n', (6600, 6614), True, 'import numpy as np\n'), ((6716, 6841), 'sharpy.utils.h5utils.readh5', 'h5.readh5', (['"""/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/sears_uinf0050_AR100_M8N12Ms10_KR15_sp0.data.h5"""'], {}), "(\n '/home/ng213/sharpy_cases/CC_DevTests/01_LinearAssembly/sears_uinf0050_AR100_M8N12Ms10_KR15_sp0.data.h5'\n )\n", (6725, 6841), True, 'import sharpy.utils.h5utils as h5\n'), ((7632, 7741), 'sharpy.utils.h5utils.readh5', 'h5.readh5', (['"""/home/ng213/sharpy_cases/ToSORT_FlyingWings/01_RichardsBFF/cases/horten/horten.data.h5"""'], {}), "(\n '/home/ng213/sharpy_cases/ToSORT_FlyingWings/01_RichardsBFF/cases/horten/horten.data.h5'\n )\n", (7641, 7741), True, 'import sharpy.utils.h5utils as h5\n')]
|
#!/usr/bin/env python3
# Qualcomm Sahara / Firehose Client (c) B.Kerler 2018-2020.
# Licensed under MIT License
"""
Usage:
edl.py -h | --help
edl.py [--vid=vid] [--pid=pid]
edl.py [--loader=filename] [--memory=memtype]
edl.py [--debugmode]
edl.py [--gpt-num-part-entries=number] [--gpt-part-entry-size=number] [--gpt-part-entry-start-lba=number]
edl.py [--memory=memtype] [--skipstorageinit] [--maxpayload=bytes] [--sectorsize==bytes]
edl.py server [--tcpport=portnumber] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py printgpt [--memory=memtype] [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py gpt <directory> [--memory=memtype] [--lun=lun] [--genxml] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py r <partitionname> <filename> [--memory=memtype] [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py rl <directory> [--memory=memtype] [--lun=lun] [--skip=partnames] [--genxml] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py rf <filename> [--memory=memtype] [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py rs <start_sector> <sectors> <filename> [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py w <partitionname> <filename> [--memory=memtype] [--lun=lun] [--skipwrite] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py wl <directory> [--memory=memtype] [--lun=lun] [--skip=partnames] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py wf <filename> [--memory=memtype] [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py ws <start_sector> <filename> [--memory=memtype] [--lun=lun] [--skipwrite] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py e <partitionname> [--memory=memtype] [--skipwrite] [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py es <start_sector> <sectors> [--memory=memtype] [--lun=lun] [--skipwrite] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py footer <filename> [--memory=memtype] [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py peek <offset> <length> <filename> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py peekhex <offset> <length> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py peekdword <offset> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py peekqword <offset> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py memtbl <filename> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py poke <offset> <filename> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py pokehex <offset> <data> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py pokedword <offset> <data> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py pokeqword <offset> <data> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py memcpy <srcoffset> <dstoffset> <size> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py secureboot [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py pbl <filename> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py qfp <filename> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py getstorageinfo [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py setbootablestoragedrive <lun> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py send <command> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py xml <xmlfile> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py rawxml <xmlstring> [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid] [--prjid=projid]
edl.py reset [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py nop [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
edl.py oemunlock [--memory=memtype] [--lun=lun] [--loader=filename] [--debugmode] [--vid=vid] [--pid=pid]
Description:
server [--tcpport=portnumber] # Run tcp/ip server
printgpt [--memory=memtype] [--lun=lun] # Print GPT Table information
gpt <directory> [--memory=memtype] [--lun=lun] # Save gpt table to given directory
r <partitionname> <filename> [--memory=memtype] [--lun=lun] # Read flash to filename
rl <directory> [--memory=memtype] [--lun=lun] [--skip=partname] # Read all partitions from flash to a directory
rf <filename> [--memory=memtype] [--lun=lun] # Read whole flash to file
rs <start_sector> <sectors> <filename> [--lun=lun] # Read sectors starting at start_sector to filename
w <partitionname> <filename> [--memory=memtype] [--lun=lun] [--skipwrite] # Write filename to partition to flash
wl <directory> [--memory=memtype] [--lun=lun] # Write all files from directory to flash
wf <filename> [--memory=memtype] [--lun=lun] # Write whole filename to flash
ws <start_sector> <filename> [--memory=memtype] [--lun=lun] [--skipwrite] # Write filename to flash at start_sector
e <partitionname> [--memory=memtype] [--skipwrite] [--lun=lun] # Erase partition from flash
es <start_sector> <sectors> [--memory=memtype] [--lun=lun] [--skipwrite] # Erase sectors at start_sector from flash
footer <filename> [--memory=memtype] [--lun=lun] # Read crypto footer from flash
peek <offset> <length> <filename> # Dump memory at offset with given length to filename
peekhex <offset> <length> # Dump memory at offset and given length as hex string
peekdword <offset> # Dump DWORD at memory offset
peekqword <offset> # Dump QWORD at memory offset
memtbl <filename> # Dump memory table to file
poke <offset> <filename> # Write filename to memory at offset to memory
pokehex <offset> <data> # Write hex string data at offset to memory
pokedword <offset> <data> # Write DWORD to memory at offset
pokeqword <offset> <data> # Write QWORD to memory at offset
memcpy <srcoffset> <dstoffset> <size> # Copy memory from srcoffset with given size to dstoffset
secureboot # Print secureboot fields from qfprom fuses
pbl <filename> # Dump primary bootloader to filename
qfp <filename> # Dump QFPROM fuses to filename
getstorageinfo # Print storage info in firehose mode
setbootablestoragedrive <lun> # Change bootable storage drive to lun number
send <command> # Send firehose command
xml <xmlfile> # Send firehose xml file
rawxml <xmlstring> # Send firehose xml raw string
reset # Send firehose reset command
nop # Send firehose nop command
Options:
--loader=filename Use specific EDL loader, disable autodetection [default: None]
--vid=vid Set usb vendor id used for EDL [default: 0x05c6]
--pid=pid Set usb product id used for EDL [default: 0x9008]
--lun=lun Set lun to read/write from (UFS memory only) [default: None]
--maxpayload=bytes Set the maximum payload for EDL [default: 0x100000]
--sectorsize=bytes Set default sector size [default: 0x200]
--memory=memtype Set memory type (EMMC or UFS) [default: eMMC]
--skipwrite Do not allow any writes to flash (simulate only)
--skipstorageinit Skip storage initialisation
--debugmode Enable verbose mode
--gpt-num-part-entries=number Set GPT entry count [default: 0]
--gpt-part-entry-size=number Set GPT entry size [default: 0]
--gpt-part-entry-start-lba=number Set GPT entry start lba sector [default: 0]
    --tcpport=portnumber              Set port for tcp server [default: 1340]
--skip=partnames Skip reading partition with names "partname1,partname2,etc."
--genxml Generate rawprogram[lun].xml
--prjid=projid Enable prjid mode
"""
from docopt import docopt
args = docopt(__doc__, version='EDL 2.1')
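# docopt builds the args dict directly from the usage text above; e.g.
# "edl.py printgpt --memory=ufs" yields args["printgpt"] == True and
# args["--memory"] == "ufs", with option defaults taken from the
# [default: ...] tags in the Options section.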
import time
from Library.utils import *
from Library.usblib import usb_class
from Library.sahara import qualcomm_sahara
from Library.firehose import qualcomm_firehose
from Library.streaming import qualcomm_streaming
from struct import unpack, pack
from Library.xmlparser import xmlparser
logger = logging.getLogger(__name__)
print("Qualcomm Sahara / Firehose Client (c) B.Kerler 2018-2020.")
msmids = {
0x009440E1: "2432", # 7be49b72f9e4337223ccb84d6eccca4e61ce16e3602ac2008cb18b75babe6d09
0x006220E1: "MSM7227A",
0x009680E1: "APQ8009",
0x007060E1: "APQ8016",
0x008040E1: "APQ8026",
0x000550E1: "APQ8017",
0x0090C0E1: "APQ8036",
0x0090F0E1: "APQ8037",
0x0090D0E1: "APQ8039",
0x009770E1: "APQ8052",
0x000660E1: "APQ8053",
0x009F00E1: "APQ8056",
0x007190E1: "APQ8064",
0x009D00E1: "APQ8076",
0x009000E1: "APQ8084",
0x009300E1: "APQ8092",
0x000620E1: "APQ8098",
0x008110E1: "MSM8210",
0x008140E1: "MSM8212",
0x008120E1: "MSM8610",
0x008150E1: "MSM8612",
0x008010E1: "MSM8626",
0x000940E1: "MSM8905",
0x009600E1: "MSM8909",
0x007050E1: "MSM8916",
0x000560E1: "MSM8917",
0x000860E1: "MSM8920",
0x008050E1: "MSM8926",
0x009180E1: "MSM8928",
0x0091B0E1: "MSM8929",
0x007210E1: "MSM8930",
0x0072C0E1: "MSM8930",
# 0x000000E1: "MSM8936",
0x0004F0E1: "MSM8937",
0x0090B0E1: "MSM8939", # 7be49b72f9e4337223ccb84d6eccca4e61ce16e3602ac2008cb18b75babe6d09
0x0006B0E1: "MSM8940",
0x009720E1: "MSM8952", # 0x9B00E1
0x000460E1: "MSM8953",
0x009B00E1: "MSM8956",
0x009100E1: "MSM8962",
0x007B00E1: "MSM8974",
0x007B40E1: "MSM8974AB",
0x007B80E1: "MSM8974Pro",
0x007BC0E1: "MSM8974ABv3",
0x006B10E1: "MSM8974AC",
0x009900E1: "MSM8976",
0x009690E1: "MSM8992",
0x009400E1: "MSM8994",
0x009470E1: "MSM8996",
0x0006F0E1: "MSM8996AU",
0x1006F0E1: "MSM8996AU",
0x4006F0E1: "MSM8996AU",
0x0005F0E1: "MSM8996Pro",
0x0005E0E1: "MSM8998",
0x0094B0E1: "MSM9055",
0x009730E1: "MDM9206",
0x000480E1: "MDM9207",
0x0004A0E1: "MDM9607",
0x007F50E1: "MDM9x25",
0x009500E1: "MDM9x40",
0x009540E1: "MDM9x45",
0x009210E1: "MDM9x35",
0x000320E1: "MDM9250",
0x000340E1: "MDM9255",
0x000390E1: "MDM9350",
0x0003A0E1: "MDM9650",
0x0003B0E1: "MDM9655",
0x0007D0E1: "MDM9x60",
0x0007F0E1: "MDM9x65",
0x008090E1: "MDM9916",
0x0080B0E1: "MDM9955",
0x000BE0E1: "SDM429",
0x000BF0E1: "SDM439",
0x0009A0E1: "SDM450",
0x000AC0E1: "SDM630", # 0x30070x00 #afca69d4235117e5bfc21467068b20df85e0115d7413d5821883a6d244961581
0x000BA0E1: "SDM632",
0x000BB0E1: "SDA632",
0x000CC0E1: "SDM636",
0x0008C0E1: "SDM660", # 0x30060000
0x000910E1: "SDM670", # 0x60040100
0x000930E1: "SDA670", # 0x60040100
# 0x000930E1: "SDA835", # 0x30020000 => HW_ID1 3002000000290022
0x0008B0E1: "SDM845", # 0x60000100 => HW_ID1 6000000000010000
0x000A50E1: "SDM855"
}
infotbl = {
"2432": [[], [0x01900000, 0x100000], []],
"APQ8009": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"APQ8016": [[0x100000, 0x18000], [0x0005C000, 0x1000], [0x200000, 0x24000]],
"APQ8017": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"APQ8036": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"APQ8037": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"APQ8039": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"APQ8053": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"APQ8056": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"APQ8076": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"APQ8084": [[0xFC010000, 0x18000], [0xFC4B8000, 0x60F0], [0x200000, 0x24000]],
"APQ8092": [[0xFC010000, 0x18000], [0xFC4B8000, 0x60F0], [0x200000, 0x24000]],
"APQ8098": [[0x300000, 0x3c000], [0x780000, 0x10000], []],
"MSM7227A": [[], [], []],
"MSM8210": [[], [], []],
"MSM8212": [[], [], []],
"MSM8905": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"MSM8909": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"MSM8916": [[0x100000, 0x18000], [0x0005C000, 0x1000], [0x200000, 0x24000]],
"MSM8917": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM8920": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM8926": [[], [], []],
"MSM8928": [[], [], []],
"MSM8929": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"MSM8930": [[0x100000, 0x18000], [0x700000, 0x1000], []],
"MSM8936": [[0x100000, 0x18000], [0x700000, 0x1000], []],
"MSM8937": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM8939": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"MSM8940": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM8952": [[0x100000, 0x18000], [0x00058000, 0x1000], [0x200000, 0x24000]],
"MSM8953": [[0x100000, 0x18000], [0xA0000, 0x1000], [0x200000, 0x24000]],
"MSM8956": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM8974": [[0xFC010000, 0x18000], [0xFC4B8000, 0x60F0], [0x200000, 0x24000]],
"MSM8974Pro": [[0xFC010000, 0x18000], [0xFC4B8000, 0x60F0], [0x200000, 0x24000]],
"MSM8974AB": [[0xFC010000, 0x18000], [0xFC4B8000, 0x60F0], [0x200000, 0x24000]],
"MSM8974ABv3": [[0xFC010000, 0x18000], [0xFC4B8000, 0x60F0], [0x200000, 0x24000]],
"MSM8974AC": [[0xFC010000, 0x18000], [0xFC4B8000, 0x60F0], [0x200000, 0x24000]],
"MSM8976": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM8992": [[0xFC010000, 0x18000], [0xFC4B8000, 0x6FFF], [0xFE800000, 0x24000]],
"MSM8994": [[0xFC010000, 0x18000], [0xFC4B8000, 0x6FFF], [0xFE800000, 0x24000]],
"MSM8996": [[0x100000, 0x18000], [0x70000, 0x6158], [0x200000, 0x24000]],
"MSM8996AU": [[0x100000, 0x18000], [0x70000, 0x6158], [0x200000, 0x24000]],
"MSM8996Pro": [[0x100000, 0x18000], [0x70000, 0x6158], [0x200000, 0x24000]],
"MSM8998": [[0x300000, 0x3c000], [0x780000, 0x10000], []],
"MSM9206": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM9207": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MDM9250": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MDM9350": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MSM9607": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MDM9650": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"MDM9x50": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"SDM429": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"SDM439": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"SDM450": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"SDM632": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"SDA632": [[0x100000, 0x18000], [0x000A0000, 0x6FFF], [0x200000, 0x24000]],
"SDM630": [[0x300000, 0x3c000], [0x780000, 0x10000], []],
"SDM636": [[0x300000, 0x3c000], [0x780000, 0x10000], [0x14009003, 0x24000]],
"SDM660": [[0x300000, 0x3c000], [0x780000, 0x10000], []],
"SDM670": [[0x300000, 0x3c000], [0x780000, 0x10000], []],
"SDA670": [[0x300000, 0x3c000], [0x780000, 0x10000], []],
"SDM845": [[0x300000, 0x3c000], [0x780000, 0x10000], []],
}
secureboottbl = {
"2432": 0x019018c8,
# "MSM7227A":[[], [], []],
# "MSM8210": [[], [], []],
# "MSM8212":
"APQ8009": 0x00058098,
"APQ8016": 0x0005C098,
"APQ8036": 0x00058098,
"APQ8039": 0x00058098,
"APQ8037": 0x000a01d0,
"APQ8053": 0x000a01d0,
"APQ8052": 0x00058098,
"APQ8056": 0x000a01d0,
"APQ8076": 0x000a01d0,
"APQ8084": 0xFC4B83E8,
"APQ8092": 0xFC4B83E8,
"APQ8098": 0x00780350,
"MSM8226": 0xFC4B83E8,
"MSM8610": 0xFC4B83E8,
"MSM8909": 0x00058098,
"MSM8916": 0x0005C098,
"MSM8917": 0x000A01D0,
"MSM8920": 0x000A01D0,
# "MSM8926": [[], [], []],
# "MSM8928": [[], [], []],
"MSM8929": 0x00058098,
"MSM8930": 0x700310,
"MSM8936": 0x700310,
"MSM8937": 0x000A01D0,
"MSM8939": 0x00058098,
"MSM8940": 0x000A01D0,
"MSM8952": 0x00058098,
"MSM8953": 0x000a01d0,
"MSM8956": 0x000a01d0,
"MSM8974": 0xFC4B83F8,
"MSM8974AB": 0xFC4B83F8,
"MSM8974ABv3": 0xFC4B83F8,
"MSM8974AC": 0xFC4B83F8,
"MSM8976": 0x000a01d0,
"MSM8992": 0xFC4B83F8,
"MSM8994": 0xFC4B83F8,
"MSM8996": 0x00070378,
"MSM8996AU": 0x00070378,
"MSM8996Pro": 0x00070378,
"MSM8998": 0x00780350,
"MDM9206": 0x000a01d0,
"MDM9207": 0x000a01d0,
"MDM9250": 0x000a01d0,
"MDM9350": 0x000a01d0,
"MDM9607": 0x000a01d0,
"MDM9650": 0x000a01d0,
"MDM9x50": 0x000a01d0,
"SDM429": 0x000a01d0,
"SDM439": 0x000a01d0,
"SDM450": 0x000a01d0,
# "SDM636": 0x70378,
"SDM630": 0x00780350,
"SDM632": 0x000a01d0,
"SDA632": 0x000a01d0,
"SDM636": 0x00780350,
"SDM660": 0x00780350,
"SDM670": 0x00780350,
"SDA670": 0x00780350,
"SDM845": 0x00780350
}
def check_cmd(supported_funcs, func):
if not supported_funcs:
return True
for sfunc in supported_funcs:
if func.lower() == sfunc.lower():
return True
return False
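# e.g. check_cmd(["Peek", "Nop"], "peek") -> True (case-insensitive match);
# an empty or None supported_funcs list is treated as "everything supported".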
def main():
mode = ""
loop = 0
vid = int(args["--vid"], 16)
pid = int(args["--pid"], 16)
verbose=logger.level
if args["--debugmode"]:
verbose=logging.DEBUG
logger.setLevel(verbose)
fh = logging.FileHandler('log.txt')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# ch = logging.StreamHandler()
# ch.setLevel(logging.ERROR)
cdc = usb_class(vid=vid, pid=pid, verbose=verbose)
sahara = qualcomm_sahara(cdc)
if args["--loader"] == 'None':
logger.info("Trying with no loader given ...")
sahara.programmer = None
else:
loader = args["--loader"]
logger.info(f"Using loader {loader} ...")
with open(loader, "rb") as rf:
sahara.programmer = rf.read()
logger.info("Waiting for the device")
resp = None
cdc.timeout = 100
logger.debug("Ohuh")
mode, resp = doconnect(cdc, loop, mode, resp, sahara)
if resp == -1:
mode, resp = doconnect(cdc, loop, mode, resp, sahara)
if resp == -1:
logger.error("USB desync, please rerun command !")
exit(0)
if mode == "sahara":
if "mode" in resp:
mode = resp["mode"]
if mode == sahara.sahara_mode.SAHARA_MODE_MEMORY_DEBUG:
print("Device is in memory dump mode, dumping memory")
sahara.debug_mode()
exit(0)
else:
print("Device is in EDL mode .. continuing.")
cdc.timeout = None
m = sahara.info()
if m:
mode, resp = sahara.connect()
if mode == "sahara":
mode = sahara.upload_loader()
if mode != "":
time.sleep(0.3)
print("Successfully uploaded programmer :)")
else:
print("Device is in an unknown sahara state")
exit(0)
else:
print("Device is in an unknown state")
exit(0)
else:
sahara.bit64 = True
if mode == "firehose":
cdc.timeout = None
handle_firehose(args, cdc, sahara, verbose)
elif mode == "nandprg" or mode == "enandprg":
handle_streaming(args, cdc, sahara, verbose)
else:
logger.error("Sorry, couldn't talk to Sahara, please reboot the device !")
exit(0)
exit(0)
def doconnect(cdc, loop, mode, resp, sahara):
while not cdc.connected:
cdc.connected = cdc.connect()
if not cdc.connected:
sys.stdout.write('.')
if loop >= 20:
sys.stdout.write('\n')
loop = 0
loop += 1
time.sleep(1)
sys.stdout.flush()
else:
logger.info("Device detected :)")
try:
mode, resp = sahara.connect()
if mode == "" or resp == -1:
mode, resp = sahara.connect()
except:
if mode == "" or resp == -1:
mode, resp = sahara.connect()
if mode == "":
logger.info("Unknown mode. Aborting.")
cdc.close()
exit(0)
logger.info(f"Mode detected: {mode}")
break
return mode, resp
def handle_streaming(args, cdc, sahara, verbose):
fh = qualcomm_streaming(cdc, sahara, verbose)
def do_firehose_server(mainargs, cdc, sahara):
cfg = qualcomm_firehose.cfg()
cfg.MemoryName = mainargs["--memory"]
cfg.ZLPAwareHost = 1
cfg.SkipStorageInit = mainargs["--skipstorageinit"]
cfg.SkipWrite = mainargs["--skipwrite"]
cfg.MaxPayloadSizeToTargetInBytes = int(mainargs["--maxpayload"], 16)
cfg.SECTOR_SIZE_IN_BYTES = int(mainargs["--sectorsize"], 16)
cfg.bit64 = sahara.bit64
fh = qualcomm_firehose(cdc, xmlparser(), cfg)
supported_functions = fh.connect(0)
TargetName = "Unknown"
if "hwid" in dir(sahara):
hwid = sahara.hwid
if hwid >> 8 in msmids:
TargetName = msmids[hwid >> 8]
else:
TargetName = fh.cfg.TargetName
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', int(mainargs["--tcpport"]))
print('starting up on %s port %s' % server_address)
sock.bind(server_address)
sock.listen(1)
while True:
print('waiting for a connection')
connection, client_address = sock.accept()
try:
print('connection from', client_address)
while True:
data = connection.recv(4096).decode('utf-8')
print('received %s' % data)
if data:
print('handling request')
lines = data.split("\n")
for line in lines:
if ":" in line:
cmd = line.split(":")[0]
arguments = line.split(":")[1]
if "," in arguments:
arguments = arguments.split(",")
else:
arguments = [arguments]
if cmd == "gpt":
                                if len(arguments) != 2:
response = "<NAK>\n" + "Usage: gpt:<lun>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
fh.cmd_read(lun, 0, 0x6000//cfg.SECTOR_SIZE_IN_BYTES, arguments[1])
response = f"<ACK>\n" + f"Dumped GPT to {arguments[1]}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "printgpt":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: printgpt:<lun>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
data, guid_gpt = fh.get_gpt(lun, int(mainargs["--gpt-num-part-entries"]),
int(mainargs["--gpt-part-entry-size"]),
int(mainargs["--gpt-part-entry-start-lba"]))
if guid_gpt is not None:
response = "<ACK>\n" + guid_gpt.tostring()
connection.sendall(bytes(response, 'utf-8'))
else:
response = "Error on reading GPT, maybe wrong memoryname given ?"
response = "<NAK>\n" + response
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "r":
if len(arguments) != 3:
response = "<NAK>\n" + "Usage: r:<lun>,<partitionname>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
partitionname = arguments[1]
filename = arguments[2]
data, guid_gpt = fh.get_gpt(lun, int(mainargs["--gpt-num-part-entries"]),
int(mainargs["--gpt-part-entry-size"]),
int(mainargs["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
response = "<NAK>\n" + f"Error reading GPT Table"
connection.sendall(bytes(response, 'utf-8'))
else:
found = False
for partition in guid_gpt.partentries:
if partition.name == partitionname:
fh.cmd_read(lun, partition.sector, partition.sectors, filename)
response = "<ACK>\n" + f"Dumped sector {str(partition.sector)} with sector count {str(partition.sectors)} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
found = True
break
if not found:
response = "<NAK>\n" + f"Error: Couldn't detect partition: {partitionname}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "rl":
if len(arguments) != 3:
response = "<NAK>\n" + "Usage: rl:<lun>,<directory><skip_partname>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
directory = arguments[1]
skip = arguments[2]
if not os.path.exists(directory):
os.mkdir(directory)
data, guid_gpt = fh.get_gpt(lun, int(mainargs["--gpt-num-part-entries"]),
int(mainargs["--gpt-part-entry-size"]),
int(mainargs["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
response = "<NAK>\n" + f"Error reading GPT Table"
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<ACK>\n"
for partition in guid_gpt.partentries:
partitionname = partition.name
if partition.name == skip:
continue
filename = os.path.join(directory, partitionname + ".bin")
fh.cmd_read(lun, partition.sector, partition.sectors, filename)
response += f"Dumped partition {str(partition.name)} with sector count {str(partition.sectors)} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "rf":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: rf:<lun>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
filename = arguments[1]
data, guid_gpt = fh.get_gpt(lun, int(mainargs["--gpt-num-part-entries"]),
int(mainargs["--gpt-part-entry-size"]),
int(mainargs["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
response = "<NAK>\n" + f"Error: Couldn't reading GPT Table"
connection.sendall(bytes(response, 'utf-8'))
else:
fh.cmd_read(lun, 0, guid_gpt.totalsectors, filename)
response = "<ACK>\n" + f"Dumped sector 0 with sector count {str(guid_gpt.totalsectors)} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "pbl":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: pbl:<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
filename = arguments[0]
if TargetName in infotbl:
v = infotbl[TargetName]
if len(v[0]) > 0:
if fh.cmd_peek(v[0][0], v[0][1], filename, True):
response = "<ACK>\n" + f"Dumped pbl at offset {hex(v[0][0])} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "No known pbl offset for this chipset"
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "Unknown target chipset"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "qfp":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: qfp:<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
                                        filename = arguments[0]
if TargetName in infotbl:
v = infotbl[TargetName]
if len(v[1]) > 0:
if fh.cmd_peek(v[1][0], v[1][1], filename):
response = "<ACK>\n" + "Dumped qfprom at offset {hex(v[1][0])} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "No known qfprom offset for this chipset"
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "Unknown target chipset"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "secureboot":
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<ACK>\n"
if TargetName in secureboottbl:
v = secureboottbl[TargetName]
value = struct.unpack("<I",fh.cmd_peek(v, 4))[0]
is_secure = False
for area in range(0, 4):
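                                            # each byte of the 32-bit fuse word describes one
                                            # secure-boot area: bits 0-1 PK hash index, bit 4
                                            # OEM PK hash in use, bit 5 auth enabled, bit 6
                                            # serial number bound (decoded below)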
sec_boot = (value >> (area * 8)) & 0xFF
pk_hashindex = sec_boot & 3
oem_pkhash = True if ((sec_boot >> 4) & 1) == 1 else False
auth_enabled = True if ((sec_boot >> 5) & 1) == 1 else False
use_serial = True if ((sec_boot >> 6) & 1) == 1 else False
if auth_enabled:
is_secure = True
response += f"Sec_Boot{str(area)} PKHash-Index:{str(pk_hashindex)} OEM_PKHash: {str(oem_pkhash)} Auth_Enabled: {str(auth_enabled)} Use_Serial: {str(use_serial)}\n"
if is_secure:
response += f"Secure boot enabled."
else:
response += "Secure boot disabled."
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "Unknown target chipset"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "memtbl":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: memtbl:<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
filename = arguments[0]
if TargetName in infotbl:
v = infotbl[TargetName]
if len(v[2]) > 0:
if fh.cmd_peek(v[2][0], v[2][1], filename):
response = "<ACK>\n" + f"Dumped qfprom at offset {hex(v[2][0])} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "No known memory table offset for this chipset"
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "Unknown target chipset"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "footer":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: footer:<lun>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
filename = arguments[1]
data, guid_gpt = fh.get_gpt(lun, int(mainargs["--gpt-num-part-entries"]),
int(mainargs["--gpt-part-entry-size"]),
int(mainargs["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
response = "<NAK>\n" + f"Error: Couldn't reading GPT Table"
connection.sendall(bytes(response, 'utf-8'))
else:
pnames = ["userdata2", "metadata", "userdata", "reserved1", "reserved2",
"reserved3"]
found = False
for partition in guid_gpt.partentries:
if partition.name in pnames:
response = "<ACK>\n" + f"Detected partition: {partition.name}\n"
if partition.name in ["userdata2","userdata"]:
data = fh.cmd_read_buffer(lun, partition.sector + (
partition.sectors - (0x4000 // cfg.SECTOR_SIZE_IN_BYTES)),
(0x4000 // cfg.SECTOR_SIZE_IN_BYTES),
filename)
else:
data = fh.cmd_read_buffer(lun, partition.sector,(0x4000 // cfg.SECTOR_SIZE_IN_BYTES),filename)
if data==b"":
continue
val = struct.unpack("<I", data[:4])[0]
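                                                # Android FDE crypto footers start with the magic
                                                # 0xD0B5B1C4 (CRYPT_MNT_MAGIC); masking the low
                                                # nibble keeps the check tolerant of revisions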
if (val & 0xFFFFFFF0) == 0xD0B5B1C0:
with open(filename, "wb") as wf:
wf.write(data)
response += f"Dumped footer from {partition.name} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
break
else:
response = "<NAK>\n" + f"Unknown footer structure or no footer found."
connection.sendall(bytes(response, 'utf-8'))
found = True
if not found:
response = "<NAK>\n" + f"Error: Couldn't find footer"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "rs":
if len(arguments) != 4:
response = "<NAK>\n" + f"Usage: -rs <lun>,<start_sector> <sectors> <filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
start = int(arguments[1])
sectors = int(arguments[2])
filename = arguments[3]
fh.cmd_read(lun, start, sectors, filename)
response = "<ACK>\n" + f"Dumped sector {str(start)} with sector count {str(sectors)} as {filename}."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "peek":
if len(arguments) != 3:
response = "<NAK>\n" + "Usage: peek:<offset>,<length>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
length = int(arguments[1], 16)
filename = arguments[2]
fh.cmd_peek(offset, length, filename, False)
response = "<ACK>\n" + f"Dumped data from {str(offset)} with length {str(length)} to {filename}."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "peekhex":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: peekhex:<offset>,<length>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
length = int(arguments[1], 16)
resp = fh.cmd_peek(offset, length, "", False)
response = "<ACK>\n" + hexlify(resp)
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "peekqword":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: peekqword:<offset>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
resp = fh.cmd_peek(offset, 8, "", False)
response = "<ACK>\n" + hex(unpack("<Q", resp[:8])[0])
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "peekdword":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: peekdword:<offset>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "peek"):
response = "<NAK>\n" + "Peek command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
resp = fh.cmd_peek(offset, 4, "", False)
response = "<ACK>\n" + hex(unpack("<I", resp[:4])[0])
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "poke":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: poke:<offset>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "poke"):
response = "<NAK>\n" + "Poke command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
filename = unhexlify(arguments[1])
fh.cmd_poke(offset, "", filename, False)
response = "<ACK>\n" + f"Successfully wrote data to {hex(offset)} from {filename}."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "pokehex":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: pokehex:<offset>,<data>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "poke"):
response = "<NAK>\n" + "Poke command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
data = unhexlify(arguments[1])
fh.cmd_poke(offset, data, "", False)
resp = fh.cmd_peek(offset, len(data), "", False)
if resp == data:
response = "<ACK>\n" + f"Data correctly written to {hex(offset)}."
else:
response = "<NAK>\n" + f"Writing data to {hex(offset)} failed."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "pokeqword":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: pokeqword:<offset>,<qword>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "poke"):
response = "<NAK>\n" + "Poke command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
data = pack("<Q", int(arguments[1], 16))
fh.cmd_poke(offset, data, "", False)
resp = fh.cmd_peek(offset, 8, "", False)
if resp == data:
response = "<ACK>\n" + f"QWORD {arguments[1]} correctly written to {hex(offset)}."
else:
response = "<NAK>\n" + f"Error writing data to {hex(offset)}."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "pokedword":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: pokedword:<offset>,<dword>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "poke"):
response = "<NAK>\n" + "Poke command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
offset = int(arguments[0], 16)
data = pack("<I", int(arguments[1], 16))
fh.cmd_poke(offset, data, "", False)
resp = fh.cmd_peek(offset, 4, "", False)
response = "<ACK>\n" + hex(unpack("<I", resp[:4])[0])
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "memcpy":
if len(arguments) != 3:
response = "<NAK>\n" + "Usage: memcpy:<dstoffset>,<srcoffset>,<size>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "poke"):
response = "<NAK>\n" + "Poke command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
dstoffset = int(arguments[0], 16)
srcoffset = int(arguments[1], 16)
size = int(arguments[2], 16)
resp = fh.cmd_memcpy(dstoffset, srcoffset, size)
response = "<ACK>\n" + hex(unpack("<I", resp[:4])[0])
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "reset":
fh.cmd_reset()
response = "<ACK>\nSent reset cmd."
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "nop":
if not check_cmd(supported_functions, "Nop"):
response = "<NAK>\n" + "Nop command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
info = fh.cmd_nop()
if info:
response = "<ACK>\n" + info
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "Error sending nop cmd"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "setbootablestoragedrive":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: setbootablestoragedrive:<lun>"
connection.sendall(bytes(response, 'utf-8'))
else:
if not check_cmd(supported_functions, "setbootablestoragedrive"):
response = "<NAK>\n" + "setbootablestoragedrive command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
fh.cmd_setbootablestoragedrive(lun)
response = "<ACK>\n" + f"Bootable Storage Drive set to {arguments[0]}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "getstorageinfo":
if not check_cmd(supported_functions, "GetStorageInfo"):
response = "<NAK>\n" + "GetStorageInfo command isn't supported by edl loader"
connection.sendall(bytes(response, 'utf-8'))
else:
data = fh.cmd_getstorageinfo_string()
if data == "":
response = "<NAK>\nGetStorageInfo command isn't supported."
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<ACK>\n" + data
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "send":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: send:<response:True/False>,<command>"
connection.sendall(bytes(response, 'utf-8'))
else:
scmd = arguments[1]
if arguments[0] == "True":
resp = fh.cmd_send(scmd)
if not resp:
response = f"<NAK>\nCommand {scmd} failed."
else:
response = "<ACK>\n" + resp.decode('utf-8').replace("\n", "")
else:
fh.cmd_send(scmd, False)
response = "<ACK>\n" + f"Executed {arguments[1]}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "w":
if len(arguments) != 3:
response = "<NAK>\n" + "Usage: w:<lun>,<partitionname>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
partitionname = arguments[1]
filename = arguments[2]
if not os.path.exists(filename):
response = "<NAK>\n" + f"Error: Couldn't find file: {filename}"
connection.sendall(bytes(response, 'utf-8'))
else:
data, guid_gpt = fh.get_gpt(lun, int(mainargs["--gpt-num-part-entries"]),
int(mainargs["--gpt-part-entry-size"]),
int(mainargs["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
response = "<NAK>\n" + f"Error: Couldn't reading GPT Table"
connection.sendall(bytes(response, 'utf-8'))
else:
found = False
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
for partition in guid_gpt.partentries:
if partition.name == partitionname:
found = True
sectors = os.stat(filename).st_size // fh.cfg.SECTOR_SIZE_IN_BYTES
if (os.stat(filename).st_size % fh.cfg.SECTOR_SIZE_IN_BYTES) > 0:
sectors += 1
if sectors > partition.sectors:
response = "<NAK>\n" + f"Error: {filename} has {sectors} sectors but partition only has {partition.sectors}."
else:
fh.cmd_program(lun, partition.sector, filename)
response = "<ACK>\n" + f"Wrote {filename} to sector {str(partition.sector)}."
connection.sendall(bytes(response, 'utf-8'))
if not found:
response = "<NAK>\n" + f"Error: Couldn't detect partition: {partitionname}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "ws":
                                if len(arguments) != 3:
response = "<NAK>\n" + "Usage: ws:<lun>,<start_sector>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
start = int(arguments[1])
filename = arguments[2]
if not os.path.exists(filename):
response = "<NAK>\n" + f"Error: Couldn't find file: {filename}"
connection.sendall(bytes(response, 'utf-8'))
else:
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
if fh.cmd_program(lun, start, filename):
response = "<ACK>\n" + f"Wrote {filename} to sector {str(start)}."
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + f"Error on writing {filename} to sector {str(start)}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "wf":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: wf:<lun>,<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
start = 0
filename = arguments[1]
if not os.path.exists(filename):
response = "<NAK>\n" + f"Error: Couldn't find file: {filename}"
connection.sendall(bytes(response, 'utf-8'))
else:
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
if fh.cmd_program(lun, start, filename):
response = "<ACK>\n" + f"Wrote {filename} to sector {str(start)}."
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + f"Error on writing {filename} to sector {str(start)}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "e":
if len(arguments) != 2:
response = "<NAK>\n" + "Usage: e:<lun>,<partname>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
partitionname = arguments[1]
data, guid_gpt = fh.get_gpt(lun, int(mainargs["--gpt-num-part-entries"]),
int(mainargs["--gpt-part-entry-size"]),
int(mainargs["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
response = "<NAK>\n" + f"Error: Couldn't reading GPT Table"
connection.sendall(bytes(response, 'utf-8'))
else:
found = False
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
for partition in guid_gpt.partentries:
if partition.name == partitionname:
fh.cmd_erase(lun, partition.sector, partition.sectors)
response = "<ACK>\n" + f"Erased {partitionname} starting at sector {str(partition.sector)} with sector count " + f"{str(partition.sectors)}."
connection.sendall(bytes(response, 'utf-8'))
found = True
if not found:
response = "<NAK>\n" + f"Error: Couldn't detect partition: {partitionname}"
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "es":
if len(arguments) != 3:
response = "<NAK>\n" + "Usage: es:<lun>,<start_sector>,<sectors>"
connection.sendall(bytes(response, 'utf-8'))
else:
lun = int(arguments[0])
start = int(arguments[1])
sectors = int(arguments[2])
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
fh.cmd_erase(lun, start, sectors)
print(f"Erased sector {str(start)} with sector count {str(sectors)}.")
connection.sendall(bytes(response, 'utf-8'))
elif cmd == "xml":
if len(arguments) != 1:
response = "<NAK>\n" + "Usage: xml:<filename>"
connection.sendall(bytes(response, 'utf-8'))
else:
filename = arguments[0]
if fh.cmd_xml(filename):
response = "<ACK>\n" + f"Sent xml content of {filename}"
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + f"Error running xml:{filename}"
connection.sendall(bytes(response, 'utf-8'))
else:
response = "<NAK>\n" + "Unknown/Missing command, a command is required."
connection.sendall(bytes(response, 'utf-8'))
else:
print('no more data from', client_address)
break
finally:
connection.close()
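# Work out which LUNs a command should touch: an explicit --lun wins, UFS-style memory scans LUNs 0-98, eMMC always uses LUN 0.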
def getluns(argument):
if argument["--lun"] != "None":
return [int(argument["--lun"])]
luns = []
if not argument["--memory"].lower() == "emmc":
for i in range(0, 99):
luns.append(i)
else:
luns = [0]
return luns
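# Walk the GPT of every candidate LUN looking for the named partition; returns [True, lun, partition] on a match, otherwise [False, fpartitions] listing every partition name found per LUN.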
def detect_partition(fh, arguments, partitionname):
luns = getluns(arguments)
fpartitions = {}
for lun in luns:
lunname = "Lun" + str(lun)
fpartitions[lunname] = []
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]),
int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
for partition in guid_gpt.partentries:
fpartitions[lunname].append(partition.name)
if partition.name == partitionname:
return [True,lun,partition]
return [False,fpartitions]
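# Firehose mode entry point: builds the firehose configuration from the CLI arguments, connects to the loader and dispatches the requested command.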
def handle_firehose(arguments, cdc, sahara, verbose):
cfg = qualcomm_firehose.cfg()
cfg.MemoryName = arguments["--memory"]
cfg.ZLPAwareHost = 1
cfg.SkipStorageInit = arguments["--skipstorageinit"]
cfg.SkipWrite = arguments["--skipwrite"]
cfg.MaxPayloadSizeToTargetInBytes = int(arguments["--maxpayload"], 16)
cfg.SECTOR_SIZE_IN_BYTES = int(arguments["--sectorsize"], 16)
cfg.bit64 = sahara.bit64
oppoprojid = ""
if "--prjid" in arguments:
if arguments["--prjid"] is not None:
oppoprojid=arguments["--prjid"]
            if oppoprojid not in ["18821", "18825", "18827", "18857", "18865", "19801", "19861", "19863", "18831"]:
logger.error("Oppo project id is not supported, ask for support :)")
exit(0)
fh = qualcomm_firehose(cdc, xmlparser(), cfg, verbose,oppoprojid,sahara.serial)
supported_functions = fh.connect(0)
funcs="Supported functions:\n-----------------\n"
for function in supported_functions:
funcs+=function+","
funcs=funcs[:-1]
logger.info(funcs)
TargetName = fh.cfg.TargetName
if "hwid" in dir(sahara):
hwid = sahara.hwid >> 32
if hwid in msmids:
TargetName = msmids[hwid]
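    # gpt: dump the primary and backup GPT of every selected LUN to files, optionally generating rawprogram xml.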
if arguments["gpt"]:
luns = getluns(arguments)
directory = arguments["<directory>"]
if directory is None:
directory=""
genxml = False
if "--genxml" in arguments:
if arguments["--genxml"]:
genxml = True
for lun in luns:
sfilename = os.path.join(directory, f"gpt_main{str(lun)}.bin")
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]),
int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is not None:
with open(sfilename,"wb") as wf:
wf.write(data)
print(f"Dumped GPT from Lun {str(lun)} to {sfilename}")
sfilename = os.path.join(directory, f"gpt_backup{str(lun)}.bin")
data = fh.get_backup_gpt(lun, int(arguments["--gpt-num-part-entries"]),
int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if data is not None:
with open(sfilename,"wb") as wf:
wf.write(data)
print(f"Dumped Backup GPT from Lun {str(lun)} to {sfilename}")
if genxml:
guid_gpt.generate_rawprogram(lun, cfg.SECTOR_SIZE_IN_BYTES, directory)
exit(0)
elif arguments["printgpt"]:
luns = getluns(arguments)
for lun in luns:
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]), int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
print(f"\nParsing Lun {str(lun)}:")
guid_gpt.print()
exit(0)
elif arguments["r"]:
partitionname = arguments["<partitionname>"]
filename = arguments["<filename>"]
filenames = filename.split(",")
partitions=partitionname.split(",")
if len(partitions)!=len(filenames):
logger.error("You need to gives as many filenames as given partitions.")
exit(0)
i=0
for partition in partitions:
partfilename=filenames[i]
i+=1
res=detect_partition(fh, arguments, partition)
if res[0]==True:
lun=res[1]
rpartition=res[2]
fh.cmd_read(lun, rpartition.sector, rpartition.sectors, partfilename)
print(f"Dumped sector {str(rpartition.sector)} with sector count {str(rpartition.sectors)} as {partfilename}.")
else:
fpartitions=res[1]
logger.error(f"Error: Couldn't detect partition: {partition}\nAvailable partitions:")
for lun in fpartitions:
for rpartition in fpartitions[lun]:
if arguments["--memory"].lower() == "emmc":
logger.error("\t"+rpartition)
else:
logger.error(lun + ":\t" + rpartition)
exit(0)
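    # rl: read every partition of every LUN into a directory tree, skipping names given via --skip.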
elif arguments["rl"]:
directory = arguments["<directory>"]
if arguments["--skip"]:
skip = arguments["--skip"].split(",")
else:
skip = []
genxml = False
if "--genxml" in arguments:
if arguments["--genxml"]:
genxml = True
if not os.path.exists(directory):
os.mkdir(directory)
luns = getluns(arguments)
for lun in luns:
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]),
int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
if len(luns) > 1:
storedir = os.path.join(directory, "lun" + str(lun))
else:
storedir = directory
if not os.path.exists(storedir):
os.mkdir(storedir)
sfilename = os.path.join(storedir, f"gpt_main{str(lun)}.bin")
with open(sfilename, "wb") as wf:
wf.write(data)
sfilename = os.path.join(storedir, f"gpt_backup{str(lun)}.bin")
with open(sfilename, "wb") as wf:
wf.write(data[fh.cfg.SECTOR_SIZE_IN_BYTES * 2:])
if genxml:
guid_gpt.generate_rawprogram(lun, cfg.SECTOR_SIZE_IN_BYTES, storedir)
for partition in guid_gpt.partentries:
partitionname = partition.name
if partition.name in skip:
continue
filename = os.path.join(storedir, partitionname + ".bin")
logging.info(
f"Dumping partition {str(partition.name)} with sector count {str(partition.sectors)} as {filename}.")
fh.cmd_read(lun, partition.sector, partition.sectors, filename)
exit(0)
elif arguments["rf"]:
filename = arguments["<filename>"]
luns = getluns(arguments)
for lun in luns:
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]), int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
if len(luns) > 1:
sfilename = f"lun{str(lun)}_" + filename
else:
sfilename = filename
print(f"Dumping sector 0 with sector count {str(guid_gpt.totalsectors)} as {filename}.")
fh.cmd_read(lun, 0, guid_gpt.totalsectors, sfilename)
print(f"Dumped sector 0 with sector count {str(guid_gpt.totalsectors)} as {filename}.")
exit(0)
elif arguments["pbl"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
filename = arguments["<filename>"]
if TargetName in infotbl:
v = infotbl[TargetName]
if len(v[0]) > 0:
if fh.cmd_peek(v[0][0], v[0][1], filename, True):
print(f"Dumped pbl at offset {hex(v[0][0])} as {filename}.")
exit(0)
else:
logger.error("No known pbl offset for this chipset")
else:
logger.error("Unknown target chipset")
logger.error("Error on dumping pbl")
exit(0)
elif arguments["qfp"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
filename = arguments["<filename>"]
if TargetName in infotbl:
v = infotbl[TargetName]
if len(v[1]) > 0:
if fh.cmd_peek(v[1][0], v[1][1], filename):
print(f"Dumped qfprom at offset {hex(v[1][0])} as {filename}.")
exit(0)
else:
logger.error("No known qfprom offset for this chipset")
else:
logger.error("Unknown target chipset")
logger.error("Error on dumping qfprom")
exit(0)
elif arguments["secureboot"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
if TargetName in secureboottbl:
v = secureboottbl[TargetName]
value = struct.unpack("<I",fh.cmd_peek(v, 4))[0]
is_secure = False
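                # Each byte of the fuse value describes one secure-boot area: bits 0-1 select the PK hash index, bit 4 flags an OEM PK hash, bit 5 enables authentication, bit 6 ties boot auth to the serial number.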
for area in range(0, 4):
sec_boot = (value >> (area * 8))&0xFF
pk_hashindex = sec_boot & 3
oem_pkhash = True if ((sec_boot >> 4) & 1) == 1 else False
auth_enabled = True if ((sec_boot >> 5) & 1) == 1 else False
use_serial = True if ((sec_boot >> 6) & 1) == 1 else False
if auth_enabled:
is_secure = True
print(
f"Sec_Boot{str(area)} PKHash-Index:{str(pk_hashindex)} OEM_PKHash: {str(oem_pkhash)} Auth_Enabled: {str(auth_enabled)} Use_Serial: {str(use_serial)}")
if is_secure:
print(f"Secure boot enabled.")
else:
print("Secure boot disabled.")
else:
logger.error("Unknown target chipset")
exit(0)
elif arguments["memtbl"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
filename = arguments["<filename>"]
if TargetName in infotbl:
v = infotbl[TargetName]
if len(v[2]) > 0:
if fh.cmd_peek(v[2][0], v[2][1], filename):
print(f"Dumped memtbl at offset {hex(v[2][0])} as {filename}.")
exit(0)
else:
logger.error("No known memtbl offset for this chipset")
else:
logger.error("Unknown target chipset")
logger.error("Error on dumping memtbl")
exit(0)
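    # footer: carve the 16 KiB crypto footer out of the tail of a userdata-style partition; it is recognized by the 0xD0B5B1C0 magic in its first dword.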
elif arguments["footer"]:
luns = getluns(arguments)
filename = arguments["<filename>"]
for lun in luns:
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]), int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
pnames = ["userdata2", "metadata", "userdata", "reserved1", "reserved2", "reserved3"]
for partition in guid_gpt.partentries:
if partition.name in pnames:
print(f"Detected partition: {partition.name}")
data = fh.cmd_read_buffer(lun,
partition.sector + (
partition.sectors - (0x4000 // cfg.SECTOR_SIZE_IN_BYTES)),
(0x4000 // cfg.SECTOR_SIZE_IN_BYTES), filename)
if data==b"":
continue
val = struct.unpack("<I", data[:4])[0]
if (val & 0xFFFFFFF0) == 0xD0B5B1C0:
with open(filename, "wb") as wf:
wf.write(data)
print(f"Dumped footer from {partition.name} as {filename}.")
exit(0)
logger.error(f"Error: Couldn't detect footer partition.")
exit(0)
elif arguments["rs"]:
lun = int(arguments["--lun"])
start = int(arguments["<start_sector>"])
sectors = int(arguments["<sectors>"])
filename = arguments["<filename"]
data = fh.cmd_read(lun, start, sectors, filename)
with open(filename, "wb") as wf:
wf.write(data)
print(f"Dumped sector {str(start)} with sector count {str(sectors)} as {filename}.")
exit(0)
logger.error(f"Error: Couldn't open {filename} for writing.")
exit(0)
elif arguments["peek"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
length = int(arguments["<length>"], 16)
filename = arguments["<filename"]
fh.cmd_peek(offset, length, filename, True)
exit(0)
elif arguments["peekhex"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
length = int(arguments["<length>"], 16)
resp = fh.cmd_peek(offset, length, "", True)
print("\n")
print(hexlify(resp))
exit(0)
elif arguments["peekqword"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
resp = fh.cmd_peek(offset, 8, "", True)
print("\n")
print(hex(unpack("<Q", resp[:8])[0]))
exit(0)
elif arguments["peekdword"]:
if not check_cmd(supported_functions, "peek"):
logger.error("Peek command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
resp = fh.cmd_peek(offset, 4, "", True)
print("\n")
print(hex(unpack("<I", resp[:4])[0]))
exit(0)
elif arguments["poke"]:
if not check_cmd(supported_functions, "poke"):
logger.error("Poke command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
filename = unhexlify(arguments["<filename>"])
fh.cmd_poke(offset, "", filename, True)
exit(0)
elif arguments["pokehex"]:
if not check_cmd(supported_functions, "poke"):
logger.error("Poke command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
data = unhexlify(arguments["<data>"])
fh.cmd_poke(offset, data, "", True)
resp = fh.cmd_peek(offset, len(data), "", True)
if resp == data:
print("Data correctly written")
else:
print("Sending data failed")
exit(0)
elif arguments["pokeqword"]:
if not check_cmd(supported_functions, "poke"):
logger.error("Poke command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
data = pack("<Q", int(arguments["<data>"], 16))
fh.cmd_poke(offset, data, "", True)
resp = fh.cmd_peek(offset, 8, "", True)
print(hex(unpack("<Q", resp[:8])[0]))
exit(0)
elif arguments["pokedword"]:
if not check_cmd(supported_functions, "poke"):
logger.error("Poke command isn't supported by edl loader")
exit(0)
else:
offset = int(arguments["<offset>"], 16)
data = pack("<I", int(arguments["<data>"], 16))
fh.cmd_poke(offset, data, "", True)
resp = fh.cmd_peek(offset, 4, "", True)
print(hex(unpack("<I", resp[:4])[0]))
exit(0)
elif arguments["reset"]:
fh.cmd_reset()
exit(0)
elif arguments["nop"]:
if not check_cmd(supported_functions, "nop"):
logger.error("Nop command isn't supported by edl loader")
exit(0)
else:
print(fh.cmd_nop())
exit(0)
elif arguments["setbootablestoragedrive"]:
if not check_cmd(supported_functions, "setbootablestoragedrive"):
logger.error("setbootablestoragedrive command isn't supported by edl loader")
exit(0)
else:
fh.cmd_setbootablestoragedrive(int(arguments["<lun>"]))
exit(0)
elif arguments["getstorageinfo"]:
if not check_cmd(supported_functions, "getstorageinfo"):
logger.error("getstorageinfo command isn't supported by edl loader")
exit(0)
else:
fh.cmd_getstorageinfo()
exit(0)
elif arguments["w"]:
partitionname = arguments["<partitionname>"]
filename = arguments["<filename>"]
if not os.path.exists(filename):
logger.error(f"Error: Couldn't find file: {filename}")
exit(0)
res=detect_partition(fh, arguments, partitionname)
if res[0]==True:
lun=res[1]
partition=res[2]
sectors = os.stat(filename).st_size // fh.cfg.SECTOR_SIZE_IN_BYTES
if (os.stat(filename).st_size % fh.cfg.SECTOR_SIZE_IN_BYTES) > 0:
sectors += 1
if sectors > partition.sectors:
logger.error(f"Error: {filename} has {sectors} sectors but partition only has {partition.sectors}.")
exit(0)
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
if fh.cmd_program(lun, partition.sector, filename):
print(f"Wrote {filename} to sector {str(partition.sector)}.")
else:
print(f"Error writing {filename} to sector {str(partition.sector)}.")
exit(0)
else:
fpartitions=res[1]
logger.error(f"Error: Couldn't detect partition: {partitionname}\nAvailable partitions:")
for lun in fpartitions:
for partition in fpartitions[lun]:
if arguments["--memory"].lower() == "emmc":
logger.error("\t"+partition)
else:
logger.error(lun + ":\t" + partition)
exit(0)
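    # wl: flash every file in a directory to the partition matching its basename (a trailing .bin is stripped).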
elif arguments["wl"]:
directory = arguments["<directory>"]
if arguments["--skip"]:
skip = arguments["--skip"].split(",")
else:
skip = []
luns = getluns(arguments)
if not os.path.exists(directory):
logger.error(f"Error: Couldn't find directory: {directory}")
exit(0)
filenames = []
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
for dirName, subdirList, fileList in os.walk(directory):
for fname in fileList:
filenames.append(os.path.join(dirName, fname))
for lun in luns:
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]), int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
if "partentries" in dir(guid_gpt):
for filename in filenames:
for partition in guid_gpt.partentries:
partname = filename[filename.rfind("/") + 1:]
if ".bin" in partname[-4:]:
partname = partname[:-4]
if partition.name == partname:
if partition.name in skip:
continue
sectors = os.stat(filename).st_size // fh.cfg.SECTOR_SIZE_IN_BYTES
if (os.stat(filename).st_size % fh.cfg.SECTOR_SIZE_IN_BYTES) > 0:
sectors += 1
if sectors > partition.sectors:
logger.error(
f"Error: {filename} has {sectors} sectors but partition only has {partition.sectors}.")
exit(0)
print(f"Writing {filename} to partition {str(partition.name)}.")
fh.cmd_program(lun, partition.sector, filename)
else:
print("Couldn't write partition. Either wrong memorytype given or no gpt partition.")
exit(0)
elif arguments["ws"]:
lun = int(arguments["--lun"])
start = int(arguments["<start_sector>"])
filename = arguments["<filename>"]
if not os.path.exists(filename):
logger.error(f"Error: Couldn't find file: {filename}")
exit(0)
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
if fh.cmd_program(lun, start, filename):
print(f"Wrote {filename} to sector {str(start)}.")
else:
logger.error(f"Error on writing {filename} to sector {str(start)}")
exit(0)
elif arguments["wf"]:
lun = int(arguments["--lun"])
start = 0
filename = arguments["<filename>"]
if not os.path.exists(filename):
logger.error(f"Error: Couldn't find file: {filename}")
exit(0)
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
if fh.cmd_program(lun, start, filename):
print(f"Wrote {filename} to sector {str(start)}.")
else:
logger.error(f"Error on writing {filename} to sector {str(start)}")
exit(0)
elif arguments["e"]:
luns = getluns(arguments)
partitionname = arguments["<partitionname>"]
for lun in luns:
data, guid_gpt = fh.get_gpt(lun, int(arguments["--gpt-num-part-entries"]), int(arguments["--gpt-part-entry-size"]),
int(arguments["--gpt-part-entry-start-lba"]))
if guid_gpt is None:
break
else:
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
if "partentries" in dir(guid_gpt):
for partition in guid_gpt.partentries:
if partition.name == partitionname:
fh.cmd_erase(lun, partition.sector, partition.sectors)
print(
f"Erased {partitionname} starting at sector {str(partition.sector)} with sector count " +
f"{str(partition.sectors)}.")
exit(0)
else:
print("Couldn't erase partition. Either wrong memorytype given or no gpt partition.")
exit(0)
logger.error(f"Error: Couldn't detect partition: {partitionname}")
exit(0)
elif arguments["es"]:
lun = int(arguments["--lun"])
start = int(arguments["<start_sector>"])
sectors = int(arguments["<sectors"])
if fh.oppoprjid is not None and fh.ops is not None:
if fh.oppoprjid != "":
if "demacia" in fh.supported_functions:
if not fh.ops.run(True):
exit(0)
elif "setprojmodel" in fh.supported_functions:
if not fh.ops.run(False):
exit(0)
fh.cmd_erase(lun, start, sectors)
print(f"Erased sector {str(start)} with sector count {str(sectors)}.")
exit(0)
elif arguments["xml"]:
fh.cmd_xml(arguments["<xmlfile>"])
exit(0)
elif arguments["rawxml"]:
fh.cmd_rawxml(arguments["<xmlstring>"])
exit(0)
elif arguments["send"]:
command = arguments["<command>"]
resp = fh.cmd_send(command, True)
print("\n")
print(resp)
exit(0)
elif arguments["server"]:
do_firehose_server(arguments, cdc, sahara)
exit(0)
elif arguments["oemunlock"]:
partition = "config"
res=detect_partition(fh, arguments, partition)
if res[0]==True:
lun=res[1]
rpartition=res[2]
offsettopatch=0x7FFFF
sector=rpartition.sector + (offsettopatch//cfg.SECTOR_SIZE_IN_BYTES)
offset=offsettopatch%cfg.SECTOR_SIZE_IN_BYTES
value=0x1
size_in_bytes=1
if fh.cmd_patch(lun, sector, offset, value, size_in_bytes, True):
print(f"Patched sector {str(rpartition.sector)}, offset {str(offset)} with value {value}, size in bytes {size_in_bytes}.")
else:
fpartitions=res[1]
logger.error(f"Error: Couldn't detect partition: {partition}\nAvailable partitions:")
for lun in fpartitions:
for rpartition in fpartitions[lun]:
if arguments["--memory"].lower() == "emmc":
logger.error("\t" + rpartition)
else:
logger.error(lun + ":\t" + rpartition)
exit(0)
else:
logger.error("Unknown/Missing command, a command is required.")
exit(0)
if __name__ == '__main__':
main()
|
[
"Library.sahara.qualcomm_sahara",
"Library.streaming.qualcomm_streaming",
"docopt.docopt",
"socket.socket",
"struct.unpack",
"Library.firehose.qualcomm_firehose.cfg",
"time.sleep",
"Library.xmlparser.xmlparser",
"Library.usblib.usb_class"
] |
[((9714, 9748), 'docopt.docopt', 'docopt', (['__doc__'], {'version': '"""EDL 2.1"""'}), "(__doc__, version='EDL 2.1')\n", (9720, 9748), False, 'from docopt import docopt\n'), ((19723, 19767), 'Library.usblib.usb_class', 'usb_class', ([], {'vid': 'vid', 'pid': 'pid', 'verbose': 'verbose'}), '(vid=vid, pid=pid, verbose=verbose)\n', (19732, 19767), False, 'from Library.usblib import usb_class\n'), ((19781, 19801), 'Library.sahara.qualcomm_sahara', 'qualcomm_sahara', (['cdc'], {}), '(cdc)\n', (19796, 19801), False, 'from Library.sahara import qualcomm_sahara\n'), ((22744, 22784), 'Library.streaming.qualcomm_streaming', 'qualcomm_streaming', (['cdc', 'sahara', 'verbose'], {}), '(cdc, sahara, verbose)\n', (22762, 22784), False, 'from Library.streaming import qualcomm_streaming\n'), ((22844, 22867), 'Library.firehose.qualcomm_firehose.cfg', 'qualcomm_firehose.cfg', ([], {}), '()\n', (22865, 22867), False, 'from Library.firehose import qualcomm_firehose\n'), ((23531, 23580), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (23544, 23580), False, 'import socket\n'), ((71454, 71477), 'Library.firehose.qualcomm_firehose.cfg', 'qualcomm_firehose.cfg', ([], {}), '()\n', (71475, 71477), False, 'from Library.firehose import qualcomm_firehose\n'), ((23235, 23246), 'Library.xmlparser.xmlparser', 'xmlparser', ([], {}), '()\n', (23244, 23246), False, 'from Library.xmlparser import xmlparser\n'), ((72213, 72224), 'Library.xmlparser.xmlparser', 'xmlparser', ([], {}), '()\n', (72222, 72224), False, 'from Library.xmlparser import xmlparser\n'), ((22083, 22096), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (22093, 22096), False, 'import time\n'), ((21119, 21134), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (21129, 21134), False, 'import time\n'), ((85669, 85691), 'struct.unpack', 'unpack', (['"""<Q"""', 'resp[:8]'], {}), "('<Q', resp[:8])\n", (85675, 85691), False, 'from struct import unpack, pack\n'), ((86056, 86078), 'struct.unpack', 'unpack', (['"""<I"""', 'resp[:4]'], {}), "('<I', resp[:4])\n", (86062, 86078), False, 'from struct import unpack, pack\n'), ((87450, 87472), 'struct.unpack', 'unpack', (['"""<Q"""', 'resp[:8]'], {}), "('<Q', resp[:8])\n", (87456, 87472), False, 'from struct import unpack, pack\n'), ((87921, 87943), 'struct.unpack', 'unpack', (['"""<I"""', 'resp[:4]'], {}), "('<I', resp[:4])\n", (87927, 87943), False, 'from struct import unpack, pack\n'), ((46541, 46563), 'struct.unpack', 'unpack', (['"""<Q"""', 'resp[:8]'], {}), "('<Q', resp[:8])\n", (46547, 46563), False, 'from struct import unpack, pack\n'), ((47505, 47527), 'struct.unpack', 'unpack', (['"""<I"""', 'resp[:4]'], {}), "('<I', resp[:4])\n", (47511, 47527), False, 'from struct import unpack, pack\n'), ((52429, 52451), 'struct.unpack', 'unpack', (['"""<I"""', 'resp[:4]'], {}), "('<I', resp[:4])\n", (52435, 52451), False, 'from struct import unpack, pack\n'), ((53563, 53585), 'struct.unpack', 'unpack', (['"""<I"""', 'resp[:4]'], {}), "('<I', resp[:4])\n", (53569, 53585), False, 'from struct import unpack, pack\n')]
|
""" Game fix for Persona 4 Golden
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" installs devenum, quartz, wmp9 and adjust pulse latency
"""
# Fix pre-rendered cutscene playback
util.protontricks('devenum')
util.protontricks('quartz')
util.protontricks('wmp9')
# Fix crackling audio
util.set_environment('PULSE_LATENCY_MSEC', '60')
|
[
"protonfixes.util.set_environment",
"protonfixes.util.protontricks"
] |
[((222, 250), 'protonfixes.util.protontricks', 'util.protontricks', (['"""devenum"""'], {}), "('devenum')\n", (239, 250), False, 'from protonfixes import util\n'), ((255, 282), 'protonfixes.util.protontricks', 'util.protontricks', (['"""quartz"""'], {}), "('quartz')\n", (272, 282), False, 'from protonfixes import util\n'), ((287, 312), 'protonfixes.util.protontricks', 'util.protontricks', (['"""wmp9"""'], {}), "('wmp9')\n", (304, 312), False, 'from protonfixes import util\n'), ((344, 392), 'protonfixes.util.set_environment', 'util.set_environment', (['"""PULSE_LATENCY_MSEC"""', '"""60"""'], {}), "('PULSE_LATENCY_MSEC', '60')\n", (364, 392), False, 'from protonfixes import util\n')]
|
import pyxhook
import os
import bluetooth
import time
global s
# function that finds the MAC address of the RC car
def Find_RC():
needed = "ESP32" #name of the car
while True:
nearby_devices = bluetooth.discover_devices(lookup_names = True)
#print("found %d devices" % len(nearby_devices))
for addr, name in nearby_devices:
#print(" %s - %s" % (addr, name))
if needed in name:
return addr
# function that connects a bluetooth socket to the RC car for transmission
def Connect_RC(Address, port):
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
while True:
try:
sock.connect((Address, port))
            break
except:
print('Could not connect to the device')
time.sleep(5)
return sock
#key press handler: print each key and forward it to the RC car
def OnKeyPress(event):
if "KeyboardInterrupt" == event.Key:
quit()
print(event.Key) #show the command being sent
s.send(event.Key) #send to the car
#Find the MAC address
ad = Find_RC()
print(ad)
#Connect to the RC car
s = Connect_RC(ad, 1)
# create a hook manager object
new_hook = pyxhook.HookManager()
new_hook.KeyDown = OnKeyPress
# set the hook
new_hook.HookKeyboard()
try:
new_hook.start() # start the hook
except KeyboardInterrupt:
quit()# User cancelled from command line.
except Exception as ex:
# Write exceptions to the log file, for analysis later.
msg = 'Error while catching events:\n {}'.format(ex)
pyxhook.print_err(msg)
new_hook.close()
s.close()
#########################
#resources used for code so far
#
#https://www.geeksforgeeks.org/design-a-keylogger-in-python/
#www.bitforestinfo.com/2017/03/how-to-create-virtual-keyboard-using.html
# https://circuitdigest.com/microcontroller-projects/using-classic-bluetooth-in-esp32-and-toogle-an-led
# https://stackoverflow.com/questions/48512695/using-pybluez-to-connect-to-already-paired-bluetooth-device
#########################
|
[
"bluetooth.BluetoothSocket",
"bluetooth.discover_devices",
"time.sleep",
"pyxhook.HookManager",
"pyxhook.print_err"
] |
[((1192, 1213), 'pyxhook.HookManager', 'pyxhook.HookManager', ([], {}), '()\n', (1211, 1213), False, 'import pyxhook\n'), ((585, 628), 'bluetooth.BluetoothSocket', 'bluetooth.BluetoothSocket', (['bluetooth.RFCOMM'], {}), '(bluetooth.RFCOMM)\n', (610, 628), False, 'import bluetooth\n'), ((211, 256), 'bluetooth.discover_devices', 'bluetooth.discover_devices', ([], {'lookup_names': '(True)'}), '(lookup_names=True)\n', (237, 256), False, 'import bluetooth\n'), ((1553, 1575), 'pyxhook.print_err', 'pyxhook.print_err', (['msg'], {}), '(msg)\n', (1570, 1575), False, 'import pyxhook\n'), ((800, 813), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (810, 813), False, 'import time\n')]
|
# 3p
import pymemcache
from pymemcache.exceptions import MemcacheClientError
from pymemcache.exceptions import MemcacheIllegalInputError
from pymemcache.exceptions import MemcacheServerError
from pymemcache.exceptions import MemcacheUnknownCommandError
from pymemcache.exceptions import MemcacheUnknownError
import pytest
# project
from ddtrace import Pin
from ddtrace.contrib.pymemcache.client import WrappedClient
from ddtrace.contrib.pymemcache.patch import patch
from ddtrace.contrib.pymemcache.patch import unpatch
from ddtrace.vendor import wrapt
from tests.utils import DummyTracer
from tests.utils import TracerTestCase
from .test_client_mixin import PYMEMCACHE_VERSION
from .test_client_mixin import PymemcacheClientTestCaseMixin
from .test_client_mixin import TEST_HOST
from .test_client_mixin import TEST_PORT
from .utils import MockSocket
from .utils import _str
_Client = pymemcache.client.base.Client
class PymemcacheClientTestCase(PymemcacheClientTestCaseMixin):
"""Tests for a patched pymemcache.client.base.Client."""
def test_patch(self):
assert issubclass(pymemcache.client.base.Client, wrapt.ObjectProxy)
client = self.make_client([])
self.assertIsInstance(client, wrapt.ObjectProxy)
def test_unpatch(self):
unpatch()
from pymemcache.client.base import Client
self.assertEqual(Client, _Client)
def test_set_get(self):
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
client.set(b"key", b"value", noreply=False)
result = client.get(b"key")
assert _str(result) == "value"
self.check_spans(2, ["set", "get"], ["set key", "get key"])
def test_append_stored(self):
client = self.make_client([b"STORED\r\n"])
result = client.append(b"key", b"value", noreply=False)
assert result is True
self.check_spans(1, ["append"], ["append key"])
def test_prepend_stored(self):
client = self.make_client([b"STORED\r\n"])
result = client.prepend(b"key", b"value", noreply=False)
assert result is True
self.check_spans(1, ["prepend"], ["prepend key"])
def test_cas_stored(self):
client = self.make_client([b"STORED\r\n"])
result = client.cas(b"key", b"value", b"0", noreply=False)
assert result is True
self.check_spans(1, ["cas"], ["cas key"])
def test_cas_exists(self):
client = self.make_client([b"EXISTS\r\n"])
result = client.cas(b"key", b"value", b"0", noreply=False)
assert result is False
self.check_spans(1, ["cas"], ["cas key"])
def test_cas_not_found(self):
client = self.make_client([b"NOT_FOUND\r\n"])
result = client.cas(b"key", b"value", b"0", noreply=False)
assert result is None
self.check_spans(1, ["cas"], ["cas key"])
def test_delete_exception(self):
client = self.make_client([Exception("fail")])
def _delete():
client.delete(b"key", noreply=False)
pytest.raises(Exception, _delete)
spans = self.check_spans(1, ["delete"], ["delete key"])
self.assertEqual(spans[0].error, 1)
def test_flush_all(self):
client = self.make_client([b"OK\r\n"])
result = client.flush_all(noreply=False)
assert result is True
self.check_spans(1, ["flush_all"], ["flush_all"])
def test_incr_exception(self):
client = self.make_client([Exception("fail")])
def _incr():
client.incr(b"key", 1)
pytest.raises(Exception, _incr)
spans = self.check_spans(1, ["incr"], ["incr key"])
self.assertEqual(spans[0].error, 1)
def test_get_error(self):
client = self.make_client([b"ERROR\r\n"])
def _get():
client.get(b"key")
pytest.raises(MemcacheUnknownCommandError, _get)
spans = self.check_spans(1, ["get"], ["get key"])
self.assertEqual(spans[0].error, 1)
def test_get_unknown_error(self):
client = self.make_client([b"foobarbaz\r\n"])
def _get():
client.get(b"key")
pytest.raises(MemcacheUnknownError, _get)
self.check_spans(1, ["get"], ["get key"])
def test_gets_found(self):
client = self.make_client([b"VALUE key 0 5 10\r\nvalue\r\nEND\r\n"])
result = client.gets(b"key")
assert result == (b"value", b"10")
self.check_spans(1, ["gets"], ["gets key"])
def test_touch_not_found(self):
client = self.make_client([b"NOT_FOUND\r\n"])
result = client.touch(b"key", noreply=False)
assert result is False
self.check_spans(1, ["touch"], ["touch key"])
def test_set_client_error(self):
client = self.make_client([b"CLIENT_ERROR some message\r\n"])
def _set():
client.set("key", "value", noreply=False)
pytest.raises(MemcacheClientError, _set)
spans = self.check_spans(1, ["set"], ["set key"])
self.assertEqual(spans[0].error, 1)
def test_set_server_error(self):
client = self.make_client([b"SERVER_ERROR some message\r\n"])
def _set():
client.set(b"key", b"value", noreply=False)
pytest.raises(MemcacheServerError, _set)
spans = self.check_spans(1, ["set"], ["set key"])
self.assertEqual(spans[0].error, 1)
def test_set_key_with_space(self):
client = self.make_client([b""])
def _set():
client.set(b"key has space", b"value", noreply=False)
pytest.raises(MemcacheIllegalInputError, _set)
spans = self.check_spans(1, ["set"], ["set key has space"])
self.assertEqual(spans[0].error, 1)
def test_quit(self):
client = self.make_client([])
result = client.quit()
assert result is None
self.check_spans(1, ["quit"], ["quit"])
def test_replace_not_stored(self):
client = self.make_client([b"NOT_STORED\r\n"])
result = client.replace(b"key", b"value", noreply=False)
assert result is False
self.check_spans(1, ["replace"], ["replace key"])
def test_version_success(self):
client = self.make_client([b"VERSION 1.2.3\r\n"], default_noreply=False)
result = client.version()
assert result == b"1.2.3"
self.check_spans(1, ["version"], ["version"])
def test_stats(self):
client = self.make_client([b"STAT fake_stats 1\r\n", b"END\r\n"])
result = client.stats()
if PYMEMCACHE_VERSION >= (3, 4, 0):
assert client.sock.send_bufs == [b"stats\r\n"]
else:
assert client.sock.send_bufs == [b"stats \r\n"]
assert result == {b"fake_stats": 1}
self.check_spans(1, ["stats"], ["stats"])
def test_service_name_override(self):
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
Pin.override(client, service="testsvcname")
client.set(b"key", b"value", noreply=False)
result = client.get(b"key")
assert _str(result) == "value"
spans = self.get_spans()
self.assertEqual(spans[0].service, "testsvcname")
self.assertEqual(spans[1].service, "testsvcname")
class PymemcacheHashClientTestCase(PymemcacheClientTestCaseMixin):
"""Tests for a patched pymemcache.client.hash.HashClient."""
def make_client_pool(self, hostname, mock_socket_values, serializer=None, **kwargs):
mock_client = pymemcache.client.base.Client(hostname, serializer=serializer, **kwargs)
tracer = DummyTracer()
Pin.override(mock_client, tracer=tracer)
mock_client.sock = MockSocket(mock_socket_values)
client = pymemcache.client.base.PooledClient(hostname, serializer=serializer)
client.client_pool = pymemcache.pool.ObjectPool(lambda: mock_client)
return mock_client
def make_client(self, mock_socket_values, **kwargs):
from pymemcache.client.hash import HashClient
tracer = DummyTracer()
Pin.override(pymemcache, tracer=tracer)
self.client = HashClient([(TEST_HOST, TEST_PORT)], **kwargs)
for _c in self.client.clients.values():
_c.sock = MockSocket(list(mock_socket_values))
return self.client
def test_patched_hash_client(self):
client = self.make_client([b"STORED\r\n"])
if PYMEMCACHE_VERSION >= (3, 2, 0):
assert client.client_class == WrappedClient
assert len(client.clients)
for _c in client.clients.values():
assert isinstance(_c, wrapt.ObjectProxy)
def test_delete_many_found(self):
"""
delete_many internally calls client.delete so we should expect to get
delete for our span resource.
for base.Clients self.delete() is called which by-passes our tracing
on delete()
"""
client = self.make_client([b"STORED\r", b"\n", b"DELETED\r\n"])
result = client.add(b"key", b"value", noreply=False)
result = client.delete_many([b"key"], noreply=False)
assert result is True
self.check_spans(2, ["add", "delete"], ["add key", "delete key"])
class PymemcacheClientConfiguration(TracerTestCase):
"""Ensure that pymemache can be configured properly."""
def setUp(self):
patch()
def tearDown(self):
unpatch()
def make_client(self, mock_socket_values, **kwargs):
tracer = DummyTracer()
Pin.override(pymemcache, tracer=tracer)
self.client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs)
self.client.sock = MockSocket(list(mock_socket_values))
return self.client
def test_same_tracer(self):
"""Ensure same tracer reference is used by the pin on pymemache and
Clients.
"""
client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT))
self.assertEqual(Pin.get_from(client).tracer, Pin.get_from(pymemcache).tracer)
def test_override_parent_pin(self):
"""Test that the service set on `pymemcache` is used for Clients."""
Pin.override(pymemcache, service="mysvc")
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
client.set(b"key", b"value", noreply=False)
pin = Pin.get_from(pymemcache)
tracer = pin.tracer
spans = tracer.pop()
self.assertEqual(spans[0].service, "mysvc")
def test_override_client_pin(self):
"""Test that the service set on `pymemcache` is used for Clients."""
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
Pin.override(client, service="mysvc2")
client.set(b"key", b"value", noreply=False)
pin = Pin.get_from(pymemcache)
tracer = pin.tracer
spans = tracer.pop()
self.assertEqual(spans[0].service, "mysvc2")
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_user_specified_service(self):
"""
When a user specifies a service for the app
The pymemcache integration should not use it.
"""
# Ensure that the service name was configured
from ddtrace import config
assert config.service == "mysvc"
client = self.make_client([b"STORED\r\n", b"VALUE key 0 5\r\nvalue\r\nEND\r\n"])
client.set(b"key", b"value", noreply=False)
pin = Pin.get_from(pymemcache)
tracer = pin.tracer
spans = tracer.pop()
assert spans[0].service != "mysvc"
|
[
"tests.utils.DummyTracer",
"ddtrace.contrib.pymemcache.patch.unpatch",
"pytest.raises",
"pymemcache.client.base.Client",
"ddtrace.Pin.override",
"pymemcache.client.base.PooledClient",
"pymemcache.client.hash.HashClient",
"ddtrace.Pin.get_from",
"pymemcache.pool.ObjectPool",
"ddtrace.contrib.pymemcache.patch.patch"
] |
[((1280, 1289), 'ddtrace.contrib.pymemcache.patch.unpatch', 'unpatch', ([], {}), '()\n', (1287, 1289), False, 'from ddtrace.contrib.pymemcache.patch import unpatch\n'), ((3050, 3083), 'pytest.raises', 'pytest.raises', (['Exception', '_delete'], {}), '(Exception, _delete)\n', (3063, 3083), False, 'import pytest\n'), ((3566, 3597), 'pytest.raises', 'pytest.raises', (['Exception', '_incr'], {}), '(Exception, _incr)\n', (3579, 3597), False, 'import pytest\n'), ((3845, 3893), 'pytest.raises', 'pytest.raises', (['MemcacheUnknownCommandError', '_get'], {}), '(MemcacheUnknownCommandError, _get)\n', (3858, 3893), False, 'import pytest\n'), ((4151, 4192), 'pytest.raises', 'pytest.raises', (['MemcacheUnknownError', '_get'], {}), '(MemcacheUnknownError, _get)\n', (4164, 4192), False, 'import pytest\n'), ((4908, 4948), 'pytest.raises', 'pytest.raises', (['MemcacheClientError', '_set'], {}), '(MemcacheClientError, _set)\n', (4921, 4948), False, 'import pytest\n'), ((5246, 5286), 'pytest.raises', 'pytest.raises', (['MemcacheServerError', '_set'], {}), '(MemcacheServerError, _set)\n', (5259, 5286), False, 'import pytest\n'), ((5567, 5613), 'pytest.raises', 'pytest.raises', (['MemcacheIllegalInputError', '_set'], {}), '(MemcacheIllegalInputError, _set)\n', (5580, 5613), False, 'import pytest\n'), ((6937, 6980), 'ddtrace.Pin.override', 'Pin.override', (['client'], {'service': '"""testsvcname"""'}), "(client, service='testsvcname')\n", (6949, 6980), False, 'from ddtrace import Pin\n'), ((7504, 7576), 'pymemcache.client.base.Client', 'pymemcache.client.base.Client', (['hostname'], {'serializer': 'serializer'}), '(hostname, serializer=serializer, **kwargs)\n', (7533, 7576), False, 'import pymemcache\n'), ((7594, 7607), 'tests.utils.DummyTracer', 'DummyTracer', ([], {}), '()\n', (7605, 7607), False, 'from tests.utils import DummyTracer\n'), ((7616, 7656), 'ddtrace.Pin.override', 'Pin.override', (['mock_client'], {'tracer': 'tracer'}), '(mock_client, tracer=tracer)\n', (7628, 7656), False, 'from ddtrace import Pin\n'), ((7733, 7801), 'pymemcache.client.base.PooledClient', 'pymemcache.client.base.PooledClient', (['hostname'], {'serializer': 'serializer'}), '(hostname, serializer=serializer)\n', (7768, 7801), False, 'import pymemcache\n'), ((7831, 7879), 'pymemcache.pool.ObjectPool', 'pymemcache.pool.ObjectPool', (['(lambda : mock_client)'], {}), '(lambda : mock_client)\n', (7857, 7879), False, 'import pymemcache\n'), ((8036, 8049), 'tests.utils.DummyTracer', 'DummyTracer', ([], {}), '()\n', (8047, 8049), False, 'from tests.utils import DummyTracer\n'), ((8058, 8097), 'ddtrace.Pin.override', 'Pin.override', (['pymemcache'], {'tracer': 'tracer'}), '(pymemcache, tracer=tracer)\n', (8070, 8097), False, 'from ddtrace import Pin\n'), ((8120, 8166), 'pymemcache.client.hash.HashClient', 'HashClient', (['[(TEST_HOST, TEST_PORT)]'], {}), '([(TEST_HOST, TEST_PORT)], **kwargs)\n', (8130, 8166), False, 'from pymemcache.client.hash import HashClient\n'), ((9345, 9352), 'ddtrace.contrib.pymemcache.patch.patch', 'patch', ([], {}), '()\n', (9350, 9352), False, 'from ddtrace.contrib.pymemcache.patch import patch\n'), ((9386, 9395), 'ddtrace.contrib.pymemcache.patch.unpatch', 'unpatch', ([], {}), '()\n', (9393, 9395), False, 'from ddtrace.contrib.pymemcache.patch import unpatch\n'), ((9471, 9484), 'tests.utils.DummyTracer', 'DummyTracer', ([], {}), '()\n', (9482, 9484), False, 'from tests.utils import DummyTracer\n'), ((9493, 9532), 'ddtrace.Pin.override', 'Pin.override', (['pymemcache'], {'tracer': 'tracer'}), '(pymemcache, tracer=tracer)\n', (9505, 9532), False, 'from ddtrace import Pin\n'), ((9555, 9618), 'pymemcache.client.base.Client', 'pymemcache.client.base.Client', (['(TEST_HOST, TEST_PORT)'], {}), '((TEST_HOST, TEST_PORT), **kwargs)\n', (9584, 9618), False, 'import pymemcache\n'), ((9865, 9918), 'pymemcache.client.base.Client', 'pymemcache.client.base.Client', (['(TEST_HOST, TEST_PORT)'], {}), '((TEST_HOST, TEST_PORT))\n', (9894, 9918), False, 'import pymemcache\n'), ((10132, 10173), 'ddtrace.Pin.override', 'Pin.override', (['pymemcache'], {'service': '"""mysvc"""'}), "(pymemcache, service='mysvc')\n", (10144, 10173), False, 'from ddtrace import Pin\n'), ((10330, 10354), 'ddtrace.Pin.get_from', 'Pin.get_from', (['pymemcache'], {}), '(pymemcache)\n', (10342, 10354), False, 'from ddtrace import Pin\n'), ((10680, 10718), 'ddtrace.Pin.override', 'Pin.override', (['client'], {'service': '"""mysvc2"""'}), "(client, service='mysvc2')\n", (10692, 10718), False, 'from ddtrace import Pin\n'), ((10787, 10811), 'ddtrace.Pin.get_from', 'Pin.get_from', (['pymemcache'], {}), '(pymemcache)\n', (10799, 10811), False, 'from ddtrace import Pin\n'), ((11467, 11491), 'ddtrace.Pin.get_from', 'Pin.get_from', (['pymemcache'], {}), '(pymemcache)\n', (11479, 11491), False, 'from ddtrace import Pin\n'), ((9944, 9964), 'ddtrace.Pin.get_from', 'Pin.get_from', (['client'], {}), '(client)\n', (9956, 9964), False, 'from ddtrace import Pin\n'), ((9973, 9997), 'ddtrace.Pin.get_from', 'Pin.get_from', (['pymemcache'], {}), '(pymemcache)\n', (9985, 9997), False, 'from ddtrace import Pin\n')]
|
"""
Copyright (c) 2020-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, division
import math
import torch.nn as nn
from torchreid.losses import AngleSimpleLinear
from .common import ModelInterface
__all__ = ['mobile_face_net_se_1x', 'mobile_face_net_se_2x']
def init_block(in_channels, out_channels, stride, activation=nn.PReLU):
"""Builds the first block of the MobileFaceNet"""
return nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
nn.BatchNorm2d(out_channels),
make_activation(activation)
)
def make_activation(activation):
"""Factory for activation functions"""
if activation != nn.PReLU:
return activation(inplace=True)
return activation()
class SELayer(nn.Module):
"""Implementation of the Squeeze-Excitaion layer from https://arxiv.org/abs/1709.01507"""
def __init__(self, inplanes, squeeze_ratio=8, activation=nn.PReLU, size=None):
super(SELayer, self).__init__()
assert squeeze_ratio >= 1
assert inplanes > 0
if size is not None:
self.global_avgpool = nn.AvgPool2d(size)
else:
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(inplanes, int(inplanes / squeeze_ratio), kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(int(inplanes / squeeze_ratio), inplanes, kernel_size=1, stride=1)
self.relu = make_activation(activation)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.sigmoid(out)
return x * out
class InvertedResidual(nn.Module):
"""Implementation of the modified Inverted residual block"""
def __init__(self, in_channels, out_channels, stride, expand_ratio, outp_size=None):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.use_res_connect = self.stride == 1 and in_channels == out_channels
self.inv_block = nn.Sequential(
nn.Conv2d(in_channels, in_channels * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(in_channels * expand_ratio),
nn.PReLU(),
nn.Conv2d(in_channels * expand_ratio, in_channels * expand_ratio, 3, stride, 1,
groups=in_channels * expand_ratio, bias=False),
nn.BatchNorm2d(in_channels * expand_ratio),
nn.PReLU(),
nn.Conv2d(in_channels * expand_ratio, out_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(out_channels),
SELayer(out_channels, 8, nn.PReLU, outp_size)
)
def forward(self, x):
if self.use_res_connect:
return x + self.inv_block(x)
return self.inv_block(x)
class MobileFaceNet(ModelInterface):
"""Implements modified MobileFaceNet from https://arxiv.org/abs/1804.07573"""
def __init__(self,
num_classes,
feature=False,
feature_dim=256,
width_multiplier=1.,
loss='softmax',
input_size=(128, 128),
**kwargs):
super().__init__(**kwargs)
assert feature_dim > 0
assert num_classes > 0
assert width_multiplier > 0
self.feature = feature
self.loss = loss
self.input_size = input_size
# Set up of inverted residual blocks
inverted_residual_setting = [
            # t: expansion ratio, c: output channels, n: number of blocks, s: stride of the first block
[2, 64, 5, 2],
[4, 128, 1, 2],
[2, 128, 6, 1],
[4, 128, 1, 2],
[2, 128, 2, 1]
]
first_channel_num = 64
last_channel_num = 512
self.features = [init_block(3, first_channel_num, 2)]
self.features.append(nn.Conv2d(first_channel_num, first_channel_num, 3, 1, 1,
groups=first_channel_num, bias=False))
self.features.append(nn.BatchNorm2d(64))
self.features.append(nn.PReLU())
# Inverted Residual Blocks
in_channel_num = first_channel_num
size_h, size_w = self.input_size
size_h, size_w = size_h // 2, size_w // 2
for t, c, n, s in inverted_residual_setting:
output_channel = int(c * width_multiplier)
for i in range(n):
if i == 0:
size_h, size_w = size_h // s, size_w // s
self.features.append(InvertedResidual(in_channel_num, output_channel,
s, t, outp_size=(size_h, size_w)))
else:
self.features.append(InvertedResidual(in_channel_num, output_channel,
1, t, outp_size=(size_h, size_w)))
in_channel_num = output_channel
# 1x1 expand block
self.features.append(nn.Sequential(nn.Conv2d(in_channel_num, last_channel_num, 1, 1, 0, bias=False),
nn.BatchNorm2d(last_channel_num),
nn.PReLU()))
self.features = nn.Sequential(*self.features)
# Depth-wise pooling
k_size = (self.input_size[0] // 16, self.input_size[1] // 16)
self.dw_pool = nn.Conv2d(last_channel_num, last_channel_num, k_size,
groups=last_channel_num, bias=False)
self.dw_bn = nn.BatchNorm2d(last_channel_num)
self.conv1_extra = nn.Conv2d(last_channel_num, feature_dim, 1, stride=1, padding=0, bias=False)
if not self.feature:
classifier_block = nn.Linear if self.loss not in ['am_softmax'] else AngleSimpleLinear
self.fc_angular = classifier_block(feature_dim, num_classes)
self.init_weights()
def forward(self, x, return_featuremaps=False, get_embeddings=False):
x = self.features(x)
if return_featuremaps:
return x
x = self.dw_bn(self.dw_pool(x))
x = self.conv1_extra(x)
if self.feature or not self.training:
return x
x = x.view(x.size(0), -1)
y = self.fc_angular(x)
if get_embeddings:
return y, x
if self.loss in ['softmax', 'am_softmax']:
return y
elif self.loss in ['triplet', ]:
return y, x
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def get_input_res(self):
return self.input_size
def init_weights(self):
"""Initializes weights of the model before training"""
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def mobile_face_net_se_1x(num_classes, pretrained=False, download_weights=False, **kwargs):
model = MobileFaceNet(
num_classes=num_classes,
width_multiplier=1.0,
**kwargs
)
return model
def mobile_face_net_se_2x(num_classes, pretrained=False, download_weights=False, **kwargs):
model = MobileFaceNet(
num_classes=num_classes,
width_multiplier=1.5,
**kwargs
)
return model
|
[
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.PReLU",
"math.sqrt",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.AvgPool2d",
"torch.nn.Sigmoid"
] |
[((975, 992), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(3)'], {}), '(3)\n', (989, 992), True, 'import torch.nn as nn\n'), ((1002, 1064), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)', 'stride', '(1)'], {'bias': '(False)'}), '(in_channels, out_channels, 3, stride, 1, bias=False)\n', (1011, 1064), True, 'import torch.nn as nn\n'), ((1074, 1102), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1088, 1102), True, 'import torch.nn as nn\n'), ((2046, 2058), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2056, 2058), True, 'import torch.nn as nn\n'), ((5800, 5829), 'torch.nn.Sequential', 'nn.Sequential', (['*self.features'], {}), '(*self.features)\n', (5813, 5829), True, 'import torch.nn as nn\n'), ((5953, 6048), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel_num', 'last_channel_num', 'k_size'], {'groups': 'last_channel_num', 'bias': '(False)'}), '(last_channel_num, last_channel_num, k_size, groups=\n last_channel_num, bias=False)\n', (5962, 6048), True, 'import torch.nn as nn\n'), ((6098, 6130), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['last_channel_num'], {}), '(last_channel_num)\n', (6112, 6130), True, 'import torch.nn as nn\n'), ((6158, 6234), 'torch.nn.Conv2d', 'nn.Conv2d', (['last_channel_num', 'feature_dim', '(1)'], {'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(last_channel_num, feature_dim, 1, stride=1, padding=0, bias=False)\n', (6167, 6234), True, 'import torch.nn as nn\n'), ((1690, 1708), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['size'], {}), '(size)\n', (1702, 1708), True, 'import torch.nn as nn\n'), ((1757, 1780), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (1777, 1780), True, 'import torch.nn as nn\n'), ((2702, 2773), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(in_channels * expand_ratio)', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(in_channels, in_channels * expand_ratio, 1, 1, 0, bias=False)\n', (2711, 2773), True, 'import torch.nn as nn\n'), ((2787, 2829), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(in_channels * expand_ratio)'], {}), '(in_channels * expand_ratio)\n', (2801, 2829), True, 'import torch.nn as nn\n'), ((2843, 2853), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (2851, 2853), True, 'import torch.nn as nn\n'), ((2868, 2998), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_channels * expand_ratio)', '(in_channels * expand_ratio)', '(3)', 'stride', '(1)'], {'groups': '(in_channels * expand_ratio)', 'bias': '(False)'}), '(in_channels * expand_ratio, in_channels * expand_ratio, 3, stride,\n 1, groups=in_channels * expand_ratio, bias=False)\n', (2877, 2998), True, 'import torch.nn as nn\n'), ((3030, 3072), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(in_channels * expand_ratio)'], {}), '(in_channels * expand_ratio)\n', (3044, 3072), True, 'import torch.nn as nn\n'), ((3086, 3096), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (3094, 3096), True, 'import torch.nn as nn\n'), ((3111, 3183), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_channels * expand_ratio)', 'out_channels', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(in_channels * expand_ratio, out_channels, 1, 1, 0, bias=False)\n', (3120, 3183), True, 'import torch.nn as nn\n'), ((3197, 3225), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (3211, 3225), True, 'import torch.nn as nn\n'), ((4447, 4546), 'torch.nn.Conv2d', 'nn.Conv2d', (['first_channel_num', 'first_channel_num', '(3)', '(1)', '(1)'], {'groups': 'first_channel_num', 'bias': '(False)'}), '(first_channel_num, first_channel_num, 3, 1, 1, groups=\n first_channel_num, bias=False)\n', (4456, 4546), True, 'import torch.nn as nn\n'), ((4611, 4629), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (4625, 4629), True, 'import torch.nn as nn\n'), ((4660, 4670), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (4668, 4670), True, 'import torch.nn as nn\n'), ((5577, 5641), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel_num', 'last_channel_num', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(in_channel_num, last_channel_num, 1, 1, 0, bias=False)\n', (5586, 5641), True, 'import torch.nn as nn\n'), ((5686, 5718), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['last_channel_num'], {}), '(last_channel_num)\n', (5700, 5718), True, 'import torch.nn as nn\n'), ((5763, 5773), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (5771, 5773), True, 'import torch.nn as nn\n'), ((7442, 7460), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (7451, 7460), False, 'import math\n')]
|
import logging
import logging.handlers
def set_logger(header, fn):
    """Return a logger that echoes ERRORs to stderr and all levels to a rotating file."""
logger = logging.getLogger(header)
handler1 = logging.StreamHandler()
handler2 = logging.handlers.RotatingFileHandler(filename=fn, maxBytes=10000000, backupCount=10)
logger.setLevel(logging.DEBUG)
handler1.setLevel(logging.ERROR)
handler2.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(lineno)s %(levelname)s %(message)s")
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger.addHandler(handler1)
logger.addHandler(handler2)
return logger
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.handlers.RotatingFileHandler",
"logging.getLogger"
] |
[((80, 105), 'logging.getLogger', 'logging.getLogger', (['header'], {}), '(header)\n', (97, 105), False, 'import logging\n'), ((126, 149), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (147, 149), False, 'import logging\n'), ((165, 253), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', ([], {'filename': 'fn', 'maxBytes': '(10000000)', 'backupCount': '(10)'}), '(filename=fn, maxBytes=10000000,\n backupCount=10)\n', (201, 253), False, 'import logging\n'), ((385, 463), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)s %(lineno)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(name)s %(lineno)s %(levelname)s %(message)s')\n", (402, 463), False, 'import logging\n')]
|
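A quick, hedged usage sketch for the set_logger helper above; it assumes the function is importable (no module name appears in the source) and the log filename is illustrative.

log = set_logger("demo", "demo.log")
log.debug("goes to the rotating file only (below the stream handler's ERROR threshold)")
log.error("goes to both stderr and demo.log")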
from pygame import image, transform
class TileLoader:
def __init__(self, tile_size):
self.images = [
transform.scale(image.load('assets/water-deep.png'), (tile_size, tile_size)),
transform.scale(image.load('assets/water-shallow.png'), (tile_size, tile_size)),
transform.scale(image.load('assets/sand.png'), (tile_size, tile_size)),
transform.scale(image.load('assets/grass.png'), (tile_size, tile_size)),
transform.scale(image.load('assets/rock.png'), (tile_size, tile_size)),
transform.scale(image.load('assets/lava.png'), (tile_size, tile_size)),
]
def image(self, tile):
return self.images[tile]
|
[
"pygame.image.load"
] |
[((142, 177), 'pygame.image.load', 'image.load', (['"""assets/water-deep.png"""'], {}), "('assets/water-deep.png')\n", (152, 177), False, 'from pygame import image, transform\n'), ((232, 270), 'pygame.image.load', 'image.load', (['"""assets/water-shallow.png"""'], {}), "('assets/water-shallow.png')\n", (242, 270), False, 'from pygame import image, transform\n'), ((325, 354), 'pygame.image.load', 'image.load', (['"""assets/sand.png"""'], {}), "('assets/sand.png')\n", (335, 354), False, 'from pygame import image, transform\n'), ((409, 439), 'pygame.image.load', 'image.load', (['"""assets/grass.png"""'], {}), "('assets/grass.png')\n", (419, 439), False, 'from pygame import image, transform\n'), ((494, 523), 'pygame.image.load', 'image.load', (['"""assets/rock.png"""'], {}), "('assets/rock.png')\n", (504, 523), False, 'from pygame import image, transform\n'), ((578, 607), 'pygame.image.load', 'image.load', (['"""assets/lava.png"""'], {}), "('assets/lava.png')\n", (588, 607), False, 'from pygame import image, transform\n')]
|
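A hypothetical way to exercise the TileLoader class above; the tile indices follow the order of self.images (0 = deep water through 5 = lava), and the assets/*.png files must exist on disk for this to run.

import pygame
pygame.init()
screen = pygame.display.set_mode((320, 240))
loader = TileLoader(tile_size=32)
screen.blit(loader.image(3), (0, 0))  # index 3 is the grass tile
pygame.display.flip()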
"""
This platform provides binary sensors for OpenUV data.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.openuv/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.components.openuv import (
BINARY_SENSORS, DATA_OPENUV_CLIENT, DATA_PROTECTION_WINDOW, DOMAIN,
TOPIC_UPDATE, TYPE_PROTECTION_WINDOW, OpenUvEntity)
from homeassistant.util.dt import as_local, parse_datetime, utcnow
DEPENDENCIES = ['openuv']
_LOGGER = logging.getLogger(__name__)
ATTR_PROTECTION_WINDOW_STARTING_TIME = 'start_time'
ATTR_PROTECTION_WINDOW_STARTING_UV = 'start_uv'
ATTR_PROTECTION_WINDOW_ENDING_TIME = 'end_time'
ATTR_PROTECTION_WINDOW_ENDING_UV = 'end_uv'
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up an OpenUV sensor based on existing config."""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up an OpenUV sensor based on a config entry."""
openuv = hass.data[DOMAIN][DATA_OPENUV_CLIENT][entry.entry_id]
binary_sensors = []
for sensor_type in openuv.binary_sensor_conditions:
name, icon = BINARY_SENSORS[sensor_type]
binary_sensors.append(
OpenUvBinarySensor(
openuv, sensor_type, name, icon, entry.entry_id))
async_add_entities(binary_sensors, True)
class OpenUvBinarySensor(OpenUvEntity, BinarySensorDevice):
"""Define a binary sensor for OpenUV."""
def __init__(self, openuv, sensor_type, name, icon, entry_id):
"""Initialize the sensor."""
super().__init__(openuv)
self._entry_id = entry_id
self._icon = icon
self._latitude = openuv.client.latitude
self._longitude = openuv.client.longitude
self._name = name
self._dispatch_remove = None
self._sensor_type = sensor_type
self._state = None
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def is_on(self):
"""Return the status of the sensor."""
return self._state
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def unique_id(self) -> str:
"""Return a unique, HASS-friendly identifier for this entity."""
return '{0}_{1}_{2}'.format(
self._latitude, self._longitude, self._sensor_type)
@callback
def _update_data(self):
"""Update the state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Register callbacks."""
self._dispatch_remove = async_dispatcher_connect(
self.hass, TOPIC_UPDATE, self._update_data)
self.async_on_remove(self._dispatch_remove)
async def async_update(self):
"""Update the state."""
data = self.openuv.data[DATA_PROTECTION_WINDOW]['result']
if self._sensor_type == TYPE_PROTECTION_WINDOW:
self._state = parse_datetime(
data['from_time']) <= utcnow() <= parse_datetime(
data['to_time'])
self._attrs.update({
ATTR_PROTECTION_WINDOW_ENDING_TIME:
as_local(parse_datetime(data['to_time'])),
ATTR_PROTECTION_WINDOW_ENDING_UV: data['to_uv'],
ATTR_PROTECTION_WINDOW_STARTING_UV: data['from_uv'],
ATTR_PROTECTION_WINDOW_STARTING_TIME:
as_local(parse_datetime(data['from_time'])),
})
|
[
"homeassistant.util.dt.utcnow",
"homeassistant.util.dt.parse_datetime",
"homeassistant.helpers.dispatcher.async_dispatcher_connect",
"logging.getLogger"
] |
[((672, 699), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (689, 699), False, 'import logging\n'), ((2839, 2907), 'homeassistant.helpers.dispatcher.async_dispatcher_connect', 'async_dispatcher_connect', (['self.hass', 'TOPIC_UPDATE', 'self._update_data'], {}), '(self.hass, TOPIC_UPDATE, self._update_data)\n', (2863, 2907), False, 'from homeassistant.helpers.dispatcher import async_dispatcher_connect\n'), ((3188, 3221), 'homeassistant.util.dt.parse_datetime', 'parse_datetime', (["data['from_time']"], {}), "(data['from_time'])\n", (3202, 3221), False, 'from homeassistant.util.dt import as_local, parse_datetime, utcnow\n'), ((3242, 3250), 'homeassistant.util.dt.utcnow', 'utcnow', ([], {}), '()\n', (3248, 3250), False, 'from homeassistant.util.dt import as_local, parse_datetime, utcnow\n'), ((3254, 3285), 'homeassistant.util.dt.parse_datetime', 'parse_datetime', (["data['to_time']"], {}), "(data['to_time'])\n", (3268, 3285), False, 'from homeassistant.util.dt import as_local, parse_datetime, utcnow\n'), ((3421, 3452), 'homeassistant.util.dt.parse_datetime', 'parse_datetime', (["data['to_time']"], {}), "(data['to_time'])\n", (3435, 3452), False, 'from homeassistant.util.dt import as_local, parse_datetime, utcnow\n'), ((3672, 3705), 'homeassistant.util.dt.parse_datetime', 'parse_datetime', (["data['from_time']"], {}), "(data['from_time'])\n", (3686, 3705), False, 'from homeassistant.util.dt import as_local, parse_datetime, utcnow\n')]
|
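The chained comparison in async_update above is the heart of this sensor: it reads "on" while the current time sits inside [from_time, to_time]. A standalone sketch of that logic, with plain datetime standing in for Home Assistant's parse_datetime/utcnow and invented timestamps:

from datetime import datetime, timezone

data = {"from_time": "2021-06-01T10:00:00+00:00", "to_time": "2021-06-01T16:00:00+00:00"}
now = datetime.now(timezone.utc)
is_on = (datetime.fromisoformat(data["from_time"])
         <= now
         <= datetime.fromisoformat(data["to_time"]))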
from os import environ
from shlex import split
from subprocess import DEVNULL, CalledProcessError, Popen, TimeoutExpired, run
from pitop.common.current_session_info import get_first_display
from pitop.common.logger import PTLogger
def __get_env():
env_plus_display = environ.copy()
first_display = get_first_display()
if first_display is not None:
env_plus_display["DISPLAY"] = first_display
return env_plus_display
def run_command_background(command_str: str, print_output=False) -> Popen:
PTLogger.debug("Function: run_command_background(command_str=%s)" % command_str)
return Popen(
split(command_str),
env=__get_env(),
stderr=None if print_output else DEVNULL,
stdout=None if print_output else DEVNULL,
)
def run_command(
command_str: str,
timeout: int,
check: bool = True,
capture_output: bool = True,
log_errors: bool = True,
lower_priority: bool = False,
) -> str:
PTLogger.debug(
f"Function: run_command(command_str={command_str}, timeout={timeout}, check={check}, capture_output={capture_output}, \
log_errors={log_errors}, lower_priority={lower_priority})"
)
resp_stdout = None
if lower_priority:
command_str = "nice -n 10 " + command_str
try:
resp = run(
split(command_str),
check=check,
capture_output=capture_output,
timeout=timeout,
env=__get_env(),
)
except (CalledProcessError, TimeoutExpired) as e:
if log_errors:
PTLogger.error(str(e))
raise e
except Exception as e:
if log_errors:
PTLogger.error(str(e))
return ""
if capture_output:
resp_stdout = str(resp.stdout, "utf8")
resp_stderr = str(resp.stderr, "utf8")
PTLogger.debug(
f"run_command("
f"command_str='{command_str}', "
f"timeout={timeout}, "
f"check='{check}', "
f"capture_output='{capture_output}'"
f") stdout:\n{resp_stdout}"
)
PTLogger.debug(
f"run_command("
f"command_str='{command_str}', "
f"timeout={timeout}, "
f"check='{check}', "
f"capture_output='{capture_output}'"
f") stderr:\n{resp_stderr}"
)
if not check:
PTLogger.debug(
f"run_command("
f"command_str='{command_str}', "
f"timeout={timeout}, "
f"check='{check}', "
f"capture_output='{capture_output}'"
f") exit code: {resp.returncode}"
)
return resp_stdout
|
[
"pitop.common.current_session_info.get_first_display",
"os.environ.copy",
"pitop.common.logger.PTLogger.debug",
"shlex.split"
] |
[((274, 288), 'os.environ.copy', 'environ.copy', ([], {}), '()\n', (286, 288), False, 'from os import environ\n'), ((309, 328), 'pitop.common.current_session_info.get_first_display', 'get_first_display', ([], {}), '()\n', (326, 328), False, 'from pitop.common.current_session_info import get_first_display\n'), ((524, 609), 'pitop.common.logger.PTLogger.debug', 'PTLogger.debug', (["('Function: run_command_background(command_str=%s)' % command_str)"], {}), "('Function: run_command_background(command_str=%s)' % command_str\n )\n", (538, 609), False, 'from pitop.common.logger import PTLogger\n'), ((976, 1187), 'pitop.common.logger.PTLogger.debug', 'PTLogger.debug', (['f"""Function: run_command(command_str={command_str}, timeout={timeout}, check={check}, capture_output={capture_output}, log_errors={log_errors}, lower_priority={lower_priority})"""'], {}), "(\n f'Function: run_command(command_str={command_str}, timeout={timeout}, check={check}, capture_output={capture_output}, log_errors={log_errors}, lower_priority={lower_priority})'\n )\n", (990, 1187), False, 'from pitop.common.logger import PTLogger\n'), ((632, 650), 'shlex.split', 'split', (['command_str'], {}), '(command_str)\n', (637, 650), False, 'from shlex import split\n'), ((1849, 2015), 'pitop.common.logger.PTLogger.debug', 'PTLogger.debug', (['f"""run_command(command_str=\'{command_str}\', timeout={timeout}, check=\'{check}\', capture_output=\'{capture_output}\') stdout:\n{resp_stdout}"""'], {}), '(\n f"""run_command(command_str=\'{command_str}\', timeout={timeout}, check=\'{check}\', capture_output=\'{capture_output}\') stdout:\n{resp_stdout}"""\n )\n', (1863, 2015), False, 'from pitop.common.logger import PTLogger\n'), ((2113, 2279), 'pitop.common.logger.PTLogger.debug', 'PTLogger.debug', (['f"""run_command(command_str=\'{command_str}\', timeout={timeout}, check=\'{check}\', capture_output=\'{capture_output}\') stderr:\n{resp_stderr}"""'], {}), '(\n f"""run_command(command_str=\'{command_str}\', timeout={timeout}, check=\'{check}\', capture_output=\'{capture_output}\') stderr:\n{resp_stderr}"""\n )\n', (2127, 2279), False, 'from pitop.common.logger import PTLogger\n'), ((2396, 2565), 'pitop.common.logger.PTLogger.debug', 'PTLogger.debug', (['f"""run_command(command_str=\'{command_str}\', timeout={timeout}, check=\'{check}\', capture_output=\'{capture_output}\') exit code: {resp.returncode}"""'], {}), '(\n f"run_command(command_str=\'{command_str}\', timeout={timeout}, check=\'{check}\', capture_output=\'{capture_output}\') exit code: {resp.returncode}"\n )\n', (2410, 2565), False, 'from pitop.common.logger import PTLogger\n'), ((1334, 1352), 'shlex.split', 'split', (['command_str'], {}), '(command_str)\n', (1339, 1352), False, 'from shlex import split\n')]
|
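Hypothetical calls to the two helpers above (the command strings are illustrative, not from the source):

kernel = run_command("uname -r", timeout=5)    # blocks, returns stdout as a str
proc = run_command_background("sleep 10")     # returns a subprocess.Popen immediately
proc.terminate()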
# -*- coding: utf-8 -*-
from convlab2.util.multiwoz.paraphrase_span_detection import phrase_idx_utt
def paraphrase_span_detection(new_text, span_info):
    new_words = new_text.split()
    new_span_info = []
    for span in span_info:
        span_words = span[2].split()
        result = phrase_idx_utt(span_words, new_words)
        if result is not None:
            max_start, max_end = result
            new_span_info.append([span[0], span[1], ' '.join(new_words[max_start:max_end + 1]), max_start, max_end])
    return new_span_info
def span2tuple(span_info):
    t = []
    for span in span_info:
        t.append((span[0].split('-')[1], span[0].split('-')[0], span[1], span[2]))
    return t
|
[
"convlab2.util.multiwoz.paraphrase_span_detection.phrase_idx_utt"
] |
[((289, 326), 'convlab2.util.multiwoz.paraphrase_span_detection.phrase_idx_utt', 'phrase_idx_utt', (['span_words', 'new_words'], {}), '(span_words, new_words)\n', (303, 326), False, 'from convlab2.util.multiwoz.paraphrase_span_detection import phrase_idx_utt\n')]
|
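An illustrative call to paraphrase_span_detection above. The span layout ([domain-intent, slot, value, start, end]) is inferred from how the loop indexes span, and the realigned indices depend on what phrase_idx_utt actually matches:

span_info = [['Hotel-Inform', 'Area', 'centre of town', 5, 7]]
new_text = 'i would like a hotel in the centre of town please'
print(paraphrase_span_detection(new_text, span_info))
# e.g. [['Hotel-Inform', 'Area', 'centre of town', 7, 9]] when the phrase is found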
# Requires pygame (originally written against Python 2.7)
import pygame, sys
pygame.init()
dots = [[221, 432], [225, 331], [133, 342], [141, 310],
[51, 230], [74, 217], [58, 153], [114, 164],
[123, 135], [176, 190], [159, 77], [193, 93],
[230, 28], [267, 93], [301, 77], [284, 190],
[327, 135], [336, 164], [402, 153], [386, 217],
[409, 230], [319, 310], [327, 342], [233, 331],
[237, 432]]
screen = pygame.display.set_mode([640,480])
screen.fill([255, 255, 255])
pygame.draw.lines(screen, [255,0,0],True, dots, 2)
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
|
[
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"pygame.display.flip",
"pygame.draw.lines"
] |
[((49, 62), 'pygame.init', 'pygame.init', ([], {}), '()\n', (60, 62), False, 'import pygame, sys\n'), ((373, 408), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[640, 480]'], {}), '([640, 480])\n', (396, 408), False, 'import pygame, sys\n'), ((438, 491), 'pygame.draw.lines', 'pygame.draw.lines', (['screen', '[255, 0, 0]', '(True)', 'dots', '(2)'], {}), '(screen, [255, 0, 0], True, dots, 2)\n', (455, 491), False, 'import pygame, sys\n'), ((489, 510), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (508, 510), False, 'import pygame, sys\n'), ((628, 641), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (639, 641), False, 'import pygame, sys\n'), ((556, 574), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (572, 574), False, 'import pygame, sys\n')]
|
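pygame.draw.lines with closed=True only traces the outline; if a filled shape were wanted instead, the call could be swapped for the polygon primitive with the same color and point list:

pygame.draw.polygon(screen, [255, 0, 0], dots)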
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import simpy, numpy
from mds_sim import *
from rvs import *
from patch import *
# ######################## Models ######################## #
def ar_ub_fj(n, X):
return float(1/moment_i_n_k(1, n, n, X) )
def E_T_fj(ar, n, X):
# def max_cdf(x):
# return X.cdf(x)**n
# def max_moment(i):
# return mpmath.quad(lambda x: i*x**(i-1) * (1 - max_cdf(x) ), [0, mpmath.inf] )
# return PK(max_moment(1), max_moment(2), ar)
return PK(moment_i_n_k(1, n, n, X), moment_i_n_k(2, n, n, X), ar)
# ######################## Sim ######################## #
def test_fj(num_f_run, ar, n, serv, serv_dist_m):
E_T_f_sum = 0
for f in range(num_f_run):
log(WARNING, "ar= {}, n= {}, serv= {}, serv_dist_m= {}".format(ar, n, serv, serv_dist_m) )
env = simpy.Environment()
pg = PG(env, "pg", ar)
q = MDSQ("mdsq", env, n, range(n), serv, serv_dist_m)
pg.out = q
pg.init()
env.run(until=10*10*50000)
l = q.jsink.st_l
if len(l): E_T_f_sum += float(sum(l) )/len(l)
total_n_wins = sum([n for i, n in q.jsink.qid__num_win_map.items() ] )
print("pg.n_sent= {}, total_n_wins= {}".format(pg.n_sent, total_n_wins) )
qid__win_freq_map = {i:float(n)/total_n_wins for i, n in q.jsink.qid__num_win_map.items() }
print("qid__win_freq_map= {}".format(pprint.pformat(qid__win_freq_map) ) )
E_T = E_T_f_sum/num_f_run
print(">> E_T= {}".format(E_T) )
return E_T
def plot_fj():
n = 2
serv = "Pareto" # "TPareto"
l, u, a = 1, 10**6, 2
if serv == "TPareto":
X = TPareto(l, u, a)
serv_dist_m = {'l':l, 'u':u, 'a':a}
elif serv == "Pareto":
X = Pareto(l, a)
serv_dist_m = {'loc':l, 'a':a}
ar_ub = ar_ub_fj(n, X)
log(WARNING, "n= {}, serv= {}, serv_dist_m= {}, ar_ub= {}".format(n, serv, serv_dist_m, ar_ub) )
E_T_l, E_T_sim_l = [], []
num_f_run = 1
sim = False
if serv == "TPareto":
if n == 22:
pass
else:
sim = True
elif serv == "Pareto":
    if n == 22:  # never true while n = 2 above, so the cached results below are skipped
E_T_sim_l= [
3.7875159802925884,
3.6594505295950768,
4.223943206950012,
4.589334674521958,
6.524796278389641,
5.64633614293259,
7.252958280015537,
8.035109860019876,
8.463351261567757,
39.12300569764332,
11.573032446153153,
13.929789522860153,
14.965936063862987,
20.40743954754556,
27.105625093446594]
else:
sim = True
ar_l = []
for ar in numpy.linspace(0.05, ar_ub, 15):
ar_l.append(ar)
if sim:
E_T_sim_l.append(test_fj(num_f_run, ar, n, serv, serv_dist_m) )
E_T_l.append(E_T_fj(ar, n, X) )
log(WARNING, "E_T_sim_l= {}".format(pprint.pformat(E_T_sim_l) ) )
plot.plot(ar_l, E_T_sim_l, label=r'sim, n={}'.format(n), color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.plot(ar_l, E_T_l, label=r'n={}'.format(n), color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')
plot.legend(prop={'size':12})
plot.xlabel(r'Arrival rate $\lambda$ (Request/s)', fontsize=12)
plot.ylabel(r'Average download time (s)', fontsize=12)
if serv == "TPareto":
serv_in_latex = r'TPareto(l={}, u={}, a={})'.format(l, u, a)
elif serv == "Pareto":
serv_in_latex = r'Pareto(l={}, a={})'.format(l, a)
plot.title(r'$X \sim {}$, $n= {}$'.format(serv_in_latex, n) )
fig = plot.gcf()
def_size = fig.get_size_inches()
fig.set_size_inches(def_size[0]/1.2, def_size[1]/1.2)
fig.tight_layout()
plot.savefig("plot_fj_n_{}.pdf".format(n) )
fig.clear()
log(WARNING, "done; n= {}".format(n) )
if __name__ == "__main__":
plot_fj()
|
[
"matplotlib.pyplot.legend",
"matplotlib.use",
"numpy.linspace",
"simpy.Environment",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((2525, 2556), 'numpy.linspace', 'numpy.linspace', (['(0.05)', 'ar_ub', '(15)'], {}), '(0.05, ar_ub, 15)\n', (2539, 2556), False, 'import simpy, numpy\n'), ((3030, 3060), 'matplotlib.pyplot.legend', 'plot.legend', ([], {'prop': "{'size': 12}"}), "(prop={'size': 12})\n", (3041, 3060), True, 'import matplotlib.pyplot as plot\n'), ((3062, 3125), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Arrival rate $\\\\lambda$ (Request/s)"""'], {'fontsize': '(12)'}), "('Arrival rate $\\\\lambda$ (Request/s)', fontsize=12)\n", (3073, 3125), True, 'import matplotlib.pyplot as plot\n'), ((3128, 3181), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Average download time (s)"""'], {'fontsize': '(12)'}), "('Average download time (s)', fontsize=12)\n", (3139, 3181), True, 'import matplotlib.pyplot as plot\n'), ((3424, 3434), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (3432, 3434), True, 'import matplotlib.pyplot as plot\n'), ((840, 859), 'simpy.Environment', 'simpy.Environment', ([], {}), '()\n', (857, 859), False, 'import simpy, numpy\n')]
|
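For reference, E_T_fj above feeds the first two moments of the n-way maximum into PK (imported from patch, so its body is not shown). A Pollaczek-Khinchine helper with the signature those call sites assume would look like this sketch, i.e. the M/G/1 mean response time, with None signalling an unstable queue:

def PK(ES, ES2, ar):
  ro = ar * ES                            # server utilization rho
  if ro >= 1:
    return None                           # unstable: no steady state
  return ES + ar * ES2 / (2 * (1 - ro))  # E[T] = E[S] + ar*E[S^2] / (2*(1 - rho))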
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: Gear Generator
# Purpose: Just for fun
#
# Author: <NAME>
# Email: <EMAIL>
# Web: https://sites.google.com/view/interpolation/home
#
# Created: 25/06/2021
# Copyright: (c) astros 2021
# Licence: MIT
# Based on: Gear Drawing with Bézier Curves (https://www.arc.id.au/GearDrawing.html)
# -------------------------------------------------------------------------------
#
# Releases:
# 0.1: First Release
# ______________________________________________________________________________________
import sys
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QApplication, QMainWindow
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MplCanvas(FigureCanvas):
def __init__(self, parent=None, width=5, height=4, dpi=150):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
super(MplCanvas, self).__init__(fig)
class MplWidget(QWidget):
def __init__(self, parent=None, values=0):
QWidget.__init__(self, parent)
gear_location = values[0]
gear_outline = values[1]
fig = Figure()
fig.tight_layout()
        self.canvas = MplCanvas(fig)  # NB: fig fills MplCanvas's 'parent' slot; the canvas builds its own Figure
vertical_layout = QVBoxLayout()
vertical_layout.addWidget(self.canvas)
self.canvas.axes.clear()
if isinstance(gear_outline, list):
x = 0
y = 1
for i in range(len(gear_outline)):
x_gear = []
y_gear = []
rr = False # Switch to detect difference between gear format and Radius (shaft or rim) format data
# print('valor I: ', i)
for j in range(len(gear_outline[i][1])):
# print('valor J:', j)
if isinstance(gear_outline[i][1][j], list) and rr == False:
x_gear.append(gear_outline[i][1][j][x])
y_gear.append(gear_outline[i][1][j][y])
elif gear_outline[i][1][j] == 'R':
radio = gear_outline[i][1][j + 1]
x_location = gear_location[i][1][x]
y_location = gear_location[i][1][y]
rr = True
self.canvas.axes.add_patch(plt.Circle((x_location, y_location), radio, fill=False))
self.canvas.axes.plot(x_gear, y_gear)
print('print value ' + str(i))
print(x_gear)
# self.canvas.axes = self.canvas.figure.add_subplot(111)
# self.canvas.axes.plot([0,1,2,3,4], [10,1,20,3,40])
self.setLayout(vertical_layout)
# class MainWindow(QMainWindow):
# def __init__(self, *args, **kwargs):
# super(MainWindow, self).__init__(*args, **kwargs)
# # Create the matplotlib FigureCanvas object,
# # which defines a single set of axes as self.axes.
# sc = MplWidget(self, width=5, height=4, dpi=100)
# sc.axes.plot([0,1,2,3,4], [10,1,20,3,40])
# self.setCentralWidget(sc)
# class MainWindow(QMainWindow):
# def __init__(self):
# super(MainWindow, self).__init__()
# self.layout = QVBoxLayout()
# self.mplW = MplWidget()
# self.layout.addWidget(self.mplW)
# app = QApplication(sys.argv)
# window = MainWindow()
# window.show()
# app.exec()
|
[
"PyQt5.QtWidgets.QVBoxLayout",
"matplotlib.figure.Figure",
"PyQt5.QtWidgets.QWidget.__init__",
"matplotlib.pyplot.Circle"
] |
[((1072, 1112), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (1078, 1112), False, 'from matplotlib.figure import Figure\n'), ((1289, 1319), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1305, 1319), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QApplication, QMainWindow\n'), ((1409, 1417), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (1415, 1417), False, 'from matplotlib.figure import Figure\n'), ((1513, 1526), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (1524, 1526), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QApplication, QMainWindow\n'), ((2590, 2645), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x_location, y_location)', 'radio'], {'fill': '(False)'}), '((x_location, y_location), radio, fill=False)\n', (2600, 2645), True, 'import matplotlib.pyplot as plt\n')]
|
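A minimal, hypothetical input for MplWidget, reverse-engineered from its parsing loop: values[0] carries per-part centre coordinates and values[1] carries either [x, y] outline points or an 'R'/radius pair (here, a unit square plus a shaft circle):

app = QApplication(sys.argv)
outline = [('gear', [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]),
           ('shaft', ['R', 0.5])]
location = [('gear', [0, 0]), ('shaft', [0, 0])]
widget = MplWidget(values=(location, outline))
widget.show()
app.exec_()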
"""
A file to contain specific logic to handle version upgrades in Kolibri.
"""
import logging
import os
from le_utils.constants import content_kinds
from sqlalchemy import and_
from sqlalchemy import cast
from sqlalchemy import exists
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy.exc import DatabaseError
from kolibri.core.content.apps import KolibriContentConfig
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.utils.annotation import set_content_visibility_from_disk
from kolibri.core.content.utils.channel_import import FutureSchemaError
from kolibri.core.content.utils.channel_import import import_channel_from_local_db
from kolibri.core.content.utils.channel_import import InvalidSchemaVersionError
from kolibri.core.content.utils.channels import get_channel_ids_for_content_database_dir
from kolibri.core.content.utils.paths import get_content_database_dir_path
from kolibri.core.content.utils.paths import get_content_database_file_path
from kolibri.core.content.utils.sqlalchemybridge import Bridge
from kolibri.core.upgrade import version_upgrade
logger = logging.getLogger(__name__)
# Only bother doing this if we are moving from
# a version of Kolibri before we imported
# content databases.
@version_upgrade(old_version="<0.6.0")
def import_external_content_dbs():
"""
If we are potentially moving from a version of Kolibri that did not import its content data,
scan through the content database folder for all channel content databases,
and pull the data from each database if we have not already imported it.
"""
channel_ids = get_channel_ids_for_content_database_dir(
get_content_database_dir_path()
)
for channel_id in channel_ids:
if not ChannelMetadata.objects.filter(id=channel_id).exists():
try:
import_channel_from_local_db(channel_id)
set_content_visibility_from_disk(channel_id)
except (InvalidSchemaVersionError, FutureSchemaError):
logger.warning(
"Tried to import channel {channel_id}, but database file was incompatible".format(
channel_id=channel_id
)
)
except DatabaseError:
logger.warning(
"Tried to import channel {channel_id}, but database file was corrupted.".format(
channel_id=channel_id
)
)
# This issue was fixed by 0.9.2, so only do this
# when upgrading from versions prior to this.
@version_upgrade(old_version="<0.9.2")
def fix_multiple_trees_with_tree_id1():
# Do a check for improperly imported ContentNode trees
# These trees have been naively imported, and so there are multiple trees
# with tree_ids set to 1. Just check the root nodes to reduce the query size.
tree_id_one_channel_ids = ContentNode.objects.filter(
parent=None, tree_id=1
).values_list("channel_id", flat=True)
if len(tree_id_one_channel_ids) > 1:
logger.warning("Improperly imported channels discovered")
# There is more than one channel with a tree_id of 1
# Find which channel has the most content nodes, and then delete and reimport the rest.
channel_sizes = {}
for channel_id in tree_id_one_channel_ids:
channel_sizes[channel_id] = ContentNode.objects.filter(
channel_id=channel_id
).count()
# Get sorted list of ids by increasing number of nodes
sorted_channel_ids = sorted(channel_sizes, key=channel_sizes.get)
# Loop through all but the largest channel, delete and reimport
count = 0
for channel_id in sorted_channel_ids[:-1]:
# Double check that we have a content db to import from before deleting any metadata
if os.path.exists(get_content_database_file_path(channel_id)):
logger.warning(
"Deleting and reimporting channel metadata for {channel_id}".format(
channel_id=channel_id
)
)
ChannelMetadata.objects.get(
id=channel_id
).delete_content_tree_and_files()
import_channel_from_local_db(channel_id)
logger.info(
"Successfully reimported channel metadata for {channel_id}".format(
channel_id=channel_id
)
)
count += 1
else:
logger.warning(
"Attempted to reimport channel metadata for channel {channel_id} but no content database found".format(
channel_id=channel_id
)
)
if count:
logger.info(
"Successfully reimported channel metadata for {count} channels".format(
count=count
)
)
failed_count = len(sorted_channel_ids) - 1 - count
if failed_count:
logger.warning(
"Failed to reimport channel metadata for {count} channels".format(
count=failed_count
)
)
# This was introduced in 0.12.4, so only annotate
# when upgrading from versions prior to this.
@version_upgrade(old_version="<0.12.4")
def update_num_coach_contents():
"""
Function to set num_coach_content on all topic trees to account for
those that were imported before annotations were performed
"""
bridge = Bridge(app_name=KolibriContentConfig.label)
ContentNodeClass = bridge.get_class(ContentNode)
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
child = ContentNodeTable.alias()
logger.info("Updating num_coach_content on existing channels")
# start a transaction
trans = connection.begin()
# Update all leaf ContentNodes to have num_coach_content to 1 or 0
connection.execute(
ContentNodeTable.update()
.where(
# That are not topics
ContentNodeTable.c.kind
!= content_kinds.TOPIC
)
.values(num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()))
)
# Expression to capture all available child nodes of a contentnode
available_nodes = select([child.c.available]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of coach contents for each child node
# of a contentnode
coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
for channel_id in ChannelMetadata.objects.all().values_list("id", flat=True):
node_depth = (
bridge.session.query(func.max(ContentNodeClass.level))
.filter_by(channel_id=channel_id)
.scalar()
)
# Go from the deepest level to the shallowest
for level in range(node_depth, 0, -1):
# Only modify topic availability here
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == level - 1,
ContentNodeTable.c.channel_id == channel_id,
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
# Because we have set availability to False on all topics as a starting point
# we only need to make updates to topics with available children.
.where(exists(available_nodes))
.values(num_coach_contents=coach_content_num)
)
# commit the transaction
trans.commit()
bridge.end()
# This was introduced in 0.13.0, so only annotate
# when upgrading from versions prior to this.
@version_upgrade(old_version="<0.13.0")
def update_on_device_resources():
"""
Function to set on_device_resource on all topic trees to account for
those that were imported before annotations were performed
"""
bridge = Bridge(app_name=KolibriContentConfig.label)
ContentNodeClass = bridge.get_class(ContentNode)
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
child = ContentNodeTable.alias()
logger.info("Updating on_device_resource on existing channels")
# start a transaction
trans = connection.begin()
# Update all leaf ContentNodes to have on_device_resource to 1 or 0
connection.execute(
ContentNodeTable.update()
.where(
# That are not topics
ContentNodeTable.c.kind
!= content_kinds.TOPIC
)
.values(on_device_resources=cast(ContentNodeTable.c.available, Integer()))
)
# Expression to capture all available child nodes of a contentnode
available_nodes = select([child.c.available]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of coach contents for each child node
# of a contentnode
on_device_num = select([func.sum(child.c.on_device_resources)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
for channel_id in ChannelMetadata.objects.all().values_list("id", flat=True):
node_depth = (
bridge.session.query(func.max(ContentNodeClass.level))
.filter_by(channel_id=channel_id)
.scalar()
)
# Go from the deepest level to the shallowest
for level in range(node_depth, 0, -1):
# Only modify topic availability here
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == level - 1,
ContentNodeTable.c.channel_id == channel_id,
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
# Because we have set availability to False on all topics as a starting point
# we only need to make updates to topics with available children.
.where(exists(available_nodes))
.values(on_device_resources=on_device_num)
)
# commit the transaction
trans.commit()
bridge.end()
|
[
"kolibri.core.content.models.ContentNode.objects.filter",
"sqlalchemy.func.sum",
"kolibri.core.content.models.ChannelMetadata.objects.get",
"kolibri.core.content.models.ChannelMetadata.objects.all",
"sqlalchemy.exists",
"sqlalchemy.and_",
"sqlalchemy.select",
"sqlalchemy.Integer",
"sqlalchemy.func.max",
"kolibri.core.content.utils.channel_import.import_channel_from_local_db",
"kolibri.core.content.models.ChannelMetadata.objects.filter",
"kolibri.core.content.utils.annotation.set_content_visibility_from_disk",
"kolibri.core.content.utils.paths.get_content_database_dir_path",
"kolibri.core.upgrade.version_upgrade",
"kolibri.core.content.utils.paths.get_content_database_file_path",
"logging.getLogger",
"kolibri.core.content.utils.sqlalchemybridge.Bridge"
] |
[((1216, 1243), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1233, 1243), False, 'import logging\n'), ((1357, 1394), 'kolibri.core.upgrade.version_upgrade', 'version_upgrade', ([], {'old_version': '"""<0.6.0"""'}), "(old_version='<0.6.0')\n", (1372, 1394), False, 'from kolibri.core.upgrade import version_upgrade\n'), ((2686, 2723), 'kolibri.core.upgrade.version_upgrade', 'version_upgrade', ([], {'old_version': '"""<0.9.2"""'}), "(old_version='<0.9.2')\n", (2701, 2723), False, 'from kolibri.core.upgrade import version_upgrade\n'), ((5479, 5517), 'kolibri.core.upgrade.version_upgrade', 'version_upgrade', ([], {'old_version': '"""<0.12.4"""'}), "(old_version='<0.12.4')\n", (5494, 5517), False, 'from kolibri.core.upgrade import version_upgrade\n'), ((8245, 8283), 'kolibri.core.upgrade.version_upgrade', 'version_upgrade', ([], {'old_version': '"""<0.13.0"""'}), "(old_version='<0.13.0')\n", (8260, 8283), False, 'from kolibri.core.upgrade import version_upgrade\n'), ((5715, 5758), 'kolibri.core.content.utils.sqlalchemybridge.Bridge', 'Bridge', ([], {'app_name': 'KolibriContentConfig.label'}), '(app_name=KolibriContentConfig.label)\n', (5721, 5758), False, 'from kolibri.core.content.utils.sqlalchemybridge import Bridge\n'), ((8483, 8526), 'kolibri.core.content.utils.sqlalchemybridge.Bridge', 'Bridge', ([], {'app_name': 'KolibriContentConfig.label'}), '(app_name=KolibriContentConfig.label)\n', (8489, 8526), False, 'from kolibri.core.content.utils.sqlalchemybridge import Bridge\n'), ((1768, 1799), 'kolibri.core.content.utils.paths.get_content_database_dir_path', 'get_content_database_dir_path', ([], {}), '()\n', (1797, 1799), False, 'from kolibri.core.content.utils.paths import get_content_database_dir_path\n'), ((6564, 6639), 'sqlalchemy.and_', 'and_', (['(child.c.available == True)', '(ContentNodeTable.c.id == child.c.parent_id)'], {}), '(child.c.available == True, ContentNodeTable.c.id == child.c.parent_id)\n', (6568, 6639), False, 'from sqlalchemy import and_\n'), ((6881, 6956), 'sqlalchemy.and_', 'and_', (['(child.c.available == True)', '(ContentNodeTable.c.id == child.c.parent_id)'], {}), '(child.c.available == True, ContentNodeTable.c.id == child.c.parent_id)\n', (6885, 6956), False, 'from sqlalchemy import and_\n'), ((9331, 9406), 'sqlalchemy.and_', 'and_', (['(child.c.available == True)', '(ContentNodeTable.c.id == child.c.parent_id)'], {}), '(child.c.available == True, ContentNodeTable.c.id == child.c.parent_id)\n', (9335, 9406), False, 'from sqlalchemy import and_\n'), ((9645, 9720), 'sqlalchemy.and_', 'and_', (['(child.c.available == True)', '(ContentNodeTable.c.id == child.c.parent_id)'], {}), '(child.c.available == True, ContentNodeTable.c.id == child.c.parent_id)\n', (9649, 9720), False, 'from sqlalchemy import and_\n'), ((3013, 3063), 'kolibri.core.content.models.ContentNode.objects.filter', 'ContentNode.objects.filter', ([], {'parent': 'None', 'tree_id': '(1)'}), '(parent=None, tree_id=1)\n', (3039, 3063), False, 'from kolibri.core.content.models import ContentNode\n'), ((6521, 6548), 'sqlalchemy.select', 'select', (['[child.c.available]'], {}), '([child.c.available])\n', (6527, 6548), False, 'from sqlalchemy import select\n'), ((7029, 7058), 'kolibri.core.content.models.ChannelMetadata.objects.all', 'ChannelMetadata.objects.all', ([], {}), '()\n', (7056, 7058), False, 'from kolibri.core.content.models import ChannelMetadata\n'), ((9288, 9315), 'sqlalchemy.select', 'select', (['[child.c.available]'], {}), '([child.c.available])\n', (9294, 9315), 
False, 'from sqlalchemy import select\n'), ((9793, 9822), 'kolibri.core.content.models.ChannelMetadata.objects.all', 'ChannelMetadata.objects.all', ([], {}), '()\n', (9820, 9822), False, 'from kolibri.core.content.models import ChannelMetadata\n'), ((1945, 1985), 'kolibri.core.content.utils.channel_import.import_channel_from_local_db', 'import_channel_from_local_db', (['channel_id'], {}), '(channel_id)\n', (1973, 1985), False, 'from kolibri.core.content.utils.channel_import import import_channel_from_local_db\n'), ((2002, 2046), 'kolibri.core.content.utils.annotation.set_content_visibility_from_disk', 'set_content_visibility_from_disk', (['channel_id'], {}), '(channel_id)\n', (2034, 2046), False, 'from kolibri.core.content.utils.annotation import set_content_visibility_from_disk\n'), ((3991, 4033), 'kolibri.core.content.utils.paths.get_content_database_file_path', 'get_content_database_file_path', (['channel_id'], {}), '(channel_id)\n', (4021, 4033), False, 'from kolibri.core.content.utils.paths import get_content_database_file_path\n'), ((4388, 4428), 'kolibri.core.content.utils.channel_import.import_channel_from_local_db', 'import_channel_from_local_db', (['channel_id'], {}), '(channel_id)\n', (4416, 4428), False, 'from kolibri.core.content.utils.channel_import import import_channel_from_local_db\n'), ((1856, 1901), 'kolibri.core.content.models.ChannelMetadata.objects.filter', 'ChannelMetadata.objects.filter', ([], {'id': 'channel_id'}), '(id=channel_id)\n', (1886, 1901), False, 'from kolibri.core.content.models import ChannelMetadata\n'), ((3497, 3546), 'kolibri.core.content.models.ContentNode.objects.filter', 'ContentNode.objects.filter', ([], {'channel_id': 'channel_id'}), '(channel_id=channel_id)\n', (3523, 3546), False, 'from kolibri.core.content.models import ContentNode\n'), ((6409, 6418), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (6416, 6418), False, 'from sqlalchemy import Integer\n'), ((6827, 6863), 'sqlalchemy.func.sum', 'func.sum', (['child.c.num_coach_contents'], {}), '(child.c.num_coach_contents)\n', (6835, 6863), False, 'from sqlalchemy import func\n'), ((9176, 9185), 'sqlalchemy.Integer', 'Integer', ([], {}), '()\n', (9183, 9185), False, 'from sqlalchemy import Integer\n'), ((9590, 9627), 'sqlalchemy.func.sum', 'func.sum', (['child.c.on_device_resources'], {}), '(child.c.on_device_resources)\n', (9598, 9627), False, 'from sqlalchemy import func\n'), ((4259, 4301), 'kolibri.core.content.models.ChannelMetadata.objects.get', 'ChannelMetadata.objects.get', ([], {'id': 'channel_id'}), '(id=channel_id)\n', (4286, 4301), False, 'from kolibri.core.content.models import ChannelMetadata\n'), ((7146, 7178), 'sqlalchemy.func.max', 'func.max', (['ContentNodeClass.level'], {}), '(ContentNodeClass.level)\n', (7154, 7178), False, 'from sqlalchemy import func\n'), ((7978, 8001), 'sqlalchemy.exists', 'exists', (['available_nodes'], {}), '(available_nodes)\n', (7984, 8001), False, 'from sqlalchemy import exists\n'), ((9910, 9942), 'sqlalchemy.func.max', 'func.max', (['ContentNodeClass.level'], {}), '(ContentNodeClass.level)\n', (9918, 9942), False, 'from sqlalchemy import func\n'), ((10742, 10765), 'sqlalchemy.exists', 'exists', (['available_nodes'], {}), '(available_nodes)\n', (10748, 10765), False, 'from sqlalchemy import exists\n'), ((7529, 7669), 'sqlalchemy.and_', 'and_', (['(ContentNodeTable.c.level == level - 1)', '(ContentNodeTable.c.channel_id == channel_id)', '(ContentNodeTable.c.kind == content_kinds.TOPIC)'], {}), '(ContentNodeTable.c.level == level - 1, 
ContentNodeTable.c.channel_id ==\n channel_id, ContentNodeTable.c.kind == content_kinds.TOPIC)\n', (7533, 7669), False, 'from sqlalchemy import and_\n'), ((10293, 10433), 'sqlalchemy.and_', 'and_', (['(ContentNodeTable.c.level == level - 1)', '(ContentNodeTable.c.channel_id == channel_id)', '(ContentNodeTable.c.kind == content_kinds.TOPIC)'], {}), '(ContentNodeTable.c.level == level - 1, ContentNodeTable.c.channel_id ==\n channel_id, ContentNodeTable.c.kind == content_kinds.TOPIC)\n', (10297, 10433), False, 'from sqlalchemy import and_\n')]
|
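update_num_coach_contents and update_on_device_resources above both lean on the same SQLAlchemy pattern: a correlated scalar subquery that aggregates over a node's children and is written onto the parent via .values(...). A self-contained toy version of that pattern (illustrative table, legacy select([...]) style to match the file, SQLAlchemy 1.x assumed):

from sqlalchemy import (Column, Integer, MetaData, Table, create_engine,
                        func, select)

engine = create_engine('sqlite://')
md = MetaData()
nodes = Table('nodes', md,
              Column('id', Integer, primary_key=True),
              Column('parent_id', Integer),
              Column('n', Integer))
md.create_all(engine)
child = nodes.alias()
child_sum = select([func.sum(child.c.n)]).where(child.c.parent_id == nodes.c.id)
with engine.begin() as conn:
    conn.execute(nodes.insert(), [{'id': 1, 'parent_id': None, 'n': 0},
                                   {'id': 2, 'parent_id': 1, 'n': 3},
                                   {'id': 3, 'parent_id': 1, 'n': 4}])
    conn.execute(nodes.update()
                 .where(nodes.c.parent_id.is_(None))
                 .values(n=child_sum))  # the root row ends up with n == 7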
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers import preprocessing
class GrayscaleTest(tf.test.TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, [2, 512, 512, 1])
self.assertEqual(xs2.shape, [2, 512, 512, 3])
def test_in_tf_function(self):
xs = tf.cast(
tf.stack([2 * tf.ones((100, 100, 3)), tf.ones((100, 100, 3))], axis=0),
tf.float32,
)
# test 1
layer = preprocessing.Grayscale(
output_channels=1,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs1 = augment(xs)
# test 2
layer = preprocessing.Grayscale(
output_channels=3,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs2 = augment(xs)
self.assertEqual(xs1.shape, [2, 100, 100, 1])
self.assertEqual(xs2.shape, [2, 100, 100, 3])
def test_non_square_image(self):
xs = tf.cast(
tf.stack([2 * tf.ones((512, 1024, 3)), tf.ones((512, 1024, 3))], axis=0),
tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, [2, 512, 1024, 1])
self.assertEqual(xs2.shape, [2, 512, 1024, 3])
def test_in_single_image(self):
xs = tf.cast(
tf.ones((512, 512, 3)),
dtype=tf.float32,
)
layer = preprocessing.Grayscale(
output_channels=1,
)
xs1 = layer(xs, training=True)
layer = preprocessing.Grayscale(
output_channels=3,
)
xs2 = layer(xs, training=True)
self.assertEqual(xs1.shape, [512, 512, 1])
self.assertEqual(xs2.shape, [512, 512, 3])
|
[
"tensorflow.ones",
"keras_cv.layers.preprocessing.Grayscale"
] |
[((739, 764), 'tensorflow.ones', 'tf.ones', (['(2, 512, 512, 3)'], {}), '((2, 512, 512, 3))\n', (746, 764), True, 'import tensorflow as tf\n'), ((782, 824), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(1)'}), '(output_channels=1)\n', (805, 824), False, 'from keras_cv.layers import preprocessing\n'), ((904, 946), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(3)'}), '(output_channels=3)\n', (927, 946), False, 'from keras_cv.layers import preprocessing\n'), ((1328, 1370), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(1)'}), '(output_channels=1)\n', (1351, 1370), False, 'from keras_cv.layers import preprocessing\n'), ((1544, 1586), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(3)'}), '(output_channels=3)\n', (1567, 1586), False, 'from keras_cv.layers import preprocessing\n'), ((2032, 2074), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(1)'}), '(output_channels=1)\n', (2055, 2074), False, 'from keras_cv.layers import preprocessing\n'), ((2154, 2196), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(3)'}), '(output_channels=3)\n', (2177, 2196), False, 'from keras_cv.layers import preprocessing\n'), ((2522, 2564), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(1)'}), '(output_channels=1)\n', (2545, 2564), False, 'from keras_cv.layers import preprocessing\n'), ((2644, 2686), 'keras_cv.layers.preprocessing.Grayscale', 'preprocessing.Grayscale', ([], {'output_channels': '(3)'}), '(output_channels=3)\n', (2667, 2686), False, 'from keras_cv.layers import preprocessing\n'), ((2441, 2463), 'tensorflow.ones', 'tf.ones', (['(512, 512, 3)'], {}), '((512, 512, 3))\n', (2448, 2463), True, 'import tensorflow as tf\n'), ((1226, 1248), 'tensorflow.ones', 'tf.ones', (['(100, 100, 3)'], {}), '((100, 100, 3))\n', (1233, 1248), True, 'import tensorflow as tf\n'), ((1946, 1969), 'tensorflow.ones', 'tf.ones', (['(512, 1024, 3)'], {}), '((512, 1024, 3))\n', (1953, 1969), True, 'import tensorflow as tf\n'), ((1202, 1224), 'tensorflow.ones', 'tf.ones', (['(100, 100, 3)'], {}), '((100, 100, 3))\n', (1209, 1224), True, 'import tensorflow as tf\n'), ((1921, 1944), 'tensorflow.ones', 'tf.ones', (['(512, 1024, 3)'], {}), '((512, 1024, 3))\n', (1928, 1944), True, 'import tensorflow as tf\n')]
|
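The layer under test is also usable standalone; this mirrors the shapes the assertions above check:

import tensorflow as tf
from keras_cv.layers import preprocessing

images = tf.random.uniform((4, 64, 64, 3))
gray = preprocessing.Grayscale(output_channels=1)(images)
print(gray.shape)  # (4, 64, 64, 1)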
"""
Program: randomnumber.py
Author: <NAME>
Date: October 9, 2017
"""
import random
smaller = int(input("Enter the smaller number: "))
larger = int(input("Enter the larger number: "))
myNumber = random.randint(smaller, larger)
count = 0
while True:
count += 1
userNumber = int(input("Enter your guess: "))
if userNumber < myNumber:
print("Too small")
elif userNumber > myNumber:
print("Too large")
else:
print("Congratulations! You've got it in", count, "tries!")
break
|
[
"random.randint"
] |
[((202, 233), 'random.randint', 'random.randint', (['smaller', 'larger'], {}), '(smaller, larger)\n', (216, 233), False, 'import random\n')]
|
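Since every 'Too small'/'Too large' reply can halve the remaining range, an optimal guesser needs at most ceil(log2(range size)) tries; a quick check (helper name is illustrative, not from the source):

import math

def max_guesses(smaller, larger):
    return math.ceil(math.log2(larger - smaller + 1))

print(max_guesses(1, 100))  # 7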
import uuid
class Team:
"""
A seeded entry in the tournament. Create these through the Tournament class.
"""
def __init__(self, seed, players = None, **kwargs):
"""
Team data:
id: BrawlBracket id (uuid)
seed: Tournament seeding (int)
name: Team name (string)
players: Players on this team (list of Player)
Tournament data:
eliminated: Has this team been eliminated (boolean)
checkedIn: Has this team checked in (boolean)
"""
self.id = kwargs.get('uuid', uuid.uuid1())
self.seed = seed
self.name = kwargs.get('name', '')
        # Player list; mutate it through addPlayer/removePlayer below.
if players is None:
self.players = []
else:
self.players = players
self.eliminated = False
self.checkedIn = False
def __setattr__(self, name, value):
"""
Override default setting value functionality to let us send things to
the database on updates.
"""
super().__setattr__(name, value)
# Could be picky about names of vars changing where we don't want to
# write out to the database
if name in ['_dbCallback']:
return
if '_dbCallback' in self.__dict__ and self._dbCallback is not None:
self._dbCallback(self)
def __repr__(self):
return '{} ({})'.format(self.name, self.seed)
def addPlayer(self, player):
"""
Add player to this team.
"""
if player in self.players:
return
self.players.append(player)
if '_dbCallback' in self.__dict__ and self._dbCallback is not None:
self._dbCallback(self)
def removePlayer(self, player):
"""
Remove player from this team.
"""
if player not in self.players:
return
self.players.remove(player)
if '_dbCallback' in self.__dict__ and self._dbCallback is not None:
self._dbCallback(self)
|
[
"uuid.uuid1"
] |
[((574, 586), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (584, 586), False, 'import uuid\n')]
|
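The docstring says to create teams through the Tournament class; constructing one directly is still handy for illustration (plain strings stand in for Player objects here):

team = Team(seed=1, name='Red')
team.addPlayer('alice')
team.removePlayer('alice')
print(team)  # -> Red (1)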
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""APIs to train an on-device recommendation model."""
import collections
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.api import mm_export
from tensorflow_examples.lite.model_maker.core.data_util import data_util
from tensorflow_examples.lite.model_maker.core.data_util import recommendation_config
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import custom_model
from tensorflow_examples.lite.model_maker.core.task import model_util
from tensorflow_examples.lite.model_maker.core.task.model_spec import recommendation_spec
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import input_pipeline
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import metrics as _metrics
from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher as _launcher
@mm_export('recommendation.Recommendation')
class Recommendation(custom_model.CustomModel):
"""Recommendation task class."""
DEFAULT_EXPORT_FORMAT = (ExportFormat.TFLITE,)
ALLOWED_EXPORT_FORMAT = (ExportFormat.LABEL, ExportFormat.TFLITE,
ExportFormat.SAVED_MODEL)
  # ID = 0 is a placeholder for OOV. Used for padding.
OOV_ID = 0
def __init__(self,
model_spec,
model_dir,
shuffle=True,
learning_rate=0.1,
gradient_clip_norm=1.0):
"""Init recommendation model.
Args:
model_spec: recommendation model spec.
model_dir: str, path to export model checkpoints and summaries.
shuffle: boolean, whether the training data should be shuffled.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
"""
if not isinstance(model_spec, recommendation_spec.RecommendationSpec):
raise ValueError(
'Expect RecommendationSpec but got model_spec: {}'.format(model_spec))
self._model_dir = model_dir
self._learning_rate = learning_rate
self._gradient_clip_norm = gradient_clip_norm
super(Recommendation, self).__init__(model_spec, shuffle=shuffle)
@property
def input_spec(self) -> recommendation_config.InputSpec:
return self.model_spec.input_spec
@property
def model_hparams(self) -> recommendation_config.ModelHParams:
return self.model_spec.model_hparams
def create_model(self, do_train=True):
"""Creates a model.
Args:
do_train: boolean. Whether to train the model.
Returns:
Keras model.
"""
self.model = self.model_spec.create_model()
if do_train:
_launcher.compile_model(self.model, self.model_hparams.eval_top_k,
self._learning_rate, self._gradient_clip_norm)
def train(self,
train_data,
validation_data=None,
batch_size=16,
steps_per_epoch=100,
epochs=1):
"""Feeds the training data for training.
Args:
train_data: Training dataset.
validation_data: Validation data. If None, skips validation process.
batch_size: int, the batch size.
steps_per_epoch: int, the step of each epoch.
epochs: int, number of epochs.
Returns:
History from model.fit().
"""
batch_size = batch_size if batch_size else self.model_spec.batch_size
train_ds = train_data.gen_dataset(
batch_size, is_training=True, shuffle=self.shuffle)
if validation_data:
validation_ds = validation_data.gen_dataset(batch_size, is_training=False)
else:
validation_ds = None
self.create_model(do_train=True)
history = self.model.fit(
x=train_ds,
validation_data=validation_ds,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=self._keras_callbacks(self._model_dir))
tf.get_logger().info(history)
return history
def evaluate(self, data, batch_size=10):
"""Evaluate the model.
Args:
data: Evaluation data.
batch_size: int, batch size for evaluation.
Returns:
History from model.evaluate().
"""
batch_size = batch_size if batch_size else self.model_spec.batch_size
eval_ds = data.gen_dataset(batch_size, is_training=False)
history = self.model.evaluate(eval_ds)
tf.get_logger().info(history)
return history
def _keras_callbacks(self, model_dir):
"""Returns a list of default keras callbacks for `model.fit`."""
return _launcher.get_callbacks(self.model, model_dir)
def _get_serve_fn(self, keras_model):
"""Gets serve fn for exporting model."""
input_specs = input_pipeline.get_serving_input_specs(self.input_spec)
return keras_model.serve.get_concrete_function(**input_specs)
def _export_tflite(self, tflite_filepath):
"""Exports tflite model."""
serve_fn = self._get_serve_fn(self.model)
# Providing trackable objects is now recommended since it will make the
# concrete function conversion API be based on the new SavedModel importer,
# which will enable new TensorFlow Lite features including variable support,
# resources and variant tensor, and signature concept.
if float('.'.join(tf.__version__.split('.')[:2])) >= 2.7:
converter = tf.lite.TFLiteConverter.from_concrete_functions([serve_fn],
self.model)
else:
converter = tf.lite.TFLiteConverter.from_concrete_functions([serve_fn])
tflite_model = converter.convert()
with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
f.write(tflite_model)
def _export_saved_model(self, filepath):
serve_fn = self._get_serve_fn(self.model)
signatures = {tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: serve_fn}
tf.saved_model.save(self.model, export_dir=filepath, signatures=signatures)
def evaluate_tflite(self, tflite_filepath, data):
"""Evaluates the tflite model.
The data is padded to required length, and multiple metrics are evaluated.
Args:
tflite_filepath: File path to the TFLite model.
data: Data to be evaluated.
Returns:
Dict of (metric, value), evaluation result of TFLite model.
"""
label_name = self.input_spec.label_feature.feature_name
lite_runner = model_util.get_lite_runner(tflite_filepath, self.model_spec)
ds = data.gen_dataset(batch_size=1, is_training=False)
max_output_size = data.max_vocab_id + 1 # +1 because 0 is reserved for OOV.
eval_top_k = self.model_hparams.eval_top_k
metrics = [
_metrics.GlobalRecall(top_k=k, name=f'Global_Recall/Recall_{k}')
for k in eval_top_k
]
for feature, y_true in data_util.generate_elements(ds):
feature.pop(label_name)
x = feature
ids, scores = lite_runner.run(x)
# y_true: shape [1, 1]
# y_pred: shape [1, max_output_size]; fill only scores with top-k ids.
y_pred = np.zeros([1, max_output_size])
for i, score in zip(ids, scores):
if i in data.vocab: # Only set if id is in vocab.
y_pred[0, i] = score
# Update metrics.
for m in metrics:
m.update_state(y_true, y_pred)
result = collections.OrderedDict([(m.name, m.result()) for m in metrics])
return result
@classmethod
def create(cls,
train_data,
model_spec: recommendation_spec.RecommendationSpec,
model_dir: str = None,
validation_data=None,
batch_size: int = 16,
steps_per_epoch: int = 10000,
epochs: int = 1,
learning_rate: float = 0.1,
gradient_clip_norm: float = 1.0,
shuffle: bool = True,
do_train: bool = True):
"""Loads data and train the model for recommendation.
Args:
train_data: Training data.
model_spec: ModelSpec, Specification for the model.
model_dir: str, path to export model checkpoints and summaries.
validation_data: Validation data.
batch_size: Batch size for training.
steps_per_epoch: int, Number of step per epoch.
epochs: int, Number of epochs for training.
learning_rate: float, learning rate.
gradient_clip_norm: float, clip threshold (<= 0 meaning no clip).
shuffle: boolean, whether the training data should be shuffled.
do_train: boolean, whether to run training.
Returns:
An instance based on Recommendation.
"""
# Use model_dir or a temp folder to store intermediate checkpoints, etc.
if model_dir is None:
model_dir = tempfile.mkdtemp()
recommendation = cls(
model_spec,
model_dir=model_dir,
shuffle=shuffle,
learning_rate=learning_rate,
gradient_clip_norm=gradient_clip_norm)
if do_train:
tf.compat.v1.logging.info('Training recommendation model...')
recommendation.train(
train_data,
validation_data,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
epochs=epochs)
else:
recommendation.create_model(do_train=False)
return recommendation
# Shortcut function.
create = Recommendation.create
mm_export('recommendation.create').export_constant(__name__, 'create')
|
[
"tensorflow.io.gfile.GFile",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.get_callbacks",
"numpy.zeros",
"tensorflow.__version__.split",
"tensorflow.compat.v1.logging.info",
"tensorflow_examples.lite.model_maker.core.api.mm_export",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.input_pipeline.get_serving_input_specs",
"tensorflow.saved_model.save",
"tensorflow_examples.lite.model_maker.core.task.model_util.get_lite_runner",
"tensorflow_examples.lite.model_maker.core.data_util.data_util.generate_elements",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.compile_model",
"tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.metrics.GlobalRecall",
"tempfile.mkdtemp",
"tensorflow.get_logger",
"tensorflow.lite.TFLiteConverter.from_concrete_functions"
] |
[((1620, 1662), 'tensorflow_examples.lite.model_maker.core.api.mm_export', 'mm_export', (['"""recommendation.Recommendation"""'], {}), "('recommendation.Recommendation')\n", (1629, 1662), False, 'from tensorflow_examples.lite.model_maker.core.api import mm_export\n'), ((5207, 5253), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.get_callbacks', '_launcher.get_callbacks', (['self.model', 'model_dir'], {}), '(self.model, model_dir)\n', (5230, 5253), True, 'from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher as _launcher\n'), ((5358, 5413), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.input_pipeline.get_serving_input_specs', 'input_pipeline.get_serving_input_specs', (['self.input_spec'], {}), '(self.input_spec)\n', (5396, 5413), False, 'from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import input_pipeline\n'), ((6501, 6576), 'tensorflow.saved_model.save', 'tf.saved_model.save', (['self.model'], {'export_dir': 'filepath', 'signatures': 'signatures'}), '(self.model, export_dir=filepath, signatures=signatures)\n', (6520, 6576), True, 'import tensorflow as tf\n'), ((7010, 7070), 'tensorflow_examples.lite.model_maker.core.task.model_util.get_lite_runner', 'model_util.get_lite_runner', (['tflite_filepath', 'self.model_spec'], {}), '(tflite_filepath, self.model_spec)\n', (7036, 7070), False, 'from tensorflow_examples.lite.model_maker.core.task import model_util\n'), ((7409, 7440), 'tensorflow_examples.lite.model_maker.core.data_util.data_util.generate_elements', 'data_util.generate_elements', (['ds'], {}), '(ds)\n', (7436, 7440), False, 'from tensorflow_examples.lite.model_maker.core.data_util import data_util\n'), ((9904, 9938), 'tensorflow_examples.lite.model_maker.core.api.mm_export', 'mm_export', (['"""recommendation.create"""'], {}), "('recommendation.create')\n", (9913, 9938), False, 'from tensorflow_examples.lite.model_maker.core.api import mm_export\n'), ((3362, 3480), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.recommendation_model_launcher.compile_model', '_launcher.compile_model', (['self.model', 'self.model_hparams.eval_top_k', 'self._learning_rate', 'self._gradient_clip_norm'], {}), '(self.model, self.model_hparams.eval_top_k, self.\n _learning_rate, self._gradient_clip_norm)\n', (3385, 3480), True, 'from tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import recommendation_model_launcher as _launcher\n'), ((5980, 6051), 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[serve_fn]', 'self.model'], {}), '([serve_fn], self.model)\n', (6027, 6051), True, 'import tensorflow as tf\n'), ((6146, 6205), 'tensorflow.lite.TFLiteConverter.from_concrete_functions', 'tf.lite.TFLiteConverter.from_concrete_functions', (['[serve_fn]'], {}), '([serve_fn])\n', (6193, 6205), True, 'import tensorflow as tf\n'), ((6254, 6294), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['tflite_filepath', '"""wb"""'], {}), "(tflite_filepath, 'wb')\n", (6271, 6294), True, 'import tensorflow as tf\n'), ((7283, 7347), 'tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model.metrics.GlobalRecall', '_metrics.GlobalRecall', ([], {'top_k': 'k', 'name': 'f"""Global_Recall/Recall_{k}"""'}), "(top_k=k, name=f'Global_Recall/Recall_{k}')\n", (7304, 7347), True, 'from 
tensorflow_examples.lite.model_maker.third_party.recommendation.ml.model import metrics as _metrics\n'), ((7651, 7681), 'numpy.zeros', 'np.zeros', (['[1, max_output_size]'], {}), '([1, max_output_size])\n', (7659, 7681), True, 'import numpy as np\n'), ((9296, 9314), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9312, 9314), False, 'import tempfile\n'), ((9524, 9585), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Training recommendation model..."""'], {}), "('Training recommendation model...')\n", (9549, 9585), True, 'import tensorflow as tf\n'), ((4584, 4599), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (4597, 4599), True, 'import tensorflow as tf\n'), ((5036, 5051), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (5049, 5051), True, 'import tensorflow as tf\n'), ((5922, 5947), 'tensorflow.__version__.split', 'tf.__version__.split', (['"""."""'], {}), "('.')\n", (5942, 5947), True, 'import tensorflow as tf\n')]
|
import time
import datetime
import itertools
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.autograd as autograd
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import dataset
import utils
import sys
import networks.pwcnet as pwcnet
def Train_single(opt):
# ----------------------------------------
# Network training parameters
# ----------------------------------------
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
criterion_MSE = torch.nn.MSELoss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
discriminator = utils.create_discriminator(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
discriminator = nn.DataParallel(discriminator)
discriminator = discriminator.cuda()
else:
discriminator = discriminator.cuda()
generatorNet = generatorNet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr = opt.lr_d, betas = (opt.b1, opt.b2))
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR, decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('ILSVRC2012_train_sal_name.txt')[:1272480]
# Define the dataset
trainset = dataset.ColorizationDataset(opt, imglist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
# ----------------------------------------
# Training
# ----------------------------------------
# Tensor type
Tensor = torch.cuda.FloatTensor
# Count start time
prev_time = time.time()
    # For loop training
for epoch in range(opt.epochs):
for iteration, (x_t, y_t) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
optimizer_D.zero_grad()
lstm_state = None
x_t = x_t.cuda()
y_t = y_t.cuda()
valid = Tensor(np.ones((x_t.shape[0], 1, 30, 30)))
fake = Tensor(np.zeros((x_t.shape[0], 1, 30, 30)))
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
# Train Discriminator
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
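            # p_t is detached below so the discriminator loss does not backpropagate into the generator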
# Fake samples
fake_scalar = discriminator(x_t, p_t.detach())
loss_fake = criterion_MSE(fake_scalar, fake)
# True samples
true_scalar = discriminator(x_t, y_t)
loss_true = criterion_MSE(true_scalar, valid)
# Overall Loss and optimize
loss_D = 0.5 * (loss_fake + loss_true)
# Train Generator
# GAN Loss
fake_scalar = discriminator(x_t, p_t)
loss_G = criterion_MSE(fake_scalar, valid)
# Pixel-level loss
loss_L1 = criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1 + opt.lambda_gan * loss_G
loss.backward()
loss_D.backward()
optimizer_G.step()
optimizer_D.step()
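            # Note: discriminator gradients are not re-zeroed between loss.backward() and
            # loss_D.backward(), so the generator-side GAN loss also deposits gradients
            # into the discriminator step above; a stricter scheme would update D separately.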
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] [G Loss: %.4f] [D Loss: %.4f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), loss_G.item(), loss_D.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_D)
def Pre_train_single(opt):
# ----------------------------------------
# Network training parameters
# ----------------------------------------
print("Pre_train_single")
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
criterion_MSE = torch.nn.MSELoss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
else:
generatorNet = generatorNet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR, decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('ILSVRC2012_train_sal_name.txt')[:1272480]
# Define the dataset
trainset = dataset.ColorizationDataset(opt, imglist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = DataLoader(trainset, batch_size = opt.batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
# ----------------------------------------
# Training
# ----------------------------------------
# Tensor type
Tensor = torch.cuda.FloatTensor
# Count start time
prev_time = time.time()
    # For loop training
for epoch in range(opt.epochs):
for iteration, (x_t, y_t) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
lstm_state = None
x_t = x_t.cuda()
y_t = y_t.cuda()
valid = Tensor(np.ones((x_t.shape[0], 1, 30, 30)))
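            # NOTE: 'valid' (and criterion_MSE above) are unused in this L1-only
            # pre-training loop; they are leftovers from the GAN variant.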
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
# Pixel-level loss
loss_L1 = criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1
loss.backward()
optimizer_G.step()
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
def Train_GAN(opt):
# ----------------------------------------
# Network training parameters
# ----------------------------------------
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
criterion_MSE = torch.nn.MSELoss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
discriminator = utils.create_discriminator(opt)
flownet = utils.create_pwcnet(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
discriminator = nn.DataParallel(discriminator)
discriminator = discriminator.cuda()
flownet = nn.DataParallel(flownet)
flownet = flownet.cuda()
else:
discriminator = discriminator.cuda()
generatorNet = generatorNet.cuda()
flownet = flownet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr = opt.lr_d, betas = (opt.b1, opt.b2))
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR, decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d_Gan%d_os%d_ol%d.pth' % (opt.task, epoch, opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d_GAN%d_os%d_ol%d.pth' % (opt.task, epoch, opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('videocolor_linux.txt')
classlist = utils.get_dirs(opt.baseroot)
'''
imgnumber = len(imglist) - (len(imglist) % opt.batch_size)
imglist = imglist[:imgnumber]
'''
# Define the dataset
trainset = dataset.MultiFramesDataset(opt, imglist, classlist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = utils.create_dataloader(trainset, opt)
# ----------------------------------------
# Training
# ----------------------------------------
# Tensor type
Tensor = torch.cuda.FloatTensor
# Count start time
prev_time = time.time()
# For loop training
for epoch in range(opt.epochs):
for iteration, (in_part, out_part) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
optimizer_D.zero_grad()
lstm_state = None
loss_flow = 0
loss_flow_long = 0
loss_L1 = 0
loss_D = 0
loss_G = 0
x_0 = in_part[0].cuda()
p_t_0 = in_part[0].cuda()
# Adversarial ground truth
valid = Tensor(np.ones((in_part[0].shape[0], 1, 30, 30)))
fake = Tensor(np.zeros((in_part[0].shape[0], 1, 30, 30)))
for iter_frame in range(opt.iter_frames):
# Read data
x_t = in_part[iter_frame].cuda()
y_t = out_part[iter_frame].cuda()
# Initialize the second input and compute flow loss
if iter_frame == 0:
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
elif iter_frame == 1:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_0 = p_t.detach()
p_t_last.requires_grad = False
p_t_0.requires_grad = False
# o_t_last_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
# y_t_warp range is [0, 1]
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
else:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_last.requires_grad = False
# o_t_last_2_t o_t_first_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
                    o_t_first_2_t = pwcnet.PWCEstimate(flownet, x_t, x_0)
# y_t_warp, y_t_warp_long range is [0, 1]
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
x_t_warp_long = pwcnet.PWCNetBackward((x_0 + 1) / 2, o_t_first_2_t)
p_t_warp_long = pwcnet.PWCNetBackward((p_t_0 + 1) / 2, o_t_first_2_t)
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
lstm_state = utils.repackage_hidden(lstm_state)
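                # The masks below implement a soft occlusion weighting, exp(-mask_para * |x_t - warp(x_last)|^2):
                # pixels where the warped previous (or first) frame disagrees with the current frame
                # receive little weight in the temporal-consistency flow losses.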
if iter_frame == 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
elif iter_frame > 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
mask_flow_long = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp_long, dim=1).pow(2) ).unsqueeze(1)
loss_flow_long += criterion_L1(mask_flow_long * (p_t + 1) / 2, mask_flow_long * p_t_warp_long)
# Fake samples
fake_scalar = discriminator(x_t, p_t.detach())
loss_fake = criterion_MSE(fake_scalar, fake)
# True samples
true_scalar = discriminator(x_t, y_t)
loss_true = criterion_MSE(true_scalar, valid)
# Train Discriminator
loss_D += 0.5 * (loss_fake + loss_true)
# Train Generator
# GAN Loss
fake_scalar = discriminator(x_t, p_t)
loss_G += criterion_MSE(fake_scalar, valid)
# Pixel-level loss
loss_L1 += criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1 + opt.lambda_flow * loss_flow + opt.lambda_flow_long * loss_flow_long + opt.lambda_gan * loss_G
loss.backward()
loss_D.backward()
optimizer_G.step()
optimizer_D.step()
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] [Flow Loss Short: %.8f] [Flow Loss Long: %.8f] [G Loss: %.4f] [D Loss: %.4f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), loss_flow.item(), loss_flow_long.item(), loss_G.item(), loss_D.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_D)
def Train_No_GAN(opt): # w / o GAN
# ----------------------------------------
# Network training parameters
# ----------------------------------------
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# Loss functions
criterion_L1 = torch.nn.L1Loss().cuda()
# Initialize Generator
generatorNet = utils.create_generator(opt)
flownet = utils.create_pwcnet(opt)
# To device
if opt.multi_gpu:
generatorNet = nn.DataParallel(generatorNet)
generatorNet = generatorNet.cuda()
flownet = nn.DataParallel(flownet)
flownet = flownet.cuda()
else:
generatorNet = generatorNet.cuda()
flownet = flownet.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(generatorNet.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
# Learning rate decrease
def adjust_learning_rate(opt, epoch, iteration, optimizer):
        # Set the learning rate to the initial LR, decayed by "lr_decrease_factor" every "lr_decrease_epoch" epochs
if opt.lr_decrease_mode == 'epoch':
lr = opt.lr_g * (opt.lr_decrease_factor ** (epoch // opt.lr_decrease_epoch))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if opt.lr_decrease_mode == 'iter':
lr = opt.lr_g * (opt.lr_decrease_factor ** (iteration // opt.lr_decrease_iter))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
if opt.save_name_mode:
torch.save(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Define the class list
imglist = utils.text_readlines('videocolor_linux.txt')
classlist = utils.get_dirs(opt.baseroot)
'''
imgnumber = len(imglist) - (len(imglist) % opt.batch_size)
imglist = imglist[:imgnumber]
'''
# Define the dataset
trainset = dataset.MultiFramesDataset(opt, imglist, classlist)
print('The overall number of classes:', len(trainset))
# Define the dataloader
dataloader = utils.create_dataloader(trainset, opt)
# ----------------------------------------
# Training
# ----------------------------------------
# Count start time
prev_time = time.time()
# For loop training
for epoch in range(opt.epochs):
for iteration, (in_part, out_part) in enumerate(dataloader):
# Train Generator
optimizer_G.zero_grad()
lstm_state = None
loss_flow = 0
loss_flow_long = 0
loss_L1 = 0
x_0 = in_part[0].cuda()
p_t_0 = in_part[0].cuda()
for iter_frame in range(opt.iter_frames):
# Read data
x_t = in_part[iter_frame].cuda()
y_t = out_part[iter_frame].cuda()
# Initialize the second input and compute flow loss
if iter_frame == 0:
p_t_last = torch.zeros(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w).cuda()
elif iter_frame == 1:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_0 = p_t.detach()
p_t_last.requires_grad = False
p_t_0.requires_grad = False
# o_t_last_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
# y_t_warp range is [0, 1]
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
else:
x_t_last = in_part[iter_frame - 1].cuda()
p_t_last = p_t.detach()
p_t_last.requires_grad = False
# o_t_last_2_t o_t_first_2_t range is [-20, +20]
o_t_last_2_t = pwcnet.PWCEstimate(flownet, x_t, x_t_last)
                    o_t_first_2_t = pwcnet.PWCEstimate(flownet, x_t, x_0)
# y_t_warp, y_t_warp_long range is [0, 1]
x_t_warp = pwcnet.PWCNetBackward((x_t_last + 1) / 2, o_t_last_2_t)
p_t_warp = pwcnet.PWCNetBackward((p_t_last + 1) / 2, o_t_last_2_t)
x_t_warp_long = pwcnet.PWCNetBackward((x_0 + 1) / 2, o_t_first_2_t)
p_t_warp_long = pwcnet.PWCNetBackward((p_t_0 + 1) / 2, o_t_first_2_t)
# Generator output
p_t, lstm_state = generatorNet(x_t, p_t_last, lstm_state)
lstm_state = utils.repackage_hidden(lstm_state)
if iter_frame == 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
elif iter_frame > 1:
mask_flow = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp, dim=1).pow(2) ).unsqueeze(1)
loss_flow += criterion_L1(mask_flow * (p_t + 1) / 2, mask_flow * p_t_warp)
mask_flow_long = torch.exp( -opt.mask_para * torch.sum((x_t + 1) / 2 - x_t_warp_long, dim=1).pow(2) ).unsqueeze(1)
loss_flow_long += criterion_L1(mask_flow_long * (p_t + 1) / 2, mask_flow_long * p_t_warp_long)
# Pixel-level loss
loss_L1 += criterion_L1(p_t, y_t)
# Overall Loss and optimize
loss = loss_L1 + opt.lambda_flow * loss_flow + opt.lambda_flow_long * loss_flow_long
loss.backward()
optimizer_G.step()
# Determine approximate time left
iters_done = epoch * len(dataloader) + iteration
iters_left = opt.epochs * len(dataloader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [L1 Loss: %.4f] [Flow Loss Short: %.8f] [Flow Loss Long: %.8f] Time_left: %s" %
((epoch + 1), opt.epochs, iteration, len(dataloader), loss_L1.item(), loss_flow.item(), loss_flow_long.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(dataloader), generatorNet)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), (iters_done + 1), optimizer_G)
|
[
"utils.repackage_hidden",
"numpy.ones",
"utils.create_generator",
"utils.create_dataloader",
"utils.create_discriminator",
"networks.pwcnet.PWCEstimate",
"torch.nn.MSELoss",
"dataset.ColorizationDataset",
"torch.utils.data.DataLoader",
"torch.zeros",
"networks.pwcnet.PWCNetBackward",
"utils.text_readlines",
"torch.sum",
"torch.nn.L1Loss",
"numpy.zeros",
"time.time",
"utils.create_pwcnet",
"dataset.MultiFramesDataset",
"torch.save",
"torch.nn.DataParallel",
"utils.get_dirs"
] |
[((702, 729), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (724, 729), False, 'import utils\n'), ((750, 781), 'utils.create_discriminator', 'utils.create_discriminator', (['opt'], {}), '(opt)\n', (776, 781), False, 'import utils\n'), ((4021, 4062), 'dataset.ColorizationDataset', 'dataset.ColorizationDataset', (['opt', 'imglist'], {}), '(opt, imglist)\n', (4048, 4062), False, 'import dataset\n'), ((4168, 4280), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)', 'num_workers': 'opt.num_workers', 'pin_memory': '(True)'}), '(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=\n opt.num_workers, pin_memory=True)\n', (4178, 4280), False, 'from torch.utils.data import DataLoader\n'), ((4509, 4520), 'time.time', 'time.time', ([], {}), '()\n', (4518, 4520), False, 'import time\n'), ((7444, 7471), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (7466, 7471), False, 'import utils\n'), ((10462, 10503), 'dataset.ColorizationDataset', 'dataset.ColorizationDataset', (['opt', 'imglist'], {}), '(opt, imglist)\n', (10489, 10503), False, 'import dataset\n'), ((10609, 10721), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)', 'num_workers': 'opt.num_workers', 'pin_memory': '(True)'}), '(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=\n opt.num_workers, pin_memory=True)\n', (10619, 10721), False, 'from torch.utils.data import DataLoader\n'), ((10950, 10961), 'time.time', 'time.time', ([], {}), '()\n', (10959, 10961), False, 'import time\n'), ((12954, 12981), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (12976, 12981), False, 'import utils\n'), ((13002, 13033), 'utils.create_discriminator', 'utils.create_discriminator', (['opt'], {}), '(opt)\n', (13028, 13033), False, 'import utils\n'), ((13048, 13072), 'utils.create_pwcnet', 'utils.create_pwcnet', (['opt'], {}), '(opt)\n', (13067, 13072), False, 'import utils\n'), ((16458, 16502), 'utils.text_readlines', 'utils.text_readlines', (['"""videocolor_linux.txt"""'], {}), "('videocolor_linux.txt')\n", (16478, 16502), False, 'import utils\n'), ((16519, 16547), 'utils.get_dirs', 'utils.get_dirs', (['opt.baseroot'], {}), '(opt.baseroot)\n', (16533, 16547), False, 'import utils\n'), ((16702, 16753), 'dataset.MultiFramesDataset', 'dataset.MultiFramesDataset', (['opt', 'imglist', 'classlist'], {}), '(opt, imglist, classlist)\n', (16728, 16753), False, 'import dataset\n'), ((16859, 16897), 'utils.create_dataloader', 'utils.create_dataloader', (['trainset', 'opt'], {}), '(trainset, opt)\n', (16882, 16897), False, 'import utils\n'), ((17123, 17134), 'time.time', 'time.time', ([], {}), '()\n', (17132, 17134), False, 'import time\n'), ((22936, 22963), 'utils.create_generator', 'utils.create_generator', (['opt'], {}), '(opt)\n', (22958, 22963), False, 'import utils\n'), ((22978, 23002), 'utils.create_pwcnet', 'utils.create_pwcnet', (['opt'], {}), '(opt)\n', (22997, 23002), False, 'import utils\n'), ((25997, 26041), 'utils.text_readlines', 'utils.text_readlines', (['"""videocolor_linux.txt"""'], {}), "('videocolor_linux.txt')\n", (26017, 26041), False, 'import utils\n'), ((26058, 26086), 'utils.get_dirs', 'utils.get_dirs', (['opt.baseroot'], {}), '(opt.baseroot)\n', (26072, 26086), False, 'import utils\n'), ((26241, 26292), 'dataset.MultiFramesDataset', 'dataset.MultiFramesDataset', (['opt', 'imglist', 'classlist'], {}), '(opt, imglist, classlist)\n', (26267, 26292), False, 'import dataset\n'), ((26398, 26436), 'utils.create_dataloader', 'utils.create_dataloader', (['trainset', 'opt'], {}), '(trainset, opt)\n', (26421, 26436), False, 'import utils\n'), ((26602, 26613), 'time.time', 'time.time', ([], {}), '()\n', (26611, 26613), False, 'import time\n'), ((844, 873), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (859, 873), True, 'import torch.nn as nn\n'), ((941, 971), 'torch.nn.DataParallel', 'nn.DataParallel', (['discriminator'], {}), '(discriminator)\n', (956, 971), True, 'import torch.nn as nn\n'), ((3916, 3969), 'utils.text_readlines', 'utils.text_readlines', (['"""ILSVRC2012_train_sal_name.txt"""'], {}), "('ILSVRC2012_train_sal_name.txt')\n", (3936, 3969), False, 'import utils\n'), ((7534, 7563), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (7549, 7563), True, 'import torch.nn as nn\n'), ((10357, 10410), 'utils.text_readlines', 'utils.text_readlines', (['"""ILSVRC2012_train_sal_name.txt"""'], {}), "('ILSVRC2012_train_sal_name.txt')\n", (10377, 10410), False, 'import utils\n'), ((13135, 13164), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (13150, 13164), True, 'import torch.nn as nn\n'), ((13232, 13262), 'torch.nn.DataParallel', 'nn.DataParallel', (['discriminator'], {}), '(discriminator)\n', (13247, 13262), True, 'import torch.nn as nn\n'), ((13326, 13350), 'torch.nn.DataParallel', 'nn.DataParallel', (['flownet'], {}), '(flownet)\n', (13341, 13350), True, 'import torch.nn as nn\n'), ((23065, 23094), 'torch.nn.DataParallel', 'nn.DataParallel', (['generatorNet'], {}), '(generatorNet)\n', (23080, 23094), True, 'import torch.nn as nn\n'), ((23156, 23180), 'torch.nn.DataParallel', 'nn.DataParallel', (['flownet'], {}), '(flownet)\n', (23171, 23180), True, 'import torch.nn as nn\n'), ((584, 601), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (599, 601), False, 'import torch\n'), ((629, 647), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (645, 647), False, 'import torch\n'), ((6384, 6395), 'time.time', 'time.time', ([], {}), '()\n', (6393, 6395), False, 'import time\n'), ((7326, 7343), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (7341, 7343), False, 'import torch\n'), ((7371, 7389), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (7387, 7389), False, 'import torch\n'), ((12072, 12083), 'time.time', 'time.time', ([], {}), '()\n', (12081, 12083), False, 'import time\n'), ((12836, 12853), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (12851, 12853), False, 'import torch\n'), ((12881, 12899), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (12897, 12899), False, 'import torch\n'), ((21847, 21858), 'time.time', 'time.time', ([], {}), '()\n', (21856, 21858), False, 'import time\n'), ((22864, 22881), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (22879, 22881), False, 'import torch\n'), ((30407, 30418), 'time.time', 'time.time', ([], {}), '()\n', (30416, 30418), False, 'import time\n'), ((4915, 4949), 'numpy.ones', 'np.ones', (['(x_t.shape[0], 1, 30, 30)'], {}), '((x_t.shape[0], 1, 30, 30))\n', (4922, 4949), True, 'import numpy as np\n'), ((4977, 5012), 'numpy.zeros', 'np.zeros', (['(x_t.shape[0], 1, 30, 30)'], {}), '((x_t.shape[0], 1, 30, 30))\n', (4985, 5012), True, 'import numpy as np\n'), ((11320, 11354), 'numpy.ones', 'np.ones', (['(x_t.shape[0], 1, 30, 30)'], {}), '((x_t.shape[0], 1, 30, 30))\n', (11327, 11354), True, 'import numpy as np\n'), ((17683, 17724), 'numpy.ones', 'np.ones', (['(in_part[0].shape[0], 1, 30, 30)'], {}), '((in_part[0].shape[0], 1, 30, 30))\n', (17690, 17724), True, 'import numpy as np\n'), ((17752, 17794), 'numpy.zeros', 'np.zeros', (['(in_part[0].shape[0], 1, 30, 30)'], {}), '((in_part[0].shape[0], 1, 30, 30))\n', (17760, 17794), True, 'import numpy as np\n'), ((19800, 19834), 'utils.repackage_hidden', 'utils.repackage_hidden', (['lstm_state'], {}), '(lstm_state)\n', (19822, 19834), False, 'import utils\n'), ((29018, 29052), 'utils.repackage_hidden', 'utils.repackage_hidden', (['lstm_state'], {}), '(lstm_state)\n', (29040, 29052), False, 'import utils\n'), ((5038, 5111), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (5049, 5111), False, 'import torch\n'), ((11380, 11453), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (11391, 11453), False, 'import torch\n'), ((2456, 2551), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch,\n opt.batch_size))\n", (2466, 2551), False, 'import torch\n'), ((2804, 2902), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (2814, 2902), False, 'import torch\n'), ((3212, 3301), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.\n batch_size))\n", (3222, 3301), False, 'import torch\n'), ((3553, 3645), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (3563, 3645), False, 'import torch\n'), ((8897, 8992), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch,\n opt.batch_size))\n", (8907, 8992), False, 'import torch\n'), ((9245, 9343), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (9255, 9343), False, 'import torch\n'), ((9653, 9742), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.\n batch_size))\n", (9663, 9742), False, 'import torch\n'), ((9994, 10086), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (10004, 10086), False, 'import torch\n'), ((14856, 15028), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d_Gan%d_os%d_ol%d.pth' % (opt.task, epoch, opt.\n batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d_Gan%d_os%d_ol%d.pth' % (\n opt.task, epoch, opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.\n lambda_flow_long))\n", (14866, 15028), False, 'import torch\n'), ((15275, 15373), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (15285, 15373), False, 'import torch\n'), ((15683, 15847), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d_GAN%d_os%d_ol%d.pth' % (opt.task, epoch, opt.\n batch_size, opt.lambda_gan, opt.lambda_flow, opt.lambda_flow_long))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d_GAN%d_os%d_ol%d.pth' % (opt.task,\n epoch, opt.batch_size, opt.lambda_gan, opt.lambda_flow, opt.\n lambda_flow_long))\n", (15693, 15847), False, 'import torch\n'), ((16095, 16187), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (16105, 16187), False, 'import torch\n'), ((18585, 18627), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (18603, 18627), True, 'import networks.pwcnet as pwcnet\n'), ((18659, 18714), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (18680, 18714), True, 'import networks.pwcnet as pwcnet\n'), ((18793, 18848), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (18814, 18848), True, 'import networks.pwcnet as pwcnet\n'), ((19132, 19174), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (19150, 19174), True, 'import networks.pwcnet as pwcnet\n'), ((19211, 19248), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_0'], {}), '(flownet, x_t, x_0)\n', (19229, 19248), True, 'import networks.pwcnet as pwcnet\n'), ((19341, 19396), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (19362, 19396), True, 'import networks.pwcnet as pwcnet\n'), ((19428, 19483), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (19449, 19483), True, 'import networks.pwcnet as pwcnet\n'), ((19520, 19571), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((x_0 + 1) / 2, o_t_first_2_t)\n', (19541, 19571), True, 'import networks.pwcnet as pwcnet\n'), ((19608, 19661), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((p_t_0 + 1) / 2, o_t_first_2_t)\n', (19629, 19661), True, 'import networks.pwcnet as pwcnet\n'), ((24537, 24632), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch,\n opt.batch_size))\n", (24547, 24632), False, 'import torch\n'), ((24885, 24983), 'torch.save', 'torch.save', (['generator.module', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator.module, 'Pre_%s_iter%d_bs%d.pth' % (opt.task,\n iteration, opt.batch_size))\n", (24895, 24983), False, 'import torch\n'), ((25293, 25382), 'torch.save', 'torch.save', (['generator', "('Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.batch_size))"], {}), "(generator, 'Pre_%s_epoch%d_bs%d.pth' % (opt.task, epoch, opt.\n batch_size))\n", (25303, 25382), False, 'import torch\n'), ((25634, 25726), 'torch.save', 'torch.save', (['generator', "('Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.batch_size))"], {}), "(generator, 'Pre_%s_iter%d_bs%d.pth' % (opt.task, iteration, opt.\n batch_size))\n", (25644, 25726), False, 'import torch\n'), ((27803, 27845), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (27821, 27845), True, 'import networks.pwcnet as pwcnet\n'), ((27877, 27932), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (27898, 27932), True, 'import networks.pwcnet as pwcnet\n'), ((28011, 28066), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (28032, 28066), True, 'import networks.pwcnet as pwcnet\n'), ((28350, 28392), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_t_last'], {}), '(flownet, x_t, x_t_last)\n', (28368, 28392), True, 'import networks.pwcnet as pwcnet\n'), ((28429, 28466), 'networks.pwcnet.PWCEstimate', 'pwcnet.PWCEstimate', (['flownet', 'x_t', 'x_0'], {}), '(flownet, x_t, x_0)\n', (28447, 28466), True, 'import networks.pwcnet as pwcnet\n'), ((28559, 28614), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((x_t_last + 1) / 2, o_t_last_2_t)\n', (28580, 28614), True, 'import networks.pwcnet as pwcnet\n'), ((28646, 28701), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_last + 1) / 2)', 'o_t_last_2_t'], {}), '((p_t_last + 1) / 2, o_t_last_2_t)\n', (28667, 28701), True, 'import networks.pwcnet as pwcnet\n'), ((28738, 28789), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((x_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((x_0 + 1) / 2, o_t_first_2_t)\n', (28759, 28789), True, 'import networks.pwcnet as pwcnet\n'), ((28826, 28879), 'networks.pwcnet.PWCNetBackward', 'pwcnet.PWCNetBackward', (['((p_t_0 + 1) / 2)', 'o_t_first_2_t'], {}), '((p_t_0 + 1) / 2, o_t_first_2_t)\n', (28847, 28879), True, 'import networks.pwcnet as pwcnet\n'), ((6334, 6345), 'time.time', 'time.time', ([], {}), '()\n', (6343, 6345), False, 'import time\n'), ((12022, 12033), 'time.time', 'time.time', ([], {}), '()\n', (12031, 12033), False, 'import time\n'), ((18130, 18203), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (18141, 18203), False, 'import torch\n'), ((21797, 21808), 'time.time', 'time.time', ([], {}), '()\n', (21806, 21808), False, 'import time\n'), ((27348, 27421), 'torch.zeros', 'torch.zeros', (['opt.batch_size', 'opt.out_channels', 'opt.resize_h', 'opt.resize_w'], {}), '(opt.batch_size, opt.out_channels, opt.resize_h, opt.resize_w)\n', (27359, 27421), False, 'import torch\n'), ((30357, 30368), 'time.time', 'time.time', ([], {}), '()\n', (30366, 30368), False, 'import time\n'), ((19931, 19973), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (19940, 19973), False, 'import torch\n'), ((29149, 29191), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (29158, 29191), False, 'import torch\n'), ((20188, 20230), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (20197, 20230), False, 'import torch\n'), ((20413, 20460), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp_long)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp_long, dim=1)\n', (20422, 20460), False, 'import torch\n'), ((29406, 29448), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp, dim=1)\n', (29415, 29448), False, 'import torch\n'), ((29631, 29678), 'torch.sum', 'torch.sum', (['((x_t + 1) / 2 - x_t_warp_long)'], {'dim': '(1)'}), '((x_t + 1) / 2 - x_t_warp_long, dim=1)\n', (29640, 29678), False, 'import torch\n')]
|
#!/usr/bin/env python3
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# for your packages to be recognized by python
d = generate_distutils_setup(
packages=['rosplan_planning_system'],
package_dir={'rosplan_planning_system': 'src/rosplan_planning_system'}
)
setup(**d)
|
[
"distutils.core.setup",
"catkin_pkg.python_setup.generate_distutils_setup"
] |
[((170, 309), 'catkin_pkg.python_setup.generate_distutils_setup', 'generate_distutils_setup', ([], {'packages': "['rosplan_planning_system']", 'package_dir': "{'rosplan_planning_system': 'src/rosplan_planning_system'}"}), "(packages=['rosplan_planning_system'], package_dir=\n {'rosplan_planning_system': 'src/rosplan_planning_system'})\n", (194, 309), False, 'from catkin_pkg.python_setup import generate_distutils_setup\n'), ((312, 322), 'distutils.core.setup', 'setup', ([], {}), '(**d)\n', (317, 322), False, 'from distutils.core import setup\n')]
|
# coding: utf-8
#
from __future__ import absolute_import, print_function
import threading
import re
import time
import datetime
import csv
import sys
import atexit
from collections import namedtuple
_MEM_PATTERN = re.compile(r'TOTAL[:\s]+(\d+)')
# acct_tag_hex is a socket tag
# cnt_set==0 are for background data
# cnt_set==1 are for foreground data
_NetStats = namedtuple(
"NetStats",
"""idx iface acct_tag_hex uid_tag_int cnt_set rx_bytes rx_packets
tx_bytes tx_packets rx_tcp_bytes rx_tcp_packets rx_udp_bytes rx_udp_packets rx_other_bytes rx_other_packets
tx_tcp_bytes tx_tcp_packets tx_udp_bytes tx_udp_packets tx_other_bytes tx_other_packets"""
.split())
class Perf(object):
def __init__(self, d, package_name=None):
self.d = d
self.package_name = package_name
self.csv_output = "perf.csv"
self.debug = False
self.interval = 1.0
self._th = None
self._event = threading.Event()
self._condition = threading.Condition()
self._data = {}
def shell(self, *args, **kwargs):
# print("Shell:", args)
return self.d.shell(*args, **kwargs)
def memory(self):
""" PSS(KB) """
output = self.shell(['dumpsys', 'meminfo', self.package_name]).output
m = _MEM_PATTERN.search(output)
if m:
return int(m.group(1))
return 0
def _cpu_rawdata_collect(self, pid):
"""
        pjiff may be 0 if /proc/<pid>/stat does not exist
"""
first_line = self.shell(['cat', '/proc/stat']).output.splitlines()[0]
assert first_line.startswith('cpu ')
# ds: user, nice, system, idle, iowait, irq, softirq, stealstolen, guest, guest_nice
ds = list(map(int, first_line.split()[1:]))
total_cpu = sum(ds)
idle = ds[3]
proc_stat = self.shell(['cat',
'/proc/%d/stat' % pid]).output.split(') ')
pjiff = 0
if len(proc_stat) > 1:
proc_values = proc_stat[1].split()
utime = int(proc_values[11])
stime = int(proc_values[12])
pjiff = utime + stime
return (total_cpu, idle, pjiff)
def cpu(self, pid):
""" CPU
Refs:
- http://man7.org/linux/man-pages/man5/proc.5.html
        - [Android performance testing: a summary of CPU usage measurement methods](https://www.jianshu.com/p/6bf564f7cdf0)
"""
store_key = 'cpu-%d' % pid
# first time jiffies, t: total, p: process
if store_key in self._data:
tjiff1, idle1, pjiff1 = self._data[store_key]
else:
tjiff1, idle1, pjiff1 = self._cpu_rawdata_collect(pid)
time.sleep(.3)
# second time jiffies
self._data[
store_key] = tjiff2, idle2, pjiff2 = self._cpu_rawdata_collect(pid)
# calculate
pcpu = 0.0
if pjiff1 > 0 and pjiff2 > 0:
pcpu = 100.0 * (pjiff2 - pjiff1) / (tjiff2 - tjiff1) # process cpu
scpu = 100.0 * ((tjiff2 - idle2) -
(tjiff1 - idle1)) / (tjiff2 - tjiff1) # system cpu
assert scpu > -1 # maybe -0.5, sometimes happens
scpu = max(0, scpu)
return round(pcpu, 1), round(scpu, 1)
def netstat(self, pid):
"""
Returns:
(rall, tall, rtcp, ttcp, rudp, tudp)
"""
m = re.search(r'^Uid:\s+(\d+)',
self.shell(['cat', '/proc/%d/status' % pid]).output,
re.M)
        if not m:
            return [0] * 6  # keep the return shape consistent with the docstring and callers
uid = m.group(1)
lines = self.shell(['cat',
'/proc/net/xt_qtaguid/stats']).output.splitlines()
traffic = [0] * 6
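        # cumulative byte counters since boot: [rx_all, tx_all, rx_tcp, tx_tcp, rx_udp, tx_udp]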
def plus_array(arr, *args):
for i, v in enumerate(args):
arr[i] = arr[i] + int(v)
for line in lines:
vs = line.split()
if len(vs) != 21:
continue
v = _NetStats(*vs)
if v.uid_tag_int != uid:
continue
if v.iface != 'wlan0':
continue
# all, tcp, udp
plus_array(traffic, v.rx_bytes, v.tx_bytes, v.rx_tcp_bytes,
v.tx_tcp_bytes, v.rx_udp_bytes, v.tx_udp_bytes)
store_key = 'netstat-%s' % uid
result = []
if store_key in self._data:
last_traffic = self._data[store_key]
for i in range(len(traffic)):
result.append(traffic[i] - last_traffic[i])
self._data[store_key] = traffic
return result or [0] * 6
def _current_view(self, app=None):
d = self.d
views = self.shell(['dumpsys', 'SurfaceFlinger',
'--list']).output.splitlines()
if not app:
app = d.current_app()
current = app['package'] + "/" + app['activity']
surface_curr = 'SurfaceView - ' + current
if surface_curr in views:
return surface_curr
return current
def _dump_surfaceflinger(self, view):
valid_lines = []
MAX_N = 9223372036854775807
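        # INT64_MAX is SurfaceFlinger's sentinel for frames without a valid timestamp; such rows are skipped below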
for line in self.shell(
['dumpsys', 'SurfaceFlinger', '--latency',
view]).output.splitlines():
fields = line.split()
if len(fields) != 3:
continue
a, b, c = map(int, fields)
if a == 0:
continue
if MAX_N in (a, b, c):
continue
valid_lines.append((a, b, c))
return valid_lines
def _fps_init(self):
view = self._current_view()
self.shell(["dumpsys", "SurfaceFlinger", "--latency-clear", view])
self._data['fps-start-time'] = time.time()
self._data['fps-last-vsync'] = None
self._data['fps-inited'] = True
def fps(self, app=None):
"""
Return float
"""
if 'fps-inited' not in self._data:
self._fps_init()
view = self._current_view(app)
values = self._dump_surfaceflinger(view)
last_vsync = self._data.get('fps-last-vsync')
last_start = self._data.get('fps-start-time')
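        # frames already counted in the previous call are dropped, so FPS = new frames / elapsed time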
try:
idx = values.index(last_vsync)
values = values[idx + 1:]
except ValueError:
pass
duration = time.time() - last_start
if len(values):
self._data['fps-last-vsync'] = values[-1]
self._data['fps-start-time'] = time.time()
return round(len(values) / duration, 1)
def collect(self):
pid = self.d._pidof_app(self.package_name)
if pid is None:
return
app = self.d.current_app()
pss = self.memory()
cpu, scpu = self.cpu(pid)
rbytes, tbytes, rtcp, ttcp = self.netstat(pid)[:4]
fps = self.fps(app)
timestr = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
return {
'time': timestr,
'package': app['package'],
'pss': round(pss / 1024.0, 2), # MB
'cpu': cpu,
'systemCpu': scpu,
'rxBytes': rbytes,
'txBytes': tbytes,
'rxTcpBytes': rtcp,
'txTcpBytes': ttcp,
'fps': fps,
}
def continue_collect(self, f):
try:
headers = [
'time', 'package', 'pss', 'cpu', 'systemCpu', 'rxBytes',
'txBytes', 'rxTcpBytes', 'txTcpBytes', 'fps'
]
fcsv = csv.writer(f)
fcsv.writerow(headers)
update_time = time.time()
            while not self._event.is_set():
perfdata = self.collect()
if self.debug:
print("DEBUG:", perfdata)
if not perfdata:
print("perf package is not alive:", self.package_name)
time.sleep(1)
continue
fcsv.writerow([perfdata[k] for k in headers])
wait_seconds = max(0,
self.interval - (time.time() - update_time))
time.sleep(wait_seconds)
update_time = time.time()
f.close()
finally:
self._condition.acquire()
self._th = None
self._condition.notify()
self._condition.release()
def start(self):
if sys.version_info.major < 3:
f = open(self.csv_output, "wb")
else:
f = open(self.csv_output, "w", newline='\n')
def defer_close():
if not f.closed:
f.close()
atexit.register(defer_close)
if self._th:
raise RuntimeError("perf is already running")
if not self.package_name:
raise EnvironmentError("package_name need to be set")
self._data.clear()
self._event = threading.Event()
self._condition = threading.Condition()
self._th = threading.Thread(target=self.continue_collect, args=(f, ))
self._th.daemon = True
self._th.start()
def stop(self):
self._event.set()
self._condition.acquire()
self._condition.wait(timeout=2)
self._condition.release()
if self.debug:
print("DEBUG: perf collect stopped")
def csv2images(self, src=None, target_dir='.'):
"""
Args:
            src: csv file; defaults to the perf record csv path
            target_dir: directory in which to store the images
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import datetime
import os
import humanize
src = src or self.csv_output
if not os.path.exists(target_dir):
os.makedirs(target_dir)
data = pd.read_csv(src)
data['time'] = data['time'].apply(
lambda x: datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"))
timestr = time.strftime("%Y-%m-%d %H:%M")
# network
rx_str = humanize.naturalsize(data['rxBytes'].sum(), gnu=True)
tx_str = humanize.naturalsize(data['txBytes'].sum(), gnu=True)
plt.subplot(2, 1, 1)
plt.plot(data['time'], data['rxBytes'] / 1024, label='all')
plt.plot(data['time'], data['rxTcpBytes'] / 1024, 'r--', label='tcp')
plt.legend()
plt.title(
'\n'.join(
["Network", timestr,
'Recv %s, Send %s' % (rx_str, tx_str)]),
loc='left')
plt.gca().xaxis.set_major_formatter(ticker.NullFormatter())
plt.ylabel('Recv(KB)')
plt.ylim(ymin=0)
plt.subplot(2, 1, 2)
plt.plot(data['time'], data['txBytes'] / 1024, label='all')
plt.plot(data['time'], data['txTcpBytes'] / 1024, 'r--', label='tcp')
plt.legend()
plt.xlabel('Time')
plt.ylabel('Send(KB)')
plt.ylim(ymin=0)
plt.savefig(os.path.join(target_dir, "net.png"))
plt.clf()
plt.subplot(3, 1, 1)
plt.title(
'\n'.join(['Summary', timestr, self.package_name]), loc='left')
plt.plot(data['time'], data['pss'], '-')
plt.ylabel('PSS(MB)')
plt.gca().xaxis.set_major_formatter(ticker.NullFormatter())
plt.subplot(3, 1, 2)
plt.plot(data['time'], data['cpu'], '-')
plt.ylim(0, max(100, data['cpu'].max()))
plt.ylabel('CPU')
plt.gca().xaxis.set_major_formatter(ticker.NullFormatter())
plt.subplot(3, 1, 3)
plt.plot(data['time'], data['fps'], '-')
plt.ylabel('FPS')
plt.ylim(0, 60)
plt.xlabel('Time')
plt.savefig(os.path.join(target_dir, "summary.png"))
if __name__ == '__main__':
import uiautomator2 as u2
pkgname = "com.tencent.tmgp.sgame"
# pkgname = "com.netease.cloudmusic"
u2.plugin_register('perf', Perf, pkgname)
d = u2.connect("10.242.62.224")
print(d.current_app())
# print(d.ext_perf.netstat(5350))
# d.app_start(pkgname)
d.ext_perf.start()
d.ext_perf.debug = True
try:
time.sleep(500)
except KeyboardInterrupt:
d.ext_perf.stop()
d.ext_perf.csv2images()
print("threading stopped")
|
[
"atexit.register",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"time.strftime",
"uiautomator2.plugin_register",
"matplotlib.pyplot.gca",
"os.path.join",
"threading.Condition",
"os.path.exists",
"threading.Event",
"datetime.datetime.now",
"threading.Thread",
"csv.writer",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"uiautomator2.connect",
"time.sleep",
"datetime.datetime.strptime",
"matplotlib.pyplot.ylabel",
"re.compile",
"matplotlib.pyplot.subplot",
"os.makedirs",
"matplotlib.pyplot.plot",
"time.time",
"matplotlib.ticker.NullFormatter",
"matplotlib.pyplot.xlabel"
] |
[((217, 249), 're.compile', 're.compile', (['"""TOTAL[:\\\\s]+(\\\\d+)"""'], {}), "('TOTAL[:\\\\s]+(\\\\d+)')\n", (227, 249), False, 'import re\n'), ((11830, 11871), 'uiautomator2.plugin_register', 'u2.plugin_register', (['"""perf"""', 'Perf', 'pkgname'], {}), "('perf', Perf, pkgname)\n", (11848, 11871), True, 'import uiautomator2 as u2\n'), ((11881, 11908), 'uiautomator2.connect', 'u2.connect', (['"""10.242.62.224"""'], {}), "('10.242.62.224')\n", (11891, 11908), True, 'import uiautomator2 as u2\n'), ((951, 968), 'threading.Event', 'threading.Event', ([], {}), '()\n', (966, 968), False, 'import threading\n'), ((995, 1016), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (1014, 1016), False, 'import threading\n'), ((5706, 5717), 'time.time', 'time.time', ([], {}), '()\n', (5715, 5717), False, 'import time\n'), ((6444, 6455), 'time.time', 'time.time', ([], {}), '()\n', (6453, 6455), False, 'import time\n'), ((8606, 8634), 'atexit.register', 'atexit.register', (['defer_close'], {}), '(defer_close)\n', (8621, 8634), False, 'import atexit\n'), ((8864, 8881), 'threading.Event', 'threading.Event', ([], {}), '()\n', (8879, 8881), False, 'import threading\n'), ((8908, 8929), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (8927, 8929), False, 'import threading\n'), ((8949, 9006), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.continue_collect', 'args': '(f,)'}), '(target=self.continue_collect, args=(f,))\n', (8965, 9006), False, 'import threading\n'), ((9791, 9807), 'pandas.read_csv', 'pd.read_csv', (['src'], {}), '(src)\n', (9802, 9807), True, 'import pandas as pd\n'), ((9947, 9978), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M"""'], {}), "('%Y-%m-%d %H:%M')\n", (9960, 9978), False, 'import time\n'), ((10147, 10167), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (10158, 10167), True, 'import matplotlib.pyplot as plt\n'), ((10176, 10235), 'matplotlib.pyplot.plot', 'plt.plot', (["data['time']", "(data['rxBytes'] / 1024)"], {'label': '"""all"""'}), "(data['time'], data['rxBytes'] / 1024, label='all')\n", (10184, 10235), True, 'import matplotlib.pyplot as plt\n'), ((10244, 10313), 'matplotlib.pyplot.plot', 'plt.plot', (["data['time']", "(data['rxTcpBytes'] / 1024)", '"""r--"""'], {'label': '"""tcp"""'}), "(data['time'], data['rxTcpBytes'] / 1024, 'r--', label='tcp')\n", (10252, 10313), True, 'import matplotlib.pyplot as plt\n'), ((10322, 10334), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10332, 10334), True, 'import matplotlib.pyplot as plt\n'), ((10572, 10594), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Recv(KB)"""'], {}), "('Recv(KB)')\n", (10582, 10594), True, 'import matplotlib.pyplot as plt\n'), ((10603, 10619), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (10611, 10619), True, 'import matplotlib.pyplot as plt\n'), ((10629, 10649), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (10640, 10649), True, 'import matplotlib.pyplot as plt\n'), ((10658, 10717), 'matplotlib.pyplot.plot', 'plt.plot', (["data['time']", "(data['txBytes'] / 1024)"], {'label': '"""all"""'}), "(data['time'], data['txBytes'] / 1024, label='all')\n", (10666, 10717), True, 'import matplotlib.pyplot as plt\n'), ((10726, 10795), 'matplotlib.pyplot.plot', 'plt.plot', (["data['time']", "(data['txTcpBytes'] / 1024)", '"""r--"""'], {'label': '"""tcp"""'}), "(data['time'], data['txTcpBytes'] / 1024, 'r--', label='tcp')\n", (10734, 10795), True, 'import matplotlib.pyplot as plt\n'), ((10804, 10816), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10814, 10816), True, 'import matplotlib.pyplot as plt\n'), ((10825, 10843), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (10835, 10843), True, 'import matplotlib.pyplot as plt\n'), ((10852, 10874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Send(KB)"""'], {}), "('Send(KB)')\n", (10862, 10874), True, 'import matplotlib.pyplot as plt\n'), ((10883, 10899), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (10891, 10899), True, 'import matplotlib.pyplot as plt\n'), ((10965, 10974), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10972, 10974), True, 'import matplotlib.pyplot as plt\n'), ((10984, 11004), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (10995, 11004), True, 'import matplotlib.pyplot as plt\n'), ((11109, 11149), 'matplotlib.pyplot.plot', 'plt.plot', (["data['time']", "data['pss']", '"""-"""'], {}), "(data['time'], data['pss'], '-')\n", (11117, 11149), True, 'import matplotlib.pyplot as plt\n'), ((11158, 11179), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PSS(MB)"""'], {}), "('PSS(MB)')\n", (11168, 11179), True, 'import matplotlib.pyplot as plt\n'), ((11257, 11277), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (11268, 11277), True, 'import matplotlib.pyplot as plt\n'), ((11286, 11326), 'matplotlib.pyplot.plot', 'plt.plot', (["data['time']", "data['cpu']", '"""-"""'], {}), "(data['time'], data['cpu'], '-')\n", (11294, 11326), True, 'import matplotlib.pyplot as plt\n'), ((11384, 11401), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CPU"""'], {}), "('CPU')\n", (11394, 11401), True, 'import matplotlib.pyplot as plt\n'), ((11479, 11499), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (11490, 11499), True, 'import matplotlib.pyplot as plt\n'), ((11508, 11548), 'matplotlib.pyplot.plot', 'plt.plot', (["data['time']", "data['fps']", '"""-"""'], {}), "(data['time'], data['fps'], '-')\n", (11516, 11548), True, 'import matplotlib.pyplot as plt\n'), ((11557, 11574), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""FPS"""'], {}), "('FPS')\n", (11567, 11574), True, 'import matplotlib.pyplot as plt\n'), ((11583, 11598), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(60)'], {}), '(0, 60)\n', (11591, 11598), True, 'import matplotlib.pyplot as plt\n'), ((11607, 11625), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (11617, 11625), True, 'import matplotlib.pyplot as plt\n'), ((12069, 12084), 'time.sleep', 'time.sleep', (['(500)'], {}), '(500)\n', (12079, 12084), False, 'import time\n'), ((2662, 2677), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2672, 2677), False, 'import time\n'), ((6302, 6313), 'time.time', 'time.time', ([], {}), '()\n', (6311, 6313), False, 'import time\n'), ((7475, 7488), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (7485, 7488), False, 'import csv\n'), ((7550, 7561), 'time.time', 'time.time', ([], {}), '()\n', (7559, 7561), False, 'import time\n'), ((9712, 9738), 'os.path.exists', 'os.path.exists', (['target_dir'], {}), '(target_dir)\n', (9726, 9738), False, 'import os\n'), ((9752, 9775), 'os.makedirs', 'os.makedirs', (['target_dir'], {}), '(target_dir)\n', (9763, 9775), False, 'import os\n'), ((10540, 10562), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (10560, 10562), True, 'import matplotlib.ticker as ticker\n'), ((10920, 10955), 'os.path.join', 'os.path.join', (['target_dir', '"""net.png"""'], {}), "(target_dir, 'net.png')\n", (10932, 10955), False, 'import os\n'), ((11224, 11246), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (11244, 11246), True, 'import matplotlib.ticker as ticker\n'), ((11446, 11468), 'matplotlib.ticker.NullFormatter', 'ticker.NullFormatter', ([], {}), '()\n', (11466, 11468), True, 'import matplotlib.ticker as ticker\n'), ((11646, 11685), 'os.path.join', 'os.path.join', (['target_dir', '"""summary.png"""'], {}), "(target_dir, 'summary.png')\n", (11658, 11685), False, 'import os\n'), ((8091, 8115), 'time.sleep', 'time.sleep', (['wait_seconds'], {}), '(wait_seconds)\n', (8101, 8115), False, 'import time\n'), ((8146, 8157), 'time.time', 'time.time', ([], {}), '()\n', (8155, 8157), False, 'import time\n'), ((9873, 9926), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(x, '%Y-%m-%d %H:%M:%S.%f')\n", (9899, 9926), False, 'import datetime\n'), ((6824, 6847), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6845, 6847), False, 'import datetime\n'), ((7852, 7865), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7862, 7865), False, 'import time\n'), ((10504, 10513), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10511, 10513), True, 'import matplotlib.pyplot as plt\n'), ((11188, 11197), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11195, 11197), True, 'import matplotlib.pyplot as plt\n'), ((11410, 11419), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11417, 11419), True, 'import matplotlib.pyplot as plt\n'), ((8047, 8058), 'time.time', 'time.time', ([], {}), '()\n', (8056, 8058), False, 'import time\n')]
|
"""Tests for distutils.filelist."""
import unittest
from distutils.filelist import glob_to_re, FileList
from test.support import captured_stdout, run_unittest
from distutils import debug
class FileListTestCase(unittest.TestCase):
def test_glob_to_re(self):
# simple cases
self.assertEqual(glob_to_re('foo*'), 'foo[^/]*\\Z(?ms)')
self.assertEqual(glob_to_re('foo?'), 'foo[^/]\\Z(?ms)')
self.assertEqual(glob_to_re('foo??'), 'foo[^/][^/]\\Z(?ms)')
# special cases
self.assertEqual(glob_to_re(r'foo\\*'), r'foo\\\\[^/]*\Z(?ms)')
self.assertEqual(glob_to_re(r'foo\\\*'), r'foo\\\\\\[^/]*\Z(?ms)')
self.assertEqual(glob_to_re('foo????'), r'foo[^/][^/][^/][^/]\Z(?ms)')
self.assertEqual(glob_to_re(r'foo\\??'), r'foo\\\\[^/][^/]\Z(?ms)')
def test_debug_print(self):
file_list = FileList()
with captured_stdout() as stdout:
file_list.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), '')
debug.DEBUG = True
try:
with captured_stdout() as stdout:
file_list.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), 'xxx\n')
finally:
debug.DEBUG = False
def test_suite():
return unittest.makeSuite(FileListTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
[
"distutils.filelist.glob_to_re",
"test.support.captured_stdout",
"unittest.makeSuite",
"distutils.filelist.FileList"
] |
[((1317, 1353), 'unittest.makeSuite', 'unittest.makeSuite', (['FileListTestCase'], {}), '(FileListTestCase)\n', (1335, 1353), False, 'import unittest\n'), ((865, 875), 'distutils.filelist.FileList', 'FileList', ([], {}), '()\n', (873, 875), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((312, 330), 'distutils.filelist.glob_to_re', 'glob_to_re', (['"""foo*"""'], {}), "('foo*')\n", (322, 330), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((377, 395), 'distutils.filelist.glob_to_re', 'glob_to_re', (['"""foo?"""'], {}), "('foo?')\n", (387, 395), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((441, 460), 'distutils.filelist.glob_to_re', 'glob_to_re', (['"""foo??"""'], {}), "('foo??')\n", (451, 460), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((535, 557), 'distutils.filelist.glob_to_re', 'glob_to_re', (['"""foo\\\\\\\\*"""'], {}), "('foo\\\\\\\\*')\n", (545, 557), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((607, 631), 'distutils.filelist.glob_to_re', 'glob_to_re', (['"""foo\\\\\\\\\\\\*"""'], {}), "('foo\\\\\\\\\\\\*')\n", (617, 631), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((682, 703), 'distutils.filelist.glob_to_re', 'glob_to_re', (['"""foo????"""'], {}), "('foo????')\n", (692, 703), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((761, 784), 'distutils.filelist.glob_to_re', 'glob_to_re', (['"""foo\\\\\\\\??"""'], {}), "('foo\\\\\\\\??')\n", (771, 784), False, 'from distutils.filelist import glob_to_re, FileList\n'), ((889, 906), 'test.support.captured_stdout', 'captured_stdout', ([], {}), '()\n', (904, 906), False, 'from test.support import captured_stdout, run_unittest\n'), ((1084, 1101), 'test.support.captured_stdout', 'captured_stdout', ([], {}), '()\n', (1099, 1101), False, 'from test.support import captured_stdout, run_unittest\n')]
|
#!/usr/bin/env python3
import sys
# import osgeo.utils.ogrmerge as a convenience to use as a script
from osgeo.utils.ogrmerge import * # noqa
from osgeo.utils.ogrmerge import main
from osgeo.gdal import deprecation_warn
deprecation_warn('ogrmerge', 'utils')
sys.exit(main(sys.argv))
|
[
"osgeo.utils.ogrmerge.main",
"osgeo.gdal.deprecation_warn"
] |
[((224, 261), 'osgeo.gdal.deprecation_warn', 'deprecation_warn', (['"""ogrmerge"""', '"""utils"""'], {}), "('ogrmerge', 'utils')\n", (240, 261), False, 'from osgeo.gdal import deprecation_warn\n'), ((271, 285), 'osgeo.utils.ogrmerge.main', 'main', (['sys.argv'], {}), '(sys.argv)\n', (275, 285), False, 'from osgeo.utils.ogrmerge import main\n')]
|
import argparse
from debug import debug_assert
from accounting import Ledger, data_path
parser = argparse.ArgumentParser(description="For a certain ledger, performs a mapping of transactions into derived accounts")
parser.add_argument("--ledger", nargs=1, required=True, help="The ledger the accounts are mapped in", metavar="<Ledger Name>", dest="ledger_name")
arguments = parser.parse_args()
debug_assert(isinstance(arguments.ledger_name, list) and len(arguments.ledger_name) == 1)
ledger_name = arguments.ledger_name[0]
ledger_data_path = data_path.joinpath(ledger_name)
if not ledger_data_path.exists():
    raise FileNotFoundError(f"The ledger {ledger_name} does not exist!")
ledger = Ledger(ledger_data_path)
ledger.map_spending_accounts()
|
[
"accounting.Ledger",
"accounting.data_path.joinpath",
"argparse.ArgumentParser"
] |
[((99, 226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""For a certain ledger, performs a mapping of transactions into derived accounts"""'}), "(description=\n 'For a certain ledger, performs a mapping of transactions into derived accounts'\n )\n", (122, 226), False, 'import argparse\n'), ((547, 578), 'accounting.data_path.joinpath', 'data_path.joinpath', (['ledger_name'], {}), '(ledger_name)\n', (565, 578), False, 'from accounting import Ledger, data_path\n'), ((680, 704), 'accounting.Ledger', 'Ledger', (['ledger_data_path'], {}), '(ledger_data_path)\n', (686, 704), False, 'from accounting import Ledger, data_path\n')]
|
# !!! Change: this is a new file
import os
import pathlib
import random
import time
import pprint
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from utils.conv_type import FixedSubnetConv, SampleSubnetConv
from utils.logging import AverageMeter, ProgressMeter
from utils.net_utils import (
set_model_prune_rate,
freeze_model_weights,
save_checkpoint,
get_lr,
LabelSmoothing,
)
from utils.schedulers import get_policy
from utils.feature_extractor import FeatureExtractor
from args import args
import importlib
import data
import models
import numpy as np
def main():
print(args)
# Simply call main_worker function
main_worker(args)
def main_worker(args):
args.gpu = None
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# create model and optimizer
model = get_model(args)
model = set_gpu(args, model)
if args.pretrained:
pretrained(args, model)
data = get_dataset(args)
output_path = args.pretrained + "_activations"
# setup feature extractor
feature_extractor = FeatureExtractor(model.module)
print(model.module)
target_layers = feature_extractor.parse_default_layers()
target_types = feature_extractor.parse_type("relu")
feature_extractor.append_target_layers(target_layers, target_types)
# print(feature_extractor.module_dict)
print(feature_extractor.target_outputs.keys())
predicate(data.val_loader, feature_extractor, output_path)
def predicate(data_loader, feature_extractor, output_path=None):
batch_time = AverageMeter("Time", ":6.3f", write_val=False)
model = feature_extractor.model
outputs_dict = dict()
# switch to evaluate mode
model.eval()
with torch.no_grad():
toc = time.time()
for batch_ind, (input, _) in enumerate(data_loader):
input = input.cuda(non_blocking=True)
# forward to get intermediate outputs
_ = model(input)
# synchronize so that everything is calculated
torch.cuda.synchronize()
# print(feature_extractor.target_outputs)
for target_layer, target_output in feature_extractor.target_outputs.items():
if target_layer in outputs_dict:
outputs_dict[target_layer].append(target_output.data.numpy())
else:
outputs_dict[target_layer] = [target_output.data.numpy()]
# measure elapsed time
batch_time.update(time.time() - toc)
toc = time.time()
if batch_ind % 10 == 0:
print('Predicate: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
batch_ind, len(data_loader), batch_time=batch_time))
if output_path is not None:
# def _squeeze_dict(d):
# for key, val in d.items():
# d[key] = np.concatenate(val, 0)
# return d
# outputs_dict = _squeeze_dict(outputs_dict)
# np.savez_compressed(output_path, **outputs_dict)
for key, val in outputs_dict.items():
np.save(output_path + '_' + key, np.concatenate(val, 0))
print(key, 'saved')
def set_gpu(args, model):
assert torch.cuda.is_available(), "CPU-only experiments currently unsupported"
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
elif args.multigpu is None:
device = torch.device("cpu")
else:
# DataParallel will divide and allocate batch_size to all available GPUs
print(f"=> Parallelizing on {args.multigpu} gpus")
torch.cuda.set_device(args.multigpu[0])
args.gpu = args.multigpu[0]
model = torch.nn.DataParallel(model, device_ids=args.multigpu).cuda(
args.multigpu[0]
)
cudnn.benchmark = True
return model
def pretrained(args, model):
if os.path.isfile(args.pretrained):
print("=> loading pretrained weights from '{}'".format(args.pretrained))
pretrained = torch.load(
args.pretrained,
map_location=torch.device("cuda:{}".format(args.multigpu[0])),
)["state_dict"]
model_state_dict = model.state_dict()
for k, v in pretrained.items():
if k not in model_state_dict or v.size() != model_state_dict[k].size():
print("IGNORE:", k)
pretrained = {
k: v
for k, v in pretrained.items()
if (k in model_state_dict and v.size() == model_state_dict[k].size())
}
model_state_dict.update(pretrained)
model.load_state_dict(model_state_dict)
else:
print("=> no pretrained weights found at '{}'".format(args.pretrained))
for n, m in model.named_modules():
if isinstance(m, FixedSubnetConv):
m.set_subnet()
def get_dataset(args):
print(f"=> Getting {args.set} dataset")
dataset = getattr(data, args.set)(args)
return dataset
def get_model(args):
if args.first_layer_dense:
args.first_layer_type = "DenseConv"
print("=> Creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# applying sparsity to the network
if (
args.conv_type != "DenseConv"
and args.conv_type != "SampleSubnetConv"
and args.conv_type != "ContinuousSparseConv"
):
if args.prune_rate < 0:
raise ValueError("Need to set a positive prune rate")
set_model_prune_rate(model, prune_rate=args.prune_rate)
print(
f"=> Rough estimate model params {sum(int(p.numel() * (1-args.prune_rate)) for n, p in model.named_parameters() if not n.endswith('scores'))}"
)
# freezing the weights if we are only doing subnet training
if args.freeze_weights:
freeze_model_weights(model)
return model
if __name__ == "__main__":
main()
|
[
"torch.cuda.synchronize",
"utils.net_utils.set_model_prune_rate",
"utils.net_utils.freeze_model_weights",
"time.time",
"os.path.isfile",
"utils.logging.AverageMeter",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.set_device",
"torch.nn.DataParallel",
"torch.no_grad",
"numpy.concatenate",
"utils.feature_extractor.FeatureExtractor"
] |
[((1274, 1304), 'utils.feature_extractor.FeatureExtractor', 'FeatureExtractor', (['model.module'], {}), '(model.module)\n', (1290, 1304), False, 'from utils.feature_extractor import FeatureExtractor\n'), ((1761, 1807), 'utils.logging.AverageMeter', 'AverageMeter', (['"""Time"""', '""":6.3f"""'], {'write_val': '(False)'}), "('Time', ':6.3f', write_val=False)\n", (1773, 1807), False, 'from utils.logging import AverageMeter, ProgressMeter\n'), ((3476, 3501), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3499, 3501), False, 'import torch\n'), ((4158, 4189), 'os.path.isfile', 'os.path.isfile', (['args.pretrained'], {}), '(args.pretrained)\n', (4172, 4189), False, 'import os\n'), ((1928, 1943), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1941, 1943), False, 'import torch\n'), ((1959, 1970), 'time.time', 'time.time', ([], {}), '()\n', (1968, 1970), False, 'import time\n'), ((3586, 3617), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (3607, 3617), False, 'import torch\n'), ((5738, 5793), 'utils.net_utils.set_model_prune_rate', 'set_model_prune_rate', (['model'], {'prune_rate': 'args.prune_rate'}), '(model, prune_rate=args.prune_rate)\n', (5758, 5793), False, 'from utils.net_utils import set_model_prune_rate, freeze_model_weights, save_checkpoint, get_lr, LabelSmoothing\n'), ((6075, 6102), 'utils.net_utils.freeze_model_weights', 'freeze_model_weights', (['model'], {}), '(model)\n', (6095, 6102), False, 'from utils.net_utils import set_model_prune_rate, freeze_model_weights, save_checkpoint, get_lr, LabelSmoothing\n'), ((2234, 2258), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2256, 2258), False, 'import torch\n'), ((2737, 2748), 'time.time', 'time.time', ([], {}), '()\n', (2746, 2748), False, 'import time\n'), ((3704, 3723), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3716, 3723), False, 'import torch\n'), ((3882, 3921), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.multigpu[0]'], {}), '(args.multigpu[0])\n', (3903, 3921), False, 'import torch\n'), ((3381, 3403), 'numpy.concatenate', 'np.concatenate', (['val', '(0)'], {}), '(val, 0)\n', (3395, 3403), True, 'import numpy as np\n'), ((2700, 2711), 'time.time', 'time.time', ([], {}), '()\n', (2709, 2711), False, 'import time\n'), ((3974, 4028), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'args.multigpu'}), '(model, device_ids=args.multigpu)\n', (3995, 4028), False, 'import torch\n')]
|
import networkx as nx
import numpy as np
from pyquil.api import QPUCompiler
from pyquil.gates import H, RY, CZ, CNOT, MEASURE
from pyquil.quil import Program
from pyquil.quilbase import Pragma
from forest.benchmarking.compilation import basic_compile
def create_ghz_program(tree: nx.DiGraph):
"""
Create a Bell/GHZ state with CNOTs described by tree.
:param tree: A tree that describes the CNOTs to perform to create a bell/GHZ state.
:return: the program
"""
assert nx.is_tree(tree), 'Needs to be a tree'
nodes = list(nx.topological_sort(tree))
n_qubits = len(nodes)
program = Program(H(nodes[0]))
for node in nodes:
for child in tree.successors(node):
program += CNOT(node, child)
ro = program.declare('ro', 'BIT', n_qubits)
for i, q in enumerate(nodes):
program += MEASURE(q, ro[i])
return program
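# A minimal usage sketch (the qubit labels are illustrative, not part of the module):
# a star-shaped CNOT tree rooted at qubit 0 prepares a 3-qubit GHZ state,
# i.e. H 0; CNOT 0 1; CNOT 0 2; followed by measurement of every node.
#   tree = nx.DiGraph([(0, 1), (0, 2)])
#   ghz_prog = create_ghz_program(tree)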
def ghz_state_statistics(bitstrings):
"""
Compute statistics bitstrings sampled from a Bell/GHZ state
:param bitstrings: An array of bitstrings
:return: A dictionary where bell = number of bitstrings consistent with a bell/GHZ state;
total = total number of bitstrings.
"""
bitstrings = np.asarray(bitstrings)
bell = np.sum(np.logical_or(np.all(bitstrings == 0, axis=1),
np.all(bitstrings == 1, axis=1)))
total = len(bitstrings)
return {
'bell': int(bell),
'total': int(total),
}
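# For instance (illustrative values): only the all-zeros and all-ones rows are
# consistent with a GHZ state, so
#   ghz_state_statistics([[0, 0], [1, 1], [0, 1]])
# returns {'bell': 2, 'total': 3}.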
def create_graph_state(graph: nx.Graph, use_pragmas=False):
"""
Write a program to create a graph state according to the specified graph
A graph state involves Hadamarding all your qubits and then applying a CZ for each
edge in the graph. A graph state and the ability to measure it however you want gives
you universal quantum computation. Some good references are [MBQC]_ and [MBCS]_.
Similar to a Bell state / GHZ state, we can try to prepare a graph state and measure
how well we've done according to expected parities.
.. [MBQC] A One-Way Quantum Computer.
<NAME>.
Phys. Rev. Lett. 86, 5188 (2001).
https://doi.org/10.1103/PhysRevLett.86.5188
https://arxiv.org/abs/quant-ph/0010033
.. [MBCS] Measurement-based quantum computation with cluster states.
<NAME>.
Phys. Rev. A 68, 022312 (2003).
https://dx.doi.org/10.1103/PhysRevA.68.022312
https://arxiv.org/abs/quant-ph/0301052
:param graph: The graph. Nodes are used as arguments to gates, so they should be qubit-like.
:param use_pragmas: Use COMMUTING_BLOCKS pragmas to hint at the compiler
:return: A program that constructs a graph state.
"""
program = Program()
for q in graph.nodes:
program += H(q)
if use_pragmas:
program += Pragma('COMMUTING_BLOCKS')
for a, b in graph.edges:
if use_pragmas:
program += Pragma('BLOCK')
program += CZ(a, b)
if use_pragmas:
program += Pragma('END_BLOCK')
if use_pragmas:
program += Pragma('END_COMMUTING_BLOCKS')
return program
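# A minimal sketch of use (the 3-qubit line graph is an illustrative choice):
#   graph = nx.path_graph(3)              # qubits 0 - 1 - 2
#   prog = create_graph_state(graph)      # H on every node, CZ on each edge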
def measure_graph_state(graph: nx.Graph, focal_node: int):
"""
Given a graph state, measure a focal node and its neighbors with a particular measurement
angle.
:param graph: The graph state graph. This is needed to figure out what the neighbors are
:param focal_node: The node in the graph to serve as the focus. The focal node is measured
at an angle and all its neighbors are measured in the Z basis
:return: Program, list of classical offsets into the ``ro`` register.
"""
program = Program()
theta = program.declare('theta', 'REAL')
program += RY(theta, focal_node)
neighbors = sorted(graph[focal_node])
ro = program.declare('ro', 'BIT', len(neighbors) + 1)
program += MEASURE(focal_node, ro[0])
for i, neighbor in enumerate(neighbors):
program += MEASURE(neighbor, ro[i + 1])
classical_addresses = list(range(len(neighbors) + 1))
return program, classical_addresses
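# Continuing the sketch above: measure the middle qubit of the 3-qubit line at a
# run-time angle while its neighbors are read out in the Z basis; the value of
# 'theta' is supplied later through the declared REAL memory region.
#   meas_prog, offsets = measure_graph_state(graph, focal_node=1)
#   prog += meas_prog                     # ro[0] <- qubit 1; ro[1:] <- qubits 0, 2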
def compiled_parametric_graph_state(compiler: QPUCompiler, graph: nx.Graph, focal_node: int,
num_shots: int = 1000):
"""
Construct a program to create and measure a graph state, map it to qubits using ``addressing``,
and compile to an ISA.
Hackily implement a parameterized program by compiling a program with a particular angle,
finding where that angle appears in the results, and replacing it with ``"{angle}"`` so
the resulting compiled program can be run many times by using python's str.format method.
:param graph: A networkx graph defining the graph state
:param focal_node: The node of the graph to measure
:param compiler: The compiler to do the compiling.
:param num_shots: The number of shots to take when measuring the graph state.
:return: an executable that constructs and measures a graph state.
"""
program = create_graph_state(graph)
measure_prog, c_addrs = measure_graph_state(graph, focal_node)
program += measure_prog
program.wrap_in_numshots_loop(num_shots)
nq_program = basic_compile(program)
executable = compiler.native_quil_to_executable(nq_program)
return executable
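# A hedged sketch of how the result might be used (the QuantumComputer handle `qc`
# is an assumption here, and the exact parameter-binding call depends on the
# pyquil version in use):
#   exe = compiled_parametric_graph_state(qc.compiler, graph, focal_node=1)
#   # bind a value to the 'theta' memory region, then execute `exe` on `qc`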
|
[
"pyquil.quilbase.Pragma",
"pyquil.gates.CZ",
"pyquil.gates.MEASURE",
"numpy.asarray",
"pyquil.gates.H",
"networkx.topological_sort",
"pyquil.gates.RY",
"forest.benchmarking.compilation.basic_compile",
"pyquil.gates.CNOT",
"pyquil.quil.Program",
"networkx.is_tree",
"numpy.all"
] |
[((495, 511), 'networkx.is_tree', 'nx.is_tree', (['tree'], {}), '(tree)\n', (505, 511), True, 'import networkx as nx\n'), ((1210, 1232), 'numpy.asarray', 'np.asarray', (['bitstrings'], {}), '(bitstrings)\n', (1220, 1232), True, 'import numpy as np\n'), ((2729, 2738), 'pyquil.quil.Program', 'Program', ([], {}), '()\n', (2736, 2738), False, 'from pyquil.quil import Program\n'), ((3662, 3671), 'pyquil.quil.Program', 'Program', ([], {}), '()\n', (3669, 3671), False, 'from pyquil.quil import Program\n'), ((3732, 3753), 'pyquil.gates.RY', 'RY', (['theta', 'focal_node'], {}), '(theta, focal_node)\n', (3734, 3753), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((3871, 3897), 'pyquil.gates.MEASURE', 'MEASURE', (['focal_node', 'ro[0]'], {}), '(focal_node, ro[0])\n', (3878, 3897), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((5191, 5213), 'forest.benchmarking.compilation.basic_compile', 'basic_compile', (['program'], {}), '(program)\n', (5204, 5213), False, 'from forest.benchmarking.compilation import basic_compile\n'), ((551, 576), 'networkx.topological_sort', 'nx.topological_sort', (['tree'], {}), '(tree)\n', (570, 576), True, 'import networkx as nx\n'), ((626, 637), 'pyquil.gates.H', 'H', (['nodes[0]'], {}), '(nodes[0])\n', (627, 637), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((850, 867), 'pyquil.gates.MEASURE', 'MEASURE', (['q', 'ro[i]'], {}), '(q, ro[i])\n', (857, 867), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((2784, 2788), 'pyquil.gates.H', 'H', (['q'], {}), '(q)\n', (2785, 2788), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((2829, 2855), 'pyquil.quilbase.Pragma', 'Pragma', (['"""COMMUTING_BLOCKS"""'], {}), "('COMMUTING_BLOCKS')\n", (2835, 2855), False, 'from pyquil.quilbase import Pragma\n'), ((2967, 2975), 'pyquil.gates.CZ', 'CZ', (['a', 'b'], {}), '(a, b)\n', (2969, 2975), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((3082, 3112), 'pyquil.quilbase.Pragma', 'Pragma', (['"""END_COMMUTING_BLOCKS"""'], {}), "('END_COMMUTING_BLOCKS')\n", (3088, 3112), False, 'from pyquil.quilbase import Pragma\n'), ((3962, 3990), 'pyquil.gates.MEASURE', 'MEASURE', (['neighbor', 'ro[i + 1]'], {}), '(neighbor, ro[i + 1])\n', (3969, 3990), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((730, 747), 'pyquil.gates.CNOT', 'CNOT', (['node', 'child'], {}), '(node, child)\n', (734, 747), False, 'from pyquil.gates import H, RY, CZ, CNOT, MEASURE\n'), ((1265, 1296), 'numpy.all', 'np.all', (['(bitstrings == 0)'], {'axis': '(1)'}), '(bitstrings == 0, axis=1)\n', (1271, 1296), True, 'import numpy as np\n'), ((1330, 1361), 'numpy.all', 'np.all', (['(bitstrings == 1)'], {'axis': '(1)'}), '(bitstrings == 1, axis=1)\n', (1336, 1361), True, 'import numpy as np\n'), ((2932, 2947), 'pyquil.quilbase.Pragma', 'Pragma', (['"""BLOCK"""'], {}), "('BLOCK')\n", (2938, 2947), False, 'from pyquil.quilbase import Pragma\n'), ((3023, 3042), 'pyquil.quilbase.Pragma', 'Pragma', (['"""END_BLOCK"""'], {}), "('END_BLOCK')\n", (3029, 3042), False, 'from pyquil.quilbase import Pragma\n')]
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Tests for the Event implementation (event.py)
#
# Copyright (C) 2017 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import bb
import logging
import bb.compat
import bb.event
import importlib
import threading
import time
import pickle
from unittest.mock import Mock
from unittest.mock import call
class EventQueueStub():
""" Class used as specification for UI event handler queue stub objects """
def __init__(self):
return
def send(self, event):
return
class PickleEventQueueStub():
""" Class used as specification for UI event handler queue stub objects
with sendpickle method """
def __init__(self):
return
def sendpickle(self, pickled_event):
return
class UIClientStub():
""" Class used as specification for UI event handler stub objects """
def __init__(self):
self.event = None
class EventHandlingTest(unittest.TestCase):
""" Event handling test class """
_threadlock_test_calls = []
def setUp(self):
self._test_process = Mock()
ui_client1 = UIClientStub()
ui_client2 = UIClientStub()
self._test_ui1 = Mock(wraps=ui_client1)
self._test_ui2 = Mock(wraps=ui_client2)
importlib.reload(bb.event)
def _create_test_handlers(self):
""" Method used to create a test handler ordered dictionary """
test_handlers = bb.compat.OrderedDict()
test_handlers["handler1"] = self._test_process.handler1
test_handlers["handler2"] = self._test_process.handler2
return test_handlers
def test_class_handlers(self):
""" Test set_class_handlers and get_class_handlers methods """
test_handlers = self._create_test_handlers()
bb.event.set_class_handlers(test_handlers)
self.assertEqual(test_handlers,
bb.event.get_class_handlers())
def test_handlers(self):
""" Test set_handlers and get_handlers """
test_handlers = self._create_test_handlers()
bb.event.set_handlers(test_handlers)
self.assertEqual(test_handlers,
bb.event.get_handlers())
def test_clean_class_handlers(self):
""" Test clean_class_handlers method """
cleanDict = bb.compat.OrderedDict()
self.assertEqual(cleanDict,
bb.event.clean_class_handlers())
def test_register(self):
""" Test register method for class handlers """
result = bb.event.register("handler", self._test_process.handler)
self.assertEqual(result, bb.event.Registered)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("handler", handlers_dict)
def test_already_registered(self):
""" Test detection of an already registed class handler """
bb.event.register("handler", self._test_process.handler)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("handler", handlers_dict)
result = bb.event.register("handler", self._test_process.handler)
self.assertEqual(result, bb.event.AlreadyRegistered)
def test_register_from_string(self):
""" Test register method receiving code in string """
result = bb.event.register("string_handler", " return True")
self.assertEqual(result, bb.event.Registered)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("string_handler", handlers_dict)
def test_register_with_mask(self):
""" Test register method with event masking """
mask = ["bb.event.OperationStarted",
"bb.event.OperationCompleted"]
result = bb.event.register("event_handler",
self._test_process.event_handler,
mask)
self.assertEqual(result, bb.event.Registered)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("event_handler", handlers_dict)
def test_remove(self):
""" Test remove method for class handlers """
test_handlers = self._create_test_handlers()
bb.event.set_class_handlers(test_handlers)
count = len(test_handlers)
bb.event.remove("handler1", None)
test_handlers = bb.event.get_class_handlers()
self.assertEqual(len(test_handlers), count - 1)
with self.assertRaises(KeyError):
bb.event.remove("handler1", None)
def test_execute_handler(self):
""" Test execute_handler method for class handlers """
mask = ["bb.event.OperationProgress"]
result = bb.event.register("event_handler",
self._test_process.event_handler,
mask)
self.assertEqual(result, bb.event.Registered)
event = bb.event.OperationProgress(current=10, total=100)
bb.event.execute_handler("event_handler",
self._test_process.event_handler,
event,
None)
self._test_process.event_handler.assert_called_once_with(event)
def test_fire_class_handlers(self):
""" Test fire_class_handlers method """
mask = ["bb.event.OperationStarted"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self.assertEqual(result, bb.event.Registered)
result = bb.event.register("event_handler2",
self._test_process.event_handler2,
"*")
self.assertEqual(result, bb.event.Registered)
event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=123)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
bb.event.fire_class_handlers(event2, None)
expected_event_handler1 = [call(event1)]
expected_event_handler2 = [call(event1),
call(event2),
call(event2)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected_event_handler1)
self.assertEqual(self._test_process.event_handler2.call_args_list,
expected_event_handler2)
def test_change_handler_event_mapping(self):
""" Test changing the event mapping for class handlers """
event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=123)
# register handler for all events
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
"*")
self.assertEqual(result, bb.event.Registered)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
expected = [call(event1), call(event2)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)
# unregister handler and register it only for OperationStarted
result = bb.event.remove("event_handler1",
self._test_process.event_handler1)
mask = ["bb.event.OperationStarted"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self.assertEqual(result, bb.event.Registered)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
expected = [call(event1), call(event2), call(event1)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)
# unregister handler and register it only for OperationCompleted
result = bb.event.remove("event_handler1",
self._test_process.event_handler1)
mask = ["bb.event.OperationCompleted"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self.assertEqual(result, bb.event.Registered)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
expected = [call(event1), call(event2), call(event1), call(event2)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)
def test_register_UIHhandler(self):
""" Test register_UIHhandler method """
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
def test_UIHhandler_already_registered(self):
""" Test registering an UIHhandler already existing """
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 2)
def test_unregister_UIHhandler(self):
""" Test unregister_UIHhandler method """
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
result = bb.event.unregister_UIHhandler(1)
self.assertIs(result, None)
def test_fire_ui_handlers(self):
""" Test fire_ui_handlers method """
self._test_ui1.event = Mock(spec_set=EventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
self.assertEqual(result, 2)
event1 = bb.event.OperationStarted()
bb.event.fire_ui_handlers(event1, None)
expected = [call(event1)]
self.assertEqual(self._test_ui1.event.send.call_args_list,
expected)
expected = [call(pickle.dumps(event1))]
self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
expected)
def test_fire(self):
""" Test fire method used to trigger class and ui event handlers """
mask = ["bb.event.ConfigParsed"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self._test_ui1.event = Mock(spec_set=EventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
event1 = bb.event.ConfigParsed()
bb.event.fire(event1, None)
expected = [call(event1)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)
self.assertEqual(self._test_ui1.event.send.call_args_list,
expected)
def test_fire_from_worker(self):
""" Test fire_from_worker method """
self._test_ui1.event = Mock(spec_set=EventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
event1 = bb.event.ConfigParsed()
bb.event.fire_from_worker(event1, None)
expected = [call(event1)]
self.assertEqual(self._test_ui1.event.send.call_args_list,
expected)
def test_print_ui_queue(self):
""" Test print_ui_queue method """
event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=123)
bb.event.fire(event1, None)
bb.event.fire(event2, None)
logger = logging.getLogger("BitBake")
logger.addHandler(bb.event.LogHandler())
logger.info("Test info LogRecord")
logger.warning("Test warning LogRecord")
with self.assertLogs("BitBake", level="INFO") as cm:
bb.event.print_ui_queue()
self.assertEqual(cm.output,
["INFO:BitBake:Test info LogRecord",
"WARNING:BitBake:Test warning LogRecord"])
def _set_threadlock_test_mockups(self):
""" Create UI event handler mockups used in enable and disable
threadlock tests """
def ui1_event_send(event):
if type(event) is bb.event.ConfigParsed:
self._threadlock_test_calls.append("w1_ui1")
if type(event) is bb.event.OperationStarted:
self._threadlock_test_calls.append("w2_ui1")
time.sleep(2)
def ui2_event_send(event):
if type(event) is bb.event.ConfigParsed:
self._threadlock_test_calls.append("w1_ui2")
if type(event) is bb.event.OperationStarted:
self._threadlock_test_calls.append("w2_ui2")
time.sleep(2)
self._threadlock_test_calls = []
self._test_ui1.event = EventQueueStub()
self._test_ui1.event.send = ui1_event_send
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
self._test_ui2.event = EventQueueStub()
self._test_ui2.event.send = ui2_event_send
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
self.assertEqual(result, 2)
def _set_and_run_threadlock_test_workers(self):
""" Create and run the workers used to trigger events in enable and
disable threadlock tests """
worker1 = threading.Thread(target=self._thread_lock_test_worker1)
worker2 = threading.Thread(target=self._thread_lock_test_worker2)
worker1.start()
time.sleep(1)
worker2.start()
worker1.join()
worker2.join()
def _thread_lock_test_worker1(self):
""" First worker used to fire the ConfigParsed event for enable and
disable threadlocks tests """
bb.event.fire(bb.event.ConfigParsed(), None)
def _thread_lock_test_worker2(self):
""" Second worker used to fire the OperationStarted event for enable
and disable threadlocks tests """
bb.event.fire(bb.event.OperationStarted(), None)
def test_enable_threadlock(self):
""" Test enable_threadlock method """
self._set_threadlock_test_mockups()
bb.event.enable_threadlock()
self._set_and_run_threadlock_test_workers()
# Calls to UI handlers should be in order as all the registered
# handlers for the event coming from the first worker should be
# called before processing the event from the second worker.
self.assertEqual(self._threadlock_test_calls,
["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])
def test_disable_threadlock(self):
""" Test disable_threadlock method """
self._set_threadlock_test_mockups()
bb.event.disable_threadlock()
self._set_and_run_threadlock_test_workers()
# Calls to UI handlers should be intertwined together. Thanks to the
# delay in the registered handlers for the event coming from the first
# worker, the event coming from the second worker starts being
# processed before finishing handling the first worker event.
self.assertEqual(self._threadlock_test_calls,
["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])
|
[
"bb.event.fire_ui_handlers",
"bb.event.unregister_UIHhandler",
"unittest.mock.call",
"bb.event.OperationStarted",
"bb.compat.OrderedDict",
"bb.event.clean_class_handlers",
"bb.event.fire_class_handlers",
"bb.event.fire_from_worker",
"bb.event.set_class_handlers",
"bb.event.register",
"bb.event.enable_threadlock",
"bb.event.ConfigParsed",
"bb.event.get_class_handlers",
"pickle.dumps",
"threading.Thread",
"bb.event.disable_threadlock",
"time.sleep",
"bb.event.get_handlers",
"bb.event.execute_handler",
"bb.event.LogHandler",
"bb.event.set_handlers",
"bb.event.register_UIHhandler",
"bb.event.print_ui_queue",
"bb.event.OperationCompleted",
"unittest.mock.Mock",
"bb.event.remove",
"bb.event.OperationProgress",
"importlib.reload",
"bb.event.fire",
"logging.getLogger"
] |
[((1771, 1777), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1775, 1777), False, 'from unittest.mock import Mock\n'), ((1875, 1897), 'unittest.mock.Mock', 'Mock', ([], {'wraps': 'ui_client1'}), '(wraps=ui_client1)\n', (1879, 1897), False, 'from unittest.mock import Mock\n'), ((1923, 1945), 'unittest.mock.Mock', 'Mock', ([], {'wraps': 'ui_client2'}), '(wraps=ui_client2)\n', (1927, 1945), False, 'from unittest.mock import Mock\n'), ((1954, 1980), 'importlib.reload', 'importlib.reload', (['bb.event'], {}), '(bb.event)\n', (1970, 1980), False, 'import importlib\n'), ((2115, 2138), 'bb.compat.OrderedDict', 'bb.compat.OrderedDict', ([], {}), '()\n', (2136, 2138), False, 'import bb\n'), ((2464, 2506), 'bb.event.set_class_handlers', 'bb.event.set_class_handlers', (['test_handlers'], {}), '(test_handlers)\n', (2491, 2506), False, 'import bb\n'), ((2745, 2781), 'bb.event.set_handlers', 'bb.event.set_handlers', (['test_handlers'], {}), '(test_handlers)\n', (2766, 2781), False, 'import bb\n'), ((2983, 3006), 'bb.compat.OrderedDict', 'bb.compat.OrderedDict', ([], {}), '()\n', (3004, 3006), False, 'import bb\n'), ((3204, 3260), 'bb.event.register', 'bb.event.register', (['"""handler"""', 'self._test_process.handler'], {}), "('handler', self._test_process.handler)\n", (3221, 3260), False, 'import bb\n'), ((3339, 3368), 'bb.event.get_class_handlers', 'bb.event.get_class_handlers', ([], {}), '()\n', (3366, 3368), False, 'import bb\n'), ((3533, 3589), 'bb.event.register', 'bb.event.register', (['"""handler"""', 'self._test_process.handler'], {}), "('handler', self._test_process.handler)\n", (3550, 3589), False, 'import bb\n'), ((3614, 3643), 'bb.event.get_class_handlers', 'bb.event.get_class_handlers', ([], {}), '()\n', (3641, 3643), False, 'import bb\n'), ((3709, 3765), 'bb.event.register', 'bb.event.register', (['"""handler"""', 'self._test_process.handler'], {}), "('handler', self._test_process.handler)\n", (3726, 3765), False, 'import bb\n'), ((3948, 4002), 'bb.event.register', 'bb.event.register', (['"""string_handler"""', '""" return True"""'], {}), "('string_handler', ' return True')\n", (3965, 4002), False, 'import bb\n'), ((4081, 4110), 'bb.event.get_class_handlers', 'bb.event.get_class_handlers', ([], {}), '()\n', (4108, 4110), False, 'import bb\n'), ((4371, 4445), 'bb.event.register', 'bb.event.register', (['"""event_handler"""', 'self._test_process.event_handler', 'mask'], {}), "('event_handler', self._test_process.event_handler, mask)\n", (4388, 4445), False, 'import bb\n'), ((4594, 4623), 'bb.event.get_class_handlers', 'bb.event.get_class_handlers', ([], {}), '()\n', (4621, 4623), False, 'import bb\n'), ((4821, 4863), 'bb.event.set_class_handlers', 'bb.event.set_class_handlers', (['test_handlers'], {}), '(test_handlers)\n', (4848, 4863), False, 'import bb\n'), ((4907, 4940), 'bb.event.remove', 'bb.event.remove', (['"""handler1"""', 'None'], {}), "('handler1', None)\n", (4922, 4940), False, 'import bb\n'), ((4965, 4994), 'bb.event.get_class_handlers', 'bb.event.get_class_handlers', ([], {}), '()\n', (4992, 4994), False, 'import bb\n'), ((5302, 5376), 'bb.event.register', 'bb.event.register', (['"""event_handler"""', 'self._test_process.event_handler', 'mask'], {}), "('event_handler', self._test_process.event_handler, mask)\n", (5319, 5376), False, 'import bb\n'), ((5517, 5566), 'bb.event.OperationProgress', 'bb.event.OperationProgress', ([], {'current': '(10)', 'total': '(100)'}), '(current=10, total=100)\n', (5543, 5566), False, 'import bb\n'), ((5575, 5667), 'bb.event.execute_handler', 'bb.event.execute_handler', (['"""event_handler"""', 'self._test_process.event_handler', 'event', 'None'], {}), "('event_handler', self._test_process.event_handler,\n event, None)\n", (5599, 5667), False, 'import bb\n'), ((5986, 6062), 'bb.event.register', 'bb.event.register', (['"""event_handler1"""', 'self._test_process.event_handler1', 'mask'], {}), "('event_handler1', self._test_process.event_handler1, mask)\n", (6003, 6062), False, 'import bb\n'), ((6204, 6279), 'bb.event.register', 'bb.event.register', (['"""event_handler2"""', 'self._test_process.event_handler2', '"""*"""'], {}), "('event_handler2', self._test_process.event_handler2, '*')\n", (6221, 6279), False, 'import bb\n'), ((6421, 6448), 'bb.event.OperationStarted', 'bb.event.OperationStarted', ([], {}), '()\n', (6446, 6448), False, 'import bb\n'), ((6466, 6504), 'bb.event.OperationCompleted', 'bb.event.OperationCompleted', ([], {'total': '(123)'}), '(total=123)\n', (6493, 6504), False, 'import bb\n'), ((6513, 6555), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event1', 'None'], {}), '(event1, None)\n', (6541, 6555), False, 'import bb\n'), ((6564, 6606), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event2', 'None'], {}), '(event2, None)\n', (6592, 6606), False, 'import bb\n'), ((6615, 6657), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event2', 'None'], {}), '(event2, None)\n', (6643, 6657), False, 'import bb\n'), ((7238, 7265), 'bb.event.OperationStarted', 'bb.event.OperationStarted', ([], {}), '()\n', (7263, 7265), False, 'import bb\n'), ((7283, 7321), 'bb.event.OperationCompleted', 'bb.event.OperationCompleted', ([], {'total': '(123)'}), '(total=123)\n', (7310, 7321), False, 'import bb\n'), ((7382, 7457), 'bb.event.register', 'bb.event.register', (['"""event_handler1"""', 'self._test_process.event_handler1', '"""*"""'], {}), "('event_handler1', self._test_process.event_handler1, '*')\n", (7399, 7457), False, 'import bb\n'), ((7590, 7632), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event1', 'None'], {}), '(event1, None)\n', (7618, 7632), False, 'import bb\n'), ((7641, 7683), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event2', 'None'], {}), '(event2, None)\n', (7669, 7683), False, 'import bb\n'), ((7931, 7999), 'bb.event.remove', 'bb.event.remove', (['"""event_handler1"""', 'self._test_process.event_handler1'], {}), "('event_handler1', self._test_process.event_handler1)\n", (7946, 7999), False, 'import bb\n'), ((8095, 8171), 'bb.event.register', 'bb.event.register', (['"""event_handler1"""', 'self._test_process.event_handler1', 'mask'], {}), "('event_handler1', self._test_process.event_handler1, mask)\n", (8112, 8171), False, 'import bb\n'), ((8304, 8346), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event1', 'None'], {}), '(event1, None)\n', (8332, 8346), False, 'import bb\n'), ((8355, 8397), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event2', 'None'], {}), '(event2, None)\n', (8383, 8397), False, 'import bb\n'), ((8661, 8729), 'bb.event.remove', 'bb.event.remove', (['"""event_handler1"""', 'self._test_process.event_handler1'], {}), "('event_handler1', self._test_process.event_handler1)\n", (8676, 8729), False, 'import bb\n'), ((8827, 8903), 'bb.event.register', 'bb.event.register', (['"""event_handler1"""', 'self._test_process.event_handler1', 'mask'], {}), "('event_handler1', self._test_process.event_handler1, mask)\n", (8844, 8903), False, 'import bb\n'), ((9036, 9078), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event1', 'None'], {}), '(event1, None)\n', (9064, 9078), False, 'import bb\n'), ((9087, 9129), 'bb.event.fire_class_handlers', 'bb.event.fire_class_handlers', (['event2', 'None'], {}), '(event2, None)\n', (9115, 9129), False, 'import bb\n'), ((9422, 9479), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (9450, 9479), False, 'import bb\n'), ((9648, 9705), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (9676, 9705), False, 'import bb\n'), ((9759, 9816), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (9787, 9816), False, 'import bb\n'), ((9963, 10020), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (9991, 10020), False, 'import bb\n'), ((10074, 10107), 'bb.event.unregister_UIHhandler', 'bb.event.unregister_UIHhandler', (['(1)'], {}), '(1)\n', (10104, 10107), False, 'import bb\n'), ((10258, 10287), 'unittest.mock.Mock', 'Mock', ([], {'spec_set': 'EventQueueStub'}), '(spec_set=EventQueueStub)\n', (10262, 10287), False, 'from unittest.mock import Mock\n'), ((10305, 10362), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (10333, 10362), False, 'import bb\n'), ((10430, 10465), 'unittest.mock.Mock', 'Mock', ([], {'spec_set': 'PickleEventQueueStub'}), '(spec_set=PickleEventQueueStub)\n', (10434, 10465), False, 'from unittest.mock import Mock\n'), ((10483, 10540), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui2'], {'mainui': '(True)'}), '(self._test_ui2, mainui=True)\n', (10511, 10540), False, 'import bb\n'), ((10594, 10621), 'bb.event.OperationStarted', 'bb.event.OperationStarted', ([], {}), '()\n', (10619, 10621), False, 'import bb\n'), ((10630, 10669), 'bb.event.fire_ui_handlers', 'bb.event.fire_ui_handlers', (['event1', 'None'], {}), '(event1, None)\n', (10655, 10669), False, 'import bb\n'), ((11123, 11199), 'bb.event.register', 'bb.event.register', (['"""event_handler1"""', 'self._test_process.event_handler1', 'mask'], {}), "('event_handler1', self._test_process.event_handler1, mask)\n", (11140, 11199), False, 'import bb\n'), ((11302, 11331), 'unittest.mock.Mock', 'Mock', ([], {'spec_set': 'EventQueueStub'}), '(spec_set=EventQueueStub)\n', (11306, 11331), False, 'from unittest.mock import Mock\n'), ((11349, 11406), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (11377, 11406), False, 'import bb\n'), ((11461, 11484), 'bb.event.ConfigParsed', 'bb.event.ConfigParsed', ([], {}), '()\n', (11482, 11484), False, 'import bb\n'), ((11493, 11520), 'bb.event.fire', 'bb.event.fire', (['event1', 'None'], {}), '(event1, None)\n', (11506, 11520), False, 'import bb\n'), ((11881, 11910), 'unittest.mock.Mock', 'Mock', ([], {'spec_set': 'EventQueueStub'}), '(spec_set=EventQueueStub)\n', (11885, 11910), False, 'from unittest.mock import Mock\n'), ((11928, 11985), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (11956, 11985), False, 'import bb\n'), ((12039, 12062), 'bb.event.ConfigParsed', 'bb.event.ConfigParsed', ([], {}), '()\n', (12060, 12062), False, 'import bb\n'), ((12071, 12110), 'bb.event.fire_from_worker', 'bb.event.fire_from_worker', (['event1', 'None'], {}), '(event1, None)\n', (12096, 12110), False, 'import bb\n'), ((12343, 12370), 'bb.event.OperationStarted', 'bb.event.OperationStarted', ([], {}), '()\n', (12368, 12370), False, 'import bb\n'), ((12388, 12426), 'bb.event.OperationCompleted', 'bb.event.OperationCompleted', ([], {'total': '(123)'}), '(total=123)\n', (12415, 12426), False, 'import bb\n'), ((12435, 12462), 'bb.event.fire', 'bb.event.fire', (['event1', 'None'], {}), '(event1, None)\n', (12448, 12462), False, 'import bb\n'), ((12471, 12498), 'bb.event.fire', 'bb.event.fire', (['event2', 'None'], {}), '(event2, None)\n', (12484, 12498), False, 'import bb\n'), ((12516, 12544), 'logging.getLogger', 'logging.getLogger', (['"""BitBake"""'], {}), "('BitBake')\n", (12533, 12544), False, 'import logging\n'), ((13846, 13903), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui1'], {'mainui': '(True)'}), '(self._test_ui1, mainui=True)\n', (13874, 13903), False, 'import bb\n'), ((14056, 14113), 'bb.event.register_UIHhandler', 'bb.event.register_UIHhandler', (['self._test_ui2'], {'mainui': '(True)'}), '(self._test_ui2, mainui=True)\n', (14084, 14113), False, 'import bb\n'), ((14338, 14393), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._thread_lock_test_worker1'}), '(target=self._thread_lock_test_worker1)\n', (14354, 14393), False, 'import threading\n'), ((14412, 14467), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._thread_lock_test_worker2'}), '(target=self._thread_lock_test_worker2)\n', (14428, 14467), False, 'import threading\n'), ((14500, 14513), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14510, 14513), False, 'import time\n'), ((15156, 15184), 'bb.event.enable_threadlock', 'bb.event.enable_threadlock', ([], {}), '()\n', (15182, 15184), False, 'import bb\n'), ((15710, 15739), 'bb.event.disable_threadlock', 'bb.event.disable_threadlock', ([], {}), '()\n', (15737, 15739), False, 'import bb\n'), ((2572, 2601), 'bb.event.get_class_handlers', 'bb.event.get_class_handlers', ([], {}), '()\n', (2599, 2601), False, 'import bb\n'), ((2847, 2870), 'bb.event.get_handlers', 'bb.event.get_handlers', ([], {}), '()\n', (2868, 2870), False, 'import bb\n'), ((3068, 3099), 'bb.event.clean_class_handlers', 'bb.event.clean_class_handlers', ([], {}), '()\n', (3097, 3099), False, 'import bb\n'), ((5105, 5138), 'bb.event.remove', 'bb.event.remove', (['"""handler1"""', 'None'], {}), "('handler1', None)\n", (5120, 5138), False, 'import bb\n'), ((6693, 6705), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (6697, 6705), False, 'from unittest.mock import call\n'), ((6742, 6754), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (6746, 6754), False, 'from unittest.mock import call\n'), ((6791, 6803), 'unittest.mock.call', 'call', (['event2'], {}), '(event2)\n', (6795, 6803), False, 'from unittest.mock import call\n'), ((6840, 6852), 'unittest.mock.call', 'call', (['event2'], {}), '(event2)\n', (6844, 6852), False, 'from unittest.mock import call\n'), ((7704, 7716), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (7708, 7716), False, 'from unittest.mock import call\n'), ((7718, 7730), 'unittest.mock.call', 'call', (['event2'], {}), '(event2)\n', (7722, 7730), False, 'from unittest.mock import call\n'), ((8418, 8430), 'unittest.mock.call', 
'call', (['event1'], {}), '(event1)\n', (8422, 8430), False, 'from unittest.mock import call\n'), ((8432, 8444), 'unittest.mock.call', 'call', (['event2'], {}), '(event2)\n', (8436, 8444), False, 'from unittest.mock import call\n'), ((8446, 8458), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (8450, 8458), False, 'from unittest.mock import call\n'), ((9150, 9162), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (9154, 9162), False, 'from unittest.mock import call\n'), ((9164, 9176), 'unittest.mock.call', 'call', (['event2'], {}), '(event2)\n', (9168, 9176), False, 'from unittest.mock import call\n'), ((9178, 9190), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (9182, 9190), False, 'from unittest.mock import call\n'), ((9192, 9204), 'unittest.mock.call', 'call', (['event2'], {}), '(event2)\n', (9196, 9204), False, 'from unittest.mock import call\n'), ((10690, 10702), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (10694, 10702), False, 'from unittest.mock import call\n'), ((11541, 11553), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (11545, 11553), False, 'from unittest.mock import call\n'), ((12131, 12143), 'unittest.mock.call', 'call', (['event1'], {}), '(event1)\n', (12135, 12143), False, 'from unittest.mock import call\n'), ((12571, 12592), 'bb.event.LogHandler', 'bb.event.LogHandler', ([], {}), '()\n', (12590, 12592), False, 'import bb\n'), ((12759, 12784), 'bb.event.print_ui_queue', 'bb.event.print_ui_queue', ([], {}), '()\n', (12782, 12784), False, 'import bb\n'), ((13380, 13393), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (13390, 13393), False, 'import time\n'), ((13674, 13687), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (13684, 13687), False, 'import time\n'), ((14766, 14789), 'bb.event.ConfigParsed', 'bb.event.ConfigParsed', ([], {}), '()\n', (14787, 14789), False, 'import bb\n'), ((14984, 15011), 'bb.event.OperationStarted', 'bb.event.OperationStarted', ([], {}), '()\n', (15009, 15011), False, 'import bb\n'), ((10831, 10851), 'pickle.dumps', 'pickle.dumps', (['event1'], {}), '(event1)\n', (10843, 10851), False, 'import pickle\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
conan.stacker was created on 2017/10/17.
Author: Charles_Lai
Email: <EMAIL>
"""
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from diego.ensemble_net.base import Ensemble
from diego.ensemble_net.combination import Combiner
from diego.classifier.logistic_regression_sk import LogisticRegressionSK
class EnsembleStack(object):
def __init__(self, mode='probs', cv=5):
self.mode = mode
self.layers = []
self.cv = cv
def add_layer(self, ensemble):
if isinstance(ensemble, Ensemble):
self.layers.append(ensemble)
else:
raise Exception('not an Ensemble object')
def fit_layer(self, layer_idx, X, y):
if layer_idx >= len(self.layers):
return
elif layer_idx == len(self.layers) - 1:
self.layers[layer_idx].fit(X, y)
else:
            # class probabilities are redundant by one column (rows sum to 1),
            # so n_classes - 1 columns are kept per base classifier
            n_classes = len(set(y)) - 1
n_classifiers = len(self.layers[layer_idx])
output = np.zeros((X.shape[0], n_classes * n_classifiers))
skf = list(StratifiedKFold(self.cv).split(X, y))
for tra, tst in skf:
self.layers[layer_idx].fit(X[tra], y[tra])
out = self.layers[layer_idx].output(X[tst], mode=self.mode)
output[tst, :] = out[:, 1:, :].reshape(
out.shape[0], (out.shape[1] - 1) * out.shape[2])
self.layers[layer_idx].fit(X, y)
self.fit_layer(layer_idx + 1, output, y)
def fit(self, X, y):
if self.cv > 1:
self.fit_layer(0, X, y)
else:
X_ = X
for layer in self.layers:
layer.fit(X_, y)
out = layer.output(X_, mode=self.mode)
X_ = out[:, 1:, :].reshape(
out.shape[0], (out.shape[1] - 1) * out.shape[2])
return self
def output(self, X):
input_ = X
for layer in self.layers:
out = layer.output(input_, mode=self.mode)
input_ = out[:, 1:, :].reshape(
out.shape[0], (out.shape[1] - 1) * out.shape[2])
return input_
class EnsembleStackClassifier(object):
def __init__(self, stack, combiner=None):
self.stack = stack
if combiner is None:
self.combiner = Combiner(rule='mean')
elif isinstance(combiner, str):
if combiner == 'majority_vote':
raise ValueError('EnsembleStackClassifier '
'do not support majority_vote')
self.combiner = Combiner(rule=combiner)
elif isinstance(combiner, Combiner):
self.combiner = combiner
else:
raise ValueError('Invalid combiner!')
self.clf = self._make_clf()
@staticmethod
def _make_clf():
import autosklearn.classification
import autosklearn.pipeline.components.classification
autosklearn.pipeline.components.classification.add_classifier(
LogisticRegressionSK)
clf = autosklearn.classification.AutoSklearnClassifier(
time_left_for_this_task=30,
per_run_time_limit=10,
include_estimators=['LogisticRegressionSK'],
)
return clf
def fit(self, X, y):
self.stack.fit(X, y)
return self
def refit(self, X, y):
out = self.stack.output(X)
self.clf.fit(out, y)
def predict(self, X):
out = self.stack.output(X)
try:
y_pred = self.clf.predict(out)
        except Exception as exc:
            raise Exception('You must refit ensemble stacker') from exc
return y_pred
def output(self, X):
out = self.stack.output(X)
return self.combiner.combine(out)
def output_proba(self, X):
out = self.stack.output(X)
return np.mean(out, axis=2)
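# Minimal usage sketch (commented out: the exact Ensemble constructor signature
# from diego.ensemble_net is not shown here, and refit() needs auto-sklearn;
# clf_a/clf_b are hypothetical base estimators):
#
#   stack = EnsembleStack(mode='probs', cv=3)
#   stack.add_layer(Ensemble([clf_a, clf_b]))
#   model = EnsembleStackClassifier(stack, combiner='mean')
#   model.fit(X_train, y_train)
#   model.refit(X_train, y_train)   # fits the auto-sklearn meta-learner
#   y_pred = model.predict(X_test)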
|
[
"diego.ensemble_net.combination.Combiner",
"numpy.mean",
"numpy.zeros",
"sklearn.model_selection.StratifiedKFold"
] |
[((3956, 3976), 'numpy.mean', 'np.mean', (['out'], {'axis': '(2)'}), '(out, axis=2)\n', (3963, 3976), True, 'import numpy as np\n'), ((2395, 2416), 'diego.ensemble_net.combination.Combiner', 'Combiner', ([], {'rule': '"""mean"""'}), "(rule='mean')\n", (2403, 2416), False, 'from diego.ensemble_net.combination import Combiner\n'), ((1076, 1125), 'numpy.zeros', 'np.zeros', (['(X.shape[0], n_classes * n_classifiers)'], {}), '((X.shape[0], n_classes * n_classifiers))\n', (1084, 1125), True, 'import numpy as np\n'), ((2654, 2677), 'diego.ensemble_net.combination.Combiner', 'Combiner', ([], {'rule': 'combiner'}), '(rule=combiner)\n', (2662, 2677), False, 'from diego.ensemble_net.combination import Combiner\n'), ((1149, 1173), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['self.cv'], {}), '(self.cv)\n', (1164, 1173), False, 'from sklearn.model_selection import StratifiedKFold\n')]
|
import gzip
import ujson as json
import click
from tqdm import tqdm
from joblib import Parallel, delayed
from _config import Config
from utils import json_paths_iter, sanitize_numpy_types, write_dicts_to_csv
from tree_conversions import tree_to_nx_user_graph
from graph_metrics import compute_undirected_graph_metrics
from graph_metrics import compute_directed_graph_metrics
from graph_metrics import n_friends_followers_stats
def reply_graph_metrics(conversation, remove_root):
net_features = conversation["network_features"]
# fetch root info
root_tweet_id = conversation["reply_tree"]["tweet"]
root_user_id = conversation["tweets"][root_tweet_id]["user_id"]
    # pass the root user id only when the root was removed from the graph
if not remove_root:
root_user_id = None
# create networkx graphs
G_di = tree_to_nx_user_graph(
conversation,
directed=True,
remove_root=remove_root
)
G_ud = tree_to_nx_user_graph(
conversation,
directed=False,
remove_root=remove_root
)
# sanity check
    assert len(G_di) == len(G_ud)
    # NB: skip conversations with 0 nodes; this can happen if there are
    # only tweets by the root and we want to remove the root.
if len(G_di) == 0:
return None
# undirected graph metrics
g_ud_metrics = compute_undirected_graph_metrics(G_ud)
# directed graph metrics
g_di_metrics = compute_directed_graph_metrics(G_di)
# friends and followers stats
n_friends_followers_metrics = n_friends_followers_stats(
G_di,
net_features,
root_user_id
)
metrics = {
"root_tweet_id": root_tweet_id,
"n_nodes": len(G_di),
**g_di_metrics,
**g_ud_metrics,
**n_friends_followers_metrics
}
# convert numpy types to python types
metrics = sanitize_numpy_types(metrics)
return metrics
def compute_metrics(json_fpath):
conversation = json.load(gzip.open(json_fpath))
metrics = reply_graph_metrics(conversation, remove_root=False)
return metrics
#
# REPLY GRAPH METRICS COMPUTATION
#
def compute_reply_graph_metrics(dataset, n_jobs=1, limit=None):
print("--- Reply Graph Metrics ---")
print(f"Dataset: {dataset}")
print(f"Num Jobs: {n_jobs}")
print(f"Limit: {limit}")
print("----------------------------")
# paths
conf = Config(dataset)
output_fpath = f"{conf.data_root}/reply_graph_metrics.csv"
# iterator
json_fpaths = json_paths_iter(
conf.conversations_no_embs_jsons_dir,
limit=limit
)
# compute metrics
print("Computing metrics ...")
if n_jobs == 1:
metrics = [compute_metrics(json_fpath) \
for json_fpath in tqdm(json_fpaths)]
else:
parallel = Parallel(n_jobs=n_jobs, verbose=10)
metrics = parallel(
delayed(compute_metrics)(json_fpath) \
for json_fpath in json_fpaths
)
print("Output:", len(metrics))
# output to csv
print("Outputting reply graph metrics to CSV ...")
write_dicts_to_csv(metrics, output_fpath)
print("Done!")
@click.command()
@click.option('--dataset', required=True,
type=click.Choice(["news", "midterms"]))
@click.option('--n_jobs', required=True, type=int)
@click.option('--limit', default=None, type=int)
def main(dataset, n_jobs, limit):
compute_reply_graph_metrics(
dataset,
n_jobs=n_jobs,
limit=limit
)
if __name__ == "__main__":
main()
# END
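# Example invocation (hypothetical script name; requires the sibling _config,
# utils, tree_conversions and graph_metrics modules on the import path):
#   python reply_graph_metrics.py --dataset news --n_jobs 4 --limit 100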
|
[
"utils.sanitize_numpy_types",
"tqdm.tqdm",
"graph_metrics.compute_undirected_graph_metrics",
"tree_conversions.tree_to_nx_user_graph",
"gzip.open",
"graph_metrics.compute_directed_graph_metrics",
"_config.Config",
"click.option",
"click.command",
"click.Choice",
"utils.json_paths_iter",
"utils.write_dicts_to_csv",
"joblib.Parallel",
"joblib.delayed",
"graph_metrics.n_friends_followers_stats"
] |
[((3208, 3223), 'click.command', 'click.command', ([], {}), '()\n', (3221, 3223), False, 'import click\n'), ((3312, 3361), 'click.option', 'click.option', (['"""--n_jobs"""'], {'required': '(True)', 'type': 'int'}), "('--n_jobs', required=True, type=int)\n", (3324, 3361), False, 'import click\n'), ((3363, 3410), 'click.option', 'click.option', (['"""--limit"""'], {'default': 'None', 'type': 'int'}), "('--limit', default=None, type=int)\n", (3375, 3410), False, 'import click\n'), ((835, 910), 'tree_conversions.tree_to_nx_user_graph', 'tree_to_nx_user_graph', (['conversation'], {'directed': '(True)', 'remove_root': 'remove_root'}), '(conversation, directed=True, remove_root=remove_root)\n', (856, 910), False, 'from tree_conversions import tree_to_nx_user_graph\n'), ((954, 1030), 'tree_conversions.tree_to_nx_user_graph', 'tree_to_nx_user_graph', (['conversation'], {'directed': '(False)', 'remove_root': 'remove_root'}), '(conversation, directed=False, remove_root=remove_root)\n', (975, 1030), False, 'from tree_conversions import tree_to_nx_user_graph\n'), ((1366, 1404), 'graph_metrics.compute_undirected_graph_metrics', 'compute_undirected_graph_metrics', (['G_ud'], {}), '(G_ud)\n', (1398, 1404), False, 'from graph_metrics import compute_undirected_graph_metrics\n'), ((1458, 1494), 'graph_metrics.compute_directed_graph_metrics', 'compute_directed_graph_metrics', (['G_di'], {}), '(G_di)\n', (1488, 1494), False, 'from graph_metrics import compute_directed_graph_metrics\n'), ((1564, 1623), 'graph_metrics.n_friends_followers_stats', 'n_friends_followers_stats', (['G_di', 'net_features', 'root_user_id'], {}), '(G_di, net_features, root_user_id)\n', (1589, 1623), False, 'from graph_metrics import n_friends_followers_stats\n'), ((1900, 1929), 'utils.sanitize_numpy_types', 'sanitize_numpy_types', (['metrics'], {}), '(metrics)\n', (1920, 1929), False, 'from utils import json_paths_iter, sanitize_numpy_types, write_dicts_to_csv\n'), ((2434, 2449), '_config.Config', 'Config', (['dataset'], {}), '(dataset)\n', (2440, 2449), False, 'from _config import Config\n'), ((2552, 2618), 'utils.json_paths_iter', 'json_paths_iter', (['conf.conversations_no_embs_jsons_dir'], {'limit': 'limit'}), '(conf.conversations_no_embs_jsons_dir, limit=limit)\n', (2567, 2618), False, 'from utils import json_paths_iter, sanitize_numpy_types, write_dicts_to_csv\n'), ((3143, 3184), 'utils.write_dicts_to_csv', 'write_dicts_to_csv', (['metrics', 'output_fpath'], {}), '(metrics, output_fpath)\n', (3161, 3184), False, 'from utils import json_paths_iter, sanitize_numpy_types, write_dicts_to_csv\n'), ((2014, 2035), 'gzip.open', 'gzip.open', (['json_fpath'], {}), '(json_fpath)\n', (2023, 2035), False, 'import gzip\n'), ((2848, 2883), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': '(10)'}), '(n_jobs=n_jobs, verbose=10)\n', (2856, 2883), False, 'from joblib import Parallel, delayed\n'), ((3275, 3309), 'click.Choice', 'click.Choice', (["['news', 'midterms']"], {}), "(['news', 'midterms'])\n", (3287, 3309), False, 'import click\n'), ((2800, 2817), 'tqdm.tqdm', 'tqdm', (['json_fpaths'], {}), '(json_fpaths)\n', (2804, 2817), False, 'from tqdm import tqdm\n'), ((2924, 2948), 'joblib.delayed', 'delayed', (['compute_metrics'], {}), '(compute_metrics)\n', (2931, 2948), False, 'from joblib import Parallel, delayed\n')]
|
import click
from app.celery.tasks import publish_govuk_alerts
def setup_commands(app):
app.cli.add_command(publish)
@click.command('publish')
def publish():
publish_govuk_alerts()
|
[
"app.celery.tasks.publish_govuk_alerts",
"click.command"
] |
[((127, 151), 'click.command', 'click.command', (['"""publish"""'], {}), "('publish')\n", (140, 151), False, 'import click\n'), ((171, 193), 'app.celery.tasks.publish_govuk_alerts', 'publish_govuk_alerts', ([], {}), '()\n', (191, 193), False, 'from app.celery.tasks import publish_govuk_alerts\n')]
|
from pathlib import Path
BASE_DIR = Path(__file__).parent
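# Example: paths derived from BASE_DIR stay portable across platforms, e.g.
#   CONFIG_PATH = BASE_DIR / "config.yaml"   # hypothetical file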
|
[
"pathlib.Path"
] |
[((38, 52), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (42, 52), False, 'from pathlib import Path\n')]
|
import numpy as np
import astropy.units as u
from astropy.table import QTable
from jdaviz import Specviz
from specutils import Spectrum1D
def test_line_lists():
viz = Specviz()
spec = Spectrum1D(flux=np.random.rand(100)*u.Jy,
spectral_axis=np.arange(6000, 7000, 10)*u.AA)
viz.load_spectrum(spec)
lt = QTable()
lt['linename'] = ['O III', 'Halpha']
lt['rest'] = [5007, 6563]*u.AA
lt['redshift'] = u.Quantity(0.046)
viz.load_line_list(lt)
assert len(viz.spectral_lines) == 2
assert viz.spectral_lines.loc["linename", "Halpha"]["listname"] == "Custom"
assert np.all(viz.spectral_lines["show"])
viz.erase_spectral_lines()
assert np.all(viz.spectral_lines["show"] == False) # noqa
viz.plot_spectral_line("Halpha")
viz.plot_spectral_line("O III 5007.0")
assert np.all(viz.spectral_lines["show"])
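# All loaded lines can also be drawn in one call via the companion method
# (an assumption about the same jdaviz API family, hence commented out):
#   viz.plot_spectral_lines()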
|
[
"astropy.units.Quantity",
"jdaviz.Specviz",
"astropy.table.QTable",
"numpy.arange",
"numpy.random.rand",
"numpy.all"
] |
[((174, 183), 'jdaviz.Specviz', 'Specviz', ([], {}), '()\n', (181, 183), False, 'from jdaviz import Specviz\n'), ((343, 351), 'astropy.table.QTable', 'QTable', ([], {}), '()\n', (349, 351), False, 'from astropy.table import QTable\n'), ((449, 466), 'astropy.units.Quantity', 'u.Quantity', (['(0.046)'], {}), '(0.046)\n', (459, 466), True, 'import astropy.units as u\n'), ((626, 660), 'numpy.all', 'np.all', (["viz.spectral_lines['show']"], {}), "(viz.spectral_lines['show'])\n", (632, 660), True, 'import numpy as np\n'), ((705, 748), 'numpy.all', 'np.all', (["(viz.spectral_lines['show'] == False)"], {}), "(viz.spectral_lines['show'] == False)\n", (711, 748), True, 'import numpy as np\n'), ((850, 884), 'numpy.all', 'np.all', (["viz.spectral_lines['show']"], {}), "(viz.spectral_lines['show'])\n", (856, 884), True, 'import numpy as np\n'), ((211, 230), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (225, 230), True, 'import numpy as np\n'), ((273, 298), 'numpy.arange', 'np.arange', (['(6000)', '(7000)', '(10)'], {}), '(6000, 7000, 10)\n', (282, 298), True, 'import numpy as np\n')]
|
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
def init_browser():
# @NOTE: Replace the path with your actual path to the chromedriver
executable_path = {"executable_path": ChromeDriverManager().install()}
return Browser("chrome", **executable_path, headless=False)
def scrape():
# NASA Mars News
browser = init_browser()
news_url = "https://mars.nasa.gov/news/"
browser.visit(news_url)
time.sleep(3)
html = browser.html
news_soup = BeautifulSoup(html, "html.parser")
slide = news_soup.select('ul.item_list li.slide')[0]
news_title = slide.find("div", class_="content_title").get_text()
news_p = slide.find("div", class_="article_teaser_body").get_text()
# JPL Mars Space Images - Featured Image
jpl_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html#'
browser.visit(jpl_url)
time.sleep(3)
html = browser.html
jpl_soup = BeautifulSoup(html, "html.parser")
image = jpl_soup.select('div.floating_text_area a')
image_url = image[0]['href']
featured_image_url = ('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/' + image_url)
# Mars Facts
facts_url = 'https://space-facts.com/mars/#'
tables = pd.read_html(facts_url)
df = tables[0]
html_table = df.to_html(header=False, index=False)
mars_table = html_table.replace('\n', '')
# Mars Hemispheres
hemisphere_image_urls = [
{"title": "Valles Marineris Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg"},
{"title": "Cerberus Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg"},
{"title": "Schiaparelli Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg"},
{"title": "Syrtis Major Hemisphere", "img_url": "https://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg"},
]
# Store data in a dictionary
listings = {
"news_title": news_title,
"news_p": news_p,
"featured_image_url": featured_image_url,
"mars_table": mars_table,
"hemisphere_image_urls": hemisphere_image_urls,
}
# Close the browser after scraping
browser.quit()
# Return results
return listings
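# Usage sketch (run as a script; the keys mirror the dictionary built above):
#   if __name__ == "__main__":
#       results = scrape()
#       print(results["news_title"])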
|
[
"pandas.read_html",
"time.sleep",
"webdriver_manager.chrome.ChromeDriverManager",
"bs4.BeautifulSoup",
"splinter.Browser"
] |
[((328, 380), 'splinter.Browser', 'Browser', (['"""chrome"""'], {'headless': '(False)'}), "('chrome', **executable_path, headless=False)\n", (335, 380), False, 'from splinter import Browser\n'), ((530, 543), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (540, 543), False, 'import time\n'), ((585, 619), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (598, 619), False, 'from bs4 import BeautifulSoup\n'), ((1013, 1026), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1023, 1026), False, 'import time\n'), ((1067, 1101), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1080, 1101), False, 'from bs4 import BeautifulSoup\n'), ((1402, 1425), 'pandas.read_html', 'pd.read_html', (['facts_url'], {}), '(facts_url)\n', (1414, 1425), True, 'import pandas as pd\n'), ((284, 305), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (303, 305), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')]
|
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.datasets import make_multilabel_classification
X, Y = make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, random_state=1)
LDAModel = LatentDirichletAllocation(n_components=5, random_state=1)
LDAModel.fit(X)
# get topics for some given samples:
print(LDAModel.transform(X[-10:]))
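# The fitted topic-word weights are exposed on the estimator; for the model
# above the shape is (n_components, n_features) = (5, 20):
print(LDAModel.components_.shape)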
|
[
"sklearn.datasets.make_multilabel_classification",
"sklearn.decomposition.LatentDirichletAllocation"
] |
[((128, 233), 'sklearn.datasets.make_multilabel_classification', 'make_multilabel_classification', ([], {'n_samples': '(100)', 'n_features': '(20)', 'n_classes': '(5)', 'n_labels': '(2)', 'random_state': '(1)'}), '(n_samples=100, n_features=20, n_classes=5,\n n_labels=2, random_state=1)\n', (158, 233), False, 'from sklearn.datasets import make_multilabel_classification\n'), ((241, 298), 'sklearn.decomposition.LatentDirichletAllocation', 'LatentDirichletAllocation', ([], {'n_components': '(5)', 'random_state': '(1)'}), '(n_components=5, random_state=1)\n', (266, 298), False, 'from sklearn.decomposition import LatentDirichletAllocation\n')]
|
"""
Simple utils to save and load from disk.
"""
import joblib
import gzip
import pickle
import os
import tempfile
import tarfile
import zipfile
import logging
from urllib.request import urlretrieve
from typing import Any, Iterator, List, Optional, Tuple, Union, cast, IO
import pandas as pd
import numpy as np
import deepchem as dc
logger = logging.getLogger(__name__)
def pad_array(x: np.ndarray,
shape: Union[Tuple, int],
fill: float = 0.0,
both: bool = False) -> np.ndarray:
"""
Pad an array with a fill value.
Parameters
----------
x: np.ndarray
A numpy array.
shape: Tuple or int
Desired shape. If int, all dimensions are padded to that size.
fill: float, optional (default 0.0)
The padded value.
both: bool, optional (default False)
If True, split the padding on both sides of each axis. If False,
padding is applied to the end of each axis.
Returns
-------
np.ndarray
A padded numpy array
"""
x = np.asarray(x)
if not isinstance(shape, tuple):
shape = tuple(shape for _ in range(x.ndim))
pad = []
for i in range(x.ndim):
diff = shape[i] - x.shape[i]
assert diff >= 0
if both:
a, b = divmod(diff, 2)
b += a
pad.append((a, b))
else:
pad.append((0, diff))
pad = tuple(pad) # type: ignore
x = np.pad(x, pad, mode='constant', constant_values=fill)
return x
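# Example (sketch): an int `shape` pads every axis up to that size, e.g.
#   pad_array(np.ones((2, 3)), 4).shape == (4, 4)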
def get_data_dir() -> str:
"""Get the DeepChem data directory.
Returns
-------
str
The default path to store DeepChem data. If you want to
change this path, please set your own path to `DEEPCHEM_DATA_DIR`
as an environment variable.
"""
if 'DEEPCHEM_DATA_DIR' in os.environ:
return os.environ['DEEPCHEM_DATA_DIR']
return tempfile.gettempdir()
def download_url(url: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Download a file to disk.
Parameters
----------
url: str
The URL to download from
dest_dir: str
The directory to save the file in
name: str
The file name to save it as. If omitted, it will try to extract a file name from the URL
"""
if name is None:
name = url
if '?' in name:
name = name[:name.find('?')]
if '/' in name:
name = name[name.rfind('/') + 1:]
urlretrieve(url, os.path.join(dest_dir, name))
def untargz_file(file: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Untar and unzip a .tar.gz file to disk.
Parameters
----------
file: str
The filepath to decompress
dest_dir: str
The directory to save the file in
name: str
    The tar archive to extract. If omitted, `file` is used.
"""
if name is None:
name = file
tar = tarfile.open(name)
tar.extractall(path=dest_dir)
tar.close()
def unzip_file(file: str,
dest_dir: str = get_data_dir(),
name: Optional[str] = None):
"""Unzip a .zip file to disk.
Parameters
----------
file: str
The filepath to decompress
dest_dir: str
The directory to save the file in
name: str
The directory name to unzip it to. If omitted, it will use the file name
"""
if name is None:
name = file
if dest_dir is None:
    dest_dir = os.path.join(get_data_dir(), name)
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.extractall(dest_dir)
def load_image_files(input_files: List[str]) -> np.ndarray:
"""Loads a set of images from disk.
Parameters
----------
input_files: List[str]
List of image filenames.
Returns
-------
np.ndarray
A numpy array that contains loaded images. The shape is, `(N,...)`.
Notes
-----
This method requires Pillow to be installed.
The supported file types are PNG and TIF.
"""
try:
from PIL import Image
except ModuleNotFoundError:
raise ImportError("This function requires Pillow to be installed.")
images = []
for input_file in input_files:
_, extension = os.path.splitext(input_file)
extension = extension.lower()
if extension == ".png":
image = np.array(Image.open(input_file))
images.append(image)
elif extension == ".tif":
im = Image.open(input_file)
imarray = np.array(im)
images.append(imarray)
else:
raise ValueError("Unsupported image filetype for %s" % input_file)
return np.array(images)
def load_sdf_files(input_files: List[str],
clean_mols: bool = True,
tasks: List[str] = [],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load SDF file into dataframe.
Parameters
----------
input_files: List[str]
List of filenames
clean_mols: bool, default True
Whether to sanitize molecules.
tasks: List[str], default []
Each entry in `tasks` is treated as a property in the SDF file and is
retrieved with `mol.GetProp(str(task))` where `mol` is the RDKit mol
loaded from a given SDF entry.
shard_size: int, default None
The shard size to yield at one time.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
Notes
-----
This function requires RDKit to be installed.
"""
try:
from rdkit import Chem
except ModuleNotFoundError:
raise ImportError("This function requires RDKit to be installed.")
df_rows = []
for input_file in input_files:
# Tasks are either in .sdf.csv file or in the .sdf file itself
has_csv = os.path.isfile(input_file + ".csv")
# Structures are stored in .sdf file
logger.info("Reading structures from %s." % input_file)
suppl = Chem.SDMolSupplier(str(input_file), clean_mols, False, False)
for ind, mol in enumerate(suppl):
if mol is None:
continue
smiles = Chem.MolToSmiles(mol)
df_row = [ind, smiles, mol]
if not has_csv: # Get task targets from .sdf file
for task in tasks:
df_row.append(mol.GetProp(str(task)))
df_rows.append(df_row)
if shard_size is not None and len(df_rows) == shard_size:
if has_csv:
mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))
raw_df = next(load_csv_files([input_file + ".csv"], shard_size=None))
yield pd.concat([mol_df, raw_df], axis=1, join='inner')
else:
mol_df = pd.DataFrame(
df_rows, columns=('mol_id', 'smiles', 'mol') + tuple(tasks))
yield mol_df
# Reset aggregator
df_rows = []
# Handle final leftovers for this file
if len(df_rows) > 0:
if has_csv:
mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))
raw_df = next(load_csv_files([input_file + ".csv"], shard_size=None))
yield pd.concat([mol_df, raw_df], axis=1, join='inner')
else:
mol_df = pd.DataFrame(
df_rows, columns=('mol_id', 'smiles', 'mol') + tuple(tasks))
yield mol_df
df_rows = []
def load_csv_files(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load data as pandas dataframe from CSV files.
Parameters
----------
input_files: List[str]
List of filenames
shard_size: int, default None
The shard size to yield at one time.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
"""
# First line of user-specified CSV *must* be header.
shard_num = 1
for input_file in input_files:
if shard_size is None:
yield pd.read_csv(input_file)
else:
logger.info("About to start loading CSV from %s" % input_file)
for df in pd.read_csv(input_file, chunksize=shard_size):
logger.info(
"Loading shard %d of size %s." % (shard_num, str(shard_size)))
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
def load_json_files(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[pd.DataFrame]:
"""Load data as pandas dataframe.
Parameters
----------
input_files: List[str]
List of json filenames.
shard_size: int, default None
Chunksize for reading json files.
Returns
-------
Iterator[pd.DataFrame]
Generator which yields the dataframe which is the same shard size.
Notes
-----
To load shards from a json file into a Pandas dataframe, the file
must be originally saved with ``df.to_json('filename.json', orient='records', lines=True)``
"""
shard_num = 1
for input_file in input_files:
if shard_size is None:
yield pd.read_json(input_file, orient='records', lines=True)
else:
logger.info("About to start loading json from %s." % input_file)
for df in pd.read_json(
input_file, orient='records', chunksize=shard_size, lines=True):
logger.info(
"Loading shard %d of size %s." % (shard_num, str(shard_size)))
df = df.replace(np.nan, str(""), regex=True)
shard_num += 1
yield df
def load_pickle_file(input_file: str) -> Any:
"""Load from single, possibly gzipped, pickle file.
Parameters
----------
input_file: str
The filename of pickle file. This function can load from
gzipped pickle file like `XXXX.pkl.gz`.
Returns
-------
Any
The object which is loaded from the pickle file.
"""
if ".gz" in input_file:
with gzip.open(input_file, "rb") as unzipped_file:
return pickle.load(cast(IO[bytes], unzipped_file))
else:
with open(input_file, "rb") as opened_file:
return pickle.load(opened_file)
def load_pickle_files(input_files: List[str]) -> Iterator[Any]:
"""Load dataset from pickle files.
Parameters
----------
input_files: List[str]
The list of filenames of pickle file. This function can load from
gzipped pickle file like `XXXX.pkl.gz`.
Returns
-------
Iterator[Any]
Generator which yields the objects which is loaded from each pickle file.
"""
for input_file in input_files:
yield load_pickle_file(input_file)
def load_data(input_files: List[str],
shard_size: Optional[int] = None) -> Iterator[Any]:
"""Loads data from files.
Parameters
----------
input_files: List[str]
List of filenames.
shard_size: int, default None
Size of shard to yield
Returns
-------
Iterator[Any]
Iterator which iterates over provided files.
Notes
-----
The supported file types are SDF, CSV and Pickle.
"""
if len(input_files) == 0:
raise ValueError("The length of `filenames` must be more than 1.")
file_type = _get_file_type(input_files[0])
if file_type == "sdf":
if shard_size is not None:
logger.info("Ignoring shard_size for sdf input.")
for value in load_sdf_files(input_files):
yield value
elif file_type == "csv":
for value in load_csv_files(input_files, shard_size):
yield value
elif file_type == "pickle":
if shard_size is not None:
logger.info("Ignoring shard_size for pickle input.")
for value in load_pickle_files(input_files):
yield value
def _get_file_type(input_file: str) -> str:
"""Get type of input file. Must be csv/pkl/sdf/joblib file."""
filename, file_extension = os.path.splitext(input_file)
# If gzipped, need to compute extension again
if file_extension == ".gz":
filename, file_extension = os.path.splitext(filename)
if file_extension == ".csv":
return "csv"
elif file_extension == ".pkl":
return "pickle"
elif file_extension == ".joblib":
return "joblib"
elif file_extension == ".sdf":
return "sdf"
else:
raise ValueError("Unrecognized extension %s" % file_extension)
def save_to_disk(dataset: Any, filename: str, compress: int = 3):
"""Save a dataset to file.
Parameters
----------
dataset: str
A data saved
filename: str
Path to save data.
compress: int, default 3
The compress option when dumping joblib file.
"""
if filename.endswith('.joblib'):
joblib.dump(dataset, filename, compress=compress)
elif filename.endswith('.npy'):
np.save(filename, dataset)
else:
raise ValueError("Filename with unsupported extension: %s" % filename)
def load_from_disk(filename: str) -> Any:
"""Load a dataset from file.
Parameters
----------
filename: str
A filename you want to load data.
Returns
-------
Any
A loaded object from file.
"""
name = filename
if os.path.splitext(name)[1] == ".gz":
name = os.path.splitext(name)[0]
extension = os.path.splitext(name)[1]
if extension == ".pkl":
return load_pickle_file(filename)
elif extension == ".joblib":
return joblib.load(filename)
elif extension == ".csv":
# First line of user-specified CSV *must* be header.
df = pd.read_csv(filename, header=0)
df = df.replace(np.nan, str(""), regex=True)
return df
elif extension == ".npy":
return np.load(filename, allow_pickle=True)
else:
raise ValueError("Unrecognized filetype for %s" % filename)
def load_dataset_from_disk(save_dir: str) -> Tuple[bool, Optional[Tuple[
"dc.data.DiskDataset", "dc.data.DiskDataset", "dc.data.DiskDataset"]], List[
"dc.trans.Transformer"]]:
"""Loads MoleculeNet train/valid/test/transformers from disk.
Expects that data was saved using `save_dataset_to_disk` below. Expects the
following directory structure for `save_dir`:
save_dir/
|
---> train_dir/
|
---> valid_dir/
|
---> test_dir/
|
---> transformers.pkl
Parameters
----------
save_dir: str
Directory name to load datasets.
Returns
-------
loaded: bool
Whether the load succeeded
all_dataset: Tuple[DiskDataset, DiskDataset, DiskDataset]
The train, valid, test datasets
transformers: Transformer
The transformers used for this dataset
See Also
--------
save_dataset_to_disk
"""
train_dir = os.path.join(save_dir, "train_dir")
valid_dir = os.path.join(save_dir, "valid_dir")
test_dir = os.path.join(save_dir, "test_dir")
if not os.path.exists(train_dir) or not os.path.exists(
valid_dir) or not os.path.exists(test_dir):
return False, None, list()
loaded = True
train = dc.data.DiskDataset(train_dir)
valid = dc.data.DiskDataset(valid_dir)
test = dc.data.DiskDataset(test_dir)
train.memory_cache_size = 40 * (1 << 20) # 40 MB
all_dataset = (train, valid, test)
transformers = load_transformers(save_dir)
return loaded, all_dataset, transformers
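# Typical reload pattern (sketch; `save_dir` is whatever was passed to
# save_dataset_to_disk below):
#   loaded, datasets, transformers = load_dataset_from_disk(save_dir)
#   if loaded:
#     train, valid, test = datasets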
def save_dataset_to_disk(
save_dir: str, train: "dc.data.DiskDataset", valid: "dc.data.DiskDataset",
test: "dc.data.DiskDataset", transformers: List["dc.trans.Transformer"]):
"""Utility used by MoleculeNet to save train/valid/test datasets.
This utility function saves a train/valid/test split of a dataset along
with transformers in the same directory. The saved datasets will take the
following structure:
save_dir/
|
---> train_dir/
|
---> valid_dir/
|
---> test_dir/
|
---> transformers.pkl
Parameters
----------
save_dir: str
Directory name to save datasets to.
train: DiskDataset
Training dataset to save.
valid: DiskDataset
Validation dataset to save.
test: DiskDataset
Test dataset to save.
transformers: List[Transformer]
List of transformers to save to disk.
See Also
--------
load_dataset_from_disk
"""
train_dir = os.path.join(save_dir, "train_dir")
valid_dir = os.path.join(save_dir, "valid_dir")
test_dir = os.path.join(save_dir, "test_dir")
train.move(train_dir)
valid.move(valid_dir)
test.move(test_dir)
save_transformers(save_dir, transformers)
def load_transformers(save_dir: str) -> List["dc.trans.Transformer"]:
"""Load the transformers for a MoleculeNet dataset from disk."""
with open(os.path.join(save_dir, "transformers.pkl"), 'rb') as f:
return pickle.load(f)
def save_transformers(save_dir: str,
transformers: List["dc.trans.Transformer"]):
"""Save the transformers for a MoleculeNet dataset to disk."""
with open(os.path.join(save_dir, "transformers.pkl"), 'wb') as f:
pickle.dump(transformers, f)
|
[
"pickle.dump",
"numpy.load",
"pandas.read_csv",
"typing.cast",
"joblib.dump",
"os.path.isfile",
"pickle.load",
"rdkit.Chem.MolToSmiles",
"os.path.join",
"numpy.pad",
"pandas.DataFrame",
"os.path.exists",
"tarfile.open",
"pandas.concat",
"numpy.save",
"numpy.asarray",
"zipfile.ZipFile",
"gzip.open",
"tempfile.gettempdir",
"deepchem.data.DiskDataset",
"pandas.read_json",
"PIL.Image.open",
"numpy.array",
"os.path.splitext",
"joblib.load",
"logging.getLogger"
] |
[((345, 372), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (362, 372), False, 'import logging\n'), ((1004, 1017), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1014, 1017), True, 'import numpy as np\n'), ((1351, 1404), 'numpy.pad', 'np.pad', (['x', 'pad'], {'mode': '"""constant"""', 'constant_values': 'fill'}), "(x, pad, mode='constant', constant_values=fill)\n", (1357, 1404), True, 'import numpy as np\n'), ((1770, 1791), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1789, 1791), False, 'import tempfile\n'), ((2804, 2822), 'tarfile.open', 'tarfile.open', (['name'], {}), '(name)\n', (2816, 2822), False, 'import tarfile\n'), ((4407, 4423), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (4415, 4423), True, 'import numpy as np\n'), ((11322, 11350), 'os.path.splitext', 'os.path.splitext', (['input_file'], {}), '(input_file)\n', (11338, 11350), False, 'import os\n'), ((13988, 14023), 'os.path.join', 'os.path.join', (['save_dir', '"""train_dir"""'], {}), "(save_dir, 'train_dir')\n", (14000, 14023), False, 'import os\n'), ((14038, 14073), 'os.path.join', 'os.path.join', (['save_dir', '"""valid_dir"""'], {}), "(save_dir, 'valid_dir')\n", (14050, 14073), False, 'import os\n'), ((14087, 14121), 'os.path.join', 'os.path.join', (['save_dir', '"""test_dir"""'], {}), "(save_dir, 'test_dir')\n", (14099, 14121), False, 'import os\n'), ((14287, 14317), 'deepchem.data.DiskDataset', 'dc.data.DiskDataset', (['train_dir'], {}), '(train_dir)\n', (14306, 14317), True, 'import deepchem as dc\n'), ((14328, 14358), 'deepchem.data.DiskDataset', 'dc.data.DiskDataset', (['valid_dir'], {}), '(valid_dir)\n', (14347, 14358), True, 'import deepchem as dc\n'), ((14368, 14397), 'deepchem.data.DiskDataset', 'dc.data.DiskDataset', (['test_dir'], {}), '(test_dir)\n', (14387, 14397), True, 'import deepchem as dc\n'), ((15500, 15535), 'os.path.join', 'os.path.join', (['save_dir', '"""train_dir"""'], {}), "(save_dir, 'train_dir')\n", (15512, 15535), False, 'import os\n'), ((15550, 15585), 'os.path.join', 'os.path.join', (['save_dir', '"""valid_dir"""'], {}), "(save_dir, 'valid_dir')\n", (15562, 15585), False, 'import os\n'), ((15599, 15633), 'os.path.join', 'os.path.join', (['save_dir', '"""test_dir"""'], {}), "(save_dir, 'test_dir')\n", (15611, 15633), False, 'import os\n'), ((2347, 2375), 'os.path.join', 'os.path.join', (['dest_dir', 'name'], {}), '(dest_dir, name)\n', (2359, 2375), False, 'import os\n'), ((3313, 3345), 'os.path.join', 'os.path.join', (['get_data_dir', 'name'], {}), '(get_data_dir, name)\n', (3325, 3345), False, 'import os\n'), ((3353, 3379), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file', '"""r"""'], {}), "(file, 'r')\n", (3368, 3379), False, 'import zipfile\n'), ((4028, 4056), 'os.path.splitext', 'os.path.splitext', (['input_file'], {}), '(input_file)\n', (4044, 4056), False, 'import os\n'), ((5551, 5586), 'os.path.isfile', 'os.path.isfile', (["(input_file + '.csv')"], {}), "(input_file + '.csv')\n", (5565, 5586), False, 'import os\n'), ((11460, 11486), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (11476, 11486), False, 'import os\n'), ((12086, 12135), 'joblib.dump', 'joblib.dump', (['dataset', 'filename'], {'compress': 'compress'}), '(dataset, filename, compress=compress)\n', (12097, 12135), False, 'import joblib\n'), ((12614, 12636), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (12630, 12636), False, 'import os\n'), ((15966, 15980), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', 
(15977, 15980), False, 'import pickle\n'), ((16224, 16252), 'pickle.dump', 'pickle.dump', (['transformers', 'f'], {}), '(transformers, f)\n', (16235, 16252), False, 'import pickle\n'), ((5854, 5875), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (5870, 5875), False, 'from rdkit import Chem\n'), ((7741, 7786), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {'chunksize': 'shard_size'}), '(input_file, chunksize=shard_size)\n', (7752, 7786), True, 'import pandas as pd\n'), ((8832, 8908), 'pandas.read_json', 'pd.read_json', (['input_file'], {'orient': '"""records"""', 'chunksize': 'shard_size', 'lines': '(True)'}), "(input_file, orient='records', chunksize=shard_size, lines=True)\n", (8844, 8908), True, 'import pandas as pd\n'), ((9483, 9510), 'gzip.open', 'gzip.open', (['input_file', '"""rb"""'], {}), "(input_file, 'rb')\n", (9492, 9510), False, 'import gzip\n'), ((9655, 9679), 'pickle.load', 'pickle.load', (['opened_file'], {}), '(opened_file)\n', (9666, 9679), False, 'import pickle\n'), ((12174, 12200), 'numpy.save', 'np.save', (['filename', 'dataset'], {}), '(filename, dataset)\n', (12181, 12200), True, 'import numpy as np\n'), ((12527, 12549), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (12543, 12549), False, 'import os\n'), ((12574, 12596), 'os.path.splitext', 'os.path.splitext', (['name'], {}), '(name)\n', (12590, 12596), False, 'import os\n'), ((12746, 12767), 'joblib.load', 'joblib.load', (['filename'], {}), '(filename)\n', (12757, 12767), False, 'import joblib\n'), ((14131, 14156), 'os.path.exists', 'os.path.exists', (['train_dir'], {}), '(train_dir)\n', (14145, 14156), False, 'import os\n'), ((14164, 14189), 'os.path.exists', 'os.path.exists', (['valid_dir'], {}), '(valid_dir)\n', (14178, 14189), False, 'import os\n'), ((14204, 14228), 'os.path.exists', 'os.path.exists', (['test_dir'], {}), '(test_dir)\n', (14218, 14228), False, 'import os\n'), ((15899, 15941), 'os.path.join', 'os.path.join', (['save_dir', '"""transformers.pkl"""'], {}), "(save_dir, 'transformers.pkl')\n", (15911, 15941), False, 'import os\n'), ((16164, 16206), 'os.path.join', 'os.path.join', (['save_dir', '"""transformers.pkl"""'], {}), "(save_dir, 'transformers.pkl')\n", (16176, 16206), False, 'import os\n'), ((4142, 4164), 'PIL.Image.open', 'Image.open', (['input_file'], {}), '(input_file)\n', (4152, 4164), False, 'from PIL import Image\n'), ((4234, 4256), 'PIL.Image.open', 'Image.open', (['input_file'], {}), '(input_file)\n', (4244, 4256), False, 'from PIL import Image\n'), ((4273, 4285), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (4281, 4285), True, 'import numpy as np\n'), ((6676, 6734), 'pandas.DataFrame', 'pd.DataFrame', (['df_rows'], {'columns': "('mol_id', 'smiles', 'mol')"}), "(df_rows, columns=('mol_id', 'smiles', 'mol'))\n", (6688, 6734), True, 'import pandas as pd\n'), ((7622, 7645), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (7633, 7645), True, 'import pandas as pd\n'), ((8680, 8734), 'pandas.read_json', 'pd.read_json', (['input_file'], {'orient': '"""records"""', 'lines': '(True)'}), "(input_file, orient='records', lines=True)\n", (8692, 8734), True, 'import pandas as pd\n'), ((9554, 9584), 'typing.cast', 'cast', (['IO[bytes]', 'unzipped_file'], {}), '(IO[bytes], unzipped_file)\n', (9558, 9584), False, 'from typing import Any, Iterator, List, Optional, Tuple, Union, cast, IO\n'), ((12862, 12893), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': '(0)'}), '(filename, header=0)\n', (12873, 12893), 
True, 'import pandas as pd\n'), ((6174, 6232), 'pandas.DataFrame', 'pd.DataFrame', (['df_rows'], {'columns': "('mol_id', 'smiles', 'mol')"}), "(df_rows, columns=('mol_id', 'smiles', 'mol'))\n", (6186, 6232), True, 'import pandas as pd\n'), ((6827, 6876), 'pandas.concat', 'pd.concat', (['[mol_df, raw_df]'], {'axis': '(1)', 'join': '"""inner"""'}), "([mol_df, raw_df], axis=1, join='inner')\n", (6836, 6876), True, 'import pandas as pd\n'), ((12996, 13032), 'numpy.load', 'np.load', (['filename'], {'allow_pickle': '(True)'}), '(filename, allow_pickle=True)\n', (13003, 13032), True, 'import numpy as np\n'), ((6329, 6378), 'pandas.concat', 'pd.concat', (['[mol_df, raw_df]'], {'axis': '(1)', 'join': '"""inner"""'}), "([mol_df, raw_df], axis=1, join='inner')\n", (6338, 6378), True, 'import pandas as pd\n')]
|
import sys
sys.path.append("..")
import logging
import operator
import os
import remi.gui as gui
from remi import start, App
from remidatatable import DataTableWithServerSideProcessing
sample_data = (
("Soul Man", "Blues Brothers", "Blues", "2:52"),
("Another Brick in the Wall", "Pink Floyd", "Progressive Rock", "5:35"),
("Feier Frei!", "Rammstein", "Hard Rock", "3:10"),
("Walk Like an Egyptian", "Bangles", "80's", "3:10"),
("46 and 2", "Tool", "Hard Rock", "2:15"),
("I Ran", "Flock of Seagulls", "80's", "3:12"),
("Jackson", "<NAME>", "Country", "1:10"),
("Pop Music", "M Factor", "Pop", "3:40"),
("<NAME>", "Rammstein", "Hard Rock", "3:10"),
("Engel", "Rammstein", "Hard Rock", "3:10"),
("<NAME>", "<NAME>", "Progressive Rock", "12:10"),
("Soul Man 2", "Blues Brothers", "Blues", "2:52"),
("Another Brick in the Wall 2", "Pink Floyd", "Progressive Rock", "5:35"),
("Feier Frei! 2", "Rammstein", "Hard Rock", "3:10"),
("Walk Like an Egyptian 2", "Bangles", "80's", "3:10"),
("46 and 2 2", "Tool", "Hard Rock", "2:15"),
("I Ran 2", "Flock of Seagulls", "80's", "3:12"),
("Jackson 2", "<NAME>", "Country", "1:10"),
("Pop Music 2", "M Factor", "Pop", "3:40"),
("<NAME> 2", "Rammstein", "<NAME>", "3:10"),
("Engel 2", "Rammstein", "<NAME>", "3:10"),
("<NAME> 2", "<NAME>", "Progressive Rock", "12:10"),
)
log = logging.getLogger("server_side_app")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
class ServerSideMusicDataTable(DataTableWithServerSideProcessing):
def onDataRequest(self, request):
log.debug("request: %s" % request)
# the full contents of request is documented here:
# https://datatables.net/manual/server-side
start = request["start"]
length = request["length"]
search = request["search"]["value"]
order = request["order"][0]
# filter based on search parameter
if search != "":
filtered_data = []
for r in sample_data:
if search in str(r):
filtered_data.append(r)
else:
filtered_data = sample_data
num_records_total = len(sample_data)
num_records_after_filtering = len(filtered_data)
        # sort per request (before paginating, so every page reflects the order)
        col_index = order["column"]
        ascending = order["dir"] == "asc"
        filtered_data = sorted(filtered_data,
                               key=operator.itemgetter(col_index),
                               reverse=not ascending)
        # paginate
        data = []
        for i in range(start, min(length + start, num_records_after_filtering)):
            sd = filtered_data[i]
            data.append([sd[0], sd[1], sd[2], sd[3]])
# build response object
response = {}
response["draw"] = request["draw"]
response["recordsTotal"] = num_records_total
response["recordsFiltered"] = num_records_after_filtering
response["data"] = data
log.debug("response: %s" % response)
return response
class ExampleFrame(gui.VBox):
def __init__(self, app, **kwargs):
super(ExampleFrame, self).__init__(**kwargs)
self.row1 = gui.HBox()
self.append(self.row1)
self.table = ServerSideMusicDataTable(
app,
{'paging': 'true',
'scrollY': '"200px"',
'scrollCollapse': 'false',
'lengthChange': 'false',
'select': "'single'",
'colReorder': 'false',
'columns': """[
{'name':'title', 'title':'Title'},
{'name':'artist', 'title':'Artist'},
{'name':'genre', 'title':'Genre'},
{'name':'length','title':'Length'}]""",
},
style={"height": "300px", "width": "700px"}
)
self.row1.append(self.table)
class MyApp(App):
def __init__(self, *args):
# The following assumes running from 'remi-datatable/example'.
# Copy the contents of remi-datatabe/res to your own app's res
# directory and remove the '..' path segment from the following
# line when using remi-datatable in your own apps.
res_path = DataTableWithServerSideProcessing.get_res_path()
html_head = """
<link rel="stylesheet" type="text/css" href="/res/datatables.css"/>
<script type="text/javascript" src="/res/datatables.min.js"></script>
"""
super(MyApp, self).__init__(*args, static_file_path=res_path,
html_head=html_head)
def main(self):
return ExampleFrame(self, width=700, height=300)
if __name__ == "__main__":
start(MyApp, title="Server-Side Data Grid", debug=True)
|
[
"sys.path.append",
"remidatatable.DataTableWithServerSideProcessing.get_res_path",
"logging.StreamHandler",
"remi.start",
"operator.itemgetter",
"logging.getLogger",
"remi.gui.HBox"
] |
[((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((1411, 1447), 'logging.getLogger', 'logging.getLogger', (['"""server_side_app"""'], {}), "('server_side_app')\n", (1428, 1447), False, 'import logging\n'), ((1463, 1486), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1484, 1486), False, 'import logging\n'), ((4653, 4708), 'remi.start', 'start', (['MyApp'], {'title': '"""Server-Side Data Grid"""', 'debug': '(True)'}), "(MyApp, title='Server-Side Data Grid', debug=True)\n", (4658, 4708), False, 'from remi import start, App\n'), ((3156, 3166), 'remi.gui.HBox', 'gui.HBox', ([], {}), '()\n', (3164, 3166), True, 'import remi.gui as gui\n'), ((4168, 4216), 'remidatatable.DataTableWithServerSideProcessing.get_res_path', 'DataTableWithServerSideProcessing.get_res_path', ([], {}), '()\n', (4214, 4216), False, 'from remidatatable import DataTableWithServerSideProcessing\n'), ((2634, 2664), 'operator.itemgetter', 'operator.itemgetter', (['col_index'], {}), '(col_index)\n', (2653, 2664), False, 'import operator\n')]
|
# Generated by Django 3.0.4 on 2020-03-30 12:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Province',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Tasker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group', models.CharField(max_length=50)),
('contact_number', models.IntegerField()),
('cnic', models.IntegerField()),
('gender', models.CharField(max_length=10)),
('description', models.TextField(max_length=2000)),
('daily_work_rate', models.IntegerField()),
('profile_visible', models.BooleanField()),
('rating', models.IntegerField()),
('area', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='helper.Area')),
('city', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='helper.City')),
('province', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='helper.Province')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
            name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('star', models.IntegerField()),
('comment', models.TextField(default='.', max_length=1500)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='helper.Tasker')),
],
),
migrations.AddField(
model_name='city',
name='province',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='helper.Province'),
),
migrations.AddField(
model_name='area',
name='city',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='helper.City'),
),
]
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] |
[((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((2936, 3025), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""helper.Province"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'helper.Province')\n", (2953, 3025), False, 'from django.db import migrations, models\n'), ((3136, 3221), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""helper.City"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='helper.City'\n )\n", (3153, 3221), False, 'from django.db import migrations, models\n'), ((433, 526), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (449, 526), False, 'from django.db import migrations, models\n'), ((550, 581), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (566, 581), False, 'from django.db import migrations, models\n'), ((711, 804), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (727, 804), False, 'from django.db import migrations, models\n'), ((828, 859), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (844, 859), False, 'from django.db import migrations, models\n'), ((993, 1086), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1009, 1086), False, 'from django.db import migrations, models\n'), ((1110, 1141), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1126, 1141), False, 'from django.db import migrations, models\n'), ((1273, 1366), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1289, 1366), False, 'from django.db import migrations, models\n'), ((1391, 1422), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1407, 1422), False, 'from django.db import migrations, models\n'), ((1460, 1481), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1479, 1481), False, 'from django.db import migrations, models\n'), ((1509, 1530), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1528, 1530), False, 'from django.db import migrations, models\n'), ((1560, 1591), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1576, 1591), False, 'from django.db import migrations, models\n'), ((1626, 1659), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(2000)'}), '(max_length=2000)\n', (1642, 1659), False, 'from django.db import migrations, models\n'), ((1698, 1719), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1717, 1719), False, 'from django.db import migrations, models\n'), ((1758, 1779), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1777, 1779), False, 'from django.db import migrations, models\n'), ((1809, 1830), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1828, 1830), False, 'from django.db import migrations, models\n'), ((1858, 1954), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""helper.Area"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='helper.Area')\n", (1875, 1954), False, 'from django.db import migrations, models\n'), ((1978, 2074), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""helper.City"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='helper.City')\n", (1995, 2074), False, 'from django.db import migrations, models\n'), ((2102, 2202), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""helper.Province"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='helper.Province')\n", (2119, 2202), False, 'from django.db import migrations, models\n'), ((2226, 2335), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (2246, 2335), False, 'from django.db import migrations, models\n'), ((2463, 2556), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2479, 2556), False, 'from django.db import migrations, models\n'), ((2580, 2601), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2599, 2601), False, 'from django.db import migrations, models\n'), ((2632, 2678), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""."""', 'max_length': '(1500)'}), "(default='.', max_length=1500)\n", (2648, 2678), False, 'from django.db import migrations, models\n'), ((2706, 2804), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""helper.Tasker"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='helper.Tasker')\n", (2723, 2804), False, 'from django.db import migrations, models\n')]
|
from django.db import models

class Actor(models.Model):
    name = models.CharField(max_length=70)
    name_beautified = models.CharField(max_length=70)
    link = models.CharField(max_length=255)


class Movie(models.Model):
    name = models.CharField(max_length=70)
    name_beautified = models.CharField(max_length=70)
    link = models.CharField(max_length=255)
    actors = models.ManyToManyField('Actor', through='ActorMovie')

class ActorMovie(models.Model):
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    actor = models.ForeignKey(Actor, on_delete=models.CASCADE)
    order = models.IntegerField(default=0)
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey"
] |
[((69, 100), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (85, 100), False, 'from django.db import models\n'), ((123, 154), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (139, 154), False, 'from django.db import models\n'), ((166, 198), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (182, 198), False, 'from django.db import models\n'), ((239, 270), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (255, 270), False, 'from django.db import models\n'), ((293, 324), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (309, 324), False, 'from django.db import models\n'), ((336, 368), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (352, 368), False, 'from django.db import models\n'), ((382, 435), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Actor"""'], {'through': '"""ActorMovie"""'}), "('Actor', through='ActorMovie')\n", (404, 435), False, 'from django.db import models\n'), ((482, 532), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Movie'], {'on_delete': 'models.CASCADE'}), '(Movie, on_delete=models.CASCADE)\n', (499, 532), False, 'from django.db import models\n'), ((545, 595), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Actor'], {'on_delete': 'models.CASCADE'}), '(Actor, on_delete=models.CASCADE)\n', (562, 595), False, 'from django.db import models\n'), ((608, 638), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (627, 638), False, 'from django.db import models\n')]
|
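Each `extract_api` cell above is a plain Python literal: a list of 8-tuples, one per extracted call. Below is a minimal sketch, not part of the dataset itself, of how such a cell could be parsed back into records with `ast.literal_eval`; the field meanings (call span, dotted API path, call name as written, positional/keyword args, rendered argument source, callee-name span, a boolean flag, originating import) are inferred from the rows above rather than taken from a documented schema, and the `sample` string is copied from the Actor/Movie row.

# Sketch only: parse one extract_api record, assuming the 8-tuple layout
# inferred from the rows above. The boolean flag's meaning is not documented
# here, so it is unpacked but left uninterpreted.
import ast

sample = ("[((608, 638), 'django.db.models.IntegerField', "
          "'models.IntegerField', ([], {'default': '(0)'}), "
          "'(default=0)\\n', (627, 638), False, "
          "'from django.db import models\\n')]")

records = ast.literal_eval(sample)
for (call_span, dotted_path, call_name, (args, kwargs),
     arg_src, name_span, flag, import_stmt) in records:
    # prints: django.db.models.IntegerField (608, 638) {'default': '(0)'}
    print(dotted_path, call_span, kwargs)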