| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each row below is presented as: a metadata table (the max_stars / max_issues / max_forks path, name, head hexsha, and licenses columns share one value per row unless noted, so they are shown once, with each variant's count and event window listed separately); the `content` field as a fenced code block; and a **Quality signals** line listing the `*_quality_signal` columns with the `qsc_code_` / `qsc_codepython_` prefix and `_quality_signal` suffix dropped. The plain-named duplicate `qsc_*` columns hold 0 in every row shown (with `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` null), so they are summarized in one sentence per row.
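Since the rows carry per-file quality signals, a natural use is threshold filtering before training. The following is a minimal sketch, assuming the split has been materialized as a local Parquet file; the file name `sample.parquet` and all thresholds are illustrative assumptions, not part of the dataset.

```python
# Minimal sketch: filter rows of this split on the quality signals above.
# ASSUMPTION: the split is available locally as "sample.parquet";
# the thresholds below are arbitrary illustrative choices.
import pandas as pd

df = pd.read_parquet("sample.parquet")

# Keep Python files that parse into an AST (cate_ast == 1) and are not
# dominated by duplicated 5-grams or by comment characters.
mask = (
    (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3)
    & (df["qsc_code_frac_chars_comments_quality_signal"] < 0.5)
)
for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```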
| Field | Row 1 |
|---|---|
| hexsha | fdbd68dd1e0a0ba0978c1bd0880d05f492ec5829 |
| size / ext / lang | 3,063 / py / Python |
| repo_path | fluentcms_bootstrap_grid/content_plugins.py |
| repo_name | edoburu/fluentcms-bootstrap-grid |
| repo_head_hexsha | 67a8255e34e22284eeb05c04517671311305d370 |
| licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |
```python
from django import forms
from django.utils.encoding import force_text
from django.utils.translation import pgettext, ugettext_lazy as _
from fluent_contents.extensions import ContainerPlugin, plugin_pool, ContentItemForm

from . import appsettings
from .models import BootstrapRow, BootstrapColumn

GRID_COLUMNS = appsettings.FLUENTCMS_BOOTSTRAP_GRID_COLUMNS


def _get_size_choices():
    choices = [('', '----')]
    for i in range(1, GRID_COLUMNS + 1):
        title = '{0} / {1}'.format(i, GRID_COLUMNS)
        choices.append((i, title))
    return choices


SIZE_CHOICES = _get_size_choices()
OFFSET_CHOICES = [('', '----')] + [(i, force_text(i)) for i in range(1, GRID_COLUMNS + 1)]

size_widget = forms.Select(choices=SIZE_CHOICES)
offset_widget = forms.Select(choices=OFFSET_CHOICES)
push_widget = forms.Select(choices=OFFSET_CHOICES)


@plugin_pool.register
class BootstrapRowPlugin(ContainerPlugin):
    """
    Row plugin
    """
    model = BootstrapRow
    render_template = 'fluentcms_bootstrap_grid/row.html'
    empty_children_message = _("Add a new column here.")


class BootstrapColumnForm(ContentItemForm):
    """
    Custom form for the bootstrap column
    """
    def __init__(self, *args, **kwargs):
        super(BootstrapColumnForm, self).__init__(*args, **kwargs)
        for size in appsettings.FLUENTCMS_BOOTSTRAP_GRID_SIZES:
            col = self.fields['col_{0}'.format(size)]
            offset = self.fields['col_{0}_offset'.format(size)]
            push = self.fields['col_{0}_push'.format(size)]
            col.label = appsettings.FLUENTCMS_BOOTSTRAP_GRID_TITLES[size]
            offset.label = pgettext("bootstrap-grid", u"Offset")
            push.label = pgettext("bootstrap-grid", u"Push")


@plugin_pool.register
class BootstrapColumnPlugin(ContainerPlugin):
    """
    Column plugin
    """
    model = BootstrapColumn
    form = BootstrapColumnForm
    render_template = 'fluentcms_bootstrap_grid/column.html'
    allowed_parent_types = (BootstrapRowPlugin,)
    formfield_overrides = {
        'col_xs': {'widget': size_widget},
        'col_sm': {'widget': size_widget},
        'col_md': {'widget': size_widget},
        'col_lg': {'widget': size_widget},
        'col_xs_offset': {'widget': offset_widget},
        'col_sm_offset': {'widget': offset_widget},
        'col_md_offset': {'widget': offset_widget},
        'col_lg_offset': {'widget': offset_widget},
        'col_xs_push': {'widget': push_widget},
        'col_sm_push': {'widget': push_widget},
        'col_md_push': {'widget': push_widget},
        'col_lg_push': {'widget': push_widget},
    }
    fieldsets = (
        (None, {
            'fields': (
                ('col_xs', 'col_xs_offset', 'col_xs_push'),
                ('col_sm', 'col_sm_offset', 'col_sm_push'),
                ('col_md', 'col_md_offset', 'col_md_push'),
                ('col_lg', 'col_lg_offset', 'col_lg_push'),
            ),
        }),
    )

    class Media:
        css = {
            'all': ('admin/fluentcms_bootstrap_grid/grid_admin.css',),
        }
```
**Quality signals (row 1):** avg_line_length 32.935484; max_line_length 90; alphanum_fraction 0.642507; num_words 347; num_chars 3,063; mean_word_length 5.340058; frac_words_unique 0.268012; top_2grams 0.053427; top_3grams 0.071236; top_4grams 0.041015; dupe_5grams 0.229358; dupe_6grams 0.065839; dupe_7grams 0.025904; dupe_8grams 0.025904; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0.003777; whitespace 0.222005; size_file_byte 3,063; num_lines 92; num_chars_line_max 91; num_chars_line_mean 33.293478; frac_chars_alphabet 0.773815; frac_chars_comments 0.019915; cate_xml_start 0; frac_lines_dupe_lines 0.029851; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.184371; frac_chars_long_word_length 0.038566; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0.029851; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.089552; frac_lines_simplefunc 0; score_lines_no_logic 0.328358; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 2 |
|---|---|
| hexsha | fdbd9c9ab6b561ed49a22efac24abb031c9653d8 |
| size / ext / lang | 8,083 / py / Python |
| repo_path | study.py |
| repo_name | Yougeeg/zhihuiguo |
| repo_head_hexsha | 21cfa4210b011e0fbc200774b88e570d21e30ab2 |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | 2 (events 2017-11-04T08:46:04.000Z → 2018-09-12T08:16:44.000Z) |
```python
import logging
import json
from datetime import datetime, timedelta
from getpass import getpass
import uuid

import requests
from Cryptodome.PublicKey import RSA

import utils

NONE, SIGN, TICKET = 0, 1, 2

SERVER = 'https://appstudentapi.zhihuishu.com'
SSL_VERIFY = True
TAKE_EXAMS = True
SKIP_FINAL_EXAM = False
EXAM_AUTO_SUBMIT = True


def post(head, url, data, raw=False):
    timestamp = str(int(datetime.now().timestamp() * 1000))
    s.headers.update({'Timestamp': timestamp})
    if head == SIGN:
        s.headers.update({'App-Signature': utils.md5_digest(app_key + timestamp + secret)})
    elif head == TICKET:
        s.headers.update({'App-Ticket': ticket})
    r = s.post(SERVER + url, data=data, verify=SSL_VERIFY)
    if raw is True:
        return r.text
    return r.json()['rt']


def login():
    account = input(u'账号(手机):')  # prompt: account (mobile number)
    password = getpass(prompt=u'密码:')  # prompt: password
    assert account or password
    p = {'appkey': app_key}
    global ticket
    ticket = post(NONE, '/api/ticket', p)
    p = {'platform': 'android', 'm': account, 'appkey': app_key, 'p': password, 'client': 'student',
         'version': '2.8.9'}
    d = post(TICKET, '/api/login', p)
    u = d['userId']
    se = d['secret']
    s.headers.clear()
    p = {'type': 3, 'userId': u, 'secretStr': utils.rsa_encrypt(rsa_key, u), 'versionKey': 1}
    d = post(SIGN, '/appstudent/student/user/getUserInfoAndAuthentication', p)
    ai = json.loads(utils.rsa_decrypt(rsa_key, d['authInfo']))
    ui = json.loads(utils.rsa_decrypt(rsa_key, d['userInfo']))
    logger.info(ai)
    logger.info(ui)
    n = ui['realName']
    logger.info(f'{u} {n}')
    with open('userinfo.py', 'w+', encoding='utf-8') as f:
        f.writelines(f'USER = {u}\n')
        f.writelines(f'NAME = "{n}"\n')
        f.writelines(f'SECRET = "{se}"')
    logger.info('Login OK.')
    return u, n, se


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO)
    logger = logging.getLogger()
    logger.info('I love studying! Study makes me happy!')
    rsa_key = RSA.import_key(open('key.pem', 'r').read())
    app_key = utils.md5_digest(str(uuid.uuid4()).replace('-', ''))
    s = requests.Session()
    s.headers.update({
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 7.1.1; Nexus 5X Build/NOF27B',
        'Accept-Encoding': 'gzip',
        'App-Key': app_key})
    secret = ''
    ticket = ''
    try:
        import userinfo
        user = userinfo.USER
        name = userinfo.NAME
        secret = userinfo.SECRET
        if input(f'Current user:{user} {name}:[y/n]') != 'y':
            user, name, secret = login()
    except:
        user, name, secret = login()
    SERVER += '/appstudent'
    p = {'userId': user}
    d = post(SIGN, '/student/tutorial/getStudyingCourses', p)
    course_id, recruit_id, link_course_id = 0, 0, 0
    if d is None:
        logger.info('No studying courses.')
        exit()
    for course in d:
        if input(course['courseName'] + ':[y/n]') == 'y':
            course_id = course['courseId']
            recruit_id = course['recruitId']
            link_course_id = course['linkCourseId']
            break
    if course_id == 0:
        exit()

    def save_record(dic, chapter_id, lesson_id):
        if dic['studiedLessonDto'] is not None and dic['studiedLessonDto']['watchState'] == 1:
            return
        p = {'deviceId': app_key, 'userId': user, 'versionKey': 1}
        rt = post(SIGN, '/student/tutorial/getSaveLearningRecordToken', p)
        token = utils.rsa_decrypt(rsa_key, rt)
        video_time = dic['videoSec']
        chapter_id = chapter_id or dic['chapterId']
        j = {'lessonId': lesson_id, 'learnTime': str(timedelta(seconds=video_time)), 'userId': user,
             'personalCourseId': link_course_id, 'recruitId': recruit_id, 'chapterId': chapter_id, 'sourseType': 3,
             'playTimes': video_time, 'videoId': dic['videoId'], 'token': token, 'deviceId': app_key}
        if lesson_id is None:
            j['lessonId'] = dic['id']
        else:
            j['lessonVideoId'] = dic['id']
        json_str = json.dumps(j, sort_keys=True, separators=(',', ':'))
        p = {'jsonStr': json_str, 'secretStr': utils.rsa_encrypt(rsa_key, json_str), 'versionKey': 1}
        rt = post(SIGN, '/student/tutorial/saveLearningRecordByToken', p)
        logger.info(dic['name'] + rt)

    p = {'recruitId': recruit_id, 'courseId': course_id, 'userId': user}
    chapter_list = post(SIGN, '/appserver/student/getCourseInfo', p)['chapterList']
    for chapter in chapter_list:
        for lesson in chapter['lessonList']:
            if lesson['sectionList'] is not None:
                for section in lesson['sectionList']:
                    save_record(section, lesson['chapterId'], lesson['id'])
            else:
                save_record(lesson, None, None)
    logger.info('Videos done.')
    if TAKE_EXAMS is False:
        exit()
    p = {'mobileType': 2, 'recruitId': recruit_id, 'courseId': course_id, 'page': 1, 'userId': user, 'examType': 1,
         'schoolId': -1, 'pageSize': 20}  # examType=2 is for finished exams
    exam_list = post(SIGN, '/appserver/exam/findAllExamInfo', p)['stuExamDtoList']
    for exam in exam_list:
        logger.info(exam['examInfoDto']['name'])
        exam_type = exam['examInfoDto']['type']
        if exam_type == 2:  # Final exams
            if SKIP_FINAL_EXAM is True:
                logger.info('Skipped final exam.')
                continue
        exam_id = exam['examInfoDto']['examId']
        student_exam_id = exam['studentExamInfoDto']['id']
        question_ids = []
        p = {'userId': user}
        rt = post(SIGN, '/student/exam/canIntoExam', p)
        if rt != 1:
            logger.info('Cannot into exam.')
            continue
        p = {'recruitId': recruit_id, 'examId': exam_id, 'isSubmit': 0, 'studentExamId': student_exam_id,
             'type': exam_type, 'userId': user}
        ids = post(SIGN, '/student/exam/examQuestionIdListByCache', p)['examList']
        p.pop('isSubmit')
        p.pop('type')
        for exam_question in ids:
            question_ids.append(str(exam_question['questionId']))
        p['questionIds'] = question_ids
        questions = post(SIGN, '/student/exam/questionInfos', p)
        for question_id in question_ids:
            question = questions[question_id]
            logger.info(question['firstname'])
            # Fixed: the original `== '多选题' or '单选题'` was always truthy; the intent
            # is to auto-answer multiple-choice ("多选题") and single-choice ("单选题")
            # questions only and skip everything else.
            if question['questionTypeName'] in ('多选题', '单选题'):
                answer = question['realAnswer'].split(',')
            else:
                EXAM_AUTO_SUBMIT = False
                continue
            pa = [{'deviceType': '1', 'examId': str(exam_id), 'userId': str(user), 'stuExamId': str(student_exam_id),
                   'questionId': str(question_id), 'recruitId': str(recruit_id), 'answerIds': answer, 'dataIds': []}]
            json_str = json.dumps(pa, separators=(',', ':'))
            pb = {'mobileType': 2, 'jsonStr': json_str,
                  'secretStr': utils.rsa_encrypt(rsa_key, json_str),
                  'versionKey': 1}
            rt = post(SIGN, '/student/exam/saveExamAnswer', pb)
            logger.info(rt[0]['messages'])
        if not EXAM_AUTO_SUBMIT:
            continue
        pa = {'deviceType': '1', 'userId': str(user), 'stuExamId': str(student_exam_id), 'recruitId': recruit_id,
              'examId': str(exam_id), 'questionIds': question_ids, 'remainingTime': '0',
              'achieveCount': str(len(question_ids))}
        json_str = json.dumps(pa, separators=(',', ':'))
        pb = {'mobileType': 2, 'recruitId': recruit_id, 'examId': str(exam_id), 'userId': user, 'jsonStr': json_str,
              'secretStr': utils.rsa_encrypt(rsa_key, json_str), 'type': exam_type, 'versionKey': 1}
        raw = post(SIGN, '/student/exam/submitExamInfo', pb, raw=True)
        rt = json.loads(raw.replace('"{', '{').replace('}"', '}').replace('\\', ''))['rt']
        logger.info(f'{rt["messages"]} Score: {rt["errorInfo"]["score"]}')
    logger.info('Exams done.')
```
**Quality signals (row 2):** avg_line_length 39.237864; max_line_length 117; alphanum_fraction 0.588767; num_words 974; num_chars 8,083; mean_word_length 4.756674; frac_words_unique 0.26694; top_2grams 0.032376; top_3grams 0.025901; top_4grams 0.020505; dupe_5grams 0.154328; dupe_6grams 0.137492; dupe_7grams 0.116339; dupe_8grams 0.092597; dupe_9grams 0.06281; dupe_10grams 0.045111; replacement_symbols 0; digital 0.008547; whitespace 0.247309; size_file_byte 8,083; num_lines 205; num_chars_line_max 118; num_chars_line_mean 39.429268; frac_chars_alphabet 0.752959; frac_chars_comments 0.005444; cate_xml_start 0; frac_lines_dupe_lines 0.091429; cate_autogen 0; frac_lines_long_string 0.005714; frac_chars_string_length 0.243405; frac_chars_long_word_length 0.051269; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.005714; cate_ast 1; frac_lines_func_ratio 0.017143; cate_var_zero false; frac_lines_pass 0.022857; frac_lines_import 0.057143; frac_lines_simplefunc 0; score_lines_no_logic 0.097143; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 3 |
|---|---|
| hexsha | fdc0b230d2a0f01f084eb9ceb1e8ca8a841fb58f |
| size / ext / lang | 1,440 / py / Python |
| repo_path | python/controllers/ner_annotation_api.py |
| repo_name | barkavi87/anuvaad-corpus |
| repo_head_hexsha | 9ea832f4228f61a7d4998205976629ea4b7c3d70 |
| licenses | ["MIT"] |
| max_stars_count | 2 (events 2019-12-20T08:58:10.000Z → 2020-05-15T14:17:43.000Z) |
| max_issues_count | 73 (events 2019-08-12T16:17:33.000Z → 2022-01-13T01:24:38.000Z) |
| max_forks_count | 1 (events 2020-08-24T09:51:46.000Z → 2020-08-24T09:51:46.000Z) |
```python
import os
import urllib.request
import json

# Merged the two original flask import lines, which both imported `request`.
from flask import (Flask, Blueprint, request, redirect, render_template,
                   jsonify, current_app as app)

from controllers.sc_judgment_header_ner_eval import SC_ner_annotation
from models.response import CustomResponse
from models.status import Status

ner_annotation_api = Blueprint('ner_annotation_api', __name__)


@ner_annotation_api.route('/ner', methods=['POST'])
def ner_sentences():
    data = request.get_json()
    if 'sentences' not in data or data['sentences'] is None or not isinstance(data['sentences'], list):
        res = CustomResponse(
            Status.ERR_GLOBAL_MISSING_PARAMETERS.value, None)
        return res.getres(), Status.ERR_GLOBAL_MISSING_PARAMETERS.value['http']['status']
    else:
        output_ner = list()
        for text in data['sentences']:
            mix_model_dir = os.getcwd() + '/upload/models/exp_1_mix/'
            model_dir_order = os.getcwd() + '/upload/models/exp_1_order/'
            model_dir_judgment = os.getcwd() + '/upload/models/exp_1_judgement/'
            result_ner = SC_ner_annotation(model_dir_judgment, model_dir_order, mix_model_dir, text).main()
            if result_ner is None or mix_model_dir is None:
                return "something went wrong"
            else:
                output_ner.append(result_ner)
        res = CustomResponse(Status.SUCCESS.value, output_ner)
        return res.getres()
```
**Quality signals (row 3):** avg_line_length 43.636364; max_line_length 107; alphanum_fraction 0.702083; num_words 190; num_chars 1,440; mean_word_length 5.036842; frac_words_unique 0.378947; top_2grams 0.058516; top_3grams 0.045977; top_4grams 0.062696; dupe_5grams 0.15256; dupe_6grams 0.15256; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0.002613; whitespace 0.202778; size_file_byte 1,440; num_lines 32; num_chars_line_max 108; num_chars_line_mean 45; frac_chars_alphabet 0.83101; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.068966; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.121612; frac_chars_long_word_length 0.057679; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0.034483; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.275862; frac_lines_simplefunc 0; score_lines_no_logic 0.413793; frac_lines_print 0.068966. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 4 |
|---|---|
| hexsha | fdc1c97e35e98b3788187bd4a1997c5b2842cc3b |
| size / ext / lang | 4,594 / py / Python |
| repo_path | HandGenerator/HandGenerator.py |
| repo_name | VASST/SlicerLeapMotion |
| repo_head_hexsha | d20215fb657eb5c972d1fe380bdf2d0479796c93 |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | 2 (events 2019-09-06T16:06:20.000Z → 2020-02-16T17:15:28.000Z) |
| max_forks_count | 1 (events 2020-01-14T17:49:31.000Z → 2020-01-14T17:49:31.000Z) |
```python
import sys
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import numpy as np

#
# HandGenerator
#

class HandGenerator(ScriptedLoadableModule):
    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "Hand Generator"
        self.parent.categories = ["IGT"]
        self.parent.dependencies = ["OpenIGTLinkIF"]
        self.parent.contributors = ["Leah Groves (Robarts Research Institute), Thomas Morphew (Robarts Research Institute)"]
        self.parent.helpText = """This module creates a number of models (cylinders and spheres), and parents new transforms to those models in order to mimic the human hand. These transforms are then driven by the Leap Motion device."""
        self.parent.helpText += self.getDefaultModuleDocumentationLink()
        self.parent.acknowledgementText = """Thanks to the VASST Lab for its support."""

#
# HandGeneratorWidget
#

class HandGeneratorWidget(ScriptedLoadableModuleWidget):
    def __init__(self, parent=None):
        ScriptedLoadableModuleWidget.__init__(self, parent)
        self.connectorNode = None
        self.generated = False

    def setup(self):
        ScriptedLoadableModuleWidget.setup(self)
        self.parametersCollapsibleButton = ctk.ctkCollapsibleButton()
        self.parametersCollapsibleButton.text = "Actions"
        self.layout.addWidget(self.parametersCollapsibleButton)
        self.parametersFormLayout = qt.QFormLayout(self.parametersCollapsibleButton)
        self.connectButton = qt.QPushButton()
        self.connectButton.setDefault(False)
        self.connectButton.text = "Click to connect"
        self.parametersFormLayout.addWidget(self.connectButton)
        self.pathText = qt.QLabel("Please place hands within view")
        self.parametersFormLayout.addRow(self.pathText)
        self.layout.addStretch(1)
        self.generateButton = qt.QPushButton()
        self.generateButton.setDefault(False)
        self.generateButton.text = "Generate Hands"
        self.parametersFormLayout.addWidget(self.generateButton)
        self.connectButton.connect('clicked(bool)', self.onConnectButtonClicked)
        self.generateButton.connect('clicked(bool)', self.generateCylinders)
        self.layout.addStretch(1)

    def onConnectButtonClicked(self):
        if self.connectorNode is not None:
            self.connectorNode = None
            self.connectCheck = 1
            self.connectButton.text = 'Click to connect'
        else:
            self.connectorNode = slicer.vtkMRMLIGTLConnectorNode()
            slicer.mrmlScene.AddNode(self.connectorNode)
            self.connectorNode.SetTypeClient('localhost', 18944)
            self.connectorNode.Start()
            self.connectCheck = 0
            self.connectButton.text = 'Connected'

    def generateCylinders(self):
        if self.generated == False:
            nodes = slicer.util.getNodesByClass('vtkMRMLLinearTransformNode')
            l = slicer.modules.createmodels.logic()
            # TODO: Make sure to render the palm as well!
            for i in range(0, len(nodes)):
                if 'Left' in nodes[i].GetName() or 'Right' in nodes[i].GetName():
                    if 'Dis' in nodes[i].GetName() or 'Int' in nodes[i].GetName() or 'Prox' in nodes[i].GetName() or 'Meta' in nodes[i].GetName():
                        # This is a temporary solution; ideally the Plus server and Leap Motion can scan the actual sizes.
                        # This is also subject to change for different model types that look more like a hand.
                        if 'Dis' in nodes[i].GetName():
                            length = 16
                            radiusMm = 1.5
                        elif 'Int' in nodes[i].GetName():
                            length = 20
                            radiusMm = 1.5
                        elif 'Prox' in nodes[i].GetName():
                            length = 28
                            radiusMm = 1.5
                        elif 'Meta' in nodes[i].GetName():
                            length = 50
                            radiusMm = 3
                        cylinder = l.CreateCylinder(length, radiusMm)
                        cylinder.SetAndObserveTransformNodeID(nodes[i].GetID())
                        cylinder.SetName('LHG_Cyl_' + nodes[i].GetName())
            self.generated = True
        else:
            nodes = slicer.util.getNodesByClass('vtkMRMLLinearTransformNode')
            models = slicer.util.getNodesByClass('vtkMRMLModelNode')
            n = 0
            mat = vtk.vtkMatrix4x4()
            l = slicer.modules.createmodels.logic()
            for j in range(0, len(models)):
                if 'LHG_' in models[j].GetName():
                    slicer.mrmlScene.RemoveNode(models[j])
            for i in range(0, len(nodes)):
                if 'LHG_Zshift' in nodes[i].GetName():
                    slicer.mrmlScene.RemoveNode(nodes[i])
            self.generated = False
            self.generateCylinders()
```
**Quality signals (row 4):** avg_line_length 38.932203; max_line_length 233; alphanum_fraction 0.680888; num_words 503; num_chars 4,594; mean_word_length 6.178926; frac_words_unique 0.377734; top_2grams 0.027027; top_3grams 0.050193; top_4grams 0.053089; dupe_5grams 0.156692; dupe_6grams 0.04955; dupe_7grams 0.014157; dupe_8grams 0.014157; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0.00835; whitespace 0.217893; size_file_byte 4,594; num_lines 118; num_chars_line_max 234; num_chars_line_mean 38.932203; frac_chars_alphabet 0.856666; frac_chars_comments 0.055943; cate_xml_start 0; frac_lines_dupe_lines 0.186813; cate_autogen 0; frac_lines_long_string 0.010989; frac_chars_string_length 0.140712; frac_chars_long_word_length 0.012015; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0.008475; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0.054945; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.076923; frac_lines_simplefunc 0; score_lines_no_logic 0.153846; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 5 |
|---|---|
| hexsha | fdc3fc24cea107cba4ab6159b29d6bd76397bdc9 |
| size / ext / lang | 434 / py / Python |
| repo_path | Pratical/Class02/metodo_da_bissecao.py |
| repo_name | JoaoCostaIFG/MNUM |
| repo_head_hexsha | 6e042d8a6f64feb9eae9c79afec2fbab51f46fbd |
| licenses | ["MIT"] |
| max_stars_count | 1 (events 2019-12-07T10:34:30.000Z → 2019-12-07T10:34:30.000Z) |
| max_issues_count | null |
| max_forks_count | null |
```python
#!/usr/bin/env python3
# Binary search (original comment: "Pesquisa binaria")

# Read
num = int(input("Number to find the sqrt of? "))

index = 0
step = num / 2
prox = True
while abs(index * index - num) > 1e-10:
    if (prox):
        index += step
    else:
        index -= step
    step = step / 2
    if (index * index) < num:
        prox = True
    else:
        prox = False

# fixed typo in the output string: "percision" -> "precision"
print("Result: [", index - 2 * step, ", ", index, "] with precision of +/-", step)
```
**Quality signals (row 5):** avg_line_length 17.36; max_line_length 82; alphanum_fraction 0.534562; num_words 59; num_chars 434; mean_word_length 3.932203; frac_words_unique 0.559322; top_2grams 0.068966; top_3grams 0.112069; top_4grams 0; dupe_5grams 0; dupe_6grams 0; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0.026846; whitespace 0.313364; size_file_byte 434; num_lines 24; num_chars_line_max 83; num_chars_line_mean 18.083333; frac_chars_alphabet 0.751678; frac_chars_comments 0.099078; cate_xml_start 0; frac_lines_dupe_lines 0.266667; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.160207; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0; frac_lines_simplefunc 0; score_lines_no_logic 0; frac_lines_print 0.066667. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 6 |
|---|---|
| hexsha | fdc4efcf38d739230cb577df9971b45dd3d12756 |
| size / ext / lang | 2,625 / py / Python |
| repo_path | final/NerualNetworks/utils/MakeData.py |
| repo_name | XuYi-fei/HUST-EIC-MathematicalModeling |
| repo_head_hexsha | 73797bdba17d4f759be3a39603b42be081a98e5c |
| licenses | ["MIT"] |
| max_stars_count | 1 (events 2021-05-04T12:29:21.000Z → 2021-05-04T12:29:21.000Z) |
| max_issues_count | null |
| max_forks_count | null |
```python
import pandas as pd
import os
import random


class MakeDataset():
    def __init__(self, known=30, predict=7, ratio=0.4, path=None):
        # ratio is the valid / (train + valid) split fraction
        assert path is not None, "The path is invalid"
        self.df = pd.read_excel(path)
        self.columns = self.df.columns
        self.knownLength = known
        self.ratio = ratio
        self.predictLength = predict
        self.data_length = len(self.df.values[0]) - 3
        # self.datasets: {known data: predict data}
        self.datasets = {}
        self.train_dataset = []
        self.val_dataset = []
        self.begin_index = self.data_length % (known + predict) + 3
        self.keys = []
        self.process()
        self.splitDataset()

    def process(self):
        # self.maxData = 0
        # self.minData = 0
        maxData = 0
        minData = 0
        for index, row in self.df.iterrows():
            data = list(row[self.begin_index:])
            while len(data) >= self.knownLength + self.predictLength:
                maxData = 1 if max(data[-(self.predictLength + self.knownLength):-self.predictLength]) * 1.5 == 0 else max(data[-(self.predictLength + self.knownLength):-self.predictLength]) * 1.5
                self.datasets[str(data[-(self.predictLength + self.knownLength):-self.predictLength])] = str([(data[-self.predictLength:]), maxData])
                # data = data[:-(self.predictLength+self.knownLength)]
                data = data[:-1]
        return

    def splitDataset(self):
        writer_train = open('../Data/train2.txt', 'w')
        writer_val = open('../Data/val2.txt', 'w')
        for index, (k, v) in enumerate(self.datasets.items()):
            k, v = eval(k), eval(v)
            k = str([i / v[1] for i in k])
            v[0] = str([i / v[1] for i in v[0]])
            if random.random() > self.ratio:
                writer_train.write(k)
                writer_train.write('#')
                writer_train.write(v[0])
                writer_train.write('#')
                writer_train.write(str(v[1]))
                writer_train.write('\n')
            else:
                writer_val.write(k)
                writer_val.write('#')
                writer_val.write(v[0])
                writer_val.write('#')
                writer_val.write(str(v[1]))
                writer_val.write('\n')
        writer_val.close()
        writer_train.close()
        return


if __name__ == '__main__':
    data = MakeDataset(path=r'D:\GitRepos\EIC\MathmaticalModeling\HUST-EIC-MathematicalModeling\final\NerualNetworks\Data\Preprocessed_original.xlsx')
```
**Quality signals (row 6):** avg_line_length 34.090909; max_line_length 196; alphanum_fraction 0.55619; num_words 313; num_chars 2,625; mean_word_length 4.549521; frac_words_unique 0.290735; top_2grams 0.119382; top_3grams 0.067416; top_4grams 0.089888; dupe_5grams 0.260534; dupe_6grams 0.219803; dupe_7grams 0.135534; dupe_8grams 0.081461; dupe_9grams 0.081461; dupe_10grams 0.081461; replacement_symbols 0; digital 0.015917; whitespace 0.305905; size_file_byte 2,625; num_lines 76; num_chars_line_max 197; num_chars_line_mean 34.539474; frac_chars_alphabet 0.765642; frac_chars_comments 0.063238; cate_xml_start 0; frac_lines_dupe_lines 0.109091; cate_autogen 0; frac_lines_long_string 0.018182; frac_chars_string_length 0.077143; frac_chars_long_word_length 0.048163; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.018182; cate_ast 1; frac_lines_func_ratio 0.054545; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.054545; frac_lines_simplefunc 0; score_lines_no_logic 0.163636; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 7 |
|---|---|
| hexsha | fdc5979ebdcfef679c432bdc3659a7c209d59706 |
| size / ext / lang | 326 / py / Python |
| repo_path | chapter06/example612.py |
| repo_name | yozw/lio-files |
| repo_head_hexsha | e036bc868207ec045a804495fc40cf3a48e37d6d |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |
```python
from math import sqrt
from numpy import matrix
from intpm import intpm

A = matrix([[1, 0, 1, 0], [0, 1, 0, 1]])
b = matrix([1, 1]).T
c = matrix([-1, -2, 0, 0]).T
mu = 100

x1 = 0.5 * (-2 * mu + 1 + sqrt(1 + 4*mu*mu))
x2 = 0.5 * (-mu + 1 + sqrt(1 + mu * mu))
x0 = matrix([x1, x2, 1 - x1, 1 - x2]).T

intpm(A, b, c, x0, mu)
```
**Quality signals (row 7):** avg_line_length 18.111111; max_line_length 44; alphanum_fraction 0.509202; num_words 71; num_chars 326; mean_word_length 2.338028; frac_words_unique 0.295775; top_2grams 0.126506; top_3grams 0.036145; top_4grams 0.096386; dupe_5grams 0; dupe_6grams 0; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0.151639; whitespace 0.251534; size_file_byte 326; num_lines 17; num_chars_line_max 45; num_chars_line_mean 19.176471; frac_chars_alphabet 0.528689; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.272727; frac_lines_simplefunc 0; score_lines_no_logic 0.272727; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 8 |
|---|---|
| hexsha | fdcf6f1f635a8225a76ad32f552e3db603ad1c14 |
| size / ext / lang | 2,369 / py / Python |
| repo_path | jobs/api/models/keyword.py |
| repo_name | gitdaniel228/jobSearch |
| repo_head_hexsha | 5dc1c69a3750f92ca0bcd378dfdc500143204a5a |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |
```python
from django.db import models
from django.contrib import admin

from .country import Country
from .filter import Filter
from .setting import Setting
from .site import Site


class Keyword(models.Model):
    site = models.ForeignKey('Site')
    phrase = models.CharField(max_length=50)
    feed_url = models.CharField(
        max_length=255, blank=True, default='', help_text='for Upwork')
    countries = models.ManyToManyField('Country')

    class Meta:
        app_label = 'api'

    def __str__(self):
        return self.phrase

    def save(self, *args, **kwargs):
        """For Indeed: automatically adds Keyword-Country relation."""
        init = False
        if not self.pk:
            init = True
        super(Keyword, self).save(*args, **kwargs)
        if init and self.site.code == 'Indeed':
            sobj = Setting.objects.get(code='countries')
            l = sobj.value.strip().split(',')
            l = [x.strip() for x in l]
            for co in l:
                try:
                    c = Country.objects.get(code=co)
                except Country.DoesNotExist:
                    continue
                self.countries.add(c)

    @staticmethod
    def get_keywords(site_id):
        table = Site.get_job_table(site_id)
        query = 'SELECT COUNT(*) from {} WHERE keyword_id=api_keyword.id AND \
            is_viewed=0 AND is_deleted=0 AND is_processed=1'
        # Upwork
        if site_id == 2:
            avh = int(Filter.objects.get(code='avh').value)
            budget = int(Filter.objects.get(code='budget').value)
            spent = int(Filter.objects.get(code='spent').value)
            query += ' AND (avg_hour_price=0 OR avg_hour_price>={})'.format(
                avh)
            query += ' AND (budget=0 OR budget>={})'.format(budget)
            query += ' AND total_spent>={}'.format(spent)
        query = query.format(table)
        qs = Keyword.objects.filter(site_id=site_id)
        qs = qs.extra(select={'quantity_nv_jobs': query})
        qs = qs.values('id', 'phrase', 'site_id', 'quantity_nv_jobs')
        return list(qs)


class CountryInline(admin.TabularInline):
    model = Keyword.countries.through


@admin.register(Keyword)
class KeywordAdmin(admin.ModelAdmin):
    inlines = [
        CountryInline,
    ]
    exclude = ('countries',)
    list_display = ('site', 'phrase', 'feed_url')
```
**Quality signals (row 8):** avg_line_length 31.171053; max_line_length 78; alphanum_fraction 0.594344; num_words 287; num_chars 2,369; mean_word_length 4.787456; frac_words_unique 0.393728; top_2grams 0.026201; top_3grams 0.050946; top_4grams 0.041485; dupe_5grams 0.050218; dupe_6grams 0; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0.006448; whitespace 0.279865; size_file_byte 2,369; num_lines 75; num_chars_line_max 79; num_chars_line_mean 31.586667; frac_chars_alphabet 0.798945; frac_chars_comments 0.027016; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.096564; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0.051724; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.103448; frac_lines_simplefunc 0.017241; score_lines_no_logic 0.396552; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 9 |
|---|---|
| hexsha | fdd4d7c9ea8afe2d857b858645122b0c43587a2f |
| size / ext / lang | 4,262 / py / Python |
| repo_path | test/units/plugins/inventory/test_script.py |
| repo_name | Container-Projects/ansible-provider-docs |
| repo_head_hexsha | 100b695b0b0c4d8d08af362069557ffc735d0d7e |
| licenses | ["PSF-2.0", "BSD-2-Clause", "MIT"] |
| max_stars_count | 37 (events 2017-08-15T15:02:43.000Z → 2021-07-23T03:44:31.000Z) |
| max_issues_count | 12 (events 2018-01-10T05:25:25.000Z → 2021-11-28T06:55:48.000Z) |
| max_forks_count | 49 (events 2017-08-15T09:52:13.000Z → 2022-03-21T17:11:54.000Z) |
```python
# -*- coding: utf-8 -*-
# Copyright 2017 Chris Meyers <cmeyers@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pytest

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.loader import PluginLoader
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
from ansible.module_utils._text import to_bytes, to_native


class TestInventoryModule(unittest.TestCase):

    def setUp(self):

        class Inventory():
            cache = dict()

        class PopenResult():
            returncode = 0
            stdout = b""
            stderr = b""

            def communicate(self):
                return (self.stdout, self.stderr)

        self.popen_result = PopenResult()
        self.inventory = Inventory()
        self.loader = mock.MagicMock()
        self.loader.load = mock.MagicMock()

        inv_loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', C.DEFAULT_INVENTORY_PLUGIN_PATH, 'inventory_plugins')
        self.inventory_module = inv_loader.get('script')
        self.inventory_module.set_options()

        def register_patch(name):
            patcher = mock.patch(name)
            self.addCleanup(patcher.stop)
            return patcher.start()

        self.popen = register_patch('subprocess.Popen')
        self.popen.return_value = self.popen_result
        self.BaseInventoryPlugin = register_patch('ansible.plugins.inventory.BaseInventoryPlugin')
        self.BaseInventoryPlugin.get_cache_prefix.return_value = 'abc123'

    def test_parse_subprocess_path_not_found_fail(self):
        self.popen.side_effect = OSError("dummy text")
        with pytest.raises(AnsibleError) as e:
            self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
        assert e.value.message == "problem running /foo/bar/foobar.py --list (dummy text)"

    def test_parse_subprocess_err_code_fail(self):
        self.popen_result.stdout = to_bytes(u"fooébar", errors='surrogate_escape')
        self.popen_result.stderr = to_bytes(u"dummyédata")
        self.popen_result.returncode = 1
        with pytest.raises(AnsibleError) as e:
            self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
        assert e.value.message == to_native("Inventory script (/foo/bar/foobar.py) had an execution error: "
                                            "dummyédata\n ")

    def test_parse_utf8_fail(self):
        self.popen_result.returncode = 0
        self.popen_result.stderr = to_bytes("dummyédata")
        self.loader.load.side_effect = TypeError('obj must be string')
        with pytest.raises(AnsibleError) as e:
            self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
        assert e.value.message == to_native("failed to parse executable inventory script results from "
                                            "/foo/bar/foobar.py: obj must be string\ndummyédata\n")

    def test_parse_dict_fail(self):
        self.popen_result.returncode = 0
        self.popen_result.stderr = to_bytes("dummyédata")
        self.loader.load.return_value = 'i am not a dict'
        with pytest.raises(AnsibleError) as e:
            self.inventory_module.parse(self.inventory, self.loader, '/foo/bar/foobar.py')
        assert e.value.message == to_native("failed to parse executable inventory script results from "
                                            "/foo/bar/foobar.py: needs to be a json dict\ndummyédata\n")
```
**Quality signals (row 9):** avg_line_length 40.207547; max_line_length 135; alphanum_fraction 0.680432; num_words 543; num_chars 4,262; mean_word_length 5.209945; frac_words_unique 0.355433; top_2grams 0.038176; top_3grams 0.04772; top_4grams 0.03959; dupe_5grams 0.337575; dupe_6grams 0.30046; dupe_7grams 0.270767; dupe_8grams 0.270767; dupe_9grams 0.270767; dupe_10grams 0.270767; replacement_symbols 0; digital 0.00455; whitespace 0.22642; size_file_byte 4,262; num_lines 105; num_chars_line_max 136; num_chars_line_mean 40.590476; frac_chars_alphabet 0.853503; frac_chars_comments 0.169873; cate_xml_start 0; frac_lines_dupe_lines 0.222222; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.184712; frac_chars_long_word_length 0.019892; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.063492; cate_ast 1; frac_lines_func_ratio 0.111111; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.126984; frac_lines_simplefunc 0.015873; score_lines_no_logic 0.31746; frac_lines_print 0.015873. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 10 |
|---|---|
| hexsha | fdda757521b551e07a7f4e98103818cc6dd32745 |
| size / ext / lang | 10,501 / py / Python |
| repo_path | asr_deepspeech/modules/deepspeech.py |
| repo_name | shangdibufashi/ASRDeepSpeech |
| repo_head_hexsha | f11134abb79e98062fbc25fab99ca4cf675e538b |
| licenses | ["MIT"] |
| max_stars_count | 44 (events 2020-03-03T13:05:57.000Z → 2022-03-24T03:42:31.000Z) |
| max_issues_count | 6 (events 2020-12-15T10:58:19.000Z → 2021-10-12T01:59:17.000Z) |
| max_forks_count | 13 (events 2020-05-20T06:42:20.000Z → 2022-03-24T03:42:31.000Z) |
```python
import math
from collections import OrderedDict
import json
from asr_deepspeech.decoders import GreedyDecoder
import os
from ascii_graph import Pyasciigraph
from asr_deepspeech.data.loaders import AudioDataLoader
from asr_deepspeech.data.samplers import BucketingSampler
from .blocks import *  # star import also supplies torch/nn and the block classes used below
from asr_deepspeech.data.dataset import SpectrogramDataset
from argparse import Namespace
from zakuro import hub


class DeepSpeech(nn.Module):
    def __init__(self,
                 audio_conf,
                 decoder,
                 id="asr",
                 label_path=None,
                 labels=None,
                 rnn_type="nn.LSTM",
                 rnn_hidden_size=768,
                 rnn_hidden_layers=5,
                 bidirectional=True,
                 context=20,
                 version='0.0.1',
                 model_path=None,
                 ):
        super(DeepSpeech, self).__init__()
        labels = json.load(open(label_path, "r")) if labels is None else labels
        self.version = version
        self.id = id
        self.decoder, self.audio_conf = decoder, audio_conf
        self.context = context
        self.rnn_hidden_size = rnn_hidden_size
        self.rnn_hidden_layers = rnn_hidden_layers
        self.rnn_type = eval(rnn_type)
        self.labels = labels
        self.bidirectional = bidirectional
        self.sample_rate = self.audio_conf.sample_rate
        self.window_size = self.audio_conf.window_size
        self.num_classes = len(self.labels)
        self.model_path = model_path
        self.build_network()
        self.decoder = GreedyDecoder(self.labels)
        try:
            assert model_path is not None
            assert os.path.exists(model_path)
            print(f"{self.id}>> Loading {model_path}")
            ckpt = Namespace(**torch.load(model_path))
            self.load_state_dict(ckpt.state_dict)
        except:
            pass

    def build_network(self):
        self.conv = MaskConv(nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=(41, 11), stride=(2, 2), padding=(20, 5)),
            nn.BatchNorm2d(32),
            nn.Hardtanh(0, 20, inplace=True),
            nn.Conv2d(32, 32, kernel_size=(21, 11), stride=(2, 1), padding=(10, 5)),
            nn.BatchNorm2d(32),
            nn.Hardtanh(0, 20, inplace=True)
        ))
        # Based on above convolutions and spectrogram size using conv formula (W - F + 2P)/ S+1
        rnn_input_size = int(math.floor((self.sample_rate * self.window_size) / 2) + 1)
        rnn_input_size = int(math.floor(rnn_input_size + 2 * 20 - 41) / 2 + 1)
        rnn_input_size = int(math.floor(rnn_input_size + 2 * 10 - 21) / 2 + 1)
        rnn_input_size *= 32
        rnns = []
        rnn = BatchRNN(input_size=rnn_input_size,
                       hidden_size=self.rnn_hidden_size,
                       rnn_type=self.rnn_type,
                       bidirectional=self.bidirectional,
                       batch_norm=False)
        rnns.append(('0', rnn))
        for x in range(self.rnn_hidden_layers - 1):
            rnn = BatchRNN(input_size=self.rnn_hidden_size,
                           hidden_size=self.rnn_hidden_size,
                           rnn_type=self.rnn_type,
                           bidirectional=self.bidirectional)
            rnns.append(('%d' % (x + 1), rnn))
        self.rnns = nn.Sequential(OrderedDict(rnns))
        self.lookahead = nn.Sequential(
            # consider adding batch norm?
            Lookahead(self.rnn_hidden_size,
                      context=self.context),
            nn.Hardtanh(0, 20, inplace=True)
        ) if not self.bidirectional else None
        fully_connected = nn.Sequential(
            nn.BatchNorm1d(self.rnn_hidden_size),
            nn.Linear(self.rnn_hidden_size, self.num_classes, bias=False)
        )
        self.fc = nn.Sequential(
            SequenceWise(fully_connected),
        )
        self.inference_softmax = InferenceBatchSoftmax()

    def forward(self, x, lengths):
        lengths = lengths.cpu().int()
        output_lengths = self.get_seq_lens(lengths)
        x, _ = self.conv(x, output_lengths)
        sizes = x.size()
        x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3])  # Collapse feature dimension
        x = x.transpose(1, 2).transpose(0, 1).contiguous()  # TxNxH
        for rnn in self.rnns:
            x = rnn(x, output_lengths)
        if not self.bidirectional:  # no need for lookahead layer in bidirectional
            x = self.lookahead(x)
        x = self.fc(x)
        x = x.transpose(0, 1)
        # identity in training mode, softmax in eval mode
        x = self.inference_softmax(x)
        return x, output_lengths

    def get_loader(self, manifest, batch_size, num_workers):
        dataset = SpectrogramDataset(audio_conf=self.audio_conf,
                                     manifest_filepath=manifest,
                                     labels=self.labels,
                                     normalize=True,
                                     spec_augment=self.audio_conf.spec_augment)
        sampler = BucketingSampler(dataset,
                                   batch_size=batch_size)
        loader = AudioDataLoader(dataset,
                                 num_workers=num_workers,
                                 batch_sampler=sampler)
        sampler.shuffle()
        return loader, sampler

    def __call__(self,
                 loader=None,
                 manifest=None,
                 batch_size=None,
                 cuda=True,
                 num_workers=32,
                 dist=None,
                 verbose=False,
                 half=False,
                 output_file=None,
                 main_proc=True,
                 restart_from=None):
        with torch.no_grad():
            if loader is None:
                loader, sampler = self.get_loader(manifest=manifest,
                                                  batch_size=batch_size,
                                                  num_workers=num_workers)
            if restart_from is not None:
                hub.restart_from(self, restart_from)
            device = "cuda" if cuda else "cpu"
            decoder = self.decoder
            target_decoder = self.decoder
            self.eval()
            self.to(device)
            total_cer, total_wer, num_tokens, num_chars = 0, 0, 0, 0
            output_data = []
            min_str, max_str, last_str, min_cer, max_cer = "", "", "", 100, 0
            hcers = dict([(k, 1) for k in range(10)])
            for i, (data) in enumerate(loader):
                inputs, targets, input_percentages, target_sizes = data
                input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
                inputs = inputs.to(device)
                if half:
                    inputs = inputs.half()
                # unflatten targets
                split_targets = []
                offset = 0
                for size in target_sizes:
                    split_targets.append(targets[offset:offset + size])
                    offset += size
                out, output_sizes = self.forward(inputs, input_sizes)
                decoded_output, _ = decoder.decode(out, output_sizes)
                target_strings = target_decoder.convert_to_strings(split_targets)
                if output_file is not None:
                    # add output to data array, and continue
                    output_data.append((out.detach().cpu().numpy(), output_sizes.numpy(), target_strings))
                for x in range(len(target_strings)):
                    transcript, reference = decoded_output[x][0], target_strings[x][0]
                    wer_inst = decoder.wer(transcript, reference)
                    cer_inst = decoder.cer(transcript, reference)
                    total_wer += wer_inst
                    total_cer += cer_inst
                    num_tokens += len(reference.split())
                    num_chars += len(reference.replace(' ', ''))
                    wer_inst = float(wer_inst) / len(reference.split())
                    cer_inst = float(cer_inst) / len(reference.replace(' ', ''))
                    wer_inst = wer_inst * 100
                    cer_inst = cer_inst * 100
                    wer_inst = min(wer_inst, 100)
                    cer_inst = min(cer_inst, 100)
                    hcers[min(int(cer_inst // 10), 9)] += 1
                    last_str = f"Ref:{reference.lower()}" \
                               f"\nHyp:{transcript.lower()}" \
                               f"\nWER:{wer_inst} " \
                               f"- CER:{cer_inst}"
                    if cer_inst < min_cer:
                        min_cer = cer_inst
                        min_str = last_str
                    if cer_inst > max_cer:
                        max_cer = cer_inst
                        max_str = last_str
                    print(last_str) if verbose else None
            wer = float(total_wer) / num_tokens
            cer = float(total_cer) / num_chars
            cers = [(f'{k*10}-{(k*10) + 10}', v - 1) for k, v in hcers.items()]
            graph = Pyasciigraph()
            asciihistogram = "\n|".join(graph.graph('CER histogram', cers))
            if main_proc and output_file is not None:
                with open(output_file, "w") as f:
                    f.write("\n".join([
                        f"================= {wer*100:.2f}/{cer*100:.2f} =================",
                        "----- BEST -----",
                        min_str,
                        "----- LAST -----",
                        last_str,
                        "----- WORST -----",
                        max_str,
                        asciihistogram,
                        "=============================================\n"
                    ]))
            return wer * 100, cer * 100, output_data

    def get_seq_lens(self, input_length):
        """
        Given a 1D Tensor or Variable containing integer sequence lengths, return a 1D tensor or variable
        containing the size sequences that will be output by the network.

        :param input_length: 1D Tensor
        :return: 1D Tensor scaled by model
        """
        seq_len = input_length
        for m in self.conv.modules():
            if type(m) == nn.modules.conv.Conv2d:
                seq_len = ((seq_len + 2 * m.padding[1] - m.dilation[1] * (m.kernel_size[1] - 1) - 1) / m.stride[1] + 1)
        return seq_len.int()
```
**Quality signals (row 10):** avg_line_length 42.172691; max_line_length 119; alphanum_fraction 0.517475; num_words 1,156; num_chars 10,501; mean_word_length 4.500865; frac_words_unique 0.224048; top_2grams 0.018835; top_3grams 0.022487; top_4grams 0.022871; dupe_5grams 0.12858; dupe_6grams 0.081299; dupe_7grams 0.065539; dupe_8grams 0.060734; dupe_9grams 0.060734; dupe_10grams 0.060734; replacement_symbols 0; digital 0.023349; whitespace 0.380059; size_file_byte 10,501; num_lines 248; num_chars_line_max 120; num_chars_line_mean 42.342742; frac_chars_alphabet 0.775883; frac_chars_comments 0.050186; cate_xml_start 0; frac_lines_dupe_lines 0.037209; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.034378; frac_chars_long_word_length 0.0124; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.009302; cate_ast 1; frac_lines_func_ratio 0.027907; cate_var_zero false; frac_lines_pass 0.004651; frac_lines_import 0.055814; frac_lines_simplefunc 0; score_lines_no_logic 0.106977; frac_lines_print 0.009302. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 11 |
|---|---|
| hexsha | fddb0cd219604b7758190e6bb17c4dc60b79a754 |
| size / ext / lang | 1,099 / py / Python |
| repo_path | Scripts/simulation/ensemble/ensemble_interactions.py |
| repo_name | velocist/TS4CheatsInfo |
| repo_head_hexsha | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 |
| licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |
```python
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\ensemble\ensemble_interactions.py
# Compiled at: 2016-07-13 03:28:12
# Size of source mod 2**32: 1070 bytes
from objects.base_interactions import ProxyInteraction
from sims4.utils import classproperty, flexmethod


class EnsembleConstraintProxyInteraction(ProxyInteraction):
    INSTANCE_SUBCLASSES_ONLY = True

    @classproperty
    def proxy_name(cls):
        return '[Ensemble]'

    @classmethod
    def generate(cls, proxied_affordance, ensemble):
        result = super().generate(proxied_affordance)
        result.ensemble = ensemble
        return result

    @flexmethod
    def _constraint_gen(cls, inst, *args, **kwargs):
        inst_or_cls = inst if inst is not None else cls
        for constraint in (super(__class__, inst_or_cls)._constraint_gen)(*args, **kwargs):
            yield constraint
        yield inst_or_cls.ensemble.get_center_of_mass_constraint()
```
**Quality signals (row 11):** avg_line_length 37.896552; max_line_length 107; alphanum_fraction 0.724295; num_words 147; num_chars 1,099; mean_word_length 5.244898; frac_words_unique 0.632653; top_2grams 0.007782; top_3grams 0.035019; top_4grams 0; dupe_5grams 0; dupe_6grams 0; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0.073415; whitespace 0.181984; size_file_byte 1,099; num_lines 29; num_chars_line_max 108; num_chars_line_mean 37.896552; frac_chars_alphabet 0.784205; frac_chars_comments 0.286624; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.012853; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0.166667; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.111111; frac_lines_simplefunc 0.055556; score_lines_no_logic 0.5; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 12 |
|---|---|
| hexsha | fddc8823ffb3b416234d50c4b32a14d5668ecba6 |
| size / ext / lang | 1,663 / py / Python |
| repo_path | icmpv6socket/__init__.py |
| repo_name | TheDiveO/icmpv6-socket |
| repo_head_hexsha | fe3ee52e6793e3739975aea87e2b6511be96fa12 |
| licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | null |
```python
import socket
from typing import Optional, List

__version__ = '0.1.0'


class ICMPv6Socket(socket.socket):
    # From https://elixir.bootlin.com/linux/v4.18/source/include/linux/socket.h#L312
    SOL_ICMPV6 = 58  # type: int
    # From https://elixir.bootlin.com/linux/v4.18/source/include/uapi/linux/icmpv6.h#L139
    ICMPV6_FILTER = 1  # type: int
    # From ... yeah, not in the Linux kernel header files?!
    ICMPV6_ROUTER_SOL = 133  # type: int
    ICMPV6_ROUTER_ADV = 134  # type: int

    def __init__(self, message_types: Optional[List[int]] = None) \
            -> None:
        """Initializes an ICMPv6 socket. There isn't much argument here,
        it seems, unless you want to receive only certain ICMPv6 message
        types.

        :arg message_types: optional list of ICMPv6 message types (int).
            Defaults to None.
        """
        super(ICMPv6Socket, self).__init__(socket.AF_INET6,
                                           socket.SOCK_RAW,
                                           socket.IPPROTO_ICMPV6)
        self._filter_mask = bytearray(b'\x00' * 32)  # type: bytes
        # Guard added: accept_type() iterates its argument, so calling it
        # with the default None would raise a TypeError.
        if message_types is not None:
            self.accept_type(message_types)

    def accept_type(self, message_types: List[int]):
        # Please note that with the ICMPv6 filtering socket option
        # a "1" actually means to filter out(!), while "0" means to
        # let it pass to the socket. Crooked logic.
        self._filter_mask = bytearray(b'\xff' * 32)
        for msg_type in message_types:
            self._filter_mask[msg_type >> 3] &= ~(1 << (msg_type & 7))
        self.setsockopt(self.SOL_ICMPV6, self.ICMPV6_FILTER,
                        self._filter_mask)
```
**Quality signals (row 12):** avg_line_length 36.955556; max_line_length 89; alphanum_fraction 0.615755; num_words 217; num_chars 1,663; mean_word_length 4.529954; frac_words_unique 0.465438; top_2grams 0.085453; top_3grams 0.056968; top_4grams 0.044761; dupe_5grams 0.144456; dupe_6grams 0.095626; dupe_7grams 0.095626; dupe_8grams 0.095626; dupe_9grams 0.095626; dupe_10grams 0.095626; replacement_symbols 0; digital 0.042017; whitespace 0.284426; size_file_byte 1,663; num_lines 44; num_chars_line_max 90; num_chars_line_mean 37.795455; frac_chars_alphabet 0.784034; frac_chars_comments 0.387252; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.01357; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0.095238; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.095238; frac_lines_simplefunc 0; score_lines_no_logic 0.428571; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 13 |
|---|---|
| hexsha | fddd39b69360c06e6b24844fb2887dcd6cf29f89 |
| size / ext / lang | 603 / py / Python |
| repo_path | game/pkchess/res/map.py |
| repo_name | RaenonX/Jelly-Bot-API (max_issues variant: RaenonX/Jelly-Bot) |
| repo_head_hexsha | c7da1e91783dce3a2b71b955b3a22b68db9056cf |
| licenses | ["MIT"] |
| max_stars_count | 5 (events 2020-08-26T20:12:00.000Z → 2020-12-11T16:39:22.000Z) |
| max_issues_count | 234 (events 2019-12-14T03:45:19.000Z → 2020-08-26T18:55:19.000Z) |
| max_forks_count | 2 (events 2019-10-23T15:21:15.000Z → 2020-05-22T09:35:55.000Z) |
"""Game map resource manager."""
__all__ = ("get_map_template",)
_cache = {}
def get_map_template(name: str):
"""
Get a map template by its ``name``.
Returns ``None`` if not found.
Loaded :class:`MapTemplate` will be cached until the application exits.
:param name: name of the map template.
:return: map template object if found
"""
if name not in _cache:
# On-demand import & avoid circular import
from game.pkchess.map import MapTemplate
_cache[name] = MapTemplate.load_from_file(f"game/pkchess/res/map/{name}")
return _cache[name]
**Quality signals (row 13):** avg_line_length 24.12; max_line_length 81; alphanum_fraction 0.658375; num_words 82; num_chars 603; mean_word_length 4.670732; frac_words_unique 0.54878; top_2grams 0.143603; top_3grams 0.073107; top_4grams 0; dupe_5grams 0; dupe_6grams 0; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; digital 0; whitespace 0.228856; size_file_byte 603; num_lines 24; num_chars_line_max 82; num_chars_line_mean 25.125; frac_chars_alphabet 0.823656; frac_chars_comments 0.475954; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.154122; frac_chars_long_word_length 0.096774; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; cate_ast 1; frac_lines_func_ratio 0.142857; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.142857; frac_lines_simplefunc 0; score_lines_no_logic 0.428571; frac_lines_print 0. Plain `qsc_*` columns all 0 (frac_words_unique, frac_lines_string_concat null); effective 1; hits 0.
| Field | Row 14 |
|---|---|
| hexsha | fddd8f9fba011d6c4a116c9eb70ffe36d73e109e |
| size / ext / lang | 7,127 / py / Python |
| repo_path | public_data/views.py |
| repo_name | danamlewis/open-humans |
| repo_head_hexsha | 9b08310cf151f49032b66ddd005bbd47d466cc4e |
| licenses | ["MIT"] |
| max_stars_count | 57 (events 2016-09-01T21:55:52.000Z → 2022-03-27T22:15:32.000Z) |
| max_issues_count | 464 (events 2015-03-23T18:08:28.000Z → 2016-08-25T04:57:36.000Z) |
| max_forks_count | 25 (events 2017-01-24T16:23:27.000Z → 2021-11-07T01:51:42.000Z) |
from django.conf import settings
from django.contrib import messages as django_messages
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.views.generic.base import RedirectView, TemplateView
from django.views.generic.edit import CreateView, FormView
from raven.contrib.django.raven_compat.models import client as raven_client
from common.mixins import PrivateMixin
from common.utils import get_source_labels
from private_sharing.models import (
ActivityFeed,
DataRequestProject,
DataRequestProjectMember,
id_label_to_project,
)
from .forms import ConsentForm
from .models import PublicDataAccess, WithdrawalFeedback
class QuizView(PrivateMixin, TemplateView):
"""
Modification of TemplateView that accepts and requires POST.
This prevents users from jumping to the quiz link without going through
the informed consent pages.
"""
template_name = "public_data/quiz.html"
@method_decorator(require_POST)
def dispatch(self, *args, **kwargs):
return super(QuizView, self).dispatch(*args, **kwargs)
def post(self, *args, **kwargs):
return self.get(*args, **kwargs)
class ConsentView(PrivateMixin, FormView):
"""
Modification of FormView that walks through the informed consent content.
Stepping through the form is triggered by POST requests containing new
values in the 'section' field. If this field is present, the view overrides
form data processing.
"""
template_name = "public_data/consent.html"
form_class = ConsentForm
success_url = reverse_lazy("home")
def get(self, request, *args, **kwargs):
"""Customized to allow additional context."""
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form, **kwargs))
def form_invalid(self, form):
"""
Customized to add final section marker when reloading.
"""
return self.render_to_response(self.get_context_data(form=form, section=6))
def post(self, request, *args, **kwargs):
"""
Customized to convert a POST with 'section' into GET request.
"""
if "section" in request.POST:
kwargs["section"] = int(request.POST["section"])
self.request.method = "GET"
return self.get(request, *args, **kwargs)
else:
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
participant = self.request.user.member.public_data_participant
participant.enrolled = True
participant.save()
django_messages.success(
self.request,
("Thank you! The public data sharing " "feature is now activated."),
)
return super(ConsentView, self).form_valid(form)
class ToggleSharingView(PrivateMixin, RedirectView):
"""
Toggle the specified data_file to the specified value of public.
"""
permanent = False
url = reverse_lazy("my-member-data")
def get_redirect_url(self):
if "next" in self.request.POST:
return self.request.POST["next"]
else:
return super(ToggleSharingView, self).get_redirect_url()
def toggle_data(self, user, source, public):
if source not in get_source_labels() and not source.startswith(
"direct-sharing-"
):
error_msg = (
"Public sharing toggle attempted for "
'unexpected source "{}"'.format(source)
)
django_messages.error(self.request, error_msg)
if not settings.TESTING:
raven_client.captureMessage(error_msg)
return
project = id_label_to_project(source)
project_membership = DataRequestProjectMember.objects.get(
member=user.member, project=project
)
participant = user.member.public_data_participant
access, _ = PublicDataAccess.objects.get_or_create(
participant=participant, project_membership=project_membership
)
access.is_public = False
if public == "True":
if not project.no_public_data:
access.is_public = True
access.save()
if (
project.approved
and not ActivityFeed.objects.filter(
member=user.member, project=project, action="publicly-shared"
).exists()
):
event = ActivityFeed(
member=user.member, project=project, action="publicly-shared"
)
event.save()
def post(self, request, *args, **kwargs):
"""
Toggle public sharing status of a dataset.
"""
if "source" in request.POST and "public" in request.POST:
public = request.POST["public"]
source = request.POST["source"]
if public not in ["True", "False"]:
raise ValueError("'public' must be 'True' or 'False'")
self.toggle_data(request.user, source, public)
else:
raise ValueError("'public' and 'source' must be specified")
return super(ToggleSharingView, self).post(request, *args, **kwargs)
class WithdrawView(PrivateMixin, CreateView):
"""
A form that withdraws the user from the study on POST.
"""
template_name = "public_data/withdraw.html"
model = WithdrawalFeedback
fields = ["feedback"]
success_url = reverse_lazy("public-data:home")
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
participant = self.request.user.member.public_data_participant
participant.enrolled = False
participant.save()
django_messages.success(
self.request,
(
"You have successfully deactivated public data sharing and marked "
"your files as private."
),
)
form.instance.member = self.request.user.member
return super(WithdrawView, self).form_valid(form)
class HomeView(TemplateView):
"""
Provide this page's URL as the next URL for login or signup.
"""
template_name = "public_data/home.html"
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
projects = DataRequestProject.objects.filter(
approved=True, active=True
).order_by("name")
context.update({"projects": projects, "next": reverse_lazy("public-data:home")})
return context
class ActivateOverviewView(PrivateMixin, TemplateView):
"""
Apply PrivateMixin
"""
template_name = "public_data/overview.html"
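# A minimal sketch (hypothetical names, not part of this dataset row) of the
# POST-only TemplateView pattern used by QuizView above: require_POST on
# dispatch() makes GET return 405, while post() just renders the template.
# Assumes a configured Django project.
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.views.generic import TemplateView

class PostOnlyPageView(TemplateView):
    template_name = "example.html"  # hypothetical template

    @method_decorator(require_POST)
    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def post(self, *args, **kwargs):
        # delegate to get() so the template renders exactly as it would on GET
        return self.get(*args, **kwargs)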
30.852814 | 88 | 0.638698 | 795 | 7,127 | 5.603774 | 0.25912 | 0.029181 | 0.020202 | 0.024691 | 0.205612 | 0.170819 | 0.148148 | 0.127048 | 0.104602 | 0.104602 | 0 | 0.000192 | 0.268697 | 7,127 | 230 | 89 | 30.986957 | 0.854566 | 0.127543 | 0 | 0.173913 | 0 | 0 | 0.097211 | 0.019375 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07971 | false | 0 | 0.094203 | 0.014493 | 0.405797 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fde68e690db6e169850d5ee88a2ba6ad82e43a9a | 853 | py | Python | beyondtheadmin/invoices/templatetags/fullurl.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | ["MIT"] | 1 | 2021-11-27T06:40:34.000Z | 2021-11-27T06:40:34.000Z | beyondtheadmin/invoices/templatetags/fullurl.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | ["MIT"] | 2 | 2021-05-13T04:50:50.000Z | 2022-02-28T21:06:24.000Z | beyondtheadmin/invoices/templatetags/fullurl.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | ["MIT"] | null | null | null
import math
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
def buildfullurl(context, url):
"""Converts relative URL to absolute.
For example:
{% buildfullurl article.get_absolute_url %}
or:
{% buildfullurl "/custom-url/" %}
"""
return context.request.build_absolute_uri(url)
@register.filter(is_safe=True)
def add_domain(value):
try:
number = float(value)
frac, integer = math.modf(number)
if frac:
return mark_safe('CHF <span class="value">{:1,.2f}</span>'.format(number).replace(',', "'"))
else:
return mark_safe('CHF <span class="value">{:1,.0f}.-</span>'.format(number).replace(',', "'"))
except (ValueError, TypeError):
return value
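# Quick checks of add_domain's output (hypothetical values). Despite its name,
# the filter formats CHF amounts: fractional values keep two decimals, whole
# values get the Swiss ".-" suffix, and thousands are separated by apostrophes.
assert add_domain(1234.5) == 'CHF <span class="value">1\'234.50</span>'
assert add_domain(1000) == 'CHF <span class="value">1\'000.-</span>'
assert add_domain("n/a") == "n/a"  # non-numeric input is returned unchanged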
26.65625 | 106 | 0.637749 | 101 | 853 | 5.277228 | 0.564356 | 0.045028 | 0.052533 | 0.06379 | 0.120075 | 0.120075 | 0.120075 | 0.120075 | 0 | 0 | 0 | 0.005952 | 0.212192 | 853 | 31 | 107 | 27.516129 | 0.787202 | 0.16061 | 0 | 0 | 0 | 0 | 0.121916 | 0.087083 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fde73e6cb304ab68d07ceb772b316e170a4014cb | 967 | py | Python | big_o_project/Task4.py | CTylerD/Data-Structures-Algorithms-Projects | e72248a6433c0242003edf404a715d9f53e3792d | ["MIT"] | null | null | null | big_o_project/Task4.py | CTylerD/Data-Structures-Algorithms-Projects | e72248a6433c0242003edf404a715d9f53e3792d | ["MIT"] | null | null | null | big_o_project/Task4.py | CTylerD/Data-Structures-Algorithms-Projects | e72248a6433c0242003edf404a715d9f53e3792d | ["MIT"] | null | null | null
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
def create_lists_of_users():
outgoing_callers = set()
not_telemarketers = set()
for call in calls:
    outgoing_callers.add(call[0])
    not_telemarketers.add(call[1])
for text in texts:
    not_telemarketers.add(text[0])
    not_telemarketers.add(text[1])
find_telemarketers(outgoing_callers, not_telemarketers)
def find_telemarketers(out_calls, not_telemarketers):
potential_telemarketers = out_calls.difference(not_telemarketers)
print_results(potential_telemarketers)
def print_results(potential_telemarketers):
print("These numbers could be telemarketers:")
for number in sorted(potential_telemarketers):
print(number)
create_lists_of_users()
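# A tiny sanity check of the set-difference idea on hypothetical numbers:
# anything that only ever appears as an outgoing caller gets flagged.
_out_calls = {"(080) 1", "(080) 2", "(080) 3"}
_known_ok = {"(080) 2", "(080) 3", "(080) 4"}
assert _out_calls.difference(_known_ok) == {"(080) 1"}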
26.861111 | 69 | 0.709411 | 126 | 967 | 5.238095 | 0.349206 | 0.169697 | 0.086364 | 0.021212 | 0.187879 | 0.069697 | 0.069697 | 0.069697 | 0 | 0 | 0 | 0.005025 | 0.176836 | 967 | 35 | 70 | 27.628571 | 0.824121 | 0 | 0 | 0.08 | 0 | 0 | 0.061013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.04 | 0 | 0.16 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fde75eeec45ccf538859617be8047b6998c73dee | 971 | py | Python | codingstars/platinum3/subarray-sum2.py | yehyunchoi/Algorithm | 35e32159ee13b46b30b543fa79ab6e81d6719f13 | ["MIT"] | null | null | null | codingstars/platinum3/subarray-sum2.py | yehyunchoi/Algorithm | 35e32159ee13b46b30b543fa79ab6e81d6719f13 | ["MIT"] | null | null | null | codingstars/platinum3/subarray-sum2.py | yehyunchoi/Algorithm | 35e32159ee13b46b30b543fa79ab6e81d6719f13 | ["MIT"] | null | null | null
"""
The array [1,2,3,4] has 16 subsequences ("subarrays" here), as follows.
[]
[1]
[1, 2]
[1, 2, 3]
[1, 2, 3, 4]
[1, 2, 4]
[1, 3]
[1, 3, 4]
[1, 4]
[2]
[2, 3]
[2, 3, 4]
[2, 4]
[3]
[3, 4]
[4]
Counting the leading blank list ([]) as 0, the sum of all the
subsequence sums (the "subarray sum") is 80.
Read an array of numbers and compute the sum of its subsequence sums.
Input
An array of numbers; every number is an integer.
Output
Print the sum of the subsequence sums.
Sample Input 1
3 26 -14 12 4 -2
Sample Output 1
928
"""
def subarraySum(i=0, s=[]):
global total
total += sum(s)
if (i > n):
return
# duplicates are avoided by starting the range at i: rather than checking for them, just skip earlier indices
for k in range(i, n):
subarraySum(k+1, s+[arr[k]])
######################
# the sorting step seemed to make computation time blow up.
# in fact, it wasn't the sorting -- it was changing range(n) --> range(i, n) that made the difference
# different approach:
# just sum it as you iterate
arr = list(map(int, input().split()))
n = len(arr)
total = 0
subarraySum()
print(total)
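# Aside (not part of the original solution): every element appears in
# 2**(n-1) of the 2**n subsequences, so the total is computable in O(n).
# For the sample input 3 26 -14 12 4 -2: 2**5 * 29 == 928, matching
# Sample Output 1.
def subarray_sum_closed_form(values):
    return 2 ** (len(values) - 1) * sum(values) if values else 0

assert subarray_sum_closed_form([3, 26, -14, 12, 4, -2]) == 928
assert subarray_sum_closed_form([1, 2, 3, 4]) == 80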
15.412698 | 93 | 0.582904 | 187 | 971 | 3.026738 | 0.540107 | 0.017668 | 0.015901 | 0.014134 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078912 | 0.243048 | 971 | 63 | 94 | 15.412698 | 0.691156 | 0.687951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fde7b9b224da23bd030fbd50ece0e5433d5c208e | 1,683 | py | Python | fastalite/__main__.py | nhoffman/fastalite | 2571c126976f26c8ca06401586559f288245ca8d | ["MIT"] | 2 | 2017-02-16T14:30:18.000Z | 2019-10-03T19:20:57.000Z | fastalite/__main__.py | nhoffman/fastalite | 2571c126976f26c8ca06401586559f288245ca8d | ["MIT"] | 4 | 2017-06-30T14:05:08.000Z | 2022-02-17T00:03:28.000Z | fastalite/__main__.py | nhoffman/fastalite | 2571c126976f26c8ca06401586559f288245ca8d | ["MIT"] | null | null | null
"""Command line interface to the fastlite package
"""
import sys
import argparse
from .fastalite import fastalite, fastqlite, Opener
from . import __version__
def count(seqs, fname):
i = 0  # remains 0 when the input is empty
for i, seq in enumerate(seqs, 1):
pass
print(('{}\t{}'.format(fname, i)))
def names(seqs, fname):
for seq in seqs:
print(('{}\t{}'.format(fname, seq.id)))
def lengths(seqs, fname):
for seq in seqs:
print(('{}\t{}\t{}'.format(fname, seq.id, len(seq.seq))))
def main(arguments):
actions = {'count': count, 'names': names, 'lengths': lengths}
parser = argparse.ArgumentParser(
prog='fastalite', description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('action', choices=list(actions.keys()),
help="Action to perform")
parser.add_argument('infiles', help="Input file", nargs='+',
metavar='infile.{fasta,fastq}[{.gz,.bz2}]',
type=Opener('r'))
parser.add_argument('-V', '--version', action='version',
version=__version__,
help='Print the version number and exit')
args = parser.parse_args(arguments)
for infile in args.infiles:
with infile as f:
readfun = fastalite if 'fasta' in f.name else fastqlite
seqs = readfun(f)
try:
fun = actions[args.action]
fun(seqs, infile.name)
except ValueError as err:
sys.stderr.write('Error: {}\n'.format(str(err)))
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
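# Hypothetical programmatic usage (file names are placeholders): each action
# prints one tab-separated line per input file, e.g.
#
#   main(['count', 'seqs.fasta'])     ->  "seqs.fasta<TAB><record count>"
#   main(['lengths', 'reads.fastq'])  ->  one "<file>\t<id>\t<len>" line per record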
28.525424 | 67 | 0.572193 | 195 | 1,683 | 4.810256 | 0.45641 | 0.028785 | 0.03838 | 0.036247 | 0.092751 | 0.057569 | 0.057569 | 0.057569 | 0 | 0 | 0 | 0.004163 | 0.286393 | 1,683 | 58 | 68 | 29.017241 | 0.776853 | 0.027332 | 0 | 0.04878 | 0 | 0 | 0.120933 | 0.019644 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.02439 | 0.097561 | 0 | 0.219512 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdeb572c08b15704dfb038c3b7db65f44b06027a | 9,482 | py | Python | RpgPiratesAndFishers/Individual.py | LucasRR94/RPG_Pirates_and_Fishers | 75bbb57e916f7a878de34676b1d988e5d2506121 | ["Apache-2.0"] | null | null | null | RpgPiratesAndFishers/Individual.py | LucasRR94/RPG_Pirates_and_Fishers | 75bbb57e916f7a878de34676b1d988e5d2506121 | ["Apache-2.0"] | null | null | null | RpgPiratesAndFishers/Individual.py | LucasRR94/RPG_Pirates_and_Fishers | 75bbb57e916f7a878de34676b1d988e5d2506121 | ["Apache-2.0"] | null | null | null
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from libGamePiratesAndFishers import assertIfIsWeelFormat,makeSureThatIsnumberLimited
class Individual(object):
"""
This class defines the Individual object type: the base object for
fishers, also used for enemies in the application.
"""
def __init__(self,name,health,attack,defense):
"""
Constructor for the Individual class: assigns the name and the three
attributes used to create fishers and enemies.
@param name : (string) contains the name of Individual
@param health : (int) attribute representing the individual's health, 0..100
@param attack : (int) attribute representing the individual's attack, 0..100
@param defense : (int) attribute representing the individual's defense, 0..100
@return : None
"""
self.name = assertIfIsWeelFormat(name)
self.health = makeSureThatIsnumberLimited(health,100)
if(self.health == 0):
self.health = 1
self.attack = makeSureThatIsnumberLimited(attack,100)
self.defense = makeSureThatIsnumberLimited(defense,100)
def __del__(self):
"""
Destructor for the class: clears all the attributes.
@param none :
@return : None
"""
self.name = ''
self.health = None
self.attack = None
self.defense = None
def getName(self):
"""
Returns the name attribute, which represents the individual's name in the game.
@param none:
@return : (string) the name attribute of the object
"""
return self.name
def getHealth(self):
"""
Returns the attribute representing the individual's health in the game.
@param none:
@return : (int) the health attribute, between 0 ... 100
"""
return self.health
def getValueDefense(self):
"""
Returns the attribute representing the individual's defense in the game.
@param none:
@return : (int) the defense attribute, between 0 ... 100
"""
return self.defense
def getValueAttack(self):
"""
Returns the attribute representing the individual's attack in the game.
@param none:
@return : (int) the attack attribute, between 0 ... 100
"""
return self.attack
def __setAttack(self,attack):
"""
Sets the class attribute named attack.
@param attack:(int) an integer between 0..100 that will be stored in the attribute
@return None :
"""
self.attack = attack
def __setDefense(self,defense):
"""
Sets the class attribute named defense.
@param defense:(int) an integer between 0..100 that will be stored in the attribute
@return None :
"""
self.defense = defense
def __setHealth(self,health):
"""
Sets the class attribute named health.
@param health:(int) an integer between 0..100 that will replace the attribute
@return None :
"""
if(health == 0):
self.__del__()
else:
self.health = health
def __changeAttackOrDefense(self,newvalue,option):
"""
Changes the attack or defense attribute to a new value, e.g. when the
player exchanges a weapon or a piece of defense.
when option is : 1 --> attack
               : 2 --> defense
@param newvalue:(int) an integer between 0..100 that will replace the attribute
@return oldvalue :(int or tuple) the old attack or defense value,
or None plus an error message
"""
answer = None
if(type(newvalue) is int):
newnumb = newvalue
if(newnumb > 100):
newnumb = 100
if(newnumb < 0):
newnumb = 0
if((type(option) is int)):
if(option >=1 and option <= 2):
if(option == 1):
answer = self.getValueAttack()
self.__setAttack(newnumb) # change the values
return answer
else:
answer = self.getValueDefense()
self.__setDefense(newnumb) # change the values
return answer
else:
resptuple = (None,"error, this is not a supported option")
answer = resptuple
else:
resptuple = (None,"error, this is not a supported option")
answer = resptuple
if(type(newvalue) is str): # try to make conversion between str -> int
try:
newnumb = int(newvalue)
except ValueError:
resptuple = (None,"error, this is not a supported type")
answer = resptuple
else:
if(newnumb > 100):
newnumb = 100
if(newnumb < 0):
newnumb = 0
if((type(option) is int)):
if(option >=1 and option <= 2):
if(option == 1):
answer = self.getValueAttack()
self.__setAttack(newnumb) # change the values
return answer
else:
answer = self.getValueDefense()
self.__setDefense(newnumb) # change the values
return answer
else:
resptuple = (None,"error, this is not a supported option")
answer = resptuple
else:
resptuple = (None,"error, this is not a supported option")
answer = resptuple
finally:
return answer
else:
resptuple = (None,"error, this is not a supported type")
return resptuple
def usingMedkit(self, valuehealth):
"""
Applies one medkit to health. If health is already at the maximum, the
excess is discarded and health stays at 100.
@param valuehealth:(int) an integer between 0..100 that will be added to the health attribute
@return (int): 1 on successful use, 0 when the medkit cannot be used
"""
if(type(valuehealth) is int):
newnumb = valuehealth
if(newnumb > 100):
newnumb = 100
if(newnumb < 0):
newnumb = 0
backuphealth = self.getHealth()
updatevalue = newnumb + backuphealth
if(updatevalue >= 100):
self.__setHealth(100)
return 1
elif(newnumb == 0):
return 1
else:
self.__setHealth(updatevalue)
return 1
if(type(valuehealth) is str):
try:
newnumb = int(valuehealth)
except ValueError:
answer = 0
else:
if(newnumb > 100):
newnumb = 100
if(newnumb < 0):
newnumb = 0
backuphealth = self.getHealth()
updatevalue = newnumb + backuphealth
if(updatevalue>=100):
self.__setHealth(100)
answer = 1
elif(newnumb == 0):
answer= 1
else:
self.__setHealth(updatevalue)
answer = 1
finally:
return answer
else:
return 0
def changeAttack(self,newAttack):
"""
Changes the attack attribute by calling __changeAttackOrDefense with the
appropriate parameters.
@param newAttack:(int) an integer between 0..100 that will replace the attribute
@return oldvalue :(int or tuple) the old attack value,
or None plus an error message
"""
return self.__changeAttackOrDefense(newAttack,1)
def changeDefense(self,newDefense):
"""
Changes the defense attribute by calling __changeAttackOrDefense with the
appropriate parameters.
@param newDefense:(int) an integer between 0..100 that will replace the attribute
@return oldvalue :(int or tuple) the old defense value,
or None plus an error message
"""
return self.__changeAttackOrDefense(newDefense,2)
def getDamage(self, valuehit):
"""
Applies a hit from the enemy: checks the health and defense values and
updates both attributes accordingly.
@param valuehit:(int) an integer between 0..100 representing the value of an attack, to be absorbed by defense and health
@return int: 1 if the hit was applied correctly, 0 if not.
"""
if(type(valuehit) is str or type(valuehit) is int):
newnumb = valuehit
if(type(valuehit) is str):
try:
newnumb = int(valuehit)
except ValueError:
answer = 0
return answer
if(newnumb < 0): # no hit
newnumb = 0
return 1
defenseBackup = self.getValueDefense()
healthbackup = self.getHealth()
if(defenseBackup == healthbackup == None): #death
self.__del__()
return 1
if(defenseBackup == None):
defensevalue = 0
if(defenseBackup != None):
defensevalue = newnumb - defenseBackup
if(healthbackup == None):
healthbackup = 0
totaldefenseandhealth = defensevalue + healthbackup
if((self.getHealth() + self.getValueDefense()) <= newnumb): #death
self.__del__()
return 1
if(self.getValueDefense() > newnumb):
self.__setDefense(defenseBackup - newnumb)
return 1
elif(self.getValueDefense() <= newnumb):
self.__setDefense(0)
if(newnumb >= (defenseBackup + self.getHealth())):
print("Here")
self.__del__()
return 1
else: # keep live without defense
#newdefinitionattributehealth = healthbackup - defensevalue
self.__setHealth(healthbackup+defenseBackup- newnumb)
return 1
else:
return 0
def getDetail(self):
"""
Collects the attributes of the class into a report of the object.
@param None:
@return (string) : a report of the object's attributes
"""
resposta = "\n#########################################################\n"+"Name of individual :" + self.getName() + "\n Health of individual:" + str(self.getHealth())+"\nAttack of individual:"+str(self.getValueAttack())+"\nDefense of individual:"+str(self.getValueDefense())+"\n#########################################################\n"
return resposta
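# A short usage sketch (hypothetical values; assumes the helpers imported
# from libGamePiratesAndFishers accept these arguments):
#
#   hero = Individual("Jack", 80, 40, 30)
#   hero.usingMedkit(50)     # 80 + 50 is capped at 100
#   hero.getDamage(45)       # defense absorbs 30, health takes the remaining 15
#   print(hero.getDetail())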
25.489247 | 341 | 0.665999 | 1,217 | 9,482 | 5.141331 | 0.143796 | 0.015183 | 0.019338 | 0.023014 | 0.491769 | 0.444622 | 0.413137 | 0.392201 | 0.358479 | 0.345693 | 0 | 0.021058 | 0.228749 | 9,482 | 372 | 342 | 25.489247 | 0.834541 | 0.420165 | 0 | 0.596591 | 0 | 0 | 0.084289 | 0.023318 | 0.005682 | 0 | 0 | 0 | 0.011364 | 1 | 0.085227 | false | 0 | 0.005682 | 0 | 0.244318 | 0.005682 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdedc1899cf1d0a458c3cd2aa54431d61028feb7 | 1,544 | py | Python | 08_apples_and_bananas/apples.py | FabrizioPe/tiny_python_projects | e130d55d36cac43496a8ad482b6159234b5122f3 | ["MIT"] | null | null | null | 08_apples_and_bananas/apples.py | FabrizioPe/tiny_python_projects | e130d55d36cac43496a8ad482b6159234b5122f3 | ["MIT"] | null | null | null | 08_apples_and_bananas/apples.py | FabrizioPe/tiny_python_projects | e130d55d36cac43496a8ad482b6159234b5122f3 | ["MIT"] | null | null | null
#!/usr/bin/env python3
"""
Author : FabrizioPe
Date : 2021-02-10
Purpose: Find and replace vowels in a given text
"""
import argparse
import os
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Apples and bananas',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('str',
metavar='str',
help='Input text or file')
parser.add_argument('-v',
'--vowel',
help='The vowel to substitute',
metavar='str',
choices='aeiou',
type=str,
default='a')
args = parser.parse_args()
# read the file's contents, closing the handle when done
if os.path.isfile(args.str):
    with open(args.str) as fh:
        args.str = fh.read().rstrip()
return args
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
text = args.str
vowel = args.vowel
table = {'a': vowel, 'e': vowel, 'i': vowel, 'o': vowel, 'u': vowel,
'A': vowel.upper(), 'E': vowel.upper(), 'I': vowel.upper(),
'O': vowel.upper(), 'U': vowel.upper()}
# apply the transformation defined in the table, to the input text
print(text.translate(str.maketrans(table)))
# --------------------------------------------------
if __name__ == '__main__':
main()
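# The core trick in main(), shown standalone (hypothetical demo values):
# str.maketrans builds a character mapping and str.translate applies it
# to the whole string in one pass.
_demo = str.maketrans({'a': 'o', 'e': 'o', 'i': 'o', 'u': 'o'})
assert "apples and bananas".translate(_demo) == "opplos ond bononos"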
26.169492 | 72 | 0.483808 | 158 | 1,544 | 4.639241 | 0.525316 | 0.068213 | 0.046385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008249 | 0.293394 | 1,544 | 58 | 73 | 26.62069 | 0.663611 | 0.265544 | 0 | 0.066667 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.166667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdedd48ecc2cd5713a5620f6663699573feeb1b9 | 1,434 | py | Python | utils.py | gabrs-sousa/brasil-rugby-ranking | 78b8f6b466e688f507cb9d97dbbb80442f3c67de | ["MIT"] | 1 | 2020-05-30T03:34:31.000Z | 2020-05-30T03:34:31.000Z | utils.py | gabrs-sousa/brasil-rugby-ranking | 78b8f6b466e688f507cb9d97dbbb80442f3c67de | ["MIT"] | null | null | null | utils.py | gabrs-sousa/brasil-rugby-ranking | 78b8f6b466e688f507cb9d97dbbb80442f3c67de | ["MIT"] | null | null | null
import pandas as pd
from openpyxl import worksheet
def map_team_names(names_sheet: worksheet, games_sheet: worksheet):
mapped_names = []
missing_names = set()
names_last_row = names_sheet.max_row
for row in range(1, names_last_row):
team_name = names_sheet.cell(row, 1).value
if team_name:
mapped_names.append(team_name.upper())
games_last_row = games_sheet.max_row
for row in range(2, games_last_row):
visitor = games_sheet.cell(row, 7).value
home = games_sheet.cell(row, 12).value
if home and home.upper() not in mapped_names:
missing_names.add(home)
if visitor and visitor.upper() not in mapped_names:
missing_names.add(visitor)
if missing_names:
return missing_names
else:
return False
def format_name(name: str) -> str:
"""
Strips whitespace before and after the word.
Upper-cases the name to avoid case-sensitivity mismatches.
"""
name = name.strip()
name = name.upper()
return name
def export_output_file(teams: dict, output_file_name: str):
ranking_df = pd.DataFrame(teams)
ranking_df = ranking_df.transpose()
ranking_df = ranking_df.sort_values('points', ascending=False)
ranking_df = ranking_df[ranking_df['total_games'] > 0].dropna()
ranking_df.to_excel(output_file_name)
print(f'Workbook "{output_file_name}" has been created successfully!')
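# Quick check of format_name (hypothetical input): surrounding whitespace is
# stripped and the name is upper-cased.
assert format_name("  Jacaré Rugby ") == "JACARÉ RUGBY"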
28.117647 | 74 | 0.679916 | 204 | 1,434 | 4.52451 | 0.406863 | 0.087757 | 0.069339 | 0.078007 | 0.130011 | 0.130011 | 0.130011 | 0.078007 | 0 | 0 | 0 | 0.006335 | 0.229428 | 1,434 | 50 | 75 | 28.68 | 0.828959 | 0.058577 | 0 | 0 | 0 | 0 | 0.057895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.060606 | 0 | 0.242424 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdee1b940008dee7fb0abcb8d4fb0eaf97c8d578 | 8,889 | py | Python | mainapp/views.py | AHTOH2001/OOP_4_term | c9b0f64f3507486e0670cc95d7252862b673d845 | ["MIT"] | null | null | null | mainapp/views.py | AHTOH2001/OOP_4_term | c9b0f64f3507486e0670cc95d7252862b673d845 | ["MIT"] | null | null | null | mainapp/views.py | AHTOH2001/OOP_4_term | c9b0f64f3507486e0670cc95d7252862b673d845 | ["MIT"] | null | null | null
from django.core.mail import send_mail
from django.http import Http404
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.models import User
from django.utils.datastructures import MultiValueDictKeyError
from django.utils import timezone
from django.contrib.auth import login, logout
from django import urls
import copy
# from django.contrib.auth.hashers import make_password, check_password, is_password_usable
# from datetime import datetime
from django.views.generic import DetailView
from CheekLit import settings
from .utils import get_code, is_administrator, should_show_price
from .models import Client, Book, Author, Genre, Basket, Status, SliderImages
from .forms import ClientRegisterForm, ClientAuthorizationForm
from .settings import time_for_registration
def home(request):
books = Book.objects.filter(status=True).order_by('-amount')
slider_images = SliderImages.objects.filter(status=True)
# return render(request, 'home.html', {'books': books, 'is_administrator': is_administrator(request.user)})
return render(request, 'home.html',
{'books': books, 'genres': Genre.objects.all(), 'authors': Author.objects.all(),
'slider_images': slider_images})
def book_detail(request, slug):
current_book = Book.objects.get(slug=slug)
if request.method == 'POST':
if 'add_to_basket' in request.GET:
if is_administrator(request.user):
raise Http404('Administration does not have a basket')
client = request.user.client_set.get()
current_basket, is_created = client.baskets.get_or_create(status=Status.IN_PROCESS)
if should_show_price(request.user):
current_basket.books.add(current_book)
messages.success(request, 'The book was successfully added to the basket')
return render(request, 'book_detail.html', {'book': current_book})
# return super(BookDetailView, self).get(request, *args, **kwargs)
# class BookDetailView(DetailView):
# queryset = Book.objects.all()
# model = Book
# context_object_name = 'book'
# template_name = 'book_detail.html'
# slug_url_kwarg = 'slug'
#
# def post(self, request, *args, **kwargs):
# pass
# # context = super().get_context_data(object=self.object)
# # return super().render_to_response(context)
class AuthorDetailView(DetailView):
queryset = Author.objects.all()
model = Author
context_object_name = 'author'
template_name = 'author_detail.html'
slug_url_kwarg = 'slug'
class GenreDetailView(DetailView):
queryset = Genre.objects.all()
model = Genre
context_object_name = 'genre'
template_name = 'genre_detail.html'
slug_url_kwarg = 'slug'
def register(request):
if request.method == 'POST':
form = ClientRegisterForm(data=request.POST)
if form.is_valid():
client, raw_pass = form.save()
confirmation_url = request.META["HTTP_HOST"] + urls.reverse(
register_complete) + f'?login={client.user.email}&code={get_code(client.user.email, "abs", 20)}'
email_message = f'''Hello, dear {client.user.last_name} {client.user.first_name}!
You are one step away from completing your registration with the CheekLit online library.
Your credentials for signing in to the system:
Login: {client.user.email}
Password: {raw_pass}
Attention! You must confirm your registration details!
To confirm, simply follow this link:
{confirmation_url}
If you really wish to confirm your registration, please do so before {(timezone.localtime() + time_for_registration).strftime('%H:%M %d.%m.%Y')}. Otherwise your registration details will be removed from the system.
Regards, the administration of the CheekLit online library'''
send_mail(
f'Registration confirmation at {request.META["HTTP_HOST"]}',
email_message,
'CheekLitBot@gmail.com',
[client.user.email],
fail_silently=False,
)
messages.success(request, 'User successfully created; check your email to confirm the registration')
return redirect('home')
else:
messages.error(request, 'Some of the data entered is invalid')
else:
form = ClientRegisterForm()
return render(request, 'register.html',
{'form': form, 'genres': Genre.objects.all(), 'authors': Author.objects.all()})
def register_complete(request):
try:
email = request.GET['login']
code = request.GET['code'].replace(' ', '+')
if get_code(email, 'abs', 20) == code:
# Delete outdated clients
User.objects.filter(date_joined__lt=timezone.localtime() - time_for_registration,
is_active=False, is_staff=False, is_superuser=False).delete()
try:
if User.objects.get(email=email).is_active is True:
messages.warning(request, 'The user is already confirmed')
else:
messages.success(request, 'User successfully confirmed; all that remains is to sign in')
User.objects.filter(email=email).update(is_active=True)
return redirect('authorize')
except User.DoesNotExist:
messages.error(request, 'It appears the registration link has expired')
else:
messages.error(request, f'The code parameter is invalid')
except MultiValueDictKeyError as e:
messages.error(request, f'Missing parameter {e.args}')
return redirect('home')
def authorize(request):
if request.method == 'POST':
form = ClientAuthorizationForm(data=request.POST)
if form.is_valid():
client = form.get_user()
login(request, client)
messages.success(request, f'Welcome, {client.last_name} {client.first_name}')
return redirect('home')
else:
messages.error(request, 'Some of the data entered is invalid')
else:
form = ClientAuthorizationForm()
return render(request, 'authorize.html',
{'form': form, 'genres': Genre.objects.all(), 'authors': Author.objects.all()})
def client_logout(request):
logout(request)
return redirect('home')
def useful_information(request):
return render(request, 'useful_information.html')
def about_us(request):
return render(request, 'about_us.html')
def contact(request):
return render(request, 'contact.html')
def basket(request):
if request.user.is_authenticated:
if is_administrator(request.user):
raise Http404('Administration does not have a basket')
client = request.user.client_set.get()
current_basket, is_created = client.baskets.get_or_create(status=Status.IN_PROCESS)
if not should_show_price(request.user):
current_basket.books.clear()
if request.method == 'POST':
if 'delete_book' in request.GET:
current_basket.books.remove(request.GET['delete_book'])
if 'clear' in request.GET:
saved_basket = copy.copy(current_basket)
saved_basket.status = Status.ABANDONED
Basket.objects.filter(client=client, status=Status.ABANDONED).delete()
saved_basket.save()
client.baskets.add(saved_basket)
current_basket.books.clear()
# client.baskets.create(status=Status.ABANDONED, )
if 'restore' in request.GET:
try:
client.baskets.get(status=Status.ABANDONED)
except Basket.DoesNotExist:
raise Http404('Not found abandoned basket')
client.baskets.filter(status=Status.IN_PROCESS).delete()
client.baskets.filter(status=Status.ABANDONED).update(status=Status.IN_PROCESS)
current_basket = client.baskets.get(status=Status.IN_PROCESS)
return render(request, 'basket.html', {'BookModel': Book, 'books_in_basket': current_basket.books.all()})
else:
raise Http404('User is not authenticated')
def order(request):
if request.user.is_authenticated and should_show_price(request.user):
if is_administrator(request.user):
raise Http404('Administration does not have a basket')
client = request.user.client_set.get()
current_basket, is_created = client.baskets.get_or_create(status=Status.IN_PROCESS)
current_basket.status = Status.ON_HANDS
current_basket.date_of_taking = timezone.now()
current_basket.save()
return render(request, 'order.html')
else:
raise Http404('User is not authenticated')
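# A hypothetical urls.py sketch wiring up the views above (the route names
# 'home' and 'authorize' are the ones the redirect() calls rely on):
#
#   from django.urls import path
#   from mainapp import views
#
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('register/', views.register, name='register'),
#       path('register/complete/', views.register_complete, name='register_complete'),
#       path('authorize/', views.authorize, name='authorize'),
#       path('basket/', views.basket, name='basket'),
#       path('order/', views.order, name='order'),
#   ]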
39.15859 | 232 | 0.664867 | 1,013 | 8,889 | 5.698914 | 0.253702 | 0.029274 | 0.032912 | 0.021826 | 0.300537 | 0.241642 | 0.206478 | 0.170449 | 0.135805 | 0.135805 | 0 | 0.00366 | 0.231522 | 8,889 | 226 | 233 | 39.331858 | 0.841458 | 0.081336 | 0 | 0.263804 | 0 | 0.01227 | 0.220845 | 0.02934 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067485 | false | 0.01227 | 0.09816 | 0.018405 | 0.325153 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdef583937c7416dc3c1150c9fa7843d7266de92 | 1,941 | py | Python | extra/old-fix-midroll.py | FrederikBanke/critrolesync.github.io | ca09fff7541014d81472d687e48ecd5587cc15ff | ["MIT"] | 5 | 2020-06-29T13:39:07.000Z | 2022-02-07T00:43:55.000Z | extra/old-fix-midroll.py | FrederikBanke/critrolesync.github.io | ca09fff7541014d81472d687e48ecd5587cc15ff | ["MIT"] | 11 | 2020-06-28T09:45:38.000Z | 2022-03-30T17:56:35.000Z | extra/old-fix-midroll.py | FrederikBanke/critrolesync.github.io | ca09fff7541014d81472d687e48ecd5587cc15ff | ["MIT"] | 5 | 2020-06-29T21:17:13.000Z | 2021-09-08T06:34:32.000Z
def str2sec(string):
if len(string.split(':')) == 3:
hours, mins, secs = map(int, string.split(':'))
elif len(string.split(':')) == 2:
mins, secs = map(int, string.split(':'))
hours = 0
else:
raise ValueError('string must have the form [hh:]mm:ss : ' + str(string))
seconds = 3600*hours + 60*mins + secs
return seconds
def sec2str(seconds, format=None):
if seconds < 0:
raise ValueError('seconds must be nonnegative: ' + str(seconds))
mins, secs = divmod(seconds, 60)
hours, mins = divmod(mins, 60)
if format == 'youtube':
string = '%dh%02dm%02ds' % (hours, mins, secs)
else:
string = '%d:%02d:%02d' % (hours, mins, secs)
return string
def fixAnchorMidroll(newAdDuration=61, oldAdDuration=0):
'''For fixing issue #8: https://github.com/critrolesync/critrolesync.github.io/issues/8'''
anchor_podcast_episodes = data[1]['episodes'][19:]
for ep in anchor_podcast_episodes:
if 'timestampsBitrate' in ep:
# need to adjust for new bitrate
bitrateRatio = 128/127.7
else:
# no need to adjust for bitrate
bitrateRatio = 1
print(ep['id'])
print()
print(' "timestamps": [')
for i, (youtube, podcast, comment) in enumerate(ep['timestamps']):
if i<2: # before break
podcast_new = sec2str(str2sec(podcast)*bitrateRatio)
else: # after break
podcast_new = sec2str((str2sec(podcast)-oldAdDuration+newAdDuration)*bitrateRatio)
if i<len(ep['timestamps'])-1: # include final comma
print(f' ["{youtube}", "{podcast_new}", "{comment}"],')
else: # no final comma
print(f' ["{youtube}", "{podcast_new}", "{comment}"]')
print(' ]')
print()
print()
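# Quick checks of the time helpers above (hypothetical values):
assert str2sec('1:02:03') == 3723          # hh:mm:ss
assert str2sec('02:03') == 123             # mm:ss
assert sec2str(3723) == '1:02:03'
assert sec2str(3723, format='youtube') == '1h02m03s'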
37.326923 | 98 | 0.548171 | 215 | 1,941 | 4.911628 | 0.413953 | 0.045455 | 0.036932 | 0.026515 | 0.191288 | 0.191288 | 0.075758 | 0.075758 | 0 | 0 | 0 | 0.034277 | 0.308604 | 1,941 | 51 | 99 | 38.058824 | 0.752608 | 0.106131 | 0 | 0.190476 | 0 | 0 | 0.189095 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.119048 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdf00539d71fcfd43067729f990a71a6b54c1f86 | 415 | py | Python | 102-neopixel.d/main.py | wa1tnr/cpx-basic-studies | 772d38803bc2394980d81cea7873a4bc027dfb9f | ["MIT"] | null | null | null | 102-neopixel.d/main.py | wa1tnr/cpx-basic-studies | 772d38803bc2394980d81cea7873a4bc027dfb9f | ["MIT"] | null | null | null | 102-neopixel.d/main.py | wa1tnr/cpx-basic-studies | 772d38803bc2394980d81cea7873a4bc027dfb9f | ["MIT"] | null | null | null
# Adafruit CircuitPython 2.2.0
# Adafruit CircuitPlayground Express
import board ; import neopixel; import time
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=.2)
pixels.fill((0,0,0))
pixels.show()
def blue():
pixels.fill((0,0,22))
def magenta():
pixels.fill((22,0,22))
blue(); pixels.show(); time.sleep(0.7);
magenta(); pixels.show(); time.sleep(1.4);
pixels.fill((0,0,0)); pixels.show()
23.055556 | 61 | 0.684337 | 64 | 415 | 4.4375 | 0.359375 | 0.035211 | 0.116197 | 0.126761 | 0.161972 | 0.161972 | 0.161972 | 0 | 0 | 0 | 0 | 0.068681 | 0.122892 | 415 | 17 | 62 | 24.411765 | 0.711538 | 0.151807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdf306a6233eb15d853aa05d6d7553accacc2060 | 3,717 | py | Python | harmoni_detectors/harmoni_stt/test/test_deepspeech.py | interaction-lab/HARMONI | 9c88019601a983a1739744919a95247a997d3bb1 | ["MIT"] | 7 | 2020-09-02T06:31:21.000Z | 2022-02-18T21:16:44.000Z | harmoni_detectors/harmoni_stt/test/test_deepspeech.py | micolspitale93/HARMONI | cf6a13fb85e3efb4e421dbfd4555359c0a04acaa | ["MIT"] | 61 | 2020-05-15T16:46:32.000Z | 2021-07-28T17:44:49.000Z | harmoni_detectors/harmoni_stt/test/test_deepspeech.py | micolspitale93/HARMONI | cf6a13fb85e3efb4e421dbfd4555359c0a04acaa | ["MIT"] | 3 | 2020-10-05T23:01:29.000Z | 2022-03-02T11:53:34.000Z
#!/usr/bin/env python3
# Common Imports
import io
import rospy
import sys
import unittest
# Specific Imports
import time
import wave
from harmoni_common_lib.action_client import HarmoniActionClient
from harmoni_common_lib.constants import ActionType, DetectorNameSpace, SensorNameSpace, State
from audio_common_msgs.msg import AudioData
from std_msgs.msg import String
PKG = "test_harmoni_stt"
class TestDeepSpeech_Common(unittest.TestCase):
def setUp(self):
self.feedback = State.INIT
self.result = False
self.test_file = rospy.get_param("test_deepspeech_input")
rospy.init_node("test_deepspeech", log_level=rospy.INFO)
self.rate = rospy.Rate(20)
self.output_sub = rospy.Subscriber(
"/harmoni/detecting/stt/default", String, self._detecting_callback
)
# provide mock microphone
self.audio_pub = rospy.Publisher(
SensorNameSpace.microphone.value
+ rospy.get_param("stt/default_param/subscriber_id"),
AudioData,
queue_size=10,
)
rospy.Subscriber(
DetectorNameSpace.stt.value + "stt_default",
String,
self.text_received_callback,
)
# startup stt node
self.server = "stt_default"
self.client = HarmoniActionClient(self.server)
self.client.setup_client(
self.server, self._result_callback, self._feedback_callback, wait=True
)
rospy.loginfo("TestDeepSpeech: Turning ON stt server")
self.client.send_goal(
action_goal=ActionType.ON, optional_data="Setup", wait=False
)
rospy.loginfo("TestDeepSpeech: Started up. waiting for DeepSpeech startup")
time.sleep(5)
rospy.loginfo("TestDeepSpeech: publishing audio")
chunk_size = 1024
wf = wave.open(self.test_file)
# read data (based on the chunk size)
index = 0
audio_length = wf.getnframes()
while index+chunk_size < audio_length:
data = wf.readframes(chunk_size)
self.audio_pub.publish(data)
index = index+chunk_size
time.sleep(0.2)
rospy.loginfo(
f"TestDeepSpeech: audio subscribed to by #{self.output_sub.get_num_connections()} connections."
)
def _feedback_callback(self, data):
rospy.loginfo(f"TestDeepSpeech: Feedback: {data}")
self.feedback = data["state"]
def _status_callback(self, data):
rospy.loginfo(f"TestDeepSpeech: Status: {data}")
self.result = True
def _result_callback(self, data):
rospy.loginfo(f"TestDeepSpeech: Result: {data}")
self.result = True
def text_received_callback(self, data):
rospy.loginfo(f"TestDeepSpeech: Text back: {data}")
self.result = True
def _detecting_callback(self, data):
rospy.loginfo(f"TestDeepSpeech: Detecting: {data}")
self.result = True
class TestDeepSpeech_Valid(TestDeepSpeech_Common):
def test_IO(self):
rospy.loginfo(
"TestDeepSpeech[TEST]: basic IO test to ensure data "
+ "('hello' audio) is received and responded to. Waiting for transcription..."
)
while not rospy.is_shutdown() and not self.result:
self.rate.sleep()
assert self.result
def main():
# TODO combine validity tests into test suite so that setup doesn't have to run over and over.
import rostest
rospy.loginfo("test_deepspeech started")
rospy.loginfo("TestDeepSpeech: sys.argv: %s" % str(sys.argv))
rostest.rosrun(PKG, "test_deepspeech", TestDeepSpeech_Valid, sys.argv)
if __name__ == "__main__":
main()
31.235294 | 107 | 0.65456 | 431 | 3,717 | 5.473318 | 0.338747 | 0.061043 | 0.033065 | 0.068673 | 0.117847 | 0.09114 | 0.09114 | 0 | 0 | 0 | 0 | 0.004673 | 0.251547 | 3,717 | 118 | 108 | 31.5 | 0.843278 | 0.059995 | 0 | 0.068966 | 0 | 0 | 0.206768 | 0.041009 | 0 | 0 | 0 | 0.008475 | 0.011494 | 1 | 0.091954 | false | 0 | 0.126437 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdf47c8f7eacc32cfd98b13ee0730f15d82165c5 | 2,196 | py | Python | smoked/management/commands/smoked.py | martinsvoboda/django-smoked | 42b64fff23a37e3df42f8fc54535ea496dd27d84 | ["MIT"] | 6 | 2015-01-14T12:02:58.000Z | 2021-08-17T23:18:56.000Z | smoked/management/commands/smoked.py | martinsvoboda/django-smoked | 42b64fff23a37e3df42f8fc54535ea496dd27d84 | ["MIT"] | 7 | 2015-01-24T11:36:07.000Z | 2015-01-26T04:55:31.000Z | smoked/management/commands/smoked.py | martinsvoboda/django-smoked | 42b64fff23a37e3df42f8fc54535ea496dd27d84 | ["MIT"] | 1 | 2015-01-25T20:48:06.000Z | 2015-01-25T20:48:06.000Z
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from optparse import make_option
import time
from django import VERSION
from django.core.management.base import NoArgsCommand
from smoked import default_registry
from smoked.runner import run_tests
stats_msg = """
Results
=======
Total: {total}
Success: {success}
Failure: {failure}
--------
Time: {time:.1f}s
"""
class Command(NoArgsCommand):
help = 'Run all registered smoke tests'
option_list = NoArgsCommand.option_list + (
make_option(
'-n', '--dry-run', dest='dry_run',
action='store_true', default=False,
help="Only collect test, don't execute them."),
)
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
start_time = time.time()
if options.get('dry_run'):
count = len(default_registry.tests)
if verbosity:
self.log('{0} smoke test(s) could be run'.format(count))
else:
self.log(str(count))
return
success = failure = 0
for result in run_tests():
positive = 'error' not in result
if positive:
success += 1
else:
failure += 1
if verbosity > 1:
output = 'Success' if positive else 'Fail!'
self.log('{0}... {1}'.format(result['name'], output))
if not positive:
self.log(str(result['error']))
else:
output = '.' if positive else 'F'
self.log(output, ending='')
stats = {
'total': success + failure,
'success': success,
'failure': failure,
'time': time.time() - start_time
}
if verbosity:
self.log(stats_msg.format(**stats))
else:
self.log('') # print out new line after dots
def log(self, msg='', ending='\n'):
# Backward compatibility with Django 1.4
if VERSION[1] == 4:
self.stdout.write(msg + ending)
else:
self.stdout.write(msg, ending=ending)
27.45 | 72 | 0.539617 | 243 | 2,196 | 4.786008 | 0.407407 | 0.042132 | 0.036114 | 0.048151 | 0.103181 | 0.061909 | 0 | 0 | 0 | 0 | 0 | 0.009635 | 0.338342 | 2,196 | 79 | 73 | 27.797468 | 0.790778 | 0.035064 | 0 | 0.109375 | 0 | 0 | 0.14279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.109375 | 0 | 0.203125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdfa15c5c9e42a9b497c846a1dd12bc7ab7f4c76 | 623 | py | Python | code/waldo/conf/guisettings.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | ["MIT"] | null | null | null | code/waldo/conf/guisettings.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | ["MIT"] | null | null | null | code/waldo/conf/guisettings.py | amarallab/waldo | e38d23d9474a0bcb7a94e685545edb0115b12af4 | ["MIT"] | null | null | null
COLLIDER_SUITE_OFFSHOOT_RANGE = (0, 100)
COLLIDER_SUITE_SPLIT_ABS_RANGE = (0, 10)
COLLIDER_SUITE_SPLIT_REL_RANGE = (-1, 1, 2)
COLLIDER_SUITE_ASSIMILATE_SIZE_RANGE = (0, 10)
TAPE_FRAME_SEARCH_LIMIT_RANGE = (1, 100000)
TAPE_PIXEL_SEARCH_LIMIT_RANGE = (1, 1000000)
DEFAULT_CALIBRATION_ENCLOSURE_SIZE_RANGE = (0, 1000)
COLLISION_PIXEL_OVERLAP_MARGIN_RANGE = (1, 2000)
SCORE_CONTRAST_RADIO_RANGE = (1.0, 5.0)
SCORE_CONTRAST_DIFF_RANGE = (-0.2, 0.2)
SCORE_GOOD_FRACTION_RANGE = (0.0, 1.1)
SCORE_ACCURACY_RANGE = (0.0, 1.1)
SCORE_COVERAGE_RANGE = (0.0, 1.1)
ROI_BORDER_OFFSET_RANGE = (0, 200)
ROI_CORNER_OFFSET_RANGE = (0, 200)
34.611111 | 52 | 0.781701 | 106 | 623 | 4.122642 | 0.40566 | 0.1373 | 0.048055 | 0.05492 | 0.084668 | 0.064073 | 0 | 0 | 0 | 0 | 0 | 0.117857 | 0.101124 | 623 | 18 | 53 | 34.611111 | 0.6625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
fdfc80e749f6ee439afc826e7feee5425163a88f | 1,237 | py | Python | android_store_service/utils/config_utils.py | gpiress/android-store-service | da81c7e79a345d790f5e744fc8fdfae0e6941765 | ["Apache-2.0"] | 5 | 2020-12-10T14:05:04.000Z | 2020-12-18T09:04:35.000Z | android_store_service/utils/config_utils.py | gpiress/android-store-service | da81c7e79a345d790f5e744fc8fdfae0e6941765 | ["Apache-2.0"] | 4 | 2020-12-15T12:34:51.000Z | 2021-06-28T14:04:34.000Z | android_store_service/utils/config_utils.py | gpiress/android-store-service | da81c7e79a345d790f5e744fc8fdfae0e6941765 | ["Apache-2.0"] | 5 | 2020-12-15T12:10:22.000Z | 2022-03-18T20:06:38.000Z
# Copyright 2019 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import current_app
def read_file(conf_path):
with open(str(conf_path), "r") as _file:
return _file.read()
def get_secret(secret, path=None):
if not path:
path = current_app.config.get("SECRETS_PATH")
file_path = f"{path}/{secret}"
if not secret_exists(secret, path=path):
raise FileNotFoundError(f"Secret {secret} does not exist at {file_path}")
return read_file(file_path)
def secret_exists(secret, path=None):
if not path:
path = current_app.config.get("SECRETS_PATH")
file_path = f"{path}/{secret}"
if os.path.exists(file_path):
return True
return False
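# A hypothetical usage sketch inside a Flask application context
# (SECRETS_PATH and the secret name are illustrative assumptions):
#
#   app.config["SECRETS_PATH"] = "/etc/secrets"
#   with app.app_context():
#       if secret_exists("play-store-key"):
#           key = get_secret("play-store-key")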
29.452381 | 81 | 0.712207 | 188 | 1,237 | 4.585106 | 0.484043 | 0.069606 | 0.030162 | 0.037123 | 0.180974 | 0.180974 | 0.180974 | 0.180974 | 0.180974 | 0.180974 | 0 | 0.008048 | 0.196443 | 1,237 | 41 | 82 | 30.170732 | 0.859155 | 0.442199 | 0 | 0.315789 | 0 | 0 | 0.147929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.105263 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a90000a60889fa2e13612a2352497c1c01e09cb6 | 71,385 | py | Python | DeSu2SE.py | XxArcaiCxX/Devil-Survivor-2-Record-Breaker-Save-Editor | 872717f66f1d9045d48f8d4c2621a925ee4e2817 | ["MIT"] | null | null | null | DeSu2SE.py | XxArcaiCxX/Devil-Survivor-2-Record-Breaker-Save-Editor | 872717f66f1d9045d48f8d4c2621a925ee4e2817 | ["MIT"] | null | null | null | DeSu2SE.py | XxArcaiCxX/Devil-Survivor-2-Record-Breaker-Save-Editor | 872717f66f1d9045d48f8d4c2621a925ee4e2817 | ["MIT"] | null | null | null
#!/usr/bin/env python3
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import os
import sys
print_n = sys.stdout.write
STAT_TXT = ("ST", "MA", "VI", "AG")
# Characters
CHAR_OFFSET = "0x24"
CHAR_ID = ("0x75", 2)
CHAR_LVL = ("0x79", 1)
CHAR_EXP = ("0x7C", 2)
CHAR_HP = ("0x82", 2)
CHAR_MP = ("0x84", 2)
CHAR_ST = ("0x7E", 1)
CHAR_MA = ("0x7F", 1)
CHAR_VI = ("0x80", 1)
CHAR_AG = ("0x81", 1)
CHAR_CMD1 = ("0x86", 1)
CHAR_CMD2 = ("0x87", 1)
CHAR_CMD3 = ("0x88", 1)
CHAR_PAS1 = ("0x89", 1)
CHAR_PAS2 = ("0x8A", 1)
CHAR_PAS3 = ("0x8B", 1)
CHAR_RAC = ("0x8C", 1)
CHAR_MOV = ("0x9F", 1)
# Miscellaneous
MISC_MACCA = ("0x6C4", 4)
# Demons
DE_NUM_MAX = 27
DE_OFFSET = "0x20"
DE_ID = ("0x2B6", 2)
DE_LVL = ("0x2B9", 1)
DE_EXP = ("0x2BC", 2)
DE_HP = ("0x2C2", 2)
DE_MP = ("0x2C4", 2)
DE_ST = ("0x2BE", 1)
DE_MA = ("0x2BF", 1)
DE_VI = ("0x2C0", 1)
DE_AG = ("0x2C1", 1)
DE_CMD1 = ("0x2C6", 1)
DE_CMD2 = ("0x2C7", 1)
DE_CMD3 = ("0x2C8", 1)
DE_PAS1 = ("0x2C9", 1)
DE_PAS2 = ("0x2CA", 1)
DE_PAS3 = ("0x2CB", 1)
DE_RAC = ("0x2CC", 1)
# Skill Information
# consecutive stringified ID ranges (written out long-hand in the original)
CMD_IDS = tuple(str(i) for i in range(173))
PAS_IDS = tuple(str(i) for i in range(108))
AUTO_IDS = tuple(str(i) for i in range(40))
RAC_IDS = tuple(str(i) for i in range(110))
DEMON_IDS = tuple(str(i) for i in range(393)) + ("65535",)
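# A hedged sketch (not from the original editor) of how the ("0xNN", length)
# pairs above could be used to pull a field out of a raw save buffer;
# little-endian byte order is an assumption here:
def read_field(save_bytes, field, base=0):
    offset, length = int(field[0], 16) + base, field[1]
    return int.from_bytes(save_bytes[offset:offset + length], "little")
# e.g. read_field(data, CHAR_LVL) would read the 1-byte level at 0x79,
# assuming `data` holds the save file's contents.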
# DONE
CMD_SKILLS = {
"0": "None",
"1": "Attack",
"2": "Agi",
"3": "Agidyne",
"4": "Maragi",
"5": "Maragidyne",
"6": "Bufu",
"7": "Bufudyne",
"8": "Mabufu",
"9": "Mabufudyne",
"10": "Zio",
"11": "Ziodyne",
"12": "Mazio",
"13": "Maziodyne",
"14": "Zan",
"15": "Zandyne",
"16": "Mazan",
"17": "Mazandyne",
"18": "Megido",
"19": "Megidolaon",
"20": "Fire Dance",
"21": "Ice Dance",
"22": "Elec Dance",
"23": "Force Dance",
"24": "Holy Dance",
"25": "Drain",
"26": "Judgement",
"27": "Petra Eyes",
"28": "Mute Eyes",
"29": "Paral Eyes",
"30": "Death Call",
"31": "Power Hit",
"32": "Berserk",
"33": "Mighty Hit",
"34": "Anger Hit",
"35": "Brutal Hit",
"36": "Hassohappa",
"37": "Deathbound",
"38": "Weak Kill",
"39": "Desperation",
"40": "Makajamon",
"41": "Gigajama",
"42": "Diajama",
"43": "Makarakarn",
"44": "Tetrakarn",
"45": "Might Call",
"46": "Shield All",
"47": "Taunt",
"48": "Dia",
"49": "Diarahan",
"50": "Media",
"51": "Mediarahan",
"52": "Amrita",
"53": "Prayer",
"54": "Recarm",
"55": "Samerecarm",
"56": "Gunfire",
"57": "Guard",
"58": "Devil's Fuge",
"59": "Vampiric Mist",
"60": "Lost Flame",
"61": "Spawn",
"62": "Fire of Sodom",
"63": "Purging Light",
"64": "Babylon",
"65": "Megidoladyne",
"66": "Piercing Hit",
"67": "Multi-Hit",
"68": "Holy Strike",
"69": "Power Charge",
"70": "Sexy Gaze",
"71": "Marin Karin",
"72": "Extra Cancel",
"73": "Assassinate",
"74": "Fatal Strike",
"75": "Diarama",
"76": "Nigayomogi",
"77": "Recarmloss",
"78": "Mow Down",
"79": "Snipe",
"80": "Life Drain",
"81": "Multi-strike",
"82": "Inferno",
"83": "Escape",
"84": "Remain",
"85": "Double Strike",
"86": "Binary Fire",
"87": "Heat Charge",
"88": "N/A",
"89": "Marked Wing",
"90": "Eject Shot",
"91": "Circumpolarity",
"92": "N/A",
"93": "N/A",
"94": "Hacking",
"95": "Dark Tunder",
"96": "Diastrophism",
"97": "Regenerate",
"98": "Ultimate Hit",
"99": "Twin Ultimate",
"100": "Swallow",
"101": "N/A",
"102": "Binary Fire",
"103": "Circumpolarity",
"104": "Alkaid",
"105": "Areadbhar",
"106": "Dark Thunder",
"107": "Regenerate",
"108": "Supernova",
"109": "Power Up",
"110": "Ominous Star",
"111": "Heaven Wrath",
"112": "Cepheid",
"113": "Unheard Prayer",
"114": "Steal Macca",
"115": "Barrage Strike",
"116": "Heaven Wrath",
"117": "Necromancy",
"118": "Gomorrah Fire",
"119": "Vitality Drain",
"120": "Die for Me!",
"121": "Ruinous Wind",
"122": "Star Pressure",
"123": "Ruinous Wind",
"124": "Diastrophism",
"125": "Final Hit",
"126": "Dream Eater",
"127": "Demon Dance",
"128": "Roche Lobe",
"129": "Darkness Blade",
"130": "Defense Knife",
"131": "Carney",
"132": "Then, die!",
"133": "Don't Hurt Me",
"134": "Wanna Beating?",
"135": "Shadow Scythe",
"136": "No Killing...",
"137": "Shadow Shield",
"138": "Nemean Roar",
"139": "Wider-Radius",
"140": "Spica Spear",
"141": "Memory-Sharing",
"142": "Frozen Pillar",
"143": "Vicarious Spell",
"144": "Vicarious Doll",
"145": "Quaser",
"146": "Life Plower",
"147": "Asterion",
"148": "Partial Blast",
"149": "Vrano=Metria",
"150": "Megidoladyne",
"151": "Darkness Blade(Phys)",
"152": "Darkness Blade(Fire)",
"153": "Darkness Blade(Ice)",
"154": "Darkness Blade(Elec)",
"155": "Darkness Blade(Force)",
"156": "Then, die!(Phys)",
"157": "Then, die!(Phys)",
"158": "Then, die!(Phys)",
"159": "Then, die!(Almighty)",
"160": "Lion's Armor",
"161": "Ley Line True",
"162": "Life Plower True",
"163": "Beheadal",
"164": "Primal Fire",
"165": "Gravity Anomaly",
"166": "Orogin Selection",
"167": "Earthly Stars",
"168": "Master of Life",
"169": "Heavenly Rule",
"170": "Fringer's Brand",
"171": "Flaming Fanfare",
"172": "Ley Line"
}
# DONE
PAS_SKILLS = {
"0": "None",
"1": "+Mute",
"2": "+Poison",
"3": "+Paralyze",
"4": "+Stone",
"5": "Life Bonus",
"6": "Mana Bonus",
"7": "Life Surge",
"8": "Mana Surge",
"9": "Hero Aid",
"10": "Ares Aid",
"11": "Drain Hit",
"12": "Attack All",
"13": "Counter",
"14": "Retaliate",
"15": "Avenge",
"16": "Phys Boost",
"17": "Phys Amp",
"18": "Fire Boost",
"19": "Fire Amp",
"20": "Ice Boost",
"21": "Ice Amp",
"22": "Elec Boost",
"23": "Elec Amp",
"24": "Force Boost",
"25": "Force Amp",
"26": "Anti-Phys",
"27": "Anti-Fire",
"28": "Anti-Ice",
"29": "Anti-Elec",
"30": "Anti-Force",
"31": "Anti-Curse",
"32": "Anti-Most",
"33": "Anti-All",
"34": "Null Phys",
"35": "Null Fire",
"36": "Null Ice",
"37": "Null Elec",
"38": "Null Force",
"39": "Null Curse",
"40": "Phys Drain",
"41": "Fire Drain",
"42": "Ice Drain",
"43": "Elec Drain",
"44": "Force Drain",
"45": "Phys Repel",
"46": "Fire Repel",
"47": "Ice Repel",
"48": "Elec Repel",
"49": "Force Repel",
"50": "Watchful",
"51": "Endure",
"52": "Life Aid",
"53": "Life Lift",
"54": "Mana Aid",
"55": "Victory Cry",
"56": "Pierce",
"57": "Race-O",
"58": "Race-D",
"59": "Dual Shadow",
"60": "Extra One",
"61": "Leader Soul",
"62": "Knight Soul",
"63": "Paladin Soul",
"64": "Hero Soul",
"65": "Beast Eye",
"66": "Dragon Eye",
"67": "Crit Up",
"68": "Dodge",
"69": "MoneyBags",
"70": "Quick Move",
"71": "Vigilant",
"72": "Grimoire",
"73": "Double Strike",
"74": "Perserve Extra",
"75": "Anti-Element",
"76": "+Forget",
"77": "Extra Bonus",
"78": "Swift Step",
"79": "Life Stream",
"80": "Mana Stream",
"81": "Ultimate Hit",
"82": "Anti-Almighty",
"83": "Phys Up",
"84": "Pacify Human",
"85": "Dragon Power",
"86": "True Dragon",
"87": "Final Dragon",
"88": "Heavenly Gift",
"89": "Chaos Stir",
"90": "Undead",
"91": "Hidden Strength",
"92": "Holy Blessing",
"93": "Exchange",
"94": "Extra Zero",
"95": "Spirit Gain",
"96": "Hit Rate Gain",
"97": "Quick Wit",
"98": "Parkour",
"99": "Hitori Nabe",
"100": "Ikebukuro King",
"101": "Immortal Barman",
"102": "Defenseless",
"103": "Coiste Bodhar",
"104": "Dark Courier",
"105": "Massive Shadow",
"106": "Hound Eyes",
"107": "Fighting Doll",
}
# DONE
RAC_SKILLS = {
"0": "None",
"1": "Affection",
"2": "Awakening",
"3": "Chaos Wave",
"4": "Constrict",
"5": "Evil Wave",
"6": "Blood Wine",
"7": "Flight",
"8": "Sacrifice",
"9": "Switch",
"10": "Animal Leg",
"11": "Devil Speed",
"12": "Phantasm",
"13": "Glamour",
"14": "Tyranny",
"15": "Double Up",
"16": "Aggravate",
"17": "Bind",
"18": "Devotion",
"19": "Long Range",
"20": "Immortal",
"21": "Evil Flame",
"22": "Hot Flower",
"23": "Dark Hand",
"24": "Violent God",
"25": "King's Gate",
"26": "King's Gate",
"27": "Fiend",
"28": "Four Devas",
"29": "Dark Finger",
"30": "Asura Karma",
"31": "Ghost Wounds",
"32": "Hero's Mark",
"33": "Uncanny Form",
"34": "Asura Destiny",
"35": "Goddess Grace",
"36": "Enlightenment",
"37": "Chaos Breath",
"38": "Dragon Bind",
"39": "Evil Flow",
"40": "Angel Stigma",
"41": "Winged Flight",
"42": "Fallen's Mark",
"43": "Warp Step",
"44": "Free Leap",
"45": "Devil Flash",
"46": "True Phantasm",
"47": "Fairy Dust",
"48": "Blood Treaty",
"49": "Matchless",
"50": "Agitate",
"51": "Evil Bind",
"52": "Mother's Love",
"53": "Possesion",
"54": "Hero's Proof",
"55": "Unearthy Form",
"56": "Dubhe Proof",
"57": "Merak Proof",
"58": "Phecda Proof",
"59": "Megrez Proof",
"60": "Alioth Proof",
"61": "Mizar Proof",
"62": "Alkaid Proof",
"63": "Polaris Proof",
"64": "Alcor Proof",
"65": "Alcor Warrant",
"66": "Merak Envoy",
"67": "Phecda Clone",
"68": "Megrez Bud",
"69": "Alioth Shot",
"70": "Alkaid Bud",
"71": "Alkaid Spawn",
"72": "Alkaid Spawn",
"73": "Alkaid Spawn",
"74": "Alkaid Spawn",
"75": "Polaris Proof",
"76": "Polaris Proof",
"77": "Heaven Throne",
"78": "Dragon Shard",
"79": "Lugh Blessing",
"80": "Heaven Shield",
"81": "Bounty Shield",
"82": "Heaven Spear",
"83": "Bounty Spear",
"84": "Temptation",
"85": "Mizar Proof",
"86": "Mizar Proof",
"87": "Star's Gate",
"88": "Shinjuku Intel",
"89": "Fighting Doll",
"90": "Headless Rider",
"91": "Leonid Five",
"92": "Spica Sign",
"93": "Spica Sign",
"94": "Shiki-Ouji",
"95": "Arcturus Sign",
"96": "Miyako",
"97": "Cor Caroli Sign",
"98": "Cor Caroli Half",
"99": "Agent of Order",
"100": "Universal Law",
"101": "Factor of Heat",
"102": "Factor of Power",
"103": "Factor of Space",
"104": "Factor of Time",
"105": "???",
"106": "Program: Joy",
"107": "Program: Ultra",
"108": "Fangs of Order",
"109": "Gate of Order"
}
# DONE
AUTO_SKILLS = {
"0": "None",
"1": "Blitzkrieg",
"2": "Hustle",
"3": "Fortify",
"4": "Barrier",
"5": "Wall",
"6": "Full Might",
"7": "Ban Phys",
"8": "Ban Fire",
"9": "Ban Ice",
"10": "Ban Elec",
"11": "Ban Force",
"12": "Ban Curse",
"13": "Rage Soul",
"14": "Grace",
"15": "Marksman",
"16": "Tailwind",
"17": "Magic Yin",
"18": "Battle Aura",
"19": "Revive",
"20": "Magic Yang",
"21": "Healing",
"22": "Alter Pain",
"23": "Weaken",
"24": "Debilitate",
"25": "Health Save",
"26": "Strengthen",
"27": "Grimoire +",
"28": "Desperation",
"29": "Rejuvenate",
"30": "Null Auto",
"31": "Pierce +",
"32": "Endure +",
"33": "Neurotoxin",
"34": "Temptation",
"35": "Shield All EX",
"36": "Dual Shadow EX",
"37": "Kinetic Vision",
"38": "Magnet Barrier",
"39": "Distortion",
}
# Character IDs
ALL_CHARS = {
"0": "MC",
"400": "Fumi",
"300": "Yamato",
"900": "Keita",
"800": "Makoto",
"700": "Jungo",
"a00": "Airi",
"b00": "Joe",
"600": "Otome",
"500": "Daichi",
"c00": "Hinako",
"200": "Io",
"100": "Ronaldo",
"d00": "Alcor"
}
# Demon Information
ALL_DEMONS = {
"0": "Human MC",
"1": "Human Ronaldo",
"2": "Human Io",
"3": "Human Yamato",
"4": "Human Fumi",
"5": "Human Daichi",
"6": "Human Otome",
"7": "Human Jungo",
"8": "Human Makoto",
"9": "Human Keita",
"10": "Human Airi",
"11": "Human Joe",
"12": "Human Hinako",
"13": "Human Alcor",
"14": "Omega Tonatiuh",
"15": "Omega Chernobog",
"16": "Omega Wu Kong",
"17": "Omega Susano-o",
"18": "Omega Kartikeya",
"19": "Omega Shiva",
"20": "Megami Hathor",
"21": "Megami Sarasvati",
"22": "Megami Kikuri-hime",
"23": "Megami Brigid",
"24": "Megami Scathach",
"25": "Megami Laksmi",
"26": "Megami Norn",
"27": "Megami Isis",
"28": "Megami Amaterasu",
"29": "Deity Mahakala",
"30": "Deity Thor",
"31": "Deity Arahabaki",
"32": "Deity Odin",
"33": "Deity Yama",
"34": "Deity Lugh",
"35": "Deity Baal",
"36": "Deity Asura",
"37": "Vile Orcus",
"38": "Vile Pazuzu",
"39": "Vile Abaddon",
"40": "Vile Tao Tie",
"41": "Vile Arioch",
"42": "Vile Tezcatlipoca",
"43": "Vile Nyarlathotep",
"44": "Snake Makara",
"45": "Snake Nozuchi",
"46": "Snake Pendragon",
"47": "Snake Gui Xian",
"48": "Snake Quetzacoatl",
"49": "Snake Seiyuu",
"50": "Snake Orochi",
"51": "Snake Ananta",
"52": "Snake Hoyau Kamui",
"53": "Dragon Toubyou",
"54": "Dragon Bai Suzhen",
"55": "Dragon Basilisk",
"56": "Dragon Ym",
"57": "Dragon Python",
"58": "Dragon Culebre",
"59": "Dragon Vritra",
"60": "Dragon Vasuki",
"61": "Divine Holy Ghost",
"62": "Divine Angel",
"63": "Divine Power",
"64": "Divine Lailah",
"65": "Divine Aniel",
"66": "Divine Kazfiel",
"67": "Divine Remiel",
"68": "Divine Metatron",
"69": "Avian Itsumade",
"70": "Avian Moh Shuvuu",
"71": "Avian Hamsa",
"72": "Avian Suparna",
"73": "Avian Vidofnir",
"74": "Avian Badb Catha",
"75": "Avian Anzu",
"76": "Avian Feng Huang",
"77": "Avian Garuda",
"78": "Fallen Gagyson",
"79": "Fallen Abraxas",
"80": "Fallen Flauros",
"81": "Fallen Nisroc",
"82": "Fallen Orobas",
"83": "Fallen Decarabia",
"84": "Fallen Nebiros",
"85": "Fallen Agares",
"86": "Fallen Murmur",
"87": "Avatar Heqet",
"88": "Avatar Kamapua'a",
"89": "Avatar Shiisaa",
"90": "Avatar Bai Ze",
"91": "Avatar Baihu",
"92": "Avatar Airavata",
"93": "Avatar Ukano Mitama",
"94": "Avatar Barong",
"95": "Avatar Anubis",
"96": "Beast Kabuso",
"97": "Beast Hairy Jack",
"98": "Beast Nekomata",
"99": "Beast Cait Sith",
"100": "Beast Nue",
"101": "Beast Orthrus",
"102": "Beast Myrmecolion",
"103": "Beast Cerberus",
"104": "Beast Fenrir",
"105": "Wilder Hare of Inaba",
"106": "Wilder Waira",
"107": "Wilder Garm",
"108": "Wilder Afanc",
"109": "Wilder Mothman",
"110": "Wilder Taown",
"111": "Wilder Behemoth",
"112": "Wilder Ammut",
"113": "Genma Tam Lin",
"114": "Genma Jambavan",
"115": "Genma Tlaloc",
"116": "Genma Ictinike",
"117": "Genma Hanuman",
"118": "Genma Cu Chulainn",
"119": "Genma Kresnik",
"120": "Genma Ganesha",
"121": "Genma Heimdal",
"122": "Fairy Pixie",
"123": "Fairy Knocker",
"124": "Fairy Kijimunaa",
"125": "Fairy Jack Frost",
"126": "Fairy Pyro Jack",
"127": "Fairy Silky",
"128": "Fairy Lorelei",
"129": "Fairy Vivian",
"130": "Fairy Titania",
"131": "Fairy Oberon",
"132": "Tyrant King Frost",
"133": "Tyrant Moloch",
"134": "Tyrant Hecate",
"135": "Tyrant Tzizimitl",
"136": "Tyrant Astaroth",
"137": "Tyrant Mot",
"138": "Tyrant Loki",
"139": "Tyrant Lucifer",
"140": "Kishin Ubelluris",
"141": "Kishin Nalagiri",
"142": "Hitokotonusi",
"143": "Kishin Take-Mikazuchi",
"144": "Kishin Zouchouten",
"145": "Kishin Jikokuten",
"146": "Kishin Koumokuten",
"147": "Kishin Bishamonten",
"148": "Kishin Zaou Gongen",
"149": "Touki Kobold",
"150": "Touki Bilwis",
"151": "Touki Gozuki",
"152": "Touki Mezuki",
"153": "Touki Ikusa",
"154": "Touki Lham Dearg",
"155": "Touki Berserker",
"156": "Touki Yaksa",
"157": "Touki Nata Taishi",
"158": "Touki Oumitsunu",
"159": "Jaki Obariyon",
"160": "Jaki Ogre",
"161": "Jaki Mokoi",
"162": "Jaki Ogun",
"163": "Jaki Wendigo",
"164": "Jaki Legion",
"165": "Jaki Rakshasa",
"166": "Jaki Girimehkala",
"167": "Jaki Grendel",
"168": "Jaki Black Frost",
"169": "Femme Kikimora",
"170": "Femme Lilim",
"171": "Femme Yuki Jyorou",
"172": "Femme Leanan Sidhe",
"173": "Femme Peri",
"174": "Femme Hariti",
"175": "Femme Rangda",
"176": "Femme Kali",
"177": "Femme Lilith",
"178": "Ghost Poltergeist",
"179": "Ghost Agathion",
"180": "Ghost Tenon Cut",
"181": "Ghost Kumbhanda",
"182": "Ghost Loa",
"183": "Ghost Pisaca",
"184": "Ghost Kudlak",
"185": "Ghost Purple Mirror",
"186": "Fiend Biliken",
"187": "Fiend Ghost Q ",
"188": "Fiend Sage of Time",
"189": "Fiend Alice",
"190": "Fiend Trumpeter",
"191": "Hero Neko Shogun",
"192": "Hero Hagen",
"193": "Hero Jeanne d'Arc",
"194": "Hero Yoshitsune",
"195": "Hero Guan Yu",
"196": "Element Flaemis",
"197": "Element Aquans",
"198": "Element Aeros",
"199": "Element Erthys",
"200": "Mitama Ara Mitama",
"201": "Mitama Nigi Mitama",
"202": "Mitama Kusi Mitama",
"203": "Mitama Saki Mitama",
"204": "Fallen Satan",
"205": "Fallen Beelzebub",
"206": "Fallen Belial",
"207": "Divine Sariel",
"208": "Divine Anael",
"209": "Human Atsuro",
"210": "Human Yuzu",
"211": "Dragon Asp",
"212": "Avatar Apis",
"213": "Avatar Pabilsag",
"214": "Wilder Sleipnir",
"215": "Wilder Xiezhai",
"216": "Genma Kangiten",
"217": "Vile Baphomet",
"218": "Famme Anat",
"219": "Megami Pallas Athena",
"220": "Deity Mithra",
"221": "Deity Osiris",
"222": "Snake Gucumatz",
"223": "Avian Da Peng",
"224": "Kishin Ometeotl",
"225": "Genma Jarilo",
"226": "Human Miyako",
"227": "Fallen Botis",
"228": "Human JP's Member",
"229": "Human Salaryman(1)",
"230": "Human Salaryman(2)",
"231": "Human Salaryman(3)",
"232": "Fallen Samael",
"233": "Human Office Lady(1)",
"234": "Human Office Lady(2)",
"235": "Human Office Lady(3)",
"236": "Human Punk(1)",
"237": "Human Punk(2)",
"238": "Human Punk(3)",
"239": "Human Yakuza(1)",
"240": "Human Yakuza(2)",
"241": "Device Module",
"242": "Human Policeman",
"243": "Human JP's Member(F)",
"244": "Human JP's Member(M)",
"245": "Human Young Man(?)",
"246": "Human Old Woman(?)",
"247": "Human Worker",
"248": "Human Student",
"249": "Human Young man",
"250": "Human Buffer(1)",
"251": "Human Buffer(2)",
"252": "Human JP'S Agent(?)",
"253": "Human JP'S Agent(?)",
"254": "Human JP'S Agent(?)",
"255": "Human JP'S Agent(?)",
"256": "Human ?",
"257": "Fallen Bifrons",
"258": "Fallen Barbatos",
"259": "Femme Dzelarhons",
"260": "Genma Kama",
"261": "Megami Parvati",
"262": "Femme Ixtab",
"263": "Tyrant Balor",
"264": "Tyrant Negral",
"265": "Deity Inti",
"266": "Deity Alilat",
"267": "Omega Beji-Weng",
"268": "Deity Lord Nan Dou",
"269": "Hero Masakado",
"270": "Megami Ishtar",
"271": "Megami Black Maria",
"272": "Snake Yurlungr",
"273": "Dragon Fafnir",
"274": "Divine Sraosha",
"275": "Avian Rukh",
"276": "Avian Kau",
"277": "Beast Cbracan",
"278": "Beast Catoblepas",
"279": "Genma Roitschaggata",
"280": "Fairy Spriggan",
"281": "Fairy Troll",
"282": "Tyrant Lucifuge",
"283": "Kishin Okuninushi",
"284": "Touki Dokkaebi",
"285": "Touki Ongyo-Ki",
"286": "Jaki Macabre",
"287": "Femme Jahi",
"288": "Divine Sandalphon",
"289": "Snake Kohruy",
"290": "Exotic Izaya",
"291": "Exotic Celty",
"292": "Exotic Shizuo",
"293": "Touki Momunofu",
"294": "Tyrant Lucifer Frost",
"295": "(Crashes GUI)",
"296": "Hero Frost Five",
"297": "Hero Milk-Kin Frost",
"298": "Hero Strawberry Fost",
"299": "Fairy Lemon Frost",
"300": "Fairy Melon Frost",
"301": "Fairy B. Hawaii Frost",
"302": "Touki Titan",
"303": "Omega Dyonisus",
"304": "Omega Aramisaki",
"305": "Jaki Shiki-Ouji",
"306": "Feeme Xi Wangmu",
"307": "Divine Dominion",
"308": "Fiend Mother Harlot",
"309": "Fiend Dantalian",
"310": "Vile Seth",
"311": "Jaki Shinigami",
"312": "Bel Belberith",
"313": "Bel Jezebel",
"314": "Bel Beldr",
"315": "Maggot Maggot",
"316": "Star Dubhe",
"317": "Star Merak",
"318": "Star Phecda",
"319": "Star Megrez",
"320": "Star Alioth Core",
"321": "Star Mizar",
"322": "Star Benetnasch",
"323": "Star Alcor",
"324": "Star Polaris",
"325": "Star Merak Missile",
"326": "Star Phecda(WK MAG)",
"327": "Star Phecda(WK PHYS)",
"328": "Star Megrez(Empty)",
"329": "Star Alioth (Poison)",
"330": "Energy LayLine Dragon",
"331": "Star Dubhe",
"332": "Star Dunhe(weak)",
"333": "Star Mizar",
"334": "Star Mizar",
"335": "Star Tentacle",
"336": "Star Tentacle",
"337": "Star Tentacle",
"338": "Star Tentacle",
"339": "Star Tentacle",
"340": "Star Tentacle",
"341": "Star Benetnasch(dubhe)",
"342": "Star Benetnasch(merak)",
"343": "Star Benetnasch(phecda)",
"344": "Star Benetnasch(Alioth)",
"345": "Star Benetnasch",
"346": "Star Alcor",
"347": "Star Polaris A",
"348": "Star Polaris Ab",
"349": "Star Polaris B",
"350": "Human Tall Woman",
"351": "Device Tico",
"352": "Device Tico",
"353": "Human Daichi",
"354": "Human Io",
"355": "Human Io",
"356": "Human MC",
"357": "Human SDF Captain",
"358": "Human SDF Member",
"359": "Human Fireman",
"360": "Deity Io",
"361": "Star Guardian",
"362": "Star Guardian",
"363": "Star Guardian",
"364": "Star Guardian",
"365": "Star Guardian",
"366": "Star Guardian",
"367": "Star Guardian",
"368": "Human Salaryman(1)",
"369": "Human Punk(1)",
"370": "Human Student(1)",
"371": "Human Student(2)",
"372": "Human Young Man(1)",
"373": "Human Young Man(2)",
"374": "Human Salaryman(2)",
"375": "Human Salaryman(3)",
"376": "Human Punk(2)",
"377": "Human Punk(3)",
"378": "Human Kitten",
"379": "Human @",
"380": "Human Ronaldo*",
"381": "Human Io*",
"382": "Human Yamato*",
"383": "Human Fumi*",
"384": "Human Daichi*",
"385": "Human Otome*",
"386": "Human Jungo*",
"387": "Human Makoto*",
"388": "Human Keita*",
"389": "Human Airi*",
"390": "Human Joe*",
"391": "Human Hinako*",
"392": "Human Alcor*",
"65535": "Empty"
}
class mytestapp(tk.Tk):
def __init__(self, parent):
tk.Tk.__init__(self, parent)
self.parent = parent
self.minsize(400, 100)
self.title("Devil Survivor 2 Record Breaker Save Editor")
self.bind(sequence="<Escape>", func=lambda x: self.quit())
        self.resizable(width=False, height=False)
self.grid()
self.initVars()
self.processLists()
self.createWidgets()
# x = (self.winfo_screenwidth() - self.winfo_reqwidth()) / 2
# y = (self.winfo_screenheight() - self.winfo_reqheight()) / 2
# self.geometry("+%d+%d" % (x, y))
# self.initialSetup()
def initVars(self):
self.saveFilePath = None
self.saveFileDir = None
self.saveFileName = None
self.save_bytes = None
self.charValues = {}
self.curChar = {}
self.curDemon = {}
self.charNameList = []
self.demonNameList = []
self.charList = []
self.demonList = []
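        # Tk input-validation hook: register() wraps validate_int as a Tcl command,
        # and the %-codes below are substituted by Tk and passed positionally as the
        # callback's arguments. Note it is registered here but never attached to an
        # Entry via validatecommand, so it currently has no effect.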
self.vcmd = (self.register(self.validate_int), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
def processLists(self):
skillIDNameList = []
# max_width = 0
for val in CMD_IDS:
if val in CMD_SKILLS:
# tmp_str = val + " - " + ALL_SKILLS[val][0]
# if len(tmp_str) > max_width:
# max_width = len(tmp_str)
skillIDNameList.append(val + " - " + CMD_SKILLS[val])
else:
skillIDNameList.append(val + " - None")
self.skillIDNameList = skillIDNameList
# self.skillIDNameWidth = max_width
def createWidgets(self):
# Menu bar
menubar = tk.Menu(self)
submenu1 = tk.Menu(menubar, tearoff=0)
submenu1.add_command(label="Open Save File", underline=0, command=self.openFileChooser)
submenu1.add_command(label="Save Changes", underline=0, command=self.saveChanges)
submenu1.add_separator()
submenu1.add_command(label="Exit", underline=0, command=self.exitApp)
menubar.add_cascade(label="File", underline=0, menu=submenu1)
menubar.add_separator()
# submenu2 = tk.Menu(menubar, tearoff=0)
# submenu2.add_command(label="Compare difference(s)", command=self.>Diff)
# menubar.add_cascade(label="Compare", menu=submenu2)
menubar.add_command(label="About", command=self.aboutCreator)
menubar.add_command(label="Help", command=self.help)
self.config(menu=menubar)
# Main content frame
mainFrame = tk.Frame(self)
mainFrame.grid(column=0, row=0, padx=10, pady=10, sticky="EW")
self.mainFrame = mainFrame
# Frame for folder paths
folderpathFrame = tk.Frame(mainFrame)
folderpathFrame.grid(column=0, row=1, sticky="EW")
folderpathFrame.grid_columnconfigure(1, weight=1)
tk.Label(folderpathFrame, text="Save file: ").grid(column=0, row=1, sticky="W")
self.saveFilePathTxt = tk.StringVar()
tk.Entry(
folderpathFrame, textvariable=self.saveFilePathTxt, state='readonly', width=80
).grid(column=1, row=1, sticky="EW", padx="5 0")
# Frame for tab buttons
tabButtonsFrame = tk.Frame(mainFrame)
tabButtonsFrame.grid(column=0, row=2, pady="20 0", sticky="EW")
self.tab1Button = tk.Button(
tabButtonsFrame, text="Characters", relief="sunken", state="disabled",
command=lambda: self.changeTab(self.tab1Frame, self.tab1Button)
)
self.tab1Button.grid(column=0, row=0, sticky="W")
self.tab2Button = tk.Button(
tabButtonsFrame, text="Demons",
command=lambda: self.changeTab(self.tab2Frame, self.tab2Button)
)
self.tab2Button.grid(column=1, row=0, sticky="W")
# Frame for tab frames
tabFramesFrame = tk.Frame(mainFrame)
tabFramesFrame.grid(column=0, row=3, sticky="EW")
tabFramesFrame.columnconfigure(0, weight=1)
# Frame for 1st tab
tab1Frame = tk.Frame(tabFramesFrame, bd="2", relief="sunken", padx="10", pady="10")
self.tab1Frame = tab1Frame
tab1Frame.grid(column=0, row=0, sticky="EW")
# Top inner frame for 1st tab
tab1TopFrame = tk.Frame(tab1Frame)
tab1TopFrame.grid(column=0, row=0, columnspan=2, sticky="NW")
# Top left inner frame for 1st tab
tab1TopLFrame = tk.Frame(tab1TopFrame)
tab1TopLFrame.grid(column=0, row=0, sticky="NW")
tab1ComboLabel = tk.Label(tab1TopLFrame, text="Select Character")
tab1ComboLabel.grid(column=1, row=0)
# ComboBox
tab1ComboBox = ttk.Combobox(tab1TopLFrame, values=self.charNameList)
print(self.charNameList)
tab1ComboBox.grid(column=2, row=0, padx=10, pady=10)
def changeCharacter(*args):
name = tab1ComboBox.get()
def get_key(val):
for char_info in self.charList:
for key, value in char_info.items():
if val == value:
return char_info
self.curChar = get_key(name)
print(self.curChar)
            tab1txtbLVL.delete(0, tk.END)
            tab1txtbLVL.insert(0, self.curChar["level"])
            tab1txtbEXP.delete(0, tk.END)
            tab1txtbEXP.insert(0, self.curChar["exp"])
            tab1txtbHP.delete(0, tk.END)
            tab1txtbHP.insert(0, self.curChar["hp"])
            tab1txtbMP.delete(0, tk.END)
            tab1txtbMP.insert(0, self.curChar["mp"])
            tab1txtbST.delete(0, tk.END)
            tab1txtbST.insert(0, self.curChar["st"])
            tab1txtbMA.delete(0, tk.END)
            tab1txtbMA.insert(0, self.curChar["ma"])
            tab1txtbVI.delete(0, tk.END)
            tab1txtbVI.insert(0, self.curChar["vi"])
            tab1txtbAG.delete(0, tk.END)
            tab1txtbAG.insert(0, self.curChar["ag"])
            tab1txtbCMD1.delete(0, tk.END)
            tab1txtbCMD1.insert(0, self.curChar["cmd1"])
            tab1txtbCMD2.delete(0, tk.END)
            tab1txtbCMD2.insert(0, self.curChar["cmd2"])
            tab1txtbCMD3.delete(0, tk.END)
            tab1txtbCMD3.insert(0, self.curChar["cmd3"])
            tab1txtbPAS1.delete(0, tk.END)
            tab1txtbPAS1.insert(0, self.curChar["pas1"])
            tab1txtbPAS2.delete(0, tk.END)
            tab1txtbPAS2.insert(0, self.curChar["pas2"])
            tab1txtbPAS3.delete(0, tk.END)
            tab1txtbPAS3.insert(0, self.curChar["pas3"])
            tab1txtbRAC.delete(0, tk.END)
            tab1txtbRAC.insert(0, self.curChar["rac"])
            tab1txtbMOV.delete(0, tk.END)
            tab1txtbMOV.insert(0, self.curChar["mov"])
tab1ComboBox.bind("<<ComboboxSelected>>", changeCharacter)
# Labels
tab1LVL = tk.Label(tab1TopLFrame, text="Level:", padx=50)
tab1LVL.grid(column=0, row=1)
tab1EXP = tk.Label(tab1TopLFrame, text="Experience:")
tab1EXP.grid(column=0, row=2)
tab1HP = tk.Label(tab1TopLFrame, text="Health:")
tab1HP.grid(column=0, row=3)
tab1MP = tk.Label(tab1TopLFrame, text="Mana:")
tab1MP.grid(column=0, row=4)
tab1ST = tk.Label(tab1TopLFrame, text="Strength:")
tab1ST.grid(column=0, row=5)
tab1MA = tk.Label(tab1TopLFrame, text="Magic:")
tab1MA.grid(column=0, row=6)
tab1VI = tk.Label(tab1TopLFrame, text="Vitality:")
tab1VI.grid(column=0, row=7)
tab1AG = tk.Label(tab1TopLFrame, text="Agility:")
tab1AG.grid(column=0, row=8)
tab1CMD1 = tk.Label(tab1TopLFrame, text="Command 1:")
tab1CMD1.grid(column=2, row=1)
tab1CMD2 = tk.Label(tab1TopLFrame, text="Command 2:")
tab1CMD2.grid(column=2, row=2)
tab1CMD3 = tk.Label(tab1TopLFrame, text="Command 3:")
tab1CMD3.grid(column=2, row=3)
tab1PAS1 = tk.Label(tab1TopLFrame, text="Passive 1:")
tab1PAS1.grid(column=2, row=4)
tab1PAS2 = tk.Label(tab1TopLFrame, text="Passive 2:")
tab1PAS2.grid(column=2, row=5)
tab1PAS3 = tk.Label(tab1TopLFrame, text="Passive 3:")
tab1PAS3.grid(column=2, row=6)
tab1RAC = tk.Label(tab1TopLFrame, text="Automatic:")
tab1RAC.grid(column=2, row=7)
tab1MOV = tk.Label(tab1TopLFrame, text="Move:")
tab1MOV.grid(column=2, row=8)
# Text Boxes
tab1txtbLVL = tk.Entry(tab1TopLFrame)
tab1txtbLVL.grid(column=1, row=1)
tab1txtbEXP = tk.Entry(tab1TopLFrame)
tab1txtbEXP.grid(column=1, row=2)
tab1txtbHP = tk.Entry(tab1TopLFrame)
tab1txtbHP.grid(column=1, row=3)
tab1txtbMP = tk.Entry(tab1TopLFrame)
tab1txtbMP.grid(column=1, row=4)
tab1txtbST = tk.Entry(tab1TopLFrame)
tab1txtbST.grid(column=1, row=5)
tab1txtbMA = tk.Entry(tab1TopLFrame)
tab1txtbMA.grid(column=1, row=6)
tab1txtbVI = tk.Entry(tab1TopLFrame)
tab1txtbVI.grid(column=1, row=7)
tab1txtbAG = tk.Entry(tab1TopLFrame)
tab1txtbAG.grid(column=1, row=8)
tab1txtbCMD1 = tk.Entry(tab1TopLFrame)
tab1txtbCMD1.grid(column=3, row=1)
tab1txtbCMD2 = tk.Entry(tab1TopLFrame)
tab1txtbCMD2.grid(column=3, row=2)
tab1txtbCMD3 = tk.Entry(tab1TopLFrame)
tab1txtbCMD3.grid(column=3, row=3)
tab1txtbPAS1 = tk.Entry(tab1TopLFrame)
tab1txtbPAS1.grid(column=3, row=4)
tab1txtbPAS2 = tk.Entry(tab1TopLFrame)
tab1txtbPAS2.grid(column=3, row=5)
tab1txtbPAS3 = tk.Entry(tab1TopLFrame)
tab1txtbPAS3.grid(column=3, row=6)
tab1txtbRAC = tk.Entry(tab1TopLFrame)
tab1txtbRAC.grid(column=3, row=7)
tab1txtbMOV = tk.Entry(tab1TopLFrame)
tab1txtbMOV.grid(column=3, row=8)
tab1emptylabel = tk.Label(tab1TopLFrame, text=" ")
tab1emptylabel.grid(column=0, row=9)
# Skill Frame
tab1SkillFrame = tk.Frame(tab1TopLFrame, bd="2", relief="sunken")
tab1SkillFrame.grid(column=0, row=11, columnspan=4)
# Skill Labels
tab1CMD1label = tk.Label(tab1SkillFrame, text="Command")
tab1CMD1label.grid(column=0, row=0)
tab1CMD2label = tk.Label(tab1SkillFrame, text="Passive")
tab1CMD2label.grid(column=1, row=0)
tab1CMD3label = tk.Label(tab1SkillFrame, text="Automatic")
tab1CMD3label.grid(column=2, row=0)
# Listboxes
tab1ListBoxCMD = tk.Listbox(tab1SkillFrame)
for i in range(0, len(CMD_IDS)):
tab1ListBoxCMD.insert(i, " " + str(CMD_IDS[i]) + " - " + str(CMD_SKILLS[CMD_IDS[i]]))
tab1ListBoxCMD.grid(column=0, row=1)
tab1ListBoxPAS = tk.Listbox(tab1SkillFrame)
for i in range(0, len(PAS_IDS)):
tab1ListBoxPAS.insert(i, " " + str(PAS_IDS[i]) + " - " + str(PAS_SKILLS[PAS_IDS[i]]))
tab1ListBoxPAS.grid(column=1, row=1)
tab1ListBoxAUT = tk.Listbox(tab1SkillFrame)
for i in range(0, len(AUTO_IDS)):
tab1ListBoxAUT.insert(i, " " + str(AUTO_IDS[i]) + " - " + str(AUTO_SKILLS[AUTO_IDS[i]]))
tab1ListBoxAUT.grid(column=2, row=1)
# Save Characters Changes
def applyCharChange(*args):
print("\n BEFORE APPLY \n " + str(self.charList))
name = tab1ComboBox.get()
if self.curChar != {}:
def get_key(val):
i = -1
for char_info in self.charList:
i = i + 1
for key, value in char_info.items():
if val == value:
return i, char_info
                index, self.curChar = get_key(name)
# put textbox values in global variable
self.curChar["level"] = tab1txtbLVL.get()
self.curChar["exp"] = tab1txtbEXP.get()
self.curChar["hp"] = tab1txtbHP.get()
self.curChar["mp"] = tab1txtbMP.get()
self.curChar["st"] = tab1txtbST.get()
self.curChar["ma"] = tab1txtbMA.get()
self.curChar["vi"] = tab1txtbVI.get()
self.curChar["ag"] = tab1txtbAG.get()
self.curChar["cmd1"] = tab1txtbCMD1.get()
self.curChar["cmd2"] = tab1txtbCMD2.get()
self.curChar["cmd3"] = tab1txtbCMD3.get()
self.curChar["pas1"] = tab1txtbPAS1.get()
self.curChar["pas2"] = tab1txtbPAS2.get()
self.curChar["pas3"] = tab1txtbPAS3.get()
self.curChar["rac"] = tab1txtbRAC.get()
self.curChar["mov"] = tab1txtbMOV.get()
print("\n AFTER APPLY \n " + str(self.charList))
# put char_info back on list
self.charList[index] = self.curChar
# Bottom Frame
tab1BtmFrame = tk.Frame(tab1Frame, bd="2", relief="sunken")
tab1BtmFrame.grid(column=0, row=2, columnspan=2, sticky="EW", pady="20 0")
tab1BtmFrame.columnconfigure(0, weight=1)
tk.Button(
tab1BtmFrame, text="Apply", command=applyCharChange
).grid(column=0, row=0, sticky="EW")
# Frame for 2nd tab
tab2Frame = tk.Frame(tabFramesFrame, bd="2", relief="sunken", padx="10", pady="10")
self.tab2Frame = tab2Frame
tab2Frame.grid(column=0, row=0, sticky="EW")
# Top inner frame for 2nd tab
tab2TopFrame = tk.Frame(tab2Frame)
tab2TopFrame.grid(column=0, row=0, columnspan=2, sticky="NW")
# Top left inner frame for 2nd tab
tab2TopLFrame = tk.Frame(tab2TopFrame)
tab2TopLFrame.grid(column=0, row=0, sticky="NW")
tab2ComboLabel = tk.Label(tab2TopLFrame, text="Select Demon")
tab2ComboLabel.grid(column=1, row=0)
# 2nd ComboBox
tab2ComboBox = ttk.Combobox(tab2TopLFrame, values=self.demonNameList)
print(self.demonNameList)
tab2ComboBox.grid(column=2, row=0, padx=10, pady=10)
def changeDemon(*args):
index = tab2ComboBox.current()
self.curDemon = self.demonList[index]
print(self.curDemon)
            tab2txtbLVL.delete(0, tk.END)
            tab2txtbLVL.insert(0, self.curDemon["level"])
            tab2txtbEXP.delete(0, tk.END)
            tab2txtbEXP.insert(0, self.curDemon["exp"])
            tab2txtbHP.delete(0, tk.END)
            tab2txtbHP.insert(0, self.curDemon["hp"])
            tab2txtbMP.delete(0, tk.END)
            tab2txtbMP.insert(0, self.curDemon["mp"])
            tab2txtbST.delete(0, tk.END)
            tab2txtbST.insert(0, self.curDemon["st"])
            tab2txtbMA.delete(0, tk.END)
            tab2txtbMA.insert(0, self.curDemon["ma"])
            tab2txtbVI.delete(0, tk.END)
            tab2txtbVI.insert(0, self.curDemon["vi"])
            tab2txtbAG.delete(0, tk.END)
            tab2txtbAG.insert(0, self.curDemon["ag"])
            tab2txtbCMD1.delete(0, tk.END)
            tab2txtbCMD1.insert(0, self.curDemon["cmd1"])
            tab2txtbCMD2.delete(0, tk.END)
            tab2txtbCMD2.insert(0, self.curDemon["cmd2"])
            tab2txtbCMD3.delete(0, tk.END)
            tab2txtbCMD3.insert(0, self.curDemon["cmd3"])
            tab2txtbPAS1.delete(0, tk.END)
            tab2txtbPAS1.insert(0, self.curDemon["pas1"])
            tab2txtbPAS2.delete(0, tk.END)
            tab2txtbPAS2.insert(0, self.curDemon["pas2"])
            tab2txtbPAS3.delete(0, tk.END)
            tab2txtbPAS3.insert(0, self.curDemon["pas3"])
            tab2txtbRAC.delete(0, tk.END)
            tab2txtbRAC.insert(0, self.curDemon["rac"])
            tab2txtbID.delete(0, tk.END)
            tab2txtbID.insert(0, self.curDemon["id"])
tab2ComboBox.bind("<<ComboboxSelected>>", changeDemon)
# Labels
tab2LVL = tk.Label(tab2TopLFrame, text="Level:", padx=50)
tab2LVL.grid(column=0, row=1)
tab2EXP = tk.Label(tab2TopLFrame, text="Experience:")
tab2EXP.grid(column=0, row=2)
tab2HP = tk.Label(tab2TopLFrame, text="Health:")
tab2HP.grid(column=0, row=3)
tab2MP = tk.Label(tab2TopLFrame, text="Mana:")
tab2MP.grid(column=0, row=4)
tab2ST = tk.Label(tab2TopLFrame, text="Strength:")
tab2ST.grid(column=0, row=5)
tab2MA = tk.Label(tab2TopLFrame, text="Magic:")
tab2MA.grid(column=0, row=6)
tab2VI = tk.Label(tab2TopLFrame, text="Vitality:")
tab2VI.grid(column=0, row=7)
tab2AG = tk.Label(tab2TopLFrame, text="Agility:")
tab2AG.grid(column=0, row=8)
tab2CMD1 = tk.Label(tab2TopLFrame, text="Command 1:")
tab2CMD1.grid(column=2, row=1)
tab2CMD2 = tk.Label(tab2TopLFrame, text="Command 2:")
tab2CMD2.grid(column=2, row=2)
tab2CMD3 = tk.Label(tab2TopLFrame, text="Command 3:")
tab2CMD3.grid(column=2, row=3)
tab2PAS1 = tk.Label(tab2TopLFrame, text="Passive 1:")
tab2PAS1.grid(column=2, row=4)
tab2PAS2 = tk.Label(tab2TopLFrame, text="Passive 2:")
tab2PAS2.grid(column=2, row=5)
tab2PAS3 = tk.Label(tab2TopLFrame, text="Passive 3:")
tab2PAS3.grid(column=2, row=6)
tab2RAC = tk.Label(tab2TopLFrame, text="Racial:")
tab2RAC.grid(column=2, row=7)
tab2ID = tk.Label(tab2TopLFrame, text="Id:")
tab2ID.grid(column=2, row=8)
# Text Boxes
tab2txtbLVL = tk.Entry(tab2TopLFrame)
tab2txtbLVL.grid(column=1, row=1)
tab2txtbEXP = tk.Entry(tab2TopLFrame)
tab2txtbEXP.grid(column=1, row=2)
tab2txtbHP = tk.Entry(tab2TopLFrame)
tab2txtbHP.grid(column=1, row=3)
tab2txtbMP = tk.Entry(tab2TopLFrame)
tab2txtbMP.grid(column=1, row=4)
tab2txtbST = tk.Entry(tab2TopLFrame)
tab2txtbST.grid(column=1, row=5)
tab2txtbMA = tk.Entry(tab2TopLFrame)
tab2txtbMA.grid(column=1, row=6)
tab2txtbVI = tk.Entry(tab2TopLFrame)
tab2txtbVI.grid(column=1, row=7)
tab2txtbAG = tk.Entry(tab2TopLFrame)
tab2txtbAG.grid(column=1, row=8)
tab2txtbCMD1 = tk.Entry(tab2TopLFrame)
tab2txtbCMD1.grid(column=3, row=1)
tab2txtbCMD2 = tk.Entry(tab2TopLFrame)
tab2txtbCMD2.grid(column=3, row=2)
tab2txtbCMD3 = tk.Entry(tab2TopLFrame)
tab2txtbCMD3.grid(column=3, row=3)
tab2txtbPAS1 = tk.Entry(tab2TopLFrame)
tab2txtbPAS1.grid(column=3, row=4)
tab2txtbPAS2 = tk.Entry(tab2TopLFrame)
tab2txtbPAS2.grid(column=3, row=5)
tab2txtbPAS3 = tk.Entry(tab2TopLFrame)
tab2txtbPAS3.grid(column=3, row=6)
tab2txtbRAC = tk.Entry(tab2TopLFrame)
tab2txtbRAC.grid(column=3, row=7)
tab2txtbID = tk.Entry(tab2TopLFrame)
tab2txtbID.grid(column=3, row=8)
tab2emptylabel = tk.Label(tab2TopLFrame, text=" ")
tab2emptylabel.grid(column=0, row=9)
# Skill Frame
tab2SkillFrame = tk.Frame(tab2TopLFrame, bd="2", relief="sunken")
tab2SkillFrame.grid(column=0, row=11, columnspan=4)
# Skill Labels
tab2CMD1label = tk.Label(tab2SkillFrame, text="Command")
tab2CMD1label.grid(column=0, row=0)
tab2CMD2label = tk.Label(tab2SkillFrame, text="Passive")
tab2CMD2label.grid(column=1, row=0)
tab2CMD3label = tk.Label(tab2SkillFrame, text="Racial")
tab2CMD3label.grid(column=2, row=0)
tab2IDlabel = tk.Label(tab2SkillFrame, text="Demon ID")
tab2IDlabel.grid(column=3, row=0)
# Listboxes
tab2ListBoxCMD = tk.Listbox(tab2SkillFrame)
for i in range(0, len(CMD_IDS)):
tab2ListBoxCMD.insert(i, " " + str(CMD_IDS[i]) + " - " + str(CMD_SKILLS[CMD_IDS[i]]))
tab2ListBoxCMD.grid(column=0, row=1)
tab2ListBoxPAS = tk.Listbox(tab2SkillFrame)
for i in range(0, len(PAS_IDS)):
tab2ListBoxPAS.insert(i, " " + str(PAS_IDS[i]) + " - " + str(PAS_SKILLS[PAS_IDS[i]]))
tab2ListBoxPAS.grid(column=1, row=1)
tab2ListBoxRAC = tk.Listbox(tab2SkillFrame)
for i in range(0, len(RAC_IDS)):
tab2ListBoxRAC.insert(i, " " + str(RAC_IDS[i]) + " - " + str(RAC_SKILLS[RAC_IDS[i]]))
tab2ListBoxRAC.grid(column=2, row=1)
tab2ListBoxID = tk.Listbox(tab2SkillFrame, width=23)
for i in range(0, len(DEMON_IDS)):
tab2ListBoxID.insert(i, " " + str(DEMON_IDS[i]) + " - " + str(ALL_DEMONS[DEMON_IDS[i]]))
tab2ListBoxID.grid(column=3, row=1)
# Save Characters Changes
def applyDemonChange(*args):
print("\n BEFORE APPLY \n " + str(self.demonList))
if self.curDemon != {}:
index = tab2ComboBox.current()
self.curDemon = self.demonList[index]
# put textbox values in global variable
self.curDemon["level"] = tab2txtbLVL.get()
self.curDemon["exp"] = tab2txtbEXP.get()
self.curDemon["hp"] = tab2txtbHP.get()
self.curDemon["mp"] = tab2txtbMP.get()
self.curDemon["st"] = tab2txtbST.get()
self.curDemon["ma"] = tab2txtbMA.get()
self.curDemon["vi"] = tab2txtbVI.get()
self.curDemon["ag"] = tab2txtbAG.get()
self.curDemon["cmd1"] = tab2txtbCMD1.get()
self.curDemon["cmd2"] = tab2txtbCMD2.get()
self.curDemon["cmd3"] = tab2txtbCMD3.get()
self.curDemon["pas1"] = tab2txtbPAS1.get()
self.curDemon["pas2"] = tab2txtbPAS2.get()
self.curDemon["pas3"] = tab2txtbPAS3.get()
self.curDemon["rac"] = tab2txtbRAC.get()
self.curDemon["id"] = tab2txtbID.get()
print("\n AFTER APPLY \n " + str(self.demonList))
# put char_info back on list
self.demonList[index] = self.curDemon
# Bottom Frame
tab2BtmFrame = tk.Frame(tab2Frame, bd="2", relief="sunken")
tab2BtmFrame.grid(column=0, row=2, columnspan=2, sticky="EW", pady="20 0")
tab2BtmFrame.columnconfigure(0, weight=1)
tk.Button(
tab2BtmFrame, text="Apply", command=applyDemonChange
).grid(column=0, row=0, sticky="EW")
# Hide the other tabs, only show first tab
self.tabShown = self.tab1Frame
self.tabButton = self.tab1Button
self.tab2Frame.grid_remove()
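    # Validation callback whose parameters mirror the %-substitutions registered in
    # initVars; action == "0" is a deletion, which is always allowed.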
def validate_int(self, action, index, value_if_allowed,
prior_value, text, validation_type, trigger_type, widget_name):
if action == "0":
return True
try:
int(value_if_allowed)
return True
except ValueError:
return False
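    # Parses the save file. As the offset arithmetic below implies, characters and
    # demons are stored as fixed-size records laid out back to back (record x starts
    # at OFFSET * x), and each field constant is a (hex offset within the record,
    # length in bytes) pair.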
def processSaveFile(self):
with open(self.saveFilePath, 'rb') as fh:
self.save_bytes = bytearray(fh.read())
if self.save_bytes is not None:
# For 1st Tab (Main Character)
del self.charList[:]
for x in range(0, 13):
c_start_add = int(CHAR_OFFSET, 16) * x
c_id_add = c_start_add + int(CHAR_ID[0], 16)
char_id = self.getHexStr(self.save_bytes, c_id_add, CHAR_ID[1], add_is_dec=True)
print(char_id)
if char_id in ALL_CHARS:
c_lvl_add = c_start_add + int(CHAR_LVL[0], 16)
c_exp_add = c_start_add + int(CHAR_EXP[0], 16)
c_hp_add = c_start_add + int(CHAR_HP[0], 16)
c_mp_add = c_start_add + int(CHAR_MP[0], 16)
c_st_add = c_start_add + int(CHAR_ST[0], 16)
c_ma_add = c_start_add + int(CHAR_MA[0], 16)
c_vi_add = c_start_add + int(CHAR_VI[0], 16)
c_ag_add = c_start_add + int(CHAR_AG[0], 16)
c_cmd1_add = c_start_add + int(CHAR_CMD1[0], 16)
c_cmd2_add = c_start_add + int(CHAR_CMD2[0], 16)
c_cmd3_add = c_start_add + int(CHAR_CMD3[0], 16)
c_pas1_add = c_start_add + int(CHAR_PAS1[0], 16)
c_pas2_add = c_start_add + int(CHAR_PAS2[0], 16)
c_pas3_add = c_start_add + int(CHAR_PAS3[0], 16)
c_rac_add = c_start_add + int(CHAR_RAC[0], 16)
c_mov_add = c_start_add + int(CHAR_MOV[0], 16)
char_info = ALL_CHARS[char_id]
c_info = {
"start_add": c_start_add,
"name": char_info,
"id": int(self.getHexStr(self.save_bytes, c_id_add, CHAR_ID[1], add_is_dec=True), 16),
"level": int(self.getHexStr(self.save_bytes, c_lvl_add, CHAR_LVL[1], add_is_dec=True), 16),
"exp": int(self.getHexStr(self.save_bytes, c_exp_add, CHAR_EXP[1], add_is_dec=True), 16),
"hp": int(self.getHexStr(self.save_bytes, c_hp_add, CHAR_HP[1], add_is_dec=True), 16),
"mp": int(self.getHexStr(self.save_bytes, c_mp_add, CHAR_MP[1], add_is_dec=True), 16),
"st": int(self.getHexStr(self.save_bytes, c_st_add, CHAR_ST[1], add_is_dec=True), 16),
"ma": int(self.getHexStr(self.save_bytes, c_ma_add, CHAR_MA[1], add_is_dec=True), 16),
"vi": int(self.getHexStr(self.save_bytes, c_vi_add, CHAR_VI[1], add_is_dec=True), 16),
"ag": int(self.getHexStr(self.save_bytes, c_ag_add, CHAR_AG[1], add_is_dec=True), 16),
"cmd1": int(self.getHexStr(self.save_bytes, c_cmd1_add, CHAR_CMD1[1], add_is_dec=True), 16),
"cmd2": int(self.getHexStr(self.save_bytes, c_cmd2_add, CHAR_CMD2[1], add_is_dec=True), 16),
"cmd3": int(self.getHexStr(self.save_bytes, c_cmd3_add, CHAR_CMD3[1], add_is_dec=True), 16),
"pas1": int(self.getHexStr(self.save_bytes, c_pas1_add, CHAR_PAS1[1], add_is_dec=True), 16),
"pas2": int(self.getHexStr(self.save_bytes, c_pas2_add, CHAR_PAS2[1], add_is_dec=True), 16),
"pas3": int(self.getHexStr(self.save_bytes, c_pas3_add, CHAR_PAS3[1], add_is_dec=True), 16),
"rac": int(self.getHexStr(self.save_bytes, c_rac_add, CHAR_RAC[1], add_is_dec=True), 16),
"mov": int(self.getHexStr(self.save_bytes, c_mov_add, CHAR_MOV[1], add_is_dec=True), 16),
}
self.charList.append(c_info)
self.charNameList.append(char_info)
print("Start Address: %x, Char ID: %s, Name: %s." % (c_info["start_add"], char_id, char_info))
# For 2nd Tab (Demons)
del self.demonList[:]
            for x in range(0, DE_NUM_MAX):
                d_start_add = int(DE_OFFSET, 16) * x
                d_id_add = d_start_add + int(DE_ID[0], 16)
                d_lvl_add = d_start_add + int(DE_LVL[0], 16)
                d_exp_add = d_start_add + int(DE_EXP[0], 16)
                d_hp_add = d_start_add + int(DE_HP[0], 16)
                d_mp_add = d_start_add + int(DE_MP[0], 16)
                d_st_add = d_start_add + int(DE_ST[0], 16)
                d_ma_add = d_start_add + int(DE_MA[0], 16)
                d_vi_add = d_start_add + int(DE_VI[0], 16)
                d_ag_add = d_start_add + int(DE_AG[0], 16)
                d_cmd1_add = d_start_add + int(DE_CMD1[0], 16)
                d_cmd2_add = d_start_add + int(DE_CMD2[0], 16)
                d_cmd3_add = d_start_add + int(DE_CMD3[0], 16)
                d_pas1_add = d_start_add + int(DE_PAS1[0], 16)
                d_pas2_add = d_start_add + int(DE_PAS2[0], 16)
                d_pas3_add = d_start_add + int(DE_PAS3[0], 16)
                d_rac_add = d_start_add + int(DE_RAC[0], 16)
                demon_id = int(self.getHexStr(self.save_bytes, d_id_add, DE_ID[1], add_is_dec=True), 16)
                print(demon_id)
                demon_info = ALL_DEMONS[str(demon_id)]
                d_info = {
                    "start_add": d_start_add,
                    "name": demon_info,
                    "id": demon_id,
                    "level": int(self.getHexStr(self.save_bytes, d_lvl_add, DE_LVL[1], add_is_dec=True), 16),
                    "exp": int(self.getHexStr(self.save_bytes, d_exp_add, DE_EXP[1], add_is_dec=True), 16),
                    "hp": int(self.getHexStr(self.save_bytes, d_hp_add, DE_HP[1], add_is_dec=True), 16),
                    "mp": int(self.getHexStr(self.save_bytes, d_mp_add, DE_MP[1], add_is_dec=True), 16),
                    "st": int(self.getHexStr(self.save_bytes, d_st_add, DE_ST[1], add_is_dec=True), 16),
                    "ma": int(self.getHexStr(self.save_bytes, d_ma_add, DE_MA[1], add_is_dec=True), 16),
                    "vi": int(self.getHexStr(self.save_bytes, d_vi_add, DE_VI[1], add_is_dec=True), 16),
                    "ag": int(self.getHexStr(self.save_bytes, d_ag_add, DE_AG[1], add_is_dec=True), 16),
                    "cmd1": int(self.getHexStr(self.save_bytes, d_cmd1_add, DE_CMD1[1], add_is_dec=True), 16),
                    "cmd2": int(self.getHexStr(self.save_bytes, d_cmd2_add, DE_CMD2[1], add_is_dec=True), 16),
                    "cmd3": int(self.getHexStr(self.save_bytes, d_cmd3_add, DE_CMD3[1], add_is_dec=True), 16),
                    "pas1": int(self.getHexStr(self.save_bytes, d_pas1_add, DE_PAS1[1], add_is_dec=True), 16),
                    "pas2": int(self.getHexStr(self.save_bytes, d_pas2_add, DE_PAS2[1], add_is_dec=True), 16),
                    "pas3": int(self.getHexStr(self.save_bytes, d_pas3_add, DE_PAS3[1], add_is_dec=True), 16),
                    "rac": int(self.getHexStr(self.save_bytes, d_rac_add, DE_RAC[1], add_is_dec=True), 16),
                }
                self.demonList.append(d_info)
                self.demonNameList.append(demon_info)
                print("Start Address: %x, Demon ID: %s." % (d_start_add, demon_id))
def changeTab(self, tab_to_show, tab_button):
if self.tabShown is tab_to_show:
return
self.tabShown.grid_remove()
tab_to_show.grid()
self.tabButton.config(state="normal", relief="raised")
tab_button.config(state="disabled", relief="sunken")
self.tabButton = tab_button
self.tabShown = tab_to_show
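    # Values are stored little-endian: the value's hex string is zero-padded to
    # num_bytes, split into byte pairs, and written least-significant byte first.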
def writeHexBytes(self, byte_arr, hex_str, start_add, num_bytes, skip_bytes=None, add_is_dec=False):
hex_str = hex_str.zfill(num_bytes * 2)
hex_bytes = [hex_str[i:i + 2] for i in range(0, len(hex_str), 2)]
hex_bytes.reverse()
if add_is_dec:
curr_add = start_add
else:
curr_add = int(start_add, 16)
if skip_bytes:
curr_add += skip_bytes
for val in hex_bytes:
# print("old: %d, new: %d" % (byte_arr[curr_add], int(val, 16)))
byte_arr[curr_add] = int(val, 16)
curr_add += 1
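    # Inverse of writeHexBytes: reads num_bytes little-endian bytes, prepending each
    # successive byte to build a big-endian hex string suitable for int(s, 16).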
def getHexStr(self, byte_arr, start_add, num_bytes, skip_bytes=None, add_is_dec=False):
hex_str = ""
if add_is_dec:
curr_add = start_add
else:
curr_add = int(start_add, 16)
if skip_bytes:
curr_add += skip_bytes
while num_bytes > 0:
hex_str = format(byte_arr[curr_add], '02x') + hex_str
num_bytes -= 1
curr_add += 1
hex_str = hex_str.lstrip("0")
return hex_str if hex_str else "0"
# Menu Functions
def openFileChooser(self):
        sel_file = filedialog.askopenfilename(parent=self, initialdir=os.path.dirname(os.path.realpath(sys.argv[0])),
                                              filetypes=(("Save files", "*.dat"), ("All files", "*.*")))
        if sel_file:
# print(sel_file)
if os.path.isfile(sel_file):
self.saveFilePathTxt.set(sel_file)
self.saveFilePath = sel_file
self.saveFileDir = os.path.dirname(sel_file)
self.saveFileName = os.path.basename(sel_file)
print(self.saveFileName)
self.processSaveFile()
self.createWidgets()
def saveCharChanges(self):
if self.save_bytes:
for char in self.charList:
# Level
tmp_val = format(int(char["level"]), "x")
c_lvl_write = char["start_add"] + int(CHAR_LVL[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_lvl_write, CHAR_LVL[1], add_is_dec=True)
# EXP
tmp_val = format(int(char["exp"]), "x")
c_exp_write = char["start_add"] + int(CHAR_EXP[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_exp_write, CHAR_EXP[1], add_is_dec=True)
# hp
tmp_val = format(int(char["hp"]), "x")
c_hp_write = char["start_add"] + int(CHAR_HP[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_hp_write, CHAR_HP[1], add_is_dec=True)
# MP
tmp_val = format(int(char["mp"]), "x")
c_mp_write = char["start_add"] + int(CHAR_MP[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_mp_write, CHAR_MP[1], add_is_dec=True)
# ST
tmp_val = format(int(char["st"]), "x")
c_st_write = char["start_add"] + int(CHAR_ST[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_st_write, CHAR_ST[1], add_is_dec=True)
# MA
tmp_val = format(int(char["ma"]), "x")
c_ma_write = char["start_add"] + int(CHAR_MA[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_ma_write, CHAR_MA[1], add_is_dec=True)
# VI
tmp_val = format(int(char["vi"]), "x")
c_vi_write = char["start_add"] + int(CHAR_VI[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_vi_write, CHAR_VI[1], add_is_dec=True)
# AG
tmp_val = format(int(char["ag"]), "x")
c_ag_write = char["start_add"] + int(CHAR_AG[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_ag_write, CHAR_AG[1], add_is_dec=True)
# CMD1
tmp_val = format(int(char["cmd1"]), "x")
c_cmd1_write = char["start_add"] + int(CHAR_CMD1[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_cmd1_write, CHAR_CMD1[1], add_is_dec=True)
# CMD2
tmp_val = format(int(char["cmd2"]), "x")
c_cmd2_write = char["start_add"] + int(CHAR_CMD2[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_cmd2_write, CHAR_CMD2[1], add_is_dec=True)
# CMD3
tmp_val = format(int(char["cmd3"]), "x")
c_cmd3_write = char["start_add"] + int(CHAR_CMD3[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_cmd3_write, CHAR_CMD3[1], add_is_dec=True)
# PAS1
tmp_val = format(int(char["pas1"]), "x")
c_pas1_write = char["start_add"] + int(CHAR_PAS1[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_pas1_write, CHAR_PAS1[1], add_is_dec=True)
# PAS2
tmp_val = format(int(char["pas2"]), "x")
c_pas2_write = char["start_add"] + int(CHAR_PAS2[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_pas2_write, CHAR_PAS2[1], add_is_dec=True)
# PAS3
tmp_val = format(int(char["pas3"]), "x")
c_pas3_write = char["start_add"] + int(CHAR_PAS3[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_pas3_write, CHAR_PAS3[1], add_is_dec=True)
# RAC
tmp_val = format(int(char["rac"]), "x")
c_rac_write = char["start_add"] + int(CHAR_RAC[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_rac_write, CHAR_RAC[1], add_is_dec=True)
# MOV
tmp_val = format(int(char["mov"]), "x")
c_mov_write = char["start_add"] + int(CHAR_MOV[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, c_mov_write, CHAR_MOV[1], add_is_dec=True)
return
def saveDemonChanges(self):
if self.save_bytes:
for demon in self.demonList:
# ID
tmp_val = format(int(demon["id"]), "x")
d_id_write = demon["start_add"] + int(DE_ID[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_id_write, DE_ID[1], add_is_dec=True)
# Level
tmp_val = format(int(demon["level"]), "x")
d_lvl_write = demon["start_add"] + int(DE_LVL[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_lvl_write, DE_LVL[1], add_is_dec=True)
# EXP
tmp_val = format(int(demon["exp"]), "x")
d_exp_write = demon["start_add"] + int(DE_EXP[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_exp_write, DE_EXP[1], add_is_dec=True)
# HP
tmp_val = format(int(demon["hp"]), "x")
d_hp_write = demon["start_add"] + int(DE_HP[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_hp_write, DE_HP[1], add_is_dec=True)
# MP
tmp_val = format(int(demon["mp"]), "x")
d_mp_write = demon["start_add"] + int(DE_MP[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_mp_write, DE_MP[1], add_is_dec=True)
# ST
tmp_val = format(int(demon["st"]), "x")
d_st_write = demon["start_add"] + int(DE_ST[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_st_write, DE_ST[1], add_is_dec=True)
# MA
tmp_val = format(int(demon["ma"]), "x")
d_ma_write = demon["start_add"] + int(DE_MA[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_ma_write, DE_MA[1], add_is_dec=True)
# VI
tmp_val = format(int(demon["vi"]), "x")
d_vi_write = demon["start_add"] + int(DE_VI[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_vi_write, DE_VI[1], add_is_dec=True)
# AG
tmp_val = format(int(demon["ag"]), "x")
d_ag_write = demon["start_add"] + int(DE_AG[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_ag_write, DE_AG[1], add_is_dec=True)
# CMD1
tmp_val = format(int(demon["cmd1"]), "x")
d_cmd1_write = demon["start_add"] + int(DE_CMD1[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_cmd1_write, DE_CMD1[1], add_is_dec=True)
# CMD2
tmp_val = format(int(demon["cmd2"]), "x")
d_cmd2_write = demon["start_add"] + int(DE_CMD2[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_cmd2_write, DE_CMD2[1], add_is_dec=True)
# CMD3
tmp_val = format(int(demon["cmd3"]), "x")
d_cmd3_write = demon["start_add"] + int(DE_CMD3[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_cmd3_write, DE_CMD3[1], add_is_dec=True)
# PAS1
tmp_val = format(int(demon["pas1"]), "x")
d_pas1_write = demon["start_add"] + int(DE_PAS1[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_pas1_write, DE_PAS1[1], add_is_dec=True)
# PAS2
tmp_val = format(int(demon["pas2"]), "x")
d_pas2_write = demon["start_add"] + int(DE_PAS2[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_pas2_write, DE_PAS2[1], add_is_dec=True)
# PAS3
tmp_val = format(int(demon["pas3"]), "x")
d_pas3_write = demon["start_add"] + int(DE_PAS3[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_pas3_write, DE_PAS3[1], add_is_dec=True)
# RAC
tmp_val = format(int(demon["rac"]), "x")
d_rac_write = demon["start_add"] + int(DE_RAC[0], 16)
self.writeHexBytes(self.save_bytes, tmp_val, d_rac_write, DE_RAC[1], add_is_dec=True)
return
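    # Writes the patched bytes to an "Edited" subfolder beside the original save,
    # so the source file itself is never modified in place.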
def saveChanges(self):
if self.saveFilePath and os.path.isdir(self.saveFileDir):
self.saveCharChanges()
self.saveDemonChanges()
edited_dir = os.path.join(self.saveFileDir, "Edited")
if not os.path.isdir(edited_dir):
os.mkdir(edited_dir)
with open(os.path.join(edited_dir, self.saveFileName), 'wb') as fh:
fh.write(self.save_bytes)
def exitApp(self):
self.quit()
def aboutCreator(self):
        tk.messagebox.showinfo("About This", "Made by XxArcaiCxX" +
                               "\n\nCredits to:" +
                               "\nwaynelimt (GitHub) - SMT IV Save Editor, from which this editor was adapted")
def help(self):
tk.messagebox.showinfo("Help", "Just don't be stupid lol")
if __name__ == "__main__":
app = mytestapp(None)
app.mainloop()
hexsha a900d8dec7fd37ab4adca645a03f1689e7145bd6 | size 6692 | ext py | lang Python | path tutorials/examples/interp_plot.py | repo ReynLieu/tf-pwa | head f354b5036bc8c37ffba95849de5ec3367934eef8 | licenses ["MIT"] | stars 4 (2021-05-10 to 2021-08-16) | issues 45 (2020-10-24 to 2022-03-20) | forks 8 (2020-10-24 to 2022-01-03)
import json
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
import yaml
from mpl_toolkits.mplot3d.axes3d import Axes3D
from scipy.interpolate import interp1d
from tf_pwa.config_loader import ConfigLoader
from tf_pwa.experimental.extra_amp import spline_matrix
# import mplhep
# plt.style.use(mplhep.style.LHCb)
def valid_name(s):
    return s.replace("+", ".")
def polar_err(r, phi, r_e, phi_e):
"""polar errors for r and phi"""
# print(r, phi, r_e, phi_e)
dxdr = np.cos(phi)
dxdphi = r * np.sin(phi)
dydr = np.sin(phi)
dydphi = -r * np.cos(phi)
x_e = np.sqrt((dxdr * r_e) ** 2 + (dxdphi * phi_e) ** 2)
y_e = np.sqrt((dydr * r_e) ** 2 + (dydphi * phi_e) ** 2)
# print(x_e, y_e)
return x_e, y_e
def dalitz_weight(s12, m0, m1, m2, m3):
"""phase space weight in dalitz plot"""
m12 = np.sqrt(s12)
m12 = np.where(m12 > (m1 + m2), m12, m1 + m2)
m12 = np.where(m12 < (m0 - m3), m12, m0 - m3)
# if(mz < (m_d+m_pi)) return 0;
# if(mz > (m_b-m_pi)) return 0;
E2st = 0.5 * (m12 * m12 - m1 * m1 + m2 * m2) / m12
E3st = 0.5 * (m0 * m0 - m12 * m12 - m3 * m3) / m12
p2st2 = E2st * E2st - m2 * m2
p3st2 = E3st * E3st - m3 * m3
p2st = np.sqrt(np.where(p2st2 > 0, p2st2, 0))
p3st = np.sqrt(np.where(p3st2 > 0, p3st2, 0))
return p2st * p3st
def load_params(
config_file="config.yml", params="final_params.json", res="li(1+)S"
):
with open(params) as f:
final_params = json.load(f)
val = final_params["value"]
err = final_params["error"]
with open(config_file) as f:
config = yaml.safe_load(f)
xi = config["particle"][res].get("points", None)
if xi is None:
m_max = config["particle"][res].get("m_max", None)
m_min = config["particle"][res].get("m_min", None)
N = config["particle"][res].get("interp_N", None)
dx = (m_max - m_min) / (N - 1)
xi = [m_min + dx * i for i in range(N)]
N = len(xi)
head = "{}_point".format(vialid_name(res))
r = np.array(
[0] + [val["{}_{}r".format(head, i)] for i in range(N - 2)] + [0]
)
phi = np.array(
[0] + [val["{}_{}i".format(head, i)] for i in range(N - 2)] + [0]
)
r_e = np.array(
[0, 0]
+ [
err.get("{}_{}r".format(head, i), r[i] * 0.1)
for i in range(1, N - 2)
]
+ [0]
)
phi_e = np.array(
[0, 0]
+ [
err.get("{}_{}i".format(head, i), phi[i] * 0.1)
for i in range(1, N - 2)
]
+ [0]
)
return np.array(xi), r, phi, r_e, phi_e
def trans_r2xy(r, phi, r_e, phi_e):
"""r,phi -> x,y """
x = np.array(r) * np.cos(phi)
y = np.array(r) * np.sin(phi)
err = np.array(
[polar_err(i, j, k, l) for i, j, k, l in zip(r, phi, r_e, phi_e)]
)
return x, y, err[:, 0], err[:, 1]
def plot_x_y(name, x, y, x_i, y_i, xlabel, ylabel, ylim=(None, None)):
"""plot x vs y"""
plt.clf()
plt.plot(x, y)
plt.scatter(x_i, y_i)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ylim(ylim)
plt.savefig(name)
def plot_phi(name, m, phi, m_i, phi_i):
""" plot phi and gradient of phi"""
grad = phi[2:] - phi[:-2]
mask = (phi < 3) & (phi > -3)
grad_max = np.mean(np.abs(grad))
# grad_max = np.max(grad[mask[1:-1]])
(idx,) = signal.argrelextrema(grad, np.less)
plt.clf()
# plt.plot(m, pq/np.max(pq))# np.sqrt(x_new**2+y_new**2)**2)
plt.plot(m[1:-1], grad / grad_max, label="$\\Delta \\phi$ ")
plt.plot(m, phi, label="$\\phi$") # np.sqrt(x_new**2+y_new**2)**2)
m_delta = m[idx + 1]
print("min Delta phi in mass:", m_delta)
plt.scatter(m_delta, [-np.pi] * len(m_delta))
plt.scatter(m_i, phi_i, label="points")
plt.xlabel("mass")
plt.ylabel("$\\phi$")
plt.ylim((-np.pi, np.pi))
plt.legend()
plt.savefig(name)
def plot_x_y_err(name, x, y, x_e, y_e):
"""plot eror bar of x y"""
plt.clf()
plt.errorbar(x, y, xerr=x_e, yerr=y_e)
plt.xlabel("real R(m)")
plt.ylabel("imag R(m)")
plt.savefig(name)
def plot3d_m_x_y(name, m, x, y):
fig = plt.figure()
axes3d = Axes3D(fig)
axes3d.plot(m, x, y)
axes3d.set_xlabel("m")
axes3d.set_ylabel("real R(m)")
axes3d.set_zlabel("imag R(m)")
def update(frame):
axes3d.view_init(elev=30, azim=frame)
return None
anim = animation.FuncAnimation(
fig, update, interval=10, frames=range(0, 360, 10)
)
anim.save(name, writer="imagemagick")
def plot_all(
res="MI(1+)S",
config_file="config.yml",
params="final_params.json",
prefix="figure/",
):
"""plot all figure"""
config = ConfigLoader(config_file)
config.set_params(params)
particle = config.get_decay().get_particle(res)
mi, r, phi_i, r_e, phi_e = load_params(config_file, params, res)
x, y, x_e, y_e = trans_r2xy(r, phi_i, r_e, phi_e)
m = np.linspace(mi[0], mi[-1], 1000)
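    # Particle masses in GeV (PDG values) for K+-, D+-, D*0 and B+-, used only to
    # compute the Dalitz phase-space weight p*q along the mass axis.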
M_Kpm = 0.49368
M_Dpm = 1.86961
M_Dstar0 = 2.00685
M_Bpm = 5.27926
# x_new = interp1d(xi, x, "cubic")(m)
# y_new = interp1d(xi, y, "cubic")(m)
rm_new = particle.interp(m).numpy()
x_new, y_new = rm_new.real, rm_new.imag
pq = dalitz_weight(m * m, M_Bpm, M_Dstar0, M_Dpm, M_Kpm)
pq_i = dalitz_weight(mi * mi, M_Bpm, M_Dstar0, M_Dpm, M_Kpm)
phi = np.arctan2(y_new, x_new)
r2 = x_new * x_new + y_new * y_new
plot_phi(f"{prefix}phi.png", m, phi, mi, np.arctan2(y, x))
plot_x_y(
f"{prefix}r2.png",
m,
r2,
mi,
r * r,
"mass",
"$|R(m)|^2$",
ylim=(0, None),
)
plot_x_y(f"{prefix}x_y.png", x_new, y_new, x, y, "real R(m)", "imag R(m)")
plot_x_y_err(
f"{prefix}x_y_err.png", x[1:-1], y[1:-1], x_e[1:-1], y_e[1:-1]
)
plot_x_y(
f"{prefix}r2_pq.png",
m,
r2 * pq,
mi,
r * r * pq_i,
"mass",
"$|R(m)|^2 p \cdot q$",
ylim=(0, None),
)
plot3d_m_x_y(f"{prefix}m_r.gif", m, x_new, y_new)
def main():
import argparse
parser = argparse.ArgumentParser(description="plot interpolation")
parser.add_argument("particle", type=str)
parser.add_argument("-c", "--config", default="config.yml", dest="config")
parser.add_argument(
"-i", "--params", default="final_params.json", dest="params"
)
parser.add_argument("-p", "--prefix", default="figure/", dest="prefix")
results = parser.parse_args()
plot_all(results.particle, results.config, results.params, results.prefix)
if __name__ == "__main__":
main()
hexsha a902196e210ce0c9d3fc255989473f3fdb1ab785 | size 3316 | ext py | lang Python | path scripts/val_step_images_pull.py | repo neuroailab/curiosity_deprecated | head 65f7cde13b07cdac52eed39535a94e7544c396b8 | licenses ["Apache-2.0"] | stars null | issues 2 (2017-11-18) | forks null
'''
A script for accessing visualization data (saving images at validation steps during training) and saving them to a local directory.
'''
import pymongo as pm
import pickle
import os
import gridfs
import cPickle
import numpy as np
from PIL import Image
dbname = 'future_pred_test'
collname = 'asymmetric'
port = 27017
exp_id = '3_3'
save_loc = '/home/nhaber/really_temp'
save_fn = os.path.join(save_loc, exp_id + '.p')
target_name = 'valid0'
one_channel_softmax = True
conn = pm.MongoClient(port=port)
coll = conn[dbname][collname + '.files']
print('experiments')
print(coll.distinct('exp_id'))
cur = coll.find({'exp_id' : exp_id})
q = {'exp_id' : exp_id, 'validation_results' : {'$exists' : True}}
val_steps = coll.find(q)
val_count = val_steps.count()
print('num val steps so far')
print(val_count)
saved_data = {}
def convert_to_viz(np_arr):
'''I did a silly thing and saved discretized-loss predictions as if they were image predictions.
This recovers and converts to an ok visualization.'''
my_shape = np_arr.shape
num_classes = np_arr.shape[-1]
# fixed so that the prediction is saved without being converted to 255
if np_arr.dtype == 'float32':
exp_arr = np.exp(np_arr)
else:
exp_arr = np.exp(np_arr.astype('float32') / 255.)
sum_arr = np.sum(exp_arr, axis = -1)
# transpose so the per-pixel sums broadcast across the class axis
softy = (exp_arr.T / sum_arr.T).T
return np.sum((softy * range(num_classes) * 255. / float(num_classes)), axis = -1).astype('uint8')
def convert_to_viz_sharp(np_arr):
'''Similar to the above, but just taking the argmax, hopefully giving a sharper visualization.
'''
num_classes = np_arr.shape[-1]
a_m = np.argmax(np_arr, axis = -1)
return (a_m * 255. / float(num_classes)).astype('uint8')
def sigmoid_it(np_arr):
sigm = 1. / (1. + np.exp( - np_arr))
return (255 * sigm).astype('uint8')
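# A minimal sketch of how the three converters above behave (kept commented out
# so the script's behavior is unchanged; the fake array is illustrative only):
#   fake_pred = np.random.rand(8, 8, 5).astype('float32')  # (H, W, num_classes)
#   viz = convert_to_viz(fake_pred)          # expected-class map, uint8, (8, 8)
#   sharp = convert_to_viz_sharp(fake_pred)  # argmax map, uint8, (8, 8)
#   sig = sigmoid_it(fake_pred)              # elementwise sigmoid, uint8, (8, 8, 5)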
for val_num in range(val_count):
idx = val_steps[val_num]['_id']
fn = coll.find({'item_for' : idx})[0]['filename']
fs = gridfs.GridFS(coll.database, collname)
fh = fs.get_last_version(fn)
saved_data[val_num] = cPickle.loads(fh.read())['validation_results']
fh.close()
exp_dir = os.path.join(save_loc, exp_id)
if not os.path.exists(exp_dir):
os.mkdir(exp_dir)
for val_num, val_data in saved_data.iteritems():
val_dir = os.path.join(exp_dir, 'val_' + str(val_num))
if not os.path.exists(val_dir):
os.mkdir(val_dir)
for tgt_desc, tgt in val_data[target_name].iteritems():
tgt_images = [arr for step_results in tgt for arr in step_results]
for (instance_num, arr) in enumerate(tgt_images):
instance_dir = os.path.join(val_dir, 'instance_' + str(instance_num))
if not os.path.exists(instance_dir):
os.mkdir(instance_dir)
if len(arr.shape) == 4:
fn = os.path.join(instance_dir, tgt_desc + '_' + str(instance_num) + '.jpeg')
arr = convert_to_viz_sharp(arr)
im = Image.fromarray(arr)
im.save(fn)
# save 1-D arrays in human-readable text form
elif len(arr.shape) == 1:
fn = os.path.join(instance_dir, tgt_desc + '_' + str(instance_num) + '.txt')
np.savetxt(fn, arr)
else:
assert len(arr.shape) == 3
fn = os.path.join(instance_dir, tgt_desc + '_' + str(instance_num) + '.jpeg')
if one_channel_softmax and 'pred' in tgt_desc:
arr = sigmoid_it(arr)
im = Image.fromarray(arr)
im.save(fn)
| 29.607143
| 131
| 0.701448
| 551
| 3,316
| 4.018149
| 0.317604
| 0.024842
| 0.031617
| 0.02168
| 0.169377
| 0.161698
| 0.110208
| 0.090334
| 0.063234
| 0.063234
| 0
| 0.016834
| 0.158022
| 3,316
| 111
| 132
| 29.873874
| 0.776146
| 0.162545
| 0
| 0.128205
| 0
| 0
| 0.08735
| 0.008699
| 0
| 0
| 0
| 0
| 0.012821
| 1
| 0.038462
| false
| 0
| 0.089744
| 0
| 0.166667
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a90540d0d0a5a9bc45b650e47d3f81668b272c4b
| 338
|
py
|
Python
|
test from collections import defaultdict.py
|
meeve602/nn-network
|
2bc422785b8d7e5fa78d73a218f5ed8d499902e7
|
[
"Apache-2.0"
] | null | null | null |
test from collections import defaultdict.py
|
meeve602/nn-network
|
2bc422785b8d7e5fa78d73a218f5ed8d499902e7
|
[
"Apache-2.0"
] | null | null | null |
test from collections import defaultdict.py
|
meeve602/nn-network
|
2bc422785b8d7e5fa78d73a218f5ed8d499902e7
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
computing_graph = defaultdict(list)  # defaultdict(list) builds a dict whose default value is an empty list
"""
for (key, value) in data:
result[key].append(value)
print(result)#defaultdict(<class 'list'>, {'p': [1, 2, 3], 'h': [1, 2, 3]})
"""
n = 'p'
m = [1,2,23]
computing_graph[n].append(m)
print(computing_graph)
| 26
| 76
| 0.659763
| 47
| 338
| 4.680851
| 0.531915
| 0.190909
| 0.027273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034722
| 0.147929
| 338
| 12
| 77
| 28.166667
| 0.729167
| 0.115385
| 0
| 0
| 0
| 0
| 0.006803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a905bc7c157d96b2e4f0eee9148f0267c5d741fe
| 597
|
py
|
Python
|
examples/web-scraper/playground.py
|
relikd/botlib
|
d0c5072d27db1aa3fad432457c90c9e3f23f22cc
|
[
"MIT"
] | null | null | null |
examples/web-scraper/playground.py
|
relikd/botlib
|
d0c5072d27db1aa3fad432457c90c9e3f23f22cc
|
[
"MIT"
] | null | null | null |
examples/web-scraper/playground.py
|
relikd/botlib
|
d0c5072d27db1aa3fad432457c90c9e3f23f22cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from botlib.curl import Curl
from botlib.html2list import HTML2List, MatchGroup
URL = 'https://www.vice.com/en/topic/motherboard'
SOURCE = Curl.get(URL, cache_only=True)
SELECT = '.vice-card__content'
match = MatchGroup({
'url': r'<a href="([^"]*)"',
'title': r'<h3[^>]*><a [^>]*>([\s\S]*?)</a>[\s\S]*?</h3>',
'desc': r'<p[^>]*>([\s\S]*?)</p>',
'wrong-regex': r'<a xref="([\s\S]*?)"',
})
for elem in reversed(HTML2List(SELECT).parse(SOURCE)):
match.set_html(elem)
for k, v in match.to_dict().items():
print(k, '=', v)
print()
break
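# Note: the 'wrong-regex' entry above deliberately looks for 'xref' instead of
# 'href', so it never matches -- presumably included to exercise MatchGroup's
# handling of patterns that find nothing.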
| 28.428571
| 62
| 0.571189
| 88
| 597
| 3.818182
| 0.590909
| 0.02381
| 0.017857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.155779
| 597
| 20
| 63
| 29.85
| 0.654762
| 0.035176
| 0
| 0
| 0
| 0
| 0.326957
| 0.095652
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a906359018ecf72d4a4f117b4a1b82b665b383a6
| 3,912
|
py
|
Python
|
examples/j1j2_2d_exact_4.py
|
vigsterkr/FlowKet
|
0d8f301b5f51a1bab83021f10f65cfb5f2751079
|
[
"MIT"
] | 21
|
2019-11-19T13:59:13.000Z
|
2021-12-03T10:26:30.000Z
|
examples/j1j2_2d_exact_4.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 10
|
2019-11-15T12:07:28.000Z
|
2020-11-07T18:12:18.000Z
|
examples/j1j2_2d_exact_4.py
|
HUJI-Deep/PyKet
|
61238afd3fe1488d35c57d280675f544c559bd01
|
[
"MIT"
] | 11
|
2019-12-09T22:51:17.000Z
|
2021-11-29T22:05:41.000Z
|
from collections import OrderedDict
import itertools
import sys
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from flowket.callbacks import TensorBoard
from flowket.callbacks.exact import default_wave_function_callbacks_factory, ExactObservableCallback
from flowket.operators.j1j2 import J1J2
from flowket.operators import NetketOperatorWrapper
from flowket.machines import ConvNetAutoregressive2D
from flowket.optimization import ExactVariational, VariationalMonteCarlo, loss_for_energy_minimization
from flowket.samplers import FastAutoregressiveSampler
from flowket.optimizers import convert_to_accumulate_gradient_optimizer
import numpy
import netket
def total_spin_netket_operator(hilbert_state_shape):
edge_colors = []
for i in range(numpy.prod(hilbert_state_shape)):
edge_colors.append([i, i, 1])
g = netket.graph.CustomGraph(edge_colors)
hi = netket.hilbert.Spin(s=0.5, graph=g)
sigmaz = [[1, 0], [0, -1]]
sigmax = [[0, 1], [1, 0]]
sigmay = [[0, -1j], [1j, 0]]
interaction = numpy.kron(sigmaz, sigmaz) + numpy.kron(sigmax, sigmax) + numpy.kron(sigmay, sigmay)
bond_operator = [
(interaction).tolist(),
]
bond_color = [1]
return netket.operator.GraphOperator(hi, bondops=bond_operator, bondops_colors=bond_color)
params_grid_config = {
'width': [32],
'depth': [5],
'lr': [5e-3, 1e-3],
'weights_normalization': [False, True]
}
run_index = int(sys.argv[-1].strip())
ks, vs = zip(*params_grid_config.items())
params_options = list(itertools.product(*vs))
chosen_v = params_options[run_index % len(params_options)]
params = dict(zip(ks, chosen_v))
print('Chosen params: %s' % str(params))
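# Illustrative indexing: the grid above has 1 * 1 * 2 * 2 = 4 combinations, so
# e.g. run_index = 5 selects params_options[5 % 4] == params_options[1].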
hilbert_state_shape = (4, 4)
inputs = Input(shape=hilbert_state_shape, dtype='int8')
convnet = ConvNetAutoregressive2D(inputs, depth=params['depth'], num_of_channels=params['width'],
weights_normalization=params['weights_normalization'])
predictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs
model = Model(inputs=inputs, outputs=predictions)
conditional_log_probs_model = Model(inputs=inputs, outputs=conditional_log_probs)
batch_size = 2 ** 12
# For a fair comparison with Monte Carlo, each epoch sees 2 ** 18 samples
steps_per_epoch = 2 ** 6
true_ground_state_energy = -30.022227800323677
operator = J1J2(hilbert_state_shape=hilbert_state_shape, j2=0.5, pbc=False)
exact_variational = ExactVariational(model, operator, batch_size)
optimizer = Adam(lr=params['lr'], beta_1=0.9, beta_2=0.999)
convert_to_accumulate_gradient_optimizer(
optimizer,
update_params_frequency=exact_variational.num_of_batch_until_full_cycle,
accumulate_sum_or_mean=True)
model.compile(optimizer=optimizer, loss=loss_for_energy_minimization)
model.summary()
total_spin = NetketOperatorWrapper(total_spin_netket_operator(hilbert_state_shape), hilbert_state_shape)
run_name = 'j1j2_4_exact_weights_normalization_%s_depth_%s_width_%s_adam_lr_%s_run_%s' % \
(params['weights_normalization'], params['depth'], params['width'], params['lr'], run_index)
tensorboard = TensorBoard(log_dir='tensorboard_logs/%s' % run_name,
update_freq='epoch',
write_output=False)
callbacks = default_wave_function_callbacks_factory(exact_variational, log_in_batch_or_epoch=False,
true_ground_state_energy=true_ground_state_energy) + [
ExactObservableCallback(exact_variational, total_spin, 'total_spin', log_in_batch_or_epoch=False),
tensorboard]
model.fit_generator(exact_variational.to_generator(), steps_per_epoch=steps_per_epoch, epochs=1000, callbacks=callbacks,
max_queue_size=0, workers=0)
model.save_weights('final_%s.h5' % run_name)
| 40.329897
| 120
| 0.748466
| 502
| 3,912
| 5.531873
| 0.348606
| 0.031689
| 0.048974
| 0.023767
| 0.162045
| 0.097587
| 0.063378
| 0.03457
| 0
| 0
| 0
| 0.024361
| 0.150051
| 3,912
| 96
| 121
| 40.75
| 0.810827
| 0.017127
| 0
| 0
| 0
| 0
| 0.061931
| 0.035389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013333
| false
| 0
| 0.213333
| 0
| 0.24
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a907a743744664923c1dc0146b6eda52d8a91360
| 3,833
|
py
|
Python
|
build/package_version/archive_info.py
|
MicrohexHQ/nacl_contracts
|
3efab5eecb3cf7ba43f2d61000e65918aa4ba77a
|
[
"BSD-3-Clause"
] | 6
|
2015-02-06T23:41:01.000Z
|
2015-10-21T03:08:51.000Z
|
build/package_version/archive_info.py
|
MicrohexHQ/nacl_contracts
|
3efab5eecb3cf7ba43f2d61000e65918aa4ba77a
|
[
"BSD-3-Clause"
] | null | null | null |
build/package_version/archive_info.py
|
MicrohexHQ/nacl_contracts
|
3efab5eecb3cf7ba43f2d61000e65918aa4ba77a
|
[
"BSD-3-Clause"
] | 1
|
2019-10-02T08:41:50.000Z
|
2019-10-02T08:41:50.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A archive_info is a json file describing a single package archive."""
import collections
import hashlib
import json
import os
ArchiveInfoTuple = collections.namedtuple(
'ArchiveInfoTuple',
['name', 'hash', 'url', 'tar_src_dir', 'extract_dir'])
def GetArchiveHash(archive_file):
"""Gets the standardized hash value for a given archive.
This hash value is the expected value used to verify package archives.
Args:
archive_file: Path to archive file to hash.
Returns:
Hash value of archive file, or None if file is invalid.
"""
if os.path.isfile(archive_file):
with open(archive_file, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
return None
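# Illustrative usage (the archive path is hypothetical):
#   archive_hash = GetArchiveHash('out/packages/toolchain.tar.gz')
#   # archive_hash is the SHA-1 hex digest, or None if the file does not exist.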
class ArchiveInfo(object):
"""A archive_info file is a single json file describine an archive.
Archive Fields:
name: Name of the package archive.
hash: Hash value of the package archive, for validation purposes.
url: Web URL location where the archive can be found.
tar_src_dir: Where files are located within the tar archive.
extract_dir: Where files should be extracted to within destination dir.
"""
def __init__(self, name='', archive_hash=0, url=None, tar_src_dir='',
extract_dir='', archive_info_file=None):
"""Initialize ArchiveInfo object.
When an archive_info_file is specified, all other fields are ignored.
Otherwise, uses first fields as constructor for archive info object.
"""
self._archive_tuple = None
if archive_info_file is not None:
self.LoadArchiveInfoFile(archive_info_file)
else:
self.SetArchiveData(name, archive_hash, url, tar_src_dir, extract_dir)
def __eq__(self, other):
return (type(self) == type(other) and
self.GetArchiveData() == other.GetArchiveData())
def __repr__(self):
return "ArchiveInfo(" + str(self._archive_tuple) + ")"
def LoadArchiveInfoFile(self, archive_info_file):
"""Loads a archive info file into this object.
Args:
archive_info_file: Filename or archive info json.
"""
archive_json = None
if isinstance(archive_info_file, dict):
archive_json = archive_info_file
elif isinstance(archive_info_file, basestring):
with open(archive_info_file, 'rt') as f:
archive_json = json.load(f)
else:
raise RuntimeError('Invalid load archive file type (%s): %s' %
                   (type(archive_info_file), archive_info_file))
self._archive_tuple = ArchiveInfoTuple(**archive_json)
def SaveArchiveInfoFile(self, archive_info_file):
"""Saves this object as a serialized JSON file if the object is valid.
Args:
archive_info_file: File path where JSON file will be saved.
"""
if self._archive_tuple and self._archive_tuple.hash:
archive_json = self.DumpArchiveJson()
with open(archive_info_file, 'wt') as f:
json.dump(archive_json, f, sort_keys=True,
indent=2, separators=(',', ': '))
def DumpArchiveJson(self):
"""Returns a dict representation of this object for JSON."""
if self._archive_tuple is None or not self._archive_tuple.hash:
return {}
return dict(self._archive_tuple._asdict())
def SetArchiveData(self, name, archive_hash, url=None, tar_src_dir='',
extract_dir=''):
"""Replaces currently set with new ArchiveInfoTuple."""
self._archive_tuple = ArchiveInfoTuple(name, archive_hash, url,
tar_src_dir, extract_dir)
def GetArchiveData(self):
"""Returns the current ArchiveInfoTuple tuple."""
return self._archive_tuple
| 33.920354
| 76
| 0.687712
| 508
| 3,833
| 4.996063
| 0.30315
| 0.086682
| 0.100473
| 0.031521
| 0.08156
| 0.063436
| 0.063436
| 0.042947
| 0.031521
| 0.031521
| 0
| 0.002347
| 0.221758
| 3,833
| 112
| 77
| 34.223214
| 0.848475
| 0.375163
| 0
| 0.037736
| 0
| 0
| 0.048267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169811
| false
| 0
| 0.075472
| 0.037736
| 0.396226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bef32020f0494687a4f159a327cd70c156e52e5
| 3,546
|
py
|
Python
|
tests/test_lsstdoc.py
|
lsst-sqre/dochub-adapter
|
3c155bc7ffe46f41e8de5108c936aed7587c8cdb
|
[
"MIT"
] | null | null | null |
tests/test_lsstdoc.py
|
lsst-sqre/dochub-adapter
|
3c155bc7ffe46f41e8de5108c936aed7587c8cdb
|
[
"MIT"
] | null | null | null |
tests/test_lsstdoc.py
|
lsst-sqre/dochub-adapter
|
3c155bc7ffe46f41e8de5108c936aed7587c8cdb
|
[
"MIT"
] | null | null | null |
"""Ad hoc tests of the LsstLatexDoc class. Other test modules rigorously verify
LsstLatexDoc against sample documents.
"""
from pybtex.database import BibliographyData
import pytest
from lsstprojectmeta.tex.lsstdoc import LsstLatexDoc
def test_no_short_title():
"""title without a short title."""
sample = r"\title{Title}"
lsstdoc = LsstLatexDoc(sample)
assert lsstdoc.title == "Title"
def test_title_variations():
"""Test variations on the title command's formatting."""
# Test with whitespace in title command
input_txt = r"\title [Test Plan] { \product ~Test Plan}"
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.title == r"\product ~Test Plan"
assert lsstdoc.short_title == "Test Plan"
def test_author_variations():
"""Test variations on the author command's formatting."""
input_txt = (r"\author {William O'Mullane, Mario Juric, "
r"Frossie Economou}"
r" % the author(s)")
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == ["William O'Mullane",
"Mario Juric",
"Frossie Economou"]
def test_author_list_amanda():
"""Test author list parsing where one author's name is Amanda.
"""
input_txt = (
r"\author {William O'Mullane, John Swinbank, Leanne Guy, "
r"Amanda Bauer}"
)
expected = [
"William O'Mullane",
"John Swinbank",
"Leanne Guy",
"Amanda Bauer"
]
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == expected
def test_author_list_and():
input_txt = r"\author{A.~Author, B.~Author, and C.~Author}"
expected = ['A. Author', 'B. Author', 'C. Author']
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.authors == expected
def test_handle_variations():
"""Test variations on the handle command's formatting."""
input_txt = r"\setDocRef {LDM-503} % the reference code "
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.handle == "LDM-503"
def test_abstract_variations():
"""Test variations on the abstract command's formatting."""
input_txt = (r"\setDocAbstract {" + "\n"
r"This is the Test Plan for \product. In it we define terms "
r"associated with testing and further test specifications "
r"for specific items.}")
expected_abstract = (
r"This is the Test Plan for \product. In it we define terms "
r"associated with testing and further test specifications for "
r"specific items."
)
lsstdoc = LsstLatexDoc(input_txt)
assert lsstdoc.abstract == expected_abstract
@pytest.mark.parametrize(
'sample, expected',
[(r'\documentclass[DM,lsstdraft,toc]{lsstdoc}', True),
(r'\documentclass[DM,toc]{lsstdoc}', False),
(r'\documentclass[DM, lsstdraft, toc]{lsstdoc}', True)])
def test_is_draft(sample, expected):
lsstdoc = LsstLatexDoc(sample)
assert lsstdoc.is_draft == expected
def test_html_title():
sample = r"\title{``Complex'' title \textit{like} $1+2$}"
expected = ('“Complex” title <em>like</em> '
'<span class="math inline">1\u2005+\u20052</span>\n')
lsstdoc = LsstLatexDoc(sample)
converted = lsstdoc.html_title
assert converted == expected
def test_default_load_bib_db():
"""Test that the common lsst-texmf bibliographies are always loaded.
"""
lsstdoc = LsstLatexDoc('')
assert isinstance(lsstdoc.bib_db, BibliographyData)
| 32.833333
| 79
| 0.645798
| 424
| 3,546
| 5.299528
| 0.294811
| 0.042724
| 0.024032
| 0.072096
| 0.433912
| 0.332888
| 0.247441
| 0.139742
| 0.139742
| 0.139742
| 0
| 0.006669
| 0.238861
| 3,546
| 107
| 80
| 33.140187
| 0.825861
| 0.1489
| 0
| 0.180556
| 0
| 0
| 0.342406
| 0.034946
| 0
| 0
| 0
| 0
| 0.152778
| 1
| 0.138889
| false
| 0
| 0.041667
| 0
| 0.180556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bf171a05404452569f820648c7f427a69c301b2
| 8,012
|
py
|
Python
|
bluesky_kafka/tests/test_kafka.py
|
gwbischof/bluesky-kafka
|
fb5ab9c2caa023b91722e1dfc1aac00b6e0d7ec4
|
[
"BSD-3-Clause"
] | null | null | null |
bluesky_kafka/tests/test_kafka.py
|
gwbischof/bluesky-kafka
|
fb5ab9c2caa023b91722e1dfc1aac00b6e0d7ec4
|
[
"BSD-3-Clause"
] | null | null | null |
bluesky_kafka/tests/test_kafka.py
|
gwbischof/bluesky-kafka
|
fb5ab9c2caa023b91722e1dfc1aac00b6e0d7ec4
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import partial
import logging
import msgpack
import msgpack_numpy as mpn
from confluent_kafka.cimpl import KafkaException
import numpy as np
import pickle
import pytest
from bluesky_kafka import Publisher, BlueskyConsumer
from bluesky_kafka.tests.conftest import get_all_documents_from_queue
from bluesky.plans import count
from event_model import sanitize_doc
# mpn.patch() is recommended by msgpack-numpy as a way
# to patch msgpack but it caused a utf-8 decode error
mpn.patch()
logging.getLogger("bluesky.kafka").setLevel("DEBUG")
# the Kafka test broker should be configured with
# KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
def test_producer_config():
test_topic = "test.producer.config"
kafka_publisher = Publisher(
topic=test_topic,
bootstrap_servers="1.2.3.4:9092",
key="kafka-unit-test-key",
# work with a single broker
producer_config={
"bootstrap.servers": "5.6.7.8:9092",
"acks": 1,
"enable.idempotence": False,
"request.timeout.ms": 5000,
},
)
assert (
kafka_publisher._producer_config["bootstrap.servers"]
== "1.2.3.4:9092,5.6.7.8:9092"
)
def test_get_cluster_metadata(publisher_factory):
# the topic test.get.cluster.metadata will be created
# by the call to publisher.get_cluster_metadata
# if automatic topic creation is enabled
# otherwise this test will fail
publisher = publisher_factory(topic="test.get.cluster.metadata")
cluster_metadata = publisher.get_cluster_metadata()
assert "test.get.cluster.metadata" in cluster_metadata.topics
def test_get_cluster_metadata_failure(publisher_factory):
publisher = publisher_factory(
topic="test.get.cluster.metadata.failure",
bootstrap_servers="5.6.7.8:9092"
)
with pytest.raises(KafkaException):
publisher.get_cluster_metadata()
def test_consumer_config():
test_topic = "test.consumer.config"
bluesky_consumer = BlueskyConsumer(
topics=[test_topic],
bootstrap_servers="1.2.3.4:9092",
group_id="abc",
consumer_config={
"bootstrap.servers": "5.6.7.8:9092",
"auto.offset.reset": "latest",
},
)
assert (
bluesky_consumer._consumer_config["bootstrap.servers"]
== "1.2.3.4:9092,5.6.7.8:9092"
)
def test_bad_consumer_config():
test_topic = "test.bad.consumer.config"
with pytest.raises(ValueError) as excinfo:
BlueskyConsumer(
topics=[test_topic],
bootstrap_servers="1.2.3.4:9092",
group_id="abc",
consumer_config={
"bootstrap.servers": "5.6.7.8:9092",
"auto.offset.reset": "latest",
"group.id": "raise an exception!",
},
)
assert (
"do not specify 'group.id' in consumer_config, use only the 'group_id' argument"
in str(excinfo.value)
)
@pytest.mark.parametrize(
"serializer, deserializer",
[(pickle.dumps, pickle.loads), (msgpack.dumps, msgpack.loads)],
)
def test_kafka_remote_dispatcher(
RE,
hw,
serializer,
deserializer,
publisher_factory,
remote_dispatcher_process_factory,
external_process_document_queue,
):
# COMPONENT 1
# a Kafka broker must be running
# in addition the topic "test.remote.dispatcher" must exist
# or the broker must be configured to create topics on demand (recommended)
# COMPONENT 2
# Run a RemoteDispatcher on a separate process. Pass the documents
# it receives over a Queue to this process so we can count them for
# our test.
test_topic = "test.remote.dispatcher"
with external_process_document_queue(
topics=[test_topic],
deserializer=deserializer,
process_factory=remote_dispatcher_process_factory,
) as document_queue:
# COMPONENT 3
# Set up a RunEngine in this process that will
# send all documents to a bluesky_kafka.Publisher
# and accumulate all documents in the local_documents list
kafka_publisher = publisher_factory(
topic=test_topic, serializer=serializer, flush_on_stop_doc=True
)
RE.subscribe(kafka_publisher)
local_documents = []
RE.subscribe(
lambda local_name, local_doc: local_documents.append(
(local_name, local_doc)
)
)
# test that numpy data is transmitted correctly
md = {
"numpy_data": {"nested": np.array([1, 2, 3])},
"numpy_scalar": np.float64(3),
"numpy_array": np.ones((3, 3)),
}
# documents will be generated by this plan
# and published by the Kafka Publisher
RE(count([hw.det]), md=md)
# retrieve the documents published by the Kafka broker
remote_documents = get_all_documents_from_queue(document_queue=document_queue)
# sanitize_doc normalizes some document data, such as numpy arrays, that are
# problematic for direct comparison of documents by "assert"
sanitized_local_documents = [sanitize_doc(doc) for doc in local_documents]
sanitized_remote_documents = [sanitize_doc(doc) for doc in remote_documents]
assert len(sanitized_remote_documents) == len(sanitized_local_documents)
assert sanitized_remote_documents == sanitized_local_documents
@pytest.mark.parametrize(
"serializer, deserializer",
[(pickle.dumps, pickle.loads), (msgpack.dumps, msgpack.loads)],
)
def test_bluesky_consumer(
RE,
hw,
serializer,
deserializer,
publisher_factory,
consumer_process_factory,
external_process_document_queue,
):
# COMPONENT 1
# a Kafka broker must be running
# in addition the broker must have topic "test.bluesky.consumer"
# or be configured to create topics on demand (recommended)
# COMPONENT 2
# Run a BlueskyConsumer polling loop in a separate process.
# Pass the documents it receives over a Queue to this process
# and compare them against the documents published directly
# by the RunEngine.
test_topic = "test.bluesky.consumer"
with external_process_document_queue(
topics=[test_topic],
deserializer=deserializer,
process_factory=partial(
consumer_process_factory, consumer_factory=BlueskyConsumer
),
) as document_queue:
# COMPONENT 3
# Set up a RunEngine in this process that will
# send all documents to a bluesky_kafka.Publisher
# and accumulate all documents in the local_documents list
kafka_publisher = publisher_factory(
topic=test_topic, serializer=serializer, flush_on_stop_doc=True
)
RE.subscribe(kafka_publisher)
local_documents = []
RE.subscribe(
lambda local_name, local_doc: local_documents.append(
(local_name, local_doc)
)
)
# test that numpy data is transmitted correctly
md = {
"numpy_data": {"nested": np.array([1, 2, 3])},
"numpy_scalar": np.float64(3),
"numpy_array": np.ones((3, 3)),
}
# documents will be generated by this plan
# and published by the Kafka Publisher
RE(count([hw.det]), md=md)
# retrieve the documents published by the Kafka broker
remote_documents = get_all_documents_from_queue(document_queue=document_queue)
# sanitize_doc normalizes some document data, such as numpy arrays, that are
# problematic for direct comparison of documents by "assert"
sanitized_local_documents = [sanitize_doc(doc) for doc in local_documents]
sanitized_remote_documents = [sanitize_doc(doc) for doc in remote_documents]
assert len(sanitized_remote_documents) == len(sanitized_local_documents)
assert sanitized_remote_documents == sanitized_local_documents
| 33.383333
| 92
| 0.668622
| 978
| 8,012
| 5.285276
| 0.206544
| 0.037918
| 0.031341
| 0.004643
| 0.690075
| 0.649836
| 0.633585
| 0.628942
| 0.603018
| 0.596634
| 0
| 0.019279
| 0.249002
| 8,012
| 239
| 93
| 33.523013
| 0.839787
| 0.25337
| 0
| 0.519231
| 0
| 0
| 0.12957
| 0.033698
| 0
| 0
| 0
| 0
| 0.051282
| 1
| 0.044872
| false
| 0
| 0.076923
| 0
| 0.121795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bf5aa849ab9919f36bd06cb32baf1102cd57b0f
| 13,653
|
py
|
Python
|
sunpy/coordinates/frames.py
|
s0nskar/sunpy
|
60ca4792ded4c3938a78da7055cf2c20e0e8ccfd
|
[
"MIT"
] | null | null | null |
sunpy/coordinates/frames.py
|
s0nskar/sunpy
|
60ca4792ded4c3938a78da7055cf2c20e0e8ccfd
|
[
"MIT"
] | null | null | null |
sunpy/coordinates/frames.py
|
s0nskar/sunpy
|
60ca4792ded4c3938a78da7055cf2c20e0e8ccfd
|
[
"MIT"
] | null | null | null |
"""
Common solar physics coordinate systems.
This submodule implements various solar physics coordinate frames for use with
the `astropy.coordinates` module.
"""
from __future__ import absolute_import, division
import numpy as np
from astropy import units as u
from astropy.coordinates.representation import (CartesianRepresentation,
UnitSphericalRepresentation,
SphericalRepresentation)
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
RepresentationMapping)
from astropy.coordinates import FrameAttribute
from sunpy import sun # For Carrington rotation number
from .representation import (SphericalWrap180Representation,
UnitSphericalWrap180Representation)
from .frameattributes import TimeFrameAttributeSunPy
RSUN_METERS = sun.constants.get('radius').si.to(u.m)
DSUN_METERS = sun.constants.get('mean distance').si.to(u.m)
__all__ = ['HeliographicStonyhurst', 'HeliographicCarrington',
'Heliocentric', 'Helioprojective']
class HeliographicStonyhurst(BaseCoordinateFrame):
"""
A coordinate or frame in the Stonyhurst Heliographic
system.
This frame has its origin at the solar centre and the north pole above the
solar north pole, and the zero line on longitude pointing towards the
Earth.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or `None`
A representation object or None to have no data.
lon: `Angle` object.
The longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat: `Angle` object.
The latitude for this object (``lon`` must also be given and
``representation`` must be None).
radius: `astropy.units.Quantity` object.
This quantity holds the radial distance. If not specified, it is, by
default, the radius of the photosphere. Optional.
Examples
--------
>>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km, frame="heliographic_stonyhurst",
dateobs="2010/01/01T00:00:45")
>>> sc
<SkyCoord (HelioGraphicStonyhurst): dateobs=2010-01-01 00:00:45,
lon=1.0 deg, lat=1.0 deg, rad=2.0 km>
>>> sc.frame
<HelioGraphicStonyhurst Coordinate: dateobs=2010-01-01 00:00:45,
lon=1.0 deg, lat=1.0 deg, rad=2.0 km>
>>> sc = SkyCoord(HelioGraphicStonyhurst(-10*u.deg, 2*u.deg))
>>> sc
<SkyCoord (HelioGraphicStonyhurst): dateobs=None, lon=-10.0 deg,
lat=2.0 deg, rad=695508.0 km>
Notes
-----
This frame will always be converted to a 3D frame, with the radius defaulting
to rsun.
"""
name = "heliographic_stonyhurst"
default_representation = SphericalWrap180Representation
_frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')],
'sphericalwrap180': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')]
}
dateobs = TimeFrameAttributeSunPy()
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get('representation', None)
super(HeliographicStonyhurst, self).__init__(*args, **kwargs)
# Make 3D if specified as 2D
# If representation was explicitly passed, do not change the rep.
if not _rep_kwarg:
# The base __init__ will make this a UnitSphericalRepresentation
# This makes it Wrap180 instead
if isinstance(self._data, UnitSphericalRepresentation):
self._data = SphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon,
distance=RSUN_METERS.to(u.km))
self.representation = SphericalWrap180Representation
# Make a Spherical Wrap180 instead
if isinstance(self._data, SphericalRepresentation):
self._data = SphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon,
distance=self._data.distance)
self.representation = SphericalWrap180Representation
class HeliographicCarrington(HeliographicStonyhurst):
"""
A coordinate or frame in the Carrington Heliographic
system.
This frame differs from the Stonyhurst version in the
definition of the longitude, which is defined using
an offset which is a time-dependent scalar value.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or None.
A representation object. If specified, other parameters must
be in keyword form.
lon: `Angle` object.
The longitude for this object (``lat`` must also be given and
``representation`` must be None).
lat: `Angle` object.
The latitude for this object (``lon`` must also be given and
``representation`` must be None).
radius: `astropy.units.Quantity` object, optional, must be keyword.
This quantity holds the radial distance. If not specified, it is, by
default, the solar radius. Optional, must be keyword.
Examples
--------
>>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km, frame="heliographic_carrington",
dateobs="2010/01/01T00:00:30")
>>> sc
<SkyCoord (HelioGraphicCarrington): dateobs=2010-01-01 00:00:30,
lon=1.0 deg, lat=2.0 deg, rad=3.0 km>
>>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km,
dateobs="2010/01/01T00:00:45", frame="heliographic_carrington")
>>> sc
<SkyCoord (HelioGraphicCarrington): dateobs=2010-01-01 00:00:45,
(lon, lat, rad) in (deg, deg, km)
[(1.0, 4.0, 5.0), (2.0, 5.0, 6.0), (3.0, 6.0, 7.0)]>
"""
name = "heliographic_carrington"
default_representation = SphericalWrap180Representation
_frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')],
'sphericalwrap180': [RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')]
}
dateobs = TimeFrameAttributeSunPy()
class Heliocentric(BaseCoordinateFrame):
"""
A coordinate or frame in the Heliocentric system.
This frame may either be specified in Cartesian
or cylindrical representation.
Cylindrical representation replaces (x, y) with
(rho, psi) where rho is the impact parameter and
psi is the position angle in degrees.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or None.
A representation object. If specified, other parameters must
be in keyword form and if x, y and z are specified, it must
be None.
x: `Quantity` object.
X-axis coordinate, optional, must be keyword.
y: `Quantity` object.
Y-axis coordinate, optional, must be keyword.
z: `Quantity` object. Shared by both representations.
Z-axis coordinate, optional, must be keyword.
D0: `Quantity` object.
Represents the distance between the observer and the Sun center.
Defaults to 1AU.
Examples
--------
>>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km),
dateobs="2011/01/05T00:00:50", frame="heliocentric")
>>> sc
<SkyCoord (HelioCentric): dateobs=2011-01-05 00:00:50, D0=149597870.7 km,
x=10.0 km, y=1.0 km, z=2.0 km>
>>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm, frame="heliocentric",
dateobs="2011/01/01T00:00:54")
>>> sc
<SkyCoord (HelioCentric): dateobs=2011-01-01 00:00:54, D0=149597870.7 km,
(x, y, z) in (km, m, cm)
[(1.0, 3.0, 5.0), (2.0, 4.0, 6.0)]>
"""
default_representation = CartesianRepresentation
_frame_specific_representation_info = {
'cylindrical': [RepresentationMapping('phi', 'psi', u.deg)]}
# d = FrameAttribute(default=(1*u.au).to(u.km))
D0 = FrameAttribute(default=(1*u.au).to(u.km))
dateobs = TimeFrameAttributeSunPy()
L0 = FrameAttribute(default=0*u.deg)
B0 = FrameAttribute(default=0*u.deg)
class Helioprojective(BaseCoordinateFrame):
"""
A coordinate or frame in the Helioprojective (Cartesian) system.
This is a projective coordinate system centered around the observer.
It is a full spherical coordinate system with position given as longitude
theta_x and latitude theta_y.
Parameters
----------
representation: `~astropy.coordinates.BaseRepresentation` or None.
A representation object. If specified, other parameters must
be in keyword form.
Tx: `Angle` object.
X-axis coordinate.
Ty: `Angle` object.
Y-axis coordinate.
distance: Z-axis coordinate.
The radial distance from the observer to the coordinate point.
D0: `Quantity` object.
Represents the distance between observer and solar center.
Defaults to 1AU.
Examples
--------
>>> sc = SkyCoord(0*u.deg, 0*u.deg, 5*u.km, dateobs="2010/01/01T00:00:00",
frame="helioprojective")
>>> sc
<SkyCoord (HelioProjective): dateobs=2010-01-01 00:00:00, D0=149597870.7 km
, Tx=0.0 arcsec, Ty=0.0 arcsec, distance=5.0 km>
>>> sc = SkyCoord(0*u.deg, 0*u.deg, dateobs="2010/01/01T00:00:00",
frame="helioprojective")
>>> sc
<SkyCoord (HelioProjective): dateobs=2010-01-01 00:00:00, D0=149597870.7 km
, Tx=0.0 arcsec, Ty=0.0 arcsec, distance=149597870.7 km>
"""
default_representation = SphericalWrap180Representation
_frame_specific_representation_info = {
'spherical': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', u.km)],
'sphericalwrap180': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec),
RepresentationMapping('distance', 'distance', u.km)],
'unitspherical': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)],
'unitsphericalwrap180': [RepresentationMapping('lon', 'Tx', u.arcsec),
RepresentationMapping('lat', 'Ty', u.arcsec)]}
D0 = FrameAttribute(default=(1*u.au).to(u.km))
dateobs = TimeFrameAttributeSunPy()
L0 = FrameAttribute(default=0*u.deg)
B0 = FrameAttribute(default=0*u.deg)
rsun = FrameAttribute(default=RSUN_METERS.to(u.km))
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get('representation', None)
BaseCoordinateFrame.__init__(self, *args, **kwargs)
# Convert from Spherical to SphericalWrap180
# If representation was explicitly passed, do not change the rep.
if not _rep_kwarg:
# The base __init__ will make this a UnitSphericalRepresentation
# This makes it Wrap180 instead
if isinstance(self._data, UnitSphericalRepresentation):
self._data = UnitSphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon)
self.representation = UnitSphericalWrap180Representation
# Make a Spherical Wrap180 instead
elif isinstance(self._data, SphericalRepresentation):
self._data = SphericalWrap180Representation(lat=self._data.lat,
lon=self._data.lon,
distance=self._data.distance)
self.representation = SphericalWrap180Representation
def calculate_distance(self):
"""
This method calculates the third coordinate of the Helioprojective
frame. It assumes that the coordinate point is on the disk of the Sun
at the rsun radius.
If a point in the frame is off limb then NaN will be returned.
Returns
-------
new_frame : `~sunpy.coordinates.frames.HelioProjective`
A new frame instance with all the attributes of the original but
now with a third coordinate.
"""
# Skip if we already are 3D
if isinstance(self._data, SphericalRepresentation):
return self
rep = self.represent_as(UnitSphericalWrap180Representation)
lat, lon = rep.lat, rep.lon
alpha = np.arccos(np.cos(lat) * np.cos(lon)).to(lat.unit)
c = self.D0**2 - self.rsun**2
b = -2 * self.D0.to(u.m) * np.cos(alpha)
d = ((-1*b) - np.sqrt(b**2 - 4*c)) / 2
return self.realize_frame(SphericalWrap180Representation(lon=lon,
lat=lat,
distance=d))
| 41.49848
| 90
| 0.620669
| 1,517
| 13,653
| 5.523401
| 0.175346
| 0.018141
| 0.017066
| 0.006683
| 0.570951
| 0.546008
| 0.508533
| 0.468672
| 0.456379
| 0.444206
| 0
| 0.047786
| 0.270417
| 13,653
| 328
| 91
| 41.625
| 0.793394
| 0.469567
| 0
| 0.509434
| 0
| 0
| 0.090965
| 0.013713
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028302
| false
| 0
| 0.084906
| 0
| 0.367925
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bf7588b6e982ef5c34279f0381a39c74ff2495d
| 4,640
|
py
|
Python
|
python/ray/serve/tests/test_pipeline_dag.py
|
quarkzou/ray
|
49de29969df0c55a5969b8ffbfc7d62459e5024b
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/tests/test_pipeline_dag.py
|
quarkzou/ray
|
49de29969df0c55a5969b8ffbfc7d62459e5024b
|
[
"Apache-2.0"
] | null | null | null |
python/ray/serve/tests/test_pipeline_dag.py
|
quarkzou/ray
|
49de29969df0c55a5969b8ffbfc7d62459e5024b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import os
import sys
import numpy as np
import ray
from ray import serve
from ray.serve.api import _get_deployments_from_node
from ray.serve.handle import PipelineHandle
from ray.serve.pipeline.pipeline_input_node import PipelineInputNode
@serve.deployment
class Adder:
def __init__(self, increment: int):
self.increment = increment
def forward(self, inp: int) -> int:
print(f"Adder got {inp}")
return inp + self.increment
__call__ = forward
@serve.deployment
class Driver:
def __init__(self, dag: PipelineHandle):
self.dag = dag
def __call__(self, inp: int) -> int:
print(f"Driver got {inp}")
return ray.get(self.dag.remote(inp))
@serve.deployment
class Echo:
def __init__(self, s: str):
self._s = s
def __call__(self, *args):
return self._s
@ray.remote
def combine(*args):
return sum(args)
def test_single_node_deploy_success(serve_instance):
m1 = Adder.bind(1)
handle = serve.run(m1)
assert ray.get(handle.remote(41)) == 42
def test_single_node_driver_success(serve_instance):
m1 = Adder.bind(1)
m2 = Adder.bind(2)
with PipelineInputNode() as input_node:
out = m1.forward.bind(input_node)
out = m2.forward.bind(out)
driver = Driver.bind(out)
handle = serve.run(driver)
assert ray.get(handle.remote(39)) == 42
def test_options_and_names(serve_instance):
m1 = Adder.bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.name == "Adder"
m1 = Adder.options(name="Adder2").bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.name == "Adder2"
m1 = Adder.options(num_replicas=2).bind(1)
m1_built = _get_deployments_from_node(m1)[-1]
assert m1_built.num_replicas == 2
@pytest.mark.skip("TODO")
def test_mixing_task(serve_instance):
m1 = Adder.bind(1)
m2 = Adder.bind(2)
with PipelineInputNode() as input_node:
out = combine.bind(m1.forward.bind(input_node), m2.forward.bind(input_node))
driver = Driver.bind(out)
handle = serve.run(driver)
assert ray.get(handle.remote(1)) == 5
@serve.deployment
class TakeHandle:
def __init__(self, handle) -> None:
self.handle = handle
def __call__(self, inp):
return ray.get(self.handle.remote(inp))
def test_passing_handle(serve_instance):
child = Adder.bind(1)
parent = TakeHandle.bind(child)
driver = Driver.bind(parent)
handle = serve.run(driver)
assert ray.get(handle.remote(1)) == 2
def test_passing_handle_in_obj(serve_instance):
@serve.deployment
class Parent:
def __init__(self, d):
self._d = d
async def __call__(self, key):
return await self._d[key].remote()
child1 = Echo.bind("ed")
child2 = Echo.bind("simon")
parent = Parent.bind({"child1": child1, "child2": child2})
handle = serve.run(parent)
assert ray.get(handle.remote("child1")) == "ed"
assert ray.get(handle.remote("child2")) == "simon"
def test_pass_handle_to_multiple(serve_instance):
@serve.deployment
class Child:
def __call__(self, *args):
return os.getpid()
@serve.deployment
class Parent:
def __init__(self, child):
self._child = child
def __call__(self, *args):
return ray.get(self._child.remote())
@serve.deployment
class GrandParent:
def __init__(self, child, parent):
self._child = child
self._parent = parent
def __call__(self, *args):
# Check that the grandparent and parent are talking to the same child.
assert ray.get(self._child.remote()) == ray.get(self._parent.remote())
return "ok"
child = Child.bind()
parent = Parent.bind(child)
grandparent = GrandParent.bind(child, parent)
handle = serve.run(grandparent)
assert ray.get(handle.remote()) == "ok"
def test_non_json_serializable_args(serve_instance):
# Test that we can capture and bind non-json-serializable arguments.
arr1 = np.zeros(100)
arr2 = np.zeros(200)
@serve.deployment
class A:
def __init__(self, arr1):
self.arr1 = arr1
self.arr2 = arr2
def __call__(self, *args):
return self.arr1, self.arr2
handle = serve.run(A.bind(arr1))
ret1, ret2 = ray.get(handle.remote())
assert np.array_equal(ret1, arr1) and np.array_equal(ret2, arr2)
# TODO: check that serve.build raises an exception.
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| 25.217391
| 84
| 0.650647
| 626
| 4,640
| 4.570288
| 0.204473
| 0.027263
| 0.062915
| 0.050332
| 0.349179
| 0.256204
| 0.191541
| 0.165676
| 0.165676
| 0.165676
| 0
| 0.022371
| 0.22931
| 4,640
| 183
| 85
| 25.355191
| 0.777685
| 0.039871
| 0
| 0.263566
| 0
| 0
| 0.023815
| 0
| 0
| 0
| 0
| 0.005464
| 0.093023
| 1
| 0.193798
| false
| 0.023256
| 0.069767
| 0.046512
| 0.418605
| 0.015504
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bf7e9d1ed3871fd0972273d253da43b826c3e35
| 598
|
py
|
Python
|
test/data_producer_kafka.py
|
netgroup/srv6-pm-dockerized
|
770976e9e2da56780ae9bb4048360235d2568627
|
[
"Apache-2.0"
] | null | null | null |
test/data_producer_kafka.py
|
netgroup/srv6-pm-dockerized
|
770976e9e2da56780ae9bb4048360235d2568627
|
[
"Apache-2.0"
] | null | null | null |
test/data_producer_kafka.py
|
netgroup/srv6-pm-dockerized
|
770976e9e2da56780ae9bb4048360235d2568627
|
[
"Apache-2.0"
] | 2
|
2020-07-28T18:12:09.000Z
|
2021-02-22T06:31:19.000Z
|
from kafka import KafkaProducer
from kafka.errors import KafkaError
import json
# produce json messages
producer = KafkaProducer(bootstrap_servers='kafka:9092', security_protocol='PLAINTEXT',
value_serializer=lambda m: json.dumps(m).encode('ascii'))
result = producer.send('ktig', {'measure_id': 1, 'interval': 10, 'timestamp': '',
'color': 'red', 'sender_tx_counter': 50,
'sender_rx_counter': 50, 'reflector_tx_counter': 48,
'reflector_rx_counter': 48})
producer.close()
| 37.375
| 88
| 0.602007
| 62
| 598
| 5.612903
| 0.677419
| 0.051724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034803
| 0.279264
| 598
| 15
| 89
| 39.866667
| 0.772622
| 0.035117
| 0
| 0
| 0
| 0
| 0.238261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bf80a6b7a2e719d044ca3071a20a59ca3623e14
| 248
|
py
|
Python
|
uasyncio.core/test_cb_args.py
|
Carglglz/micropython-lib
|
07102c56aa1087b97ee313cedc1d89fd20452e11
|
[
"PSF-2.0"
] | 126
|
2019-07-19T14:42:41.000Z
|
2022-03-21T22:22:19.000Z
|
uasyncio.core/test_cb_args.py
|
Carglglz/micropython-lib
|
07102c56aa1087b97ee313cedc1d89fd20452e11
|
[
"PSF-2.0"
] | 38
|
2019-08-28T01:46:31.000Z
|
2022-03-17T05:46:51.000Z
|
uasyncio.core/test_cb_args.py
|
Carglglz/micropython-lib
|
07102c56aa1087b97ee313cedc1d89fd20452e11
|
[
"PSF-2.0"
] | 55
|
2019-08-02T09:32:33.000Z
|
2021-12-22T11:25:51.000Z
|
try:
import uasyncio.core as asyncio
except ImportError:
import asyncio
def cb(a, b):
assert a == "test"
assert b == "test2"
loop.stop()
loop = asyncio.get_event_loop()
loop.call_soon(cb, "test", "test2")
loop.run_forever()
print("OK")
| 14.588235
| 35
| 0.637097
| 37
| 248
| 4.162162
| 0.648649
| 0.116883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010204
| 0.209677
| 248
| 16
| 36
| 15.5
| 0.77551
| 0
| 0
| 0
| 0
| 0
| 0.080645
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bfa439c74e0b340dc223e43b06761bdee5d063d
| 1,026
|
py
|
Python
|
cookiecutter_mbam/scan/views.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
cookiecutter_mbam/scan/views.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
cookiecutter_mbam/scan/views.py
|
tiburona/cookiecutter_mbam
|
13788774a4c1426c133b3f689f98d8f0c54de9c6
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Scan views."""
from flask import Blueprint, render_template, flash, redirect, url_for, session
from flask_login import current_user
from .forms import ScanForm
from .service import ScanService
from cookiecutter_mbam.utils import flash_errors
blueprint = Blueprint('scan', __name__, url_prefix='/scans', static_folder='../static')
from flask import current_app
def debug():
assert current_app.debug == False, "Don't panic! You're here by request of debug()"
@blueprint.route('/add', methods=['GET', 'POST'])
def add():
"""Add a scan."""
form = ScanForm()
if form.validate_on_submit():
f = form.scan_file.data
user_id = str(current_user.get_id())
exp_id = str(session['curr_experiment'])
ScanService(user_id, exp_id).upload(f)
flash('You successfully added a new scan.', 'success')
return redirect(url_for('experiment.experiments'))
else:
flash_errors(form)
return render_template('scans/upload.html', scan_form=form)
| 34.2
| 87
| 0.693957
| 139
| 1,026
| 4.920863
| 0.532374
| 0.039474
| 0.04386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001178
| 0.172515
| 1,026
| 30
| 88
| 34.2
| 0.804476
| 0.044834
| 0
| 0
| 0
| 0
| 0.176289
| 0.02268
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bfa8f2b88f8aca9aab6973afb6831c3aa0a0478
| 3,460
|
py
|
Python
|
python-route-endpoint/test_dbstore.py
|
blues/note-samples
|
a50c27ea0b8728668f2c44139b088d5fdf0c7d57
|
[
"Apache-2.0"
] | 1
|
2021-10-04T14:42:43.000Z
|
2021-10-04T14:42:43.000Z
|
python-route-endpoint/test_dbstore.py
|
blues/note-samples
|
a50c27ea0b8728668f2c44139b088d5fdf0c7d57
|
[
"Apache-2.0"
] | 3
|
2021-09-07T17:54:58.000Z
|
2021-11-16T21:40:52.000Z
|
python-route-endpoint/test_dbstore.py
|
blues/note-samples
|
a50c27ea0b8728668f2c44139b088d5fdf0c7d57
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import dbstore
inMemFile = ":memory:"
measurementTable = "measurements"
alertTable = "alerts"
def test_db_store_constructor():
s = dbstore.dbstore(file=inMemFile)
assert s is not None
def test_dbStore_connect():
s = dbstore.dbstore(file=inMemFile)
s.connect()
assert s._connection is not None
def test_dbStore_connect_whenConnectionIsOpen():
s = dbstore.dbstore(file=inMemFile)
s.connect()
c = s._connection
s.connect()
assert s._connection == c
def test_dbStore_close_whenConnectionIsOpen():
s = dbstore.dbstore(file=inMemFile)
s.connect()
assert s._connection is not None
s.close()
assert s._connection is None
def test_dbStore_close_whenConnectionIsClosed():
s = dbstore.dbstore(file=inMemFile)
assert s._connection is None
s.close()
assert s._connection is None
def test_dbStore_createTables():
s = dbstore.dbstore(file="inMemFile")
s.connect()
s.createTables()
for n in [measurementTable, alertTable]:
s._cursor.execute(f"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{n}';")
isTable = s._cursor.fetchone()[0]==1
assert isTable
timestampTestData = "2021-04-29T23:25:44Z"
def generateConnectedInMemDb() -> dbstore.dbstore:
s = dbstore.dbstore(file=inMemFile)
s.connect()
s.createTables()
return s
def test_addMeasurement():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
measurementType = "sensor1"
timestamp = timestampTestData
value = 3.14
units = "units1"
s.addMeasurement(deviceId, timestamp, measurementType, value, units)
c = s._cursor.execute(f'SELECT * from {measurementTable}')
row = c.fetchone()
assert row[0] == deviceId
assert row[1] == timestamp
assert row[2] == measurementType
assert row[3] == value
assert row[4] == units
def test_addAlert():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
alertType = "overfill"
timestamp = timestampTestData
message = "message 1"
s.addAlert(deviceId, timestamp, alertType, message)
c = s._cursor.execute(f'SELECT * from {alertTable}')
row = c.fetchone()
assert row[0] == deviceId
assert row[1] == timestamp
assert row[2] == alertType
assert row[3] == message
def test_getAlerts():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
alertType = "overfill"
timestamp = timestampTestData
message = "message 1"
s.addAlert(deviceId, timestamp, alertType, message)
s.addAlert(deviceId, timestamp, alertType, message)
a = s.getAlerts()
e = [{"deviceId":deviceId,"timestamp":timestamp,"type":alertType,"message":message},
{"deviceId":deviceId,"timestamp":timestamp,"type":alertType,"message":message},]
assert a == e
def test_getAlerts_noAlertsStored():
s = generateConnectedInMemDb()
a = s.getAlerts()
assert a == []
def test_getAlerts_withLimit():
s = generateConnectedInMemDb()
deviceId = "dev:xxxxxxxxxxxx"
alertType = "overfill"
timestamp = timestampTestData
message = "message 1"
s.addAlert(deviceId, timestamp, alertType, message)
s.addAlert(deviceId, timestamp, alertType, message)
a = s.getAlerts(limit=1)
e = [{"deviceId":deviceId,"timestamp":timestamp,"type":alertType,"message":message}]
assert a == e
| 23.69863
| 102
| 0.67052
| 380
| 3,460
| 6.013158
| 0.210526
| 0.033698
| 0.045952
| 0.058206
| 0.670022
| 0.603501
| 0.603501
| 0.550109
| 0.516849
| 0.44639
| 0
| 0.012454
| 0.210983
| 3,460
| 146
| 103
| 23.69863
| 0.824542
| 0
| 0
| 0.561224
| 0
| 0
| 0.114707
| 0
| 0
| 0
| 0
| 0
| 0.204082
| 1
| 0.122449
| false
| 0
| 0.020408
| 0
| 0.153061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bfc984d3b1bbcef2b5af5e9508ff3a2a9c35186
| 604
|
py
|
Python
|
basics/linear.py
|
zhijiahu/dltk
|
bf0484e22d3d0116b1ac60ae78f688a36c5a0636
|
[
"MIT"
] | null | null | null |
basics/linear.py
|
zhijiahu/dltk
|
bf0484e22d3d0116b1ac60ae78f688a36c5a0636
|
[
"MIT"
] | null | null | null |
basics/linear.py
|
zhijiahu/dltk
|
bf0484e22d3d0116b1ac60ae78f688a36c5a0636
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
labels = ['dog', 'cat', 'panda']
np.random.seed(1)
# Simulate model already trained
W = np.random.randn(3, 3072)
b = np.random.randn(3)
orig = cv2.imread('beagle.png')
image = cv2.resize(orig, (32, 32)).flatten()
scores = W.dot(image) + b
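# Shape check: W is (3, 3072) and the flattened 32x32x3 image is (3072,),
# so W.dot(image) + b yields one score per label, shape (3,).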
for (label, score) in zip(labels, scores):
print('[INFO] {}: {:2}'.format(label, score))
cv2.putText(orig,
'Label: {}'.format(labels[np.argmax(scores)]),
(10,30),
cv2.FONT_HERSHEY_SIMPLEX,
0.9,
(0, 255, 0),
2)
cv2.imshow('Image', orig)
cv2.waitKey(0)
| 20.133333
| 58
| 0.574503
| 87
| 604
| 3.965517
| 0.597701
| 0.069565
| 0.075362
| 0.081159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069565
| 0.238411
| 604
| 29
| 59
| 20.827586
| 0.680435
| 0.049669
| 0
| 0
| 0
| 0
| 0.087566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bfd515b8c9ab45a349fc3b66ded01bb3b315143
| 2,759
|
py
|
Python
|
sevivi/synchronizer/synchronizer.py
|
edgarriba/sevivi
|
52c8bef206e531c797221a08037306c0c5b0ca59
|
[
"MIT"
] | null | null | null |
sevivi/synchronizer/synchronizer.py
|
edgarriba/sevivi
|
52c8bef206e531c797221a08037306c0c5b0ca59
|
[
"MIT"
] | 9
|
2021-09-09T07:40:21.000Z
|
2022-01-13T07:03:59.000Z
|
sevivi/synchronizer/synchronizer.py
|
edgarriba/sevivi
|
52c8bef206e531c797221a08037306c0c5b0ca59
|
[
"MIT"
] | 1
|
2022-01-26T09:51:29.000Z
|
2022-01-26T09:51:29.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .signal_processing import (
resample_data,
normalize_signal,
calculate_magnitude,
calculate_offset_in_seconds_using_cross_correlation,
calculate_sampling_frequency_from_timestamps,
)
def get_synchronization_offset(
video_sync_df: pd.DataFrame,
sensor_sync_df: pd.DataFrame,
use_gradient: bool,
show_plots: bool = False,
) -> pd.Timedelta:
"""
Get the temporal offset between the two given sensor dataframes.
:param video_sync_df: the synchronization information from the video
:param sensor_sync_df: the synchronization information from the sensor
:param use_gradient: if true, the second derivation of the video synchronization data will be used. if false,
the raw data will be used.
:param show_plots: can enable debugging plots
:return: a pd.Timedelta object that specifies how much the sensor_sync_df needs to be moved in time to align it with
the video_sync_df
"""
video_sf = calculate_sampling_frequency_from_timestamps(video_sync_df.index)
sensor_sf = calculate_sampling_frequency_from_timestamps(sensor_sync_df.index)
if use_gradient:
video_acceleration = np.gradient(
np.gradient(video_sync_df.to_numpy(), axis=0), axis=0
)
else:
video_acceleration = video_sync_df.to_numpy()
video_acceleration = resample_data(
video_acceleration,
current_sampling_rate=video_sf,
new_sampling_rate=sensor_sf,
)
video_acceleration = normalize_signal(video_acceleration)
video_acceleration = calculate_magnitude(video_acceleration)
sensor_acceleration = normalize_signal(sensor_sync_df.to_numpy())
sensor_acceleration = calculate_magnitude(sensor_acceleration)
if show_plots:
plt.close()
plt.figure(1)
plt.plot(video_acceleration, label="Kinect")
plt.plot(sensor_acceleration, label="IMU")
plt.xlabel("Time (s)")
plt.ylabel("Acceleration Magnitude (normalized)")
plt.legend()
plt.show()
shift = calculate_offset_in_seconds_using_cross_correlation(
ref_signal=video_acceleration,
target_signal=sensor_acceleration,
sampling_frequency=sensor_sf,
)
if show_plots:
plt.close()
plt.figure(1)
plt.plot(video_acceleration, label="Kinect")
plt.plot(
np.arange(len(sensor_acceleration)) + (sensor_sf * shift),
sensor_acceleration,
label="IMU",
)
plt.xlabel("Time (s)")
plt.ylabel("Acceleration (normalized)")
plt.legend()
plt.show()
return pd.Timedelta(seconds=shift)
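A toy-sized usage sketch (not from the repository): the dataframes below are hypothetical accelerometer-style frames indexed by timestamps; only the timestamped index and numeric columns matter to get_synchronization_offset, and the column names and sampling rates here are invented.

# Hypothetical input frames; real data would have many more samples.
video_df = pd.DataFrame(
    {"x": [0.0, 0.1, 0.4, 0.2], "y": [0.0, 0.0, 0.3, 0.1]},
    index=pd.date_range("2022-01-01", periods=4, freq="33ms"),
)
sensor_df = pd.DataFrame(
    {"x": [0.0, 0.2, 0.3, 0.1], "y": [0.0, 0.1, 0.2, 0.0]},
    index=pd.date_range("2022-01-01", periods=4, freq="10ms"),
)
offset = get_synchronization_offset(video_df, sensor_df, use_gradient=True)
sensor_df.index = sensor_df.index + offset  # shift the sensor stream into video time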
| 33.240964
| 120
| 0.696629
| 335
| 2,759
| 5.444776
| 0.301493
| 0.036184
| 0.036184
| 0.049342
| 0.35636
| 0.286184
| 0.240132
| 0.144737
| 0.144737
| 0.144737
| 0
| 0.001881
| 0.229069
| 2,759
| 82
| 121
| 33.646341
| 0.855665
| 0.200435
| 0
| 0.229508
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016393
| false
| 0
| 0.065574
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bfd607f605b753ac1980b586075777909511585
| 244
|
py
|
Python
|
bob.py
|
williamstern/Intro-to-CS-MIT-Course
|
0f6129fa6bd47767cb57507279d49b27501a160f
|
[
"MIT"
] | null | null | null |
bob.py
|
williamstern/Intro-to-CS-MIT-Course
|
0f6129fa6bd47767cb57507279d49b27501a160f
|
[
"MIT"
] | null | null | null |
bob.py
|
williamstern/Intro-to-CS-MIT-Course
|
0f6129fa6bd47767cb57507279d49b27501a160f
|
[
"MIT"
] | null | null | null |
s = 'vpoboooboboobooboboo'
counter = 0
# Slide a three-character window across the string; overlapping matches count.
for start in range(len(s) - 2):
    if s[start:start + 3] == 'bob':
        counter += 1
print('Number of times bob occurs is: ', counter)
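For comparison, a regex sketch that counts the same overlapping matches; re.findall with a plain 'bob' pattern would miss occurrences that share characters, so a zero-width lookahead is used.

import re

# The lookahead matches at every position where 'bob' starts,
# so overlapping occurrences are all counted.
print(len(re.findall(r'(?=bob)', s)))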
| 10.166667
| 49
| 0.565574
| 39
| 244
| 3.512821
| 0.589744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047059
| 0.303279
| 244
| 23
| 50
| 10.608696
| 0.758824
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bfd9f299f8a3e49d68acee30f35331e05c04631
| 5,469
|
py
|
Python
|
tests/main.py
|
bastienleonard/pysfml-cython
|
c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4
|
[
"Zlib",
"BSD-2-Clause"
] | 14
|
2015-09-14T18:04:27.000Z
|
2021-02-19T16:51:57.000Z
|
tests/main.py
|
bastienleonard/pysfml-cython
|
c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4
|
[
"Zlib",
"BSD-2-Clause"
] | 3
|
2015-12-14T17:07:45.000Z
|
2021-10-02T05:55:11.000Z
|
tests/main.py
|
bastienleonard/pysfml-cython
|
c71194988ba90678cbc4c9e6fd3e03f53ac4c2e4
|
[
"Zlib",
"BSD-2-Clause"
] | 3
|
2015-04-12T16:57:02.000Z
|
2021-02-20T17:15:51.000Z
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
import random
import unittest
import sfml as sf
class TestColor(unittest.TestCase):
def random_color(self):
return sf.Color(random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
def test_eq(self):
equal = [(sf.Color(i, i, i, i), sf.Color(i, i, i, i))
for i in range(256)]
for c1, c2 in equal:
self.assertEqual(c1, c2)
def test_neq(self):
not_equal = [(sf.Color(0, 0, 0, 1), sf.Color(0, 1, 0, 0)),
(sf.Color(255, 255, 255, 255),
sf.Color(254, 255, 255, 255))]
for c1, c2 in not_equal:
self.assertNotEqual(c1, c2)
def test_copy(self):
c1 = self.random_color()
c2 = c1.copy()
self.assertEqual(c1, c2)
class TestIntRect(unittest.TestCase):
def random_rect(self):
return sf.IntRect(random.randint(0, 100),
random.randint(0, 100),
random.randint(0, 100),
random.randint(0, 100))
def test_eq(self):
def r():
return random.randint(0, 100)
equal = [(sf.IntRect(l, t, w, h), sf.IntRect(l, t, w, h))
for l, t, w, h in
[(r(), r(), r(), r()) for i in range(100)]]
for r1, r2 in equal:
self.assertEqual(r1, r2)
def test_neq(self):
not_equal = [(sf.IntRect(0, 0, 0, 0), sf.IntRect(0, 0, 0, 10)),
(sf.IntRect(0, 0, 0, 0), sf.IntRect(0, 0, 10, 0)),
(sf.IntRect(0, 0, 0, 0), sf.IntRect(0, 10, 0, 0)),
(sf.IntRect(0, 0, 0, 0), sf.IntRect(10, 0, 0, 0))]
for r1, r2 in not_equal:
            self.assertNotEqual(r1, r2)
def test_copy(self):
r1 = self.random_rect()
r2 = r1.copy()
self.assertEqual(r1, r2)
class TestFloatRect(unittest.TestCase):
def random_rect(self):
return sf.FloatRect(random.triangular(0.0, 100.0),
random.triangular(0.0, 100.0),
random.triangular(0.0, 100.0),
random.triangular(0.0, 100.0))
def test_eq(self):
def r():
return random.triangular(0.0, 100.0)
equal = [(sf.FloatRect(l, t, w, h), sf.FloatRect(l, t, w, h))
for l, t, w, h in
[(r(), r(), r(), r()) for i in range(100)]]
for r1, r2 in equal:
self.assertEqual(r1, r2)
def test_neq(self):
not_equal = [(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(0, 0, 0, 10)),
(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(0, 0, 10, 0)),
(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(0, 10, 0, 0)),
(sf.FloatRect(0, 0, 0, 0), sf.FloatRect(10, 0, 0, 0))]
for r1, r2 in not_equal:
            self.assertNotEqual(r1, r2)
def test_copy(self):
r1 = self.random_rect()
r2 = r1.copy()
self.assertEqual(r1, r2)
class TestTime(unittest.TestCase):
def random_time(self):
return sf.Time(microseconds=random.randint(0, 1000000))
def test_eq(self):
equal = [(sf.Time(microseconds=x), sf.Time(microseconds=x))
for x in
[random.randint(0, 1000000) for n in range(10)]]
for t1, t2 in equal:
self.assertEqual(t1, t2)
def test_add(self):
t1 = self.random_time()
t2 = self.random_time()
self.assertEqual(
t1 + t2,
sf.Time(microseconds=t1.as_microseconds() + t2.as_microseconds()))
def test_sub(self):
t1 = self.random_time()
t2 = self.random_time()
self.assertEqual(
t1 - t2,
sf.Time(microseconds=t1.as_microseconds() - t2.as_microseconds()))
def test_mul(self):
t = self.random_time()
i = random.randint(1, 1000)
self.assertEqual(t * i,
sf.Time(microseconds=t.as_microseconds() * i))
f = random.triangular(0.0, 100.0)
self.assertEqual(t * f,
sf.Time(seconds=t.as_seconds() * f))
def test_div(self):
t = self.random_time()
i = random.randint(1, 1000)
self.assertEqual(t / i,
sf.Time(microseconds=t.as_microseconds() / i))
f = random.triangular(0.0, 100.0)
self.assertEqual(t / f,
sf.Time(seconds=t.as_seconds() / f))
def test_copy(self):
t1 = self.random_time()
t2 = t1.copy()
self.assertEqual(t1, t2)
class TestTransform(unittest.TestCase):
def random_transform(self):
return sf.Transform(*[random.triangular(0.0, 5.0) for i in range(9)])
def test_init(self):
self.assertEqual(sf.Transform().matrix, sf.Transform.IDENTITY.matrix)
self.assertRaises(TypeError, sf.Transform, *range(10))
def test_copy(self):
for i in range(10):
t1 = self.random_transform()
t2 = t1.copy()
self.assertEqual(t1.matrix, t2.matrix)
def test_imul(self):
t1 = self.random_transform()
t2 = self.random_transform()
t3 = t1.copy()
t3 *= t2
self.assertEqual((t1 * t2).matrix, t3.matrix)
if __name__ == '__main__':
unittest.main()
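Since unittest.main() forwards command-line arguments, individual suites from the file above can also be selected programmatically; a small sketch (valid in the same module):

# Programmatic alternative to the command line: load one case and run it.
suite = unittest.TestLoader().loadTestsFromTestCase(TestColor)
unittest.TextTestRunner(verbosity=2).run(suite)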
| 30.724719
| 78
| 0.513257
| 734
| 5,469
| 3.745232
| 0.114441
| 0.034194
| 0.022917
| 0.011641
| 0.654056
| 0.623499
| 0.552928
| 0.542743
| 0.491815
| 0.454711
| 0
| 0.083565
| 0.343573
| 5,469
| 177
| 79
| 30.898305
| 0.682173
| 0.008045
| 0
| 0.455224
| 0
| 0
| 0.001475
| 0
| 0
| 0
| 0
| 0
| 0.156716
| 1
| 0.186567
| false
| 0
| 0.022388
| 0.052239
| 0.298507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8bfef33258b56cdbd64d66536a38eaa752a6a523
| 12,840
|
py
|
Python
|
textgen/augment/word_level_augment.py
|
shibing624/textgen
|
0a9d55f1f61d5217b8e06f1f23904e49afa84370
|
[
"Apache-2.0"
] | 31
|
2021-06-29T14:31:35.000Z
|
2022-03-25T00:36:44.000Z
|
textgen/augment/word_level_augment.py
|
shibing624/text-generation
|
0a9d55f1f61d5217b8e06f1f23904e49afa84370
|
[
"Apache-2.0"
] | 1
|
2021-11-09T21:30:16.000Z
|
2022-03-02T10:21:04.000Z
|
textgen/augment/word_level_augment.py
|
shibing624/text-generation
|
0a9d55f1f61d5217b8e06f1f23904e49afa84370
|
[
"Apache-2.0"
] | 5
|
2021-06-21T03:13:39.000Z
|
2022-02-07T06:53:22.000Z
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description: Word level augmentations including Replace words with uniform
random words or TF-IDF based word replacement.
"""
import collections
import copy
import math
import numpy as np
from textgen.utils.log import logger
min_token_num = 3
class EfficientRandomGen(object):
"""A base class that generate multiple random numbers at the same time."""
def reset_random_prob(self):
"""Generate many random numbers at the same time and cache them."""
cache_len = 100000
self.random_prob_cache = np.random.random(size=(cache_len,))
self.random_prob_ptr = cache_len - 1
def get_random_prob(self):
"""Get a random number."""
value = self.random_prob_cache[self.random_prob_ptr]
self.random_prob_ptr -= 1
if self.random_prob_ptr == -1:
self.reset_random_prob()
return value
def get_random_token(self):
"""Get a Random token."""
token = self.token_list[self.token_ptr]
self.token_ptr -= 1
if self.token_ptr == -1:
self.reset_token_list()
return token
    def get_insert_token(self, word):
        """Get an insert token: the word duplicated in place."""
        return ''.join([word] * 2)
    def get_delete_token(self):
        """Get a delete token: an empty string that removes the word."""
        return ''
class RandomReplace(EfficientRandomGen):
"""Uniformly replace word with random words in the vocab."""
def __init__(self, token_prob, vocab):
self.token_prob = token_prob
self.vocab_size = len(vocab)
self.vocab = vocab
self.reset_token_list()
self.reset_random_prob()
def __call__(self, tokens):
return self.replace_tokens(tokens)
def replace_tokens(self, tokens):
"""
Replace tokens randomly.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
details = []
idx = 0
if len(tokens) >= min_token_num:
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < self.token_prob:
tokens[i] = self.get_random_token()
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
"""Generate many random tokens at the same time and cache them."""
self.token_list = list(self.vocab.keys())
self.token_ptr = len(self.token_list) - 1
np.random.shuffle(self.token_list)
class InsertReplace(EfficientRandomGen):
"""Uniformly replace word with insert repeat words in the vocab."""
def __init__(self, token_prob, vocab):
self.token_prob = token_prob
self.vocab_size = len(vocab)
self.vocab = vocab
self.reset_token_list()
self.reset_random_prob()
def __call__(self, tokens):
return self.replace_tokens(tokens)
def replace_tokens(self, tokens):
"""
Replace tokens with insert data.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
details = []
idx = 0
if len(tokens) >= min_token_num:
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < self.token_prob:
tokens[i] = self.get_insert_token(tokens[i])
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
"""Generate many random tokens at the same time and cache them."""
self.token_list = list(self.vocab.keys())
self.token_ptr = len(self.token_list) - 1
np.random.shuffle(self.token_list)
class DeleteReplace(EfficientRandomGen):
"""Uniformly replace word with delete words in the vocab."""
def __init__(self, token_prob, vocab):
self.token_prob = token_prob
self.vocab_size = len(vocab)
self.vocab = vocab
self.reset_token_list()
self.reset_random_prob()
def __call__(self, tokens):
return self.replace_tokens(tokens)
def replace_tokens(self, tokens):
"""
        Replace tokens with deletions.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
details = []
idx = 0
if len(tokens) >= min_token_num:
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < self.token_prob:
tokens[i] = self.get_delete_token()
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
"""Generate many random tokens at the same time and cache them."""
self.token_list = list(self.vocab.keys())
self.token_ptr = len(self.token_list) - 1
np.random.shuffle(self.token_list)
def get_data_idf(tokenized_sentence_list):
"""Compute the IDF score for each word. Then compute the TF-IDF score."""
word_doc_freq = collections.defaultdict(int)
# Compute IDF
for cur_sent in tokenized_sentence_list:
cur_word_dict = {}
for word in cur_sent:
cur_word_dict[word] = 1
for word in cur_word_dict:
word_doc_freq[word] += 1
idf = {}
for word in word_doc_freq:
idf[word] = math.log(len(tokenized_sentence_list) * 1. / word_doc_freq[word])
# Compute TF-IDF
tf_idf = {}
for cur_sent in tokenized_sentence_list:
for word in cur_sent:
if word not in tf_idf:
tf_idf[word] = 0
tf_idf[word] += 1. / len(cur_sent) * idf[word]
return {
"idf": idf,
"tf_idf": tf_idf,
}
class MixEfficientRandomGen(EfficientRandomGen):
"""Add word2vec to Random Gen"""
def __init__(self,
w2v,
similar_prob=0.7,
random_prob=0.1,
delete_prob=0.1,
insert_prob=0.1):
super(MixEfficientRandomGen, self).__init__()
self.word2vec_model = w2v
# Insert replace prob
self.insert_prob = insert_prob
# Delete replace prob
self.delete_prob = delete_prob
# Random replace prob
self.random_prob = random_prob
# Similar replace prob
self.similar_prob = similar_prob
def get_similar_token(self, word):
"""Get a Similar replace token."""
if word in self.word2vec_model.key_to_index:
target_candidate = self.word2vec_model.similar_by_word(word, topn=3)
target_words = [w for w, p in target_candidate if w]
if len(target_words) > 1:
word = np.random.choice(target_words, size=1).tolist()[0]
return word
return word
def get_replace_token(self, word):
"""Get a replace token."""
r_prob = np.random.rand()
# Similar choose prob
if r_prob < self.similar_prob:
word = self.get_similar_token(word)
elif r_prob - self.similar_prob < self.random_prob:
word = self.get_random_token()
elif r_prob - self.similar_prob - self.random_prob < self.delete_prob:
word = self.get_delete_token()
else:
word = self.get_insert_token(word)
return word
class TfIdfWordReplace(MixEfficientRandomGen):
"""TF-IDF Based Word Replacement."""
def __init__(self,
w2v,
token_prob,
data_idf,
similar_prob=0.7,
random_prob=0.1,
delete_prob=0.1,
insert_prob=0.1):
super(TfIdfWordReplace, self).__init__(w2v,
similar_prob=similar_prob,
random_prob=random_prob,
delete_prob=delete_prob,
insert_prob=insert_prob)
self.token_prob = token_prob
self.idf = data_idf["idf"]
self.tf_idf = data_idf["tf_idf"]
if not self.idf:
logger.error('sentence_list must set in tfidf word replace.')
raise ValueError("idf is None.")
data_idf = copy.deepcopy(data_idf)
tf_idf_items = data_idf["tf_idf"].items()
tf_idf_items = sorted(tf_idf_items, key=lambda item: -item[1])
self.tf_idf_keys = []
self.tf_idf_values = []
for key, value in tf_idf_items:
self.tf_idf_keys += [key]
self.tf_idf_values += [value]
self.normalized_tf_idf = np.array(self.tf_idf_values)
self.normalized_tf_idf = max(self.normalized_tf_idf) - self.normalized_tf_idf
self.normalized_tf_idf = self.normalized_tf_idf / self.normalized_tf_idf.sum()
self.reset_token_list()
self.reset_random_prob()
def get_replace_prob(self, all_words):
"""Compute the probability of replacing tokens in a sentence."""
cur_tf_idf = collections.defaultdict(int)
for word in all_words:
cur_tf_idf[word] += 1. / len(all_words) * self.idf[word]
replace_prob = []
for word in all_words:
replace_prob += [cur_tf_idf[word]]
replace_prob = np.array(replace_prob)
replace_prob = np.max(replace_prob) - replace_prob
if replace_prob.sum() != 0.0:
replace_prob = replace_prob / replace_prob.sum() * self.token_prob * len(all_words)
return replace_prob
def __call__(self, tokens):
"""
Replace tokens with tfidf data.
:param tokens: list
:return: tokens, details
tokens, list
details, list eg: [(old_token, new_token, start_idx, end_idx), ...]
"""
new_tokens = []
details = []
if len(tokens) >= min_token_num:
replace_prob = self.get_replace_prob(tokens)
new_tokens, details = self.replace_tokens(tokens, replace_prob[:len(tokens)])
return new_tokens, details
def replace_tokens(self, tokens, replace_prob):
"""Replace tokens with tfidf similar word"""
details = []
idx = 0
for i in range(len(tokens)):
old_token = tokens[i]
if self.get_random_prob() < replace_prob[i]:
# Use Tfidf find similar token
tokens[i] = self.get_similar_token(tokens[i])
details.append((old_token, tokens[i], idx, idx + len(tokens[i])))
idx += len(tokens[i])
return tokens, details
def reset_token_list(self):
cache_len = len(self.tf_idf_keys)
token_list_idx = np.random.choice(
cache_len, (cache_len,), p=self.normalized_tf_idf)
self.token_list = []
for idx in token_list_idx:
self.token_list += [self.tf_idf_keys[idx]]
self.token_ptr = len(self.token_list) - 1
logger.debug("sampled token list: {}".format(self.token_list))
class MixWordReplace(TfIdfWordReplace):
"""Multi Method Based Word Replacement."""
def __init__(self,
w2v,
token_prob,
data_idf,
similar_prob=0.7,
random_prob=0.1,
delete_prob=0.1,
insert_prob=0.1):
super(MixWordReplace, self).__init__(w2v,
token_prob,
data_idf,
similar_prob=similar_prob,
random_prob=random_prob,
delete_prob=delete_prob,
insert_prob=insert_prob)
def replace_tokens(self, word_list, replace_prob):
"""Replace tokens with mix method."""
details = []
idx = 0
for i in range(len(word_list)):
old_token = word_list[i]
if self.get_random_prob() < replace_prob[i]:
word_list[i] = self.get_replace_token(word_list[i])
details.append((old_token, word_list[i], idx, idx + len(word_list[i])))
idx += len(word_list[i])
return word_list, details
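A toy usage sketch (the vocabulary and counts are invented): RandomReplace only rewrites inputs of at least min_token_num tokens, and returns both the new token list and the replacement details.

# Hypothetical vocabulary; any dict whose keys are tokens works here.
vocab = {'the': 10, 'cat': 4, 'sat': 3, 'mat': 2, 'dog': 5}
augmenter = RandomReplace(token_prob=0.3, vocab=vocab)
tokens, details = augmenter(['the', 'cat', 'sat', 'on', 'the', 'mat'])
print(tokens)   # some tokens replaced with random vocabulary entries
print(details)  # [(old_token, new_token, start_idx, end_idx), ...]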
| 35.469613
| 95
| 0.576947
| 1,590
| 12,840
| 4.395597
| 0.108805
| 0.025039
| 0.026041
| 0.020604
| 0.560166
| 0.482616
| 0.465446
| 0.453141
| 0.422664
| 0.399771
| 0
| 0.008406
| 0.323676
| 12,840
| 361
| 96
| 35.567867
| 0.796407
| 0.14891
| 0
| 0.514056
| 0
| 0
| 0.009731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11245
| false
| 0
| 0.02008
| 0.012048
| 0.232932
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e301076532db001f5790d94584e7f5e4d2165387
| 1,198
|
py
|
Python
|
ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py
|
AmberCrafter/docker-compose_libRadtran
|
0182f991db6a13e0cacb3bf9f43809e6850593e4
|
[
"MIT"
] | null | null | null |
ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py
|
AmberCrafter/docker-compose_libRadtran
|
0182f991db6a13e0cacb3bf9f43809e6850593e4
|
[
"MIT"
] | null | null | null |
ubuntu20/projects/libRadtran-2.0.4/examples/GUI/spectrum_GOME/spectrum_GOME_plot.py
|
AmberCrafter/docker-compose_libRadtran
|
0182f991db6a13e0cacb3bf9f43809e6850593e4
|
[
"MIT"
] | null | null | null |
from matplotlib import use
use('WXAgg')
import pylab as plt
import numpy as np
plt.figure(figsize=(8,5))
ax = plt.subplot(111)
fil = './spectrum_GOME.out'
data = np.loadtxt(fil)
y = data[:,1]
x = data[:,0]
pl_list = []
pl, = ax.plot(x,y,'r')
pl_list.append(pl)
y = 10*data[:,3]
pl, = ax.plot(x,y,'b')
pl_list.append(pl)
#plt.xlim([425,450])
#plt.ylim([0,2000])
plt.ylabel(r"Radiation (photons/(s cm$^2$ nm))", fontsize = 12)
plt.xlabel(r"Wavelength (nm)", fontsize = 12)
from matplotlib.legend import Legend
l0 = Legend(ax, pl_list[0:1], ('Solar irradiance',), loc=(0.1,0.85))
#ltext = l0.get_texts() # all the text.Text instance in the legend
#plt.setp(ltext, fontsize='small', linespacing=0) # the legend text fontsize
l0.draw_frame(False) # don't draw the legend frame
ax.add_artist(l0)
l0 = Legend(ax, pl_list[1:2], ('Earth shine (multiplied by 10)',), loc=(0.1,0.75))
#ltext = l0.get_texts() # all the text.Text instance in the legend
#plt.setp(ltext, fontsize='small', linespacing=0) # the legend text fontsize
l0.draw_frame(False) # don't draw the legend frame
ax.add_artist(l0)
#plt.show()
plt.savefig('spectrum_GOME.png')
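The two hand-built Legend instances above predate a simpler idiom; with current matplotlib the same two boxes can be drawn via ax.legend plus ax.add_artist. A sketch, assuming pl_list as built above:

# Two independent legend boxes without constructing Legend directly.
leg1 = ax.legend([pl_list[0]], ['Solar irradiance'], loc=(0.1, 0.85), frameon=False)
ax.add_artist(leg1)  # keep the first box when the second legend is created
ax.legend([pl_list[1]], ['Earth shine (multiplied by 10)'], loc=(0.1, 0.75), frameon=False)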
| 26.622222
| 83
| 0.656093
| 206
| 1,198
| 3.752427
| 0.402913
| 0.069858
| 0.020699
| 0.023286
| 0.47348
| 0.40621
| 0.40621
| 0.40621
| 0.40621
| 0.40621
| 0
| 0.052156
| 0.16778
| 1,198
| 44
| 84
| 27.227273
| 0.72317
| 0.324708
| 0
| 0.230769
| 0
| 0
| 0.172111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e302119a1e26db2aa7e3d9148ce46b0ec243f446
| 24,156
|
py
|
Python
|
condensation-forum/application.py
|
BitFracture/condensation
|
a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f
|
[
"BSD-2-Clause"
] | null | null | null |
condensation-forum/application.py
|
BitFracture/condensation
|
a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f
|
[
"BSD-2-Clause"
] | 59
|
2018-03-02T03:08:22.000Z
|
2018-03-11T01:43:02.000Z
|
condensation-forum/application.py
|
BitFracture/condensation
|
a68a9bbae7a7d35e1542242a4f1588ce3abf9d3f
|
[
"BSD-2-Clause"
] | null | null | null |
"""
An AWS Python3+Flask web app.
"""
from flask import Flask, redirect, url_for, request, session, flash, get_flashed_messages, render_template, escape, abort
from flask_oauthlib.client import OAuth
import boto3,botocore
import jinja2
from boto3.dynamodb.conditions import Key, Attr
import urllib.request
import json
import cgi
import time
import random
import sys
from configLoader import ConfigLoader
from googleOAuthManager import GoogleOAuthManager
from data.session import SessionManager
from data import query, schema
from forms import CreateThreadForm, CreateCommentForm
import inspect
from werkzeug.utils import secure_filename
import uuid
import os
###############################################################################
#FLASK CONFIG
###############################################################################
# This is the EB application, calling directly into Flask
application = Flask(__name__)
# Loads config from file or environment variable
config = ConfigLoader("config.local.json")
# Enable encrypted session, required for OAuth to stick
application.secret_key = config.get("sessionSecret")
#used for form validation
application.config["SECRET_KEY"]=config.get("sessionSecret")
# Set up service handles
botoSession = boto3.Session(
aws_access_key_id = config.get("accessKey"),
aws_secret_access_key = config.get("secretKey"),
aws_session_token=None,
region_name = config.get("region"),
botocore_session=None,
profile_name=None)
dynamodb = botoSession.resource('dynamodb')
s3 = botoSession.resource('s3')
authCacheTable = dynamodb.Table('person-attribute-table')
# Example: bucket = s3.Bucket('elasticbeanstalk-us-west-2-3453535353')
# OAuth setup
authManager = GoogleOAuthManager(
flaskApp = application,
clientId = config.get("oauthClientId"),
clientSecret = config.get("oauthClientSecret"))
#This is the Upload requirement section
bucket = s3.Bucket('condensation-forum')
bucket_name = 'condensation-forum'
s3client = boto3.client(
"s3",
aws_access_key_id=config.get("accessKey"),
aws_secret_access_key=config.get("secretKey")
)
#database connection
dataSessionMgr = SessionManager(
config.get("dbUser"),
config.get("dbPassword"),
config.get("dbEndpoint"))
# Load up Jinja2 templates
templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")
templateEnv = jinja2.Environment(loader=templateLoader)
#pass in library functions to jinja, isn't python terrifying?
#we want to zip collections in view
templateEnv.globals.update(zip=zip)
#we also want to view our flashed messages
templateEnv.globals.update(get_flashed_messages=get_flashed_messages)
#generate urls for buttons in the view
templateEnv.globals.update(url_for=url_for)
bodyTemplate = templateEnv.get_template("body.html")
bodySimpleTemplate = templateEnv.get_template("body-simple.html")
homeTemplate = templateEnv.get_template("home.html")
threadTemplate = templateEnv.get_template("thread.html")
editThreadTemplate = templateEnv.get_template("edit-thread.html")
editCommentTemplate = templateEnv.get_template("edit-comment.html")
fileManagerTemplate = templateEnv.get_template("file-manager.html")
fileListTemplate = templateEnv.get_template("file-list.html")
sharedJavascript = templateEnv.get_template("shared.js")
###############################################################################
#END CONFIG
###############################################################################
@application.route('/', methods=['GET'])
@authManager.enableAuthentication
def indexGetHandler():
"""
Returns the template "home" wrapped by "body" served as HTML
"""
threads = None
#grab threads ordered by time, and zip them with some usernames
with dataSessionMgr.session_scope() as dbSession:
user = authManager.getUserData()
if not user:
flash("Welcome, please <a href='/login'>log in or create an account</a>.")
threads = query.getThreadsByCommentTime(dbSession)
urls = [url_for("threadGetHandler", tid=thread.id) for thread in threads]
usernames = [thread.user.name for thread in threads]
user = authManager.getUserData()
threads = query.extractOutput(threads)
homeRendered = homeTemplate.render(
threads=threads,
urls=urls,
usernames=usernames)
user = authManager.getUserData()
return bodyTemplate.render(
title="Home",
body=homeRendered,
user=user,
location=request.url)
@application.route("/new-thread", methods=["GET", "POST"])
@authManager.requireAuthentication
def newThreadHandler():
""" Renders the thread creation screen, creates thread if all data is validated """
#do not allow unauthenticated users to submit
form = CreateThreadForm()
user = authManager.getUserData()
if form.validate_on_submit():
tid = None
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
user = query.getUser(dbSession, user["id"])
thread = schema.Thread(
user=user,
heading=escape(form.heading.data),
body=escape(form.body.data),
attachments=files)
#commits current transactions so we can grab the generated id
dbSession.flush()
tid = thread.id
flash("Your thread was created successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while creating a thread. Please try again later.")
return redirect(url_for("indexGetHandler"))
#error handling is done in the html forms
user = authManager.getUserData()
#File attachment list
fileList = [];
rendered = editThreadTemplate.render(form=form, fileListAsString=json.dumps(fileList))
return bodyTemplate.render(
title="Create Thread",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/shared.js", methods=["GET"])
def getSharedJs():
return sharedJavascript.render();
@application.route("/edit-thread?tid=<int:tid>", methods=["GET", "POST"])
@authManager.requireAuthentication
def editThreadHandler(tid):
"""Renders an existing threaed to be modified """
#do not allow unauthenticated users to submit
form = CreateThreadForm()
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
if user["id"] != thread.user_id:
abort(403)
if form.validate_on_submit():
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
print (fileEntries, file=sys.stderr)
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
thread = query.getThreadById(dbSession, tid)
if user["id"] != thread.user_id:
abort(403)
thread.attachments = files
thread.heading = escape(form.heading.data)
thread.body = escape(form.body.data)
flash("Your thread was updated successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while updating a thread. Please try again later.")
return redirect(url_for("indexGetHandler"))
#populate with old data from forms
fileList = [];
try:
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
form.heading.data = thread.heading
form.body.data = thread.body
for file in thread.attachments:
fileList.append({
'id': file.id,
'name': file.name
})
except:
flash("loading failed")
#error handling is done in the html forms
rendered = editThreadTemplate.render(form=form, edit = True, fileListAsString=json.dumps(fileList))
return bodyTemplate.render(
title="Edit Thread",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/delete-thread?tid=<int:tid>", methods=["GET"])
@authManager.requireAuthentication
def deleteThreadHandler(tid):
"""Deletes a thread."""
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
try:
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
if not thread:
abort(404)
if user["id"] != thread.user_id:
abort(403)
dbSession.delete(thread)
flash("Your thread was deleted successfully.")
except:
flash("An unexpected error occurred while deleting a thread. Please try again later.")
return redirect(url_for("indexGetHandler"))
@application.route("/new-comment?<int:tid>", methods=["GET", "POST"])
@authManager.requireAuthentication
def newCommentHandler(tid):
"""Renders the thread creation screen, creates thread if all data is validated"""
#do not allow unauthenticated users to submit
form = CreateCommentForm()
user = authManager.getUserData()
print(user, file=sys.stderr)
if not user:
abort(403)
if form.validate_on_submit():
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
user = query.getUser(dbSession, user["id"])
thread = query.getThreadById(dbSession, tid)
thread.replies.append(schema.Comment(user=user, body=escape(form.body.data), attachments=files))
flash("Your comment was created successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while creating a comment. Please try again later.")
return redirect(url_for("indexGetHandler"))
fileList = [];
rendered = editCommentTemplate.render(form=form, fileListAsString=json.dumps(fileList))
user = authManager.getUserData()
return bodyTemplate.render(
title="Reply",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/edit-comment?cid=<int:cid>", methods=["GET", "POST"])
@authManager.requireAuthentication
def editCommentHandler(cid):
"""Renders an existing comment to be modified """
#do not allow unauthenticated users to submit
form = CreateCommentForm()
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
with dataSessionMgr.session_scope() as dbSession:
comment = query.getCommentById(dbSession, cid)
if user["id"] != comment.user_id:
abort(403)
if form.validate_on_submit():
try:
with dataSessionMgr.session_scope() as dbSession:
# Collect a list of all file entities
fileEntries = json.loads(request.form["fileIds"])
files = []
for fileEntry in fileEntries:
files.append(query.getFileById(dbSession, fileEntry['id']))
comment = query.getCommentById(dbSession, cid)
tid = comment.thread_id
if user["id"] != comment.user_id:
abort(403)
comment.body = escape(form.body.data)
comment.attachments = files
flash("Your comment was updated successfully.")
#redirect to the created thread view
return redirect(url_for("threadGetHandler", tid=tid))
except:
flash("An unexpected error occurred while updating a comment. Please try again later.")
return redirect(url_for("indexGetHandler"))
#populate with old data from forms
fileList = [];
try:
with dataSessionMgr.session_scope() as dbSession:
comment = query.getCommentById(dbSession, cid)
form.body.data = comment.body
for file in comment.attachments:
fileList.append({
'id': file.id,
'name': file.name
})
except:
flash("Loading comment data failed, please try again.")
#error handling is done in the html forms
rendered = editCommentTemplate.render(form=form, edit=True, fileListAsString=json.dumps(fileList))
return bodyTemplate.render(
title="Edit Comment",
body=rendered,
user=user,
location=url_for('indexGetHandler', _external=True))
@application.route("/delete-comment?cid=<int:cid>", methods=["GET"])
@authManager.requireAuthentication
def deleteCommentHandler(cid):
"""Deletes a comment."""
#verify security no error handling because if this fails we have problems, we should fail too
user = authManager.getUserData()
if not user:
abort(403)
try:
with dataSessionMgr.session_scope() as dbSession:
comment = query.getCommentById(dbSession, cid)
if not comment:
abort(404)
if user["id"] != comment.user_id:
abort(403)
dbSession.delete(comment)
flash("Your comment was deleted successfully.")
except:
flash("An unexpected error occurred while deleting a comment. Please try again later.")
return redirect(url_for("indexGetHandler"))
@application.route("/thread/<int:tid>)", methods=["GET"])
@authManager.enableAuthentication
def threadGetHandler(tid):
"""Renders a thread, attachments, and all relevant comments"""
#grab the thread with attachments
thread = None
with dataSessionMgr.session_scope() as dbSession:
thread = query.getThreadById(dbSession, tid)
if thread is None:
flash("The thread you selected does not exist.")
return redirect(url_for("indexGetHandler"));
thread_attachments = query.extractOutput(thread.attachments)
user = authManager.getUserData()
uid = user["id"] if user else 0
op = query.extractOutput(thread.user)
op_permission = thread.user_id == uid
replyUrl = url_for("newCommentHandler", tid=thread.id)
post_attachments = query.extractOutput(thread.attachments)
comments = query.getCommentsByThread(dbSession, thread.id)
comment_attachments =[]
comment_users = []
edit_permissions = []
for comment in comments:
comment_attachments.append(query.extractOutput(comment.attachments))
comment_users.append(query.extractOutput(comment.user))
edit_permissions.append(uid == comment.user_id)
comments = query.extractOutput(comments)
thread = query.extractOutput(thread)
threadRendered = threadTemplate.render(
thread=thread,
thread_attachments=thread_attachments,
op=op,
op_permission=op_permission,
comments=comments,
comment_attachments=comment_attachments,
comment_users=comment_users,
edit_permissions=edit_permissions,
replyUrl=replyUrl)
user = authManager.getUserData();
return bodyTemplate.render(
title="Thread",
body=threadRendered,
user=user,
location=request.url)
@authManager.loginCallback
def loginCallback():
"""
This is invoked when a user logs in, before any other logic.
"""
user = authManager.getUserData()
if user:
try:
with dataSessionMgr.session_scope() as dbSession:
#add a new user if not in the database
if not query.getUser(dbSession, user["id"]):
dbSession.add(schema.User(
id=user["id"],
name=user["name"],
profile_picture=user["picture"]))
flash("Your Google account has been linked. Thank you!")
except:
flash("An unexpected error occurred while linking your account. Please try again later.")
#if this fails logout and redirect home
return redirect(authManager.LOGOUT_ROUTE)
@application.route("/delete-user", methods=["GET"])
@authManager.requireAuthentication
def deleteUserHandler():
"""Deletes a user and redirects them home"""
user = authManager.getUserData()
if user:
try:
with dataSessionMgr.session_scope() as dbSession:
account = query.getUser(dbSession, user["id"])
if account:
dbSession.delete(account)
flash("Your forum account has been deleted and unlinked from your Google account.")
except:
flash("An unexpected error occurred while deleting your account. Please try again later.")
return redirect(authManager.LOGOUT_ROUTE)
@authManager.logoutCallback
def logoutCallback():
"""
This is invoked when a user logs out, immediately before user context is destroyed.
"""
user = authManager.getUserData()
@application.route('/file-manager', methods=['GET'])
@authManager.enableAuthentication
def fileManagerGetHandler():
"""renders the users file manager screen"""
    user = authManager.getUserData()
    if not user:
        abort(401)
id = user['id']
fileManagerRendered = fileManagerTemplate.render()
return bodyTemplate.render(
title="File Manager",
body=fileManagerRendered,
user=user,
location=request.url)
@application.route('/file-delete', methods=['POST'])
@authManager.requireAuthentication
def fileListDeleteHandler():
"""Deletes a list of files"""
user = authManager.getUserData()
fid = int(request.form['file'])
id = user['id']
# Find the file in S3
try:
with dataSessionMgr.session_scope() as dbSession:
file1 = query.getFileById(dbSession,fid)
file1 = query.extractOutput(file1)
except Exception as e:
flash("An unexpected error occurred while finding the file in our cloud storage. "\
+ "Please try again later.<br/><br/>", e);
return redirect(url_for("fileListGetHandler"))
# Delete the file from S3
key = file1['cloud_key']
try:
s3client.delete_object(Bucket=bucket_name,Key=key)
except Exception as e:
flash("An unexpected error occurred while removing the file from our cloud storage. "\
+ "Please try again later.<br/><br/>", e);
return redirect(url_for("fileListGetHandler"))
# Delete the file by fileID in RDS
try:
with dataSessionMgr.session_scope() as dbSession:
file = query.getFileById(dbSession,fid)
if file:
dbSession.delete(file)
except Exception as e:
flash("An unexpected error occurred while removing this file from our database. "\
+ "Please try again later.<br/><br/>", e);
return redirect(url_for("fileListGetHandler"))
return redirect(url_for("fileListGetHandler"))
@application.route('/file-list', methods=['GET'])
@authManager.requireAuthentication
def fileListGetHandler():
"""Gives the list of files associated with current user"""
user = authManager.getUserData()
id = user['id']
#Get the user's profile from the DB and zip it first
with dataSessionMgr.session_scope() as dbSession:
files = query.getFilesByUser(dbSession,id)
files = query.extractOutput(files)
if not files:
files = [];
fileManagerRendered = fileListTemplate.render(files=files)
return bodySimpleTemplate.render(
title="File Manager",
body=fileManagerRendered)
@application.route('/file-list', methods=['POST'])
@authManager.requireAuthentication
def fileListPostHandler():
"""Uploads a list of files to s3 and the dv"""
user = authManager.getUserData()
# Get the user session and file to upload
id = user['id']
file = request.files['file']
    # If the user does not select a file, the browser may submit an empty part without a filename
if not file or file.filename.strip() == '':
flash('You must select a file in order to upload one.')
return redirect(request.url)
# Determine shortened file name (secure)
filename = secure_filename(file.filename.strip())
while (len(filename) > 50):
cutString = len(filename) % 50
filename = filename[cutString:len(filename)]
# Determine the S3 key
try:
myUuid = uuid.uuid4().hex
fn, fileExtension = os.path.splitext(filename)
key = id + "/" + myUuid + fileExtension.lower()
# If the file already exists, we need to warn and abort
try:
with dataSessionMgr.session_scope() as dbSession:
checkFile = query.getFileByName(dbSession,id,filename)
checkFile = query.extractOutput(checkFile)
except Exception as e:
flash("We had an issue connecting to our storage, please try again", e);
return e
if checkFile is not None:
flash("That file already exists. Please delete it first and then re-upload. " \
+ "This will <b>remove</b> any attachments you have made to this file.")
return redirect(request.url)
# Since the file does not exist, we will upload it now
s3client.upload_fileobj(file, bucket_name, key, ExtraArgs={"ACL": "public-read", "ContentType": file.content_type})
url = "https://s3-us-west-2.amazonaws.com/condensation-forum/" + key
try:
with dataSessionMgr.session_scope() as dbSession:
user = query.getUser(dbSession, id)
file = schema.File(url=url, cloud_key=key, name=filename)
user.uploads.append(file)
except:
flash("We had an issue connecting to storage, please try again.")
return redirect(request.url)
except Exception:
flash("An unexpected error occurred while uploading your file. Things to try: "\
+ "<br/> - Rename the file to something shorter"\
+ "<br/> - Make sure the file size is under 1 megabyte"\
+ "<br/> - Make sure there are no special characters in the file name<br/><br/>");
return redirect(request.url)
# Redirect to end the POST handling the redirect can be to the same route or somewhere else
return redirect(request.url)
# Run Flask app now
if __name__ == "__main__":
# Enable debug output, disable in prod
application.debug = True
application.run()
| 36.711246
| 123
| 0.638475
| 2,611
| 24,156
| 5.851781
| 0.173497
| 0.009817
| 0.034034
| 0.037306
| 0.512992
| 0.441259
| 0.402775
| 0.352903
| 0.329799
| 0.320113
| 0
| 0.004822
| 0.253105
| 24,156
| 657
| 124
| 36.767123
| 0.842035
| 0.130402
| 0
| 0.465665
| 0
| 0
| 0.158033
| 0.007505
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034335
| false
| 0.002146
| 0.042918
| 0.002146
| 0.148069
| 0.004292
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e3073fdd2f59dca010998232729affa0626a74d8
| 3,133
|
py
|
Python
|
core/scheduler/at.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2016-10-08T09:01:05.000Z
|
2016-10-08T09:01:05.000Z
|
core/scheduler/at.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2019-09-24T09:56:52.000Z
|
2019-09-24T09:56:52.000Z
|
core/scheduler/at.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" apscheduler. """
import subprocess
from apscheduler.scheduler import Scheduler
from apscheduler.jobstores.shelve_store import ShelveJobStore
from datetime import date, datetime, timedelta
import os
import shelve
import zmq
from core.config.settings import logger
def job(command):
    """Run the scheduled command in a subprocess."""
    subprocess.Popen(command)
class ScheduleDaemon(object):
    """
    Scheduler 'at' daemon.
    One of the daemons.
    """
    response = None
def __init__(self, name="scheduler-at"):
"""docstring for __init__"""
self.context = zmq.Context()
self.name = name
self.sock = self.context.socket(zmq.REP)
self.sock.bind('ipc:///tmp/smarty-%s' % name)
def add_job(self, command, hour, minute, sec=0):
logger.info("2. scheduler adding job command: %s at %s:%s:%s" % (
command, hour, minute, sec
))
sched = Scheduler(standalone=True)
#make a db file
shelve.open(
os.path.join(
os.path.dirname(__file__),
'example.db'
)
)
sched.add_jobstore(ShelveJobStore('example.db'), 'shelve')
exec_time = datetime(
date.today().year,
date.today().month,
date.today().day,
int(hour),
int(minute),
int(sec)
)
#test
#exec_time = datetime.now() + timedelta(seconds=5)
sched.add_date_job(
job,
exec_time,
name='alarm',
jobstore='shelve',
args=[command]
)
sched.start()
def start(self):
""" start """
logger.info('daemon %s started successfully' % (self.name))
while True:
self.msg = self.sock.recv_json()
logger.info('daemon %s received %s' % (self.name, self.msg))
self.cmd = self.msg.get('cmd', None)
if self.cmd == 'terminate':
                self.response = {'text': 'terminated'}
self.sock.send_json(self.response)
self.sock.close()
self.context.term()
break
if self.cmd:
response = self.process_command(self.cmd)
logger.info('daemon responded with %s' % response)
exit()
def process_command(self, cmd):
"""docstring for process"""
if cmd == 'add_job':
err = 'uhm, I did not understand.'
response = {'text': ';-)'}
command = self.msg.pop('command', None)
hour = self.msg.pop('hour', None)
minute = self.msg.pop('minute', None)
sec = self.msg.pop('sec', None)
self.sock.send_json({'text': 'job added'})
try:
response = self.add_job(command, hour, minute, sec)
except (KeyboardInterrupt, SystemExit) as e:
logger.exception(e)
response = {'text': 'wrong params passed'}
return response
daemon = ScheduleDaemon()
daemon.start()
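Because daemon.start() blocks, a client has to talk to the daemon from another process. A hedged sketch of such a client, assuming the default daemon name 'scheduler-at' and the message shape process_command expects:

import zmq

context = zmq.Context()
sock = context.socket(zmq.REQ)  # pairs with the REP socket on the daemon side
sock.connect('ipc:///tmp/smarty-scheduler-at')
sock.send_json({
    'cmd': 'add_job',
    'command': ['touch', '/tmp/alarm'],  # argv list handed to subprocess.Popen
    'hour': 7, 'minute': 30, 'sec': 0,
})
print(sock.recv_json())  # expected: {'text': 'job added'}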
| 25.892562
| 73
| 0.531759
| 337
| 3,133
| 4.863501
| 0.379822
| 0.029896
| 0.024405
| 0.036608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001933
| 0.339611
| 3,133
| 120
| 74
| 26.108333
| 0.790237
| 0.064475
| 0
| 0
| 0
| 0
| 0.110056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064103
| false
| 0.012821
| 0.102564
| 0
| 0.205128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e30fa4b4018e2cb629164838090fb39449877a74
| 2,551
|
py
|
Python
|
advertorch/tests/test_utilities.py
|
sleepstagingrest/rest
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 18
|
2020-02-03T07:14:40.000Z
|
2021-12-20T18:45:43.000Z
|
advertorch/tests/test_utilities.py
|
sleepstagingrest/rest
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 11
|
2020-01-28T23:16:25.000Z
|
2022-02-10T01:04:56.000Z
|
advertorch/tests/test_utilities.py
|
sleepstagingrest/REST
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 2
|
2020-08-20T08:15:09.000Z
|
2021-02-23T07:30:40.000Z
|
# Copyright (c) 2018-present, Royal Bank of Canada.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import numpy as np
import torch
import torchvision.transforms.functional as F
from advertorch.utils import torch_allclose
from advertorch.utils import CIFAR10_MEAN
from advertorch.utils import CIFAR10_STD
from advertorch.utils import MNIST_MEAN
from advertorch.utils import MNIST_STD
from advertorch.utils import NormalizeByChannelMeanStd
from advertorch.utils import PerImageStandardize
from advertorch_examples.utils import bchw2bhwc
from advertorch_examples.utils import bhwc2bchw
def test_mnist_normalize():
# MNIST
tensor = torch.rand((16, 1, 28, 28))
normalize = NormalizeByChannelMeanStd(MNIST_MEAN, MNIST_STD)
assert torch_allclose(
torch.stack([F.normalize(t, MNIST_MEAN, MNIST_STD)
for t in tensor.clone()]),
normalize(tensor))
def test_cifar10_normalize():
# CIFAR10
tensor = torch.rand((16, 3, 32, 32))
normalize = NormalizeByChannelMeanStd(CIFAR10_MEAN, CIFAR10_STD)
assert torch_allclose(
torch.stack([F.normalize(t, CIFAR10_MEAN, CIFAR10_STD)
for t in tensor.clone()]),
normalize(tensor))
def test_grad_through_normalize():
tensor = torch.rand((2, 1, 28, 28))
tensor.requires_grad_()
mean = torch.tensor((0.,))
std = torch.tensor((1.,))
normalize = NormalizeByChannelMeanStd(mean, std)
loss = (normalize(tensor) ** 2).sum()
loss.backward()
assert torch_allclose(2 * tensor, tensor.grad)
def _run_tf_per_image_standardization(imgs):
import tensorflow as tf
import tensorflow.image
imgs = bchw2bhwc(imgs)
placeholder = tf.placeholder(tf.float32, shape=imgs.shape)
var_scaled = tf.map_fn(
lambda img: tf.image.per_image_standardization(img), placeholder)
with tf.Session() as sess:
tf_scaled = sess.run(var_scaled, feed_dict={placeholder: imgs})
return bhwc2bchw(tf_scaled)
def test_per_image_standardization():
imgs = np.random.normal(
scale=1. / (3072 ** 0.5), size=(10, 3, 32, 32)).astype(np.float32)
per_image_standardize = PerImageStandardize()
pt_scaled = per_image_standardize(torch.tensor(imgs)).numpy()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tf_scaled = _run_tf_per_image_standardization(imgs)
assert np.abs(pt_scaled - tf_scaled).max() < 0.001
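For reference, the TensorFlow operation being matched is documented as (x - mean) / max(stddev, 1/sqrt(N)); a standalone PyTorch sketch of that formula (a hypothetical helper, not advertorch API):

import torch

def per_image_standardize_ref(img: torch.Tensor) -> torch.Tensor:
    # (x - mean) / max(stddev, 1/sqrt(num_elements)); TF uses the population std
    img = img.float()
    n = img.numel()
    adjusted_std = torch.clamp(img.std(unbiased=False), min=n ** -0.5)
    return (img - img.mean()) / adjusted_std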
| 30.73494
| 74
| 0.717758
| 331
| 2,551
| 5.359517
| 0.329305
| 0.071026
| 0.074972
| 0.098647
| 0.260992
| 0.131905
| 0.095829
| 0.095829
| 0.095829
| 0.047351
| 0
| 0.033686
| 0.185417
| 2,551
| 82
| 75
| 31.109756
| 0.820019
| 0.078401
| 0
| 0.109091
| 0
| 0
| 0.002562
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.381818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e30ff60533abef30a592ebe83ada7b1e9f61003f
| 5,595
|
py
|
Python
|
RV/portfolio/portfolio/hindex.py
|
rmomizo/portfolio_bot
|
b7854c4b5c9f32e9631389bb2238b5bb30d54c8e
|
[
"MIT"
] | null | null | null |
RV/portfolio/portfolio/hindex.py
|
rmomizo/portfolio_bot
|
b7854c4b5c9f32e9631389bb2238b5bb30d54c8e
|
[
"MIT"
] | null | null | null |
RV/portfolio/portfolio/hindex.py
|
rmomizo/portfolio_bot
|
b7854c4b5c9f32e9631389bb2238b5bb30d54c8e
|
[
"MIT"
] | null | null | null |
from __future__ import division
import itertools
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
import random
from random import shuffle
from collections import Counter
def flatten_list(somelist):
if any(isinstance(el, list) for el in somelist) == False:
return somelist
flat_list = list(itertools.chain(*somelist))
return flat_list
def term_frequency(somelist):
"""Returns the term frequency of each unique token in the term list"""
somelist = flatten_list(somelist)
term_freqs = dict(Counter(somelist))
return term_freqs
def tf_ranks(somelist):
term_freqs = term_frequency(somelist)
#sort term frequencies from largest to smallest
freqs = list(set([v for (k,v) in term_freqs.items()]))
#add ranks to sorted term frequencies, creating tuple (term_freqs, rank)
i = 1
rfreqs = []
for item in sorted(freqs, reverse=True):
rfreqs.append((item, i))
i = i + 1
#create dict of keys based on terms
term_ranks ={}
for k, v in term_freqs.items():
term_ranks.setdefault(k, [])
#add (term_freq, rank) to keys
for k, v in term_freqs.items():
for item in rfreqs:
if v == item[0]:
term_ranks[k] = item
return term_ranks
def find_h_index(somelist):
tranks = tf_ranks(somelist)
#h_index = [(key, (val2, 1/(val1-val2)) for (key, (val1, val2)) in tranks.iteritems()]
#plot h_points
values = []
    for key, (val1, val2) in tranks.items():
if val1-val2 == 0:
h_point = key, (val1, val2)
#return 'h-point is: ' + str(h_point)
else:
values.append((val2, 1/(val1-val2)))
#[(val2, 1/(val1-val2)) for key, (val1, val2) in tranks.iteritems()]
sorted_values = sorted(values)
xvalues = [val1 for (val1, val2) in sorted_values]
yvalues = [val2 for (val1, val2) in sorted_values]
# plt.scatter(xvalues, yvalues)
# plt.title('h point')
# plt.ylabel('1/ranks - frequency')
# plt.xlabel('ranks')
# plt.show()
d = zip(xvalues, yvalues)
data = [[x,y] for (x,y) in d ]
return data
def find_abmin(somelist):
tranks = tf_ranks(somelist)
subs = []
    for key, (val1, val2) in tranks.items():
subs.append((val1-val2))
abmin = min(subs, key=abs)
return abmin
def find_h(somelist):
tranks = tf_ranks(somelist)
abmin = find_abmin(somelist)
    for key, (val1, val2) in tranks.items():
if val1-val2 == 0:
h_point = key, (val1, val2)
return h_point
elif val1-val2 ==abmin:
h_point = key, (val1, val2), val1-val2
return h_point
def fast_h(somelist):
h_point = find_h(somelist)
tranks = tf_ranks(somelist)
fast =[]
boundary = h_point[1][1]
    for key, (val1, val2) in tranks.items():
if val2 <= boundary:
fast.append((key, (val1, val2)))
return fast
def slow_h(somelist):
h_point = find_h(somelist)
tranks = tf_ranks(somelist)
slow =[]
boundary = h_point[1][1]
    for key, (val1, val2) in tranks.items():
if val2 > boundary:
slow.append((key, (val1, val2)))
return slow
def h_tag_nodes(somelist):
"""
    Tag tokens in a processed list as either synsemantic (fast) or autosemantic (slow).
"""
fast = fast_h(somelist)
fasth = [(word, {'h':'syns'}) for (word, rank) in fast]
slow = slow_h(somelist)
slowh = [(word, {'h':'auto'}) for (word,rank) in slow]
h_tags = fasth + slowh
return h_tags
def extract_fast_h(list_of_cycle_length_freqs, cycles):
"""
This is specifically designed to extract lists from lists by comparing the length
of the nested list to the most frequent cycles lengths found using fast_h method
"""
fh = [key for (key, (val1, val2)) in fast_h(list_of_cycle_length_freqs)]
fast_cycles = [cycle for cycle in cycles if len(cycle) in fh]
return fast_cycles
def extract_slow_h(list_of_cycle_length_freqs, cycles):
"""
This is specifically designed to extract lists from lists by comparing the length
of the nested list to the most frequent cycles lengths found using slow_h method
"""
sh = [key for (key, (val1, val2)) in slow_h(list_of_cycle_length_freqs)]
slow_cycles = [cycle for cycle in cycles if len(cycle) in sh]
return slow_cycles
def h_cycles(cycle_length):
fast = [key for (key, (va1, val2)) in fast_h(cycle_length)]
slow = [key for (key, (val1, val2)) in slow_h(cycle_length)]
h_cycles = []
for cycle in cycle_length:
if cycle in fast:
h_cycles.append((cycle, 'autosemantic'))
elif cycle in slow:
h_cycles.append((cycle, 'synsemantic'))
return h_cycles
def find_a_param(somelist):
    h_point = find_h(somelist)
    # find_h returns (token, (frequency, rank)[, diff]); use the scalar h value
    # (the rank at the frequency == rank crossing) rather than the whole tuple.
    h = h_point[1][1]
    a = len(somelist) / h ** 2
    return a
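A toy run (invented tokens) showing what find_h returns: a (token, (frequency, rank)) pair at, or nearest to, the frequency == rank crossing.

tokens = ['the', 'the', 'the', 'the', 'cat', 'cat', 'cat', 'sat', 'on', 'mat']
print(tf_ranks(tokens))  # {'the': (4, 1), 'cat': (3, 2), 'sat': (1, 3), ...}
print(find_h(tokens))    # ('cat', (3, 2), 1) here: |freq - rank| is minimal at 'cat'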
| 32.719298
| 94
| 0.557283
| 704
| 5,595
| 4.284091
| 0.203125
| 0.066313
| 0.054708
| 0.046419
| 0.432361
| 0.38561
| 0.35378
| 0.29244
| 0.276525
| 0.276525
| 0
| 0.019864
| 0.343164
| 5,595
| 170
| 95
| 32.911765
| 0.800816
| 0.174978
| 0
| 0.209091
| 0
| 0
| 0.00727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118182
| false
| 0
| 0.063636
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e3106531f1b9e6f9266ac05f2587a787cfc4e699
| 1,316
|
py
|
Python
|
operators/device_output.py
|
a1exwang/fm-synth
|
fb14aa1dec3798b15a607ac03442decf322bebee
|
[
"MIT"
] | 3
|
2018-01-18T12:25:38.000Z
|
2020-03-19T13:19:31.000Z
|
operators/device_output.py
|
a1exwang/fm-synth
|
fb14aa1dec3798b15a607ac03442decf322bebee
|
[
"MIT"
] | 4
|
2017-04-24T16:36:59.000Z
|
2017-05-11T11:23:44.000Z
|
operators/device_output.py
|
a1exwang/fm-synth
|
fb14aa1dec3798b15a607ac03442decf322bebee
|
[
"MIT"
] | null | null | null |
from PyQt5.QtCore import pyqtSlot
from channels.channel import Channel
from operators.base import OutputOperator
import numpy as np
class DeviceOutput(OutputOperator):
def __init__(self, input_ops, volume=1.0, name=None):
super().__init__(input_ops, name)
self.total_count = 0
self.stream = None
self.volume = volume
self.channel = Channel.get_instance()
self.channel.add_channel(name='MasterVol', slot=self.volume_changed, get_val=lambda: self.volume)
@pyqtSlot(float, name='volume_changed')
def volume_changed(self, vol):
if vol <= 0:
vol = 0
if vol >= 1:
vol = 1
self.volume = vol
def next_buffer(self, input_buffers, n):
if len(input_buffers) == 1:
# mono
# [-1, 1) -> [0, 2**16)
arr = ((np.array(input_buffers[0], dtype='float32') + 1) / 2) * 2**16
arr = np.transpose(np.array([arr, arr]))
else:
# stereo
arr_l = ((np.array(input_buffers[0], dtype='float32') + 1) / 2) * 2 ** 16
arr_r = ((np.array(input_buffers[1], dtype='float32') + 1) / 2) * 2 ** 16
arr = np.transpose(np.array([arr_l, arr_r]))
result = np.array(arr, dtype='int16')
return [result * self.volume]
| 34.631579
| 105
| 0.575988
| 174
| 1,316
| 4.201149
| 0.344828
| 0.057456
| 0.032832
| 0.032832
| 0.194254
| 0.194254
| 0.194254
| 0.194254
| 0.194254
| 0.194254
| 0
| 0.043663
| 0.286474
| 1,316
| 37
| 106
| 35.567568
| 0.734824
| 0.025076
| 0
| 0
| 0
| 0
| 0.038311
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.137931
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e31093c826bcdc408129c3db911766a20c8f8973
| 524
|
py
|
Python
|
code/0217-containsDuplicate.py
|
RRRoger/LeetCodeExercise
|
0019a048fcfac9ac9e6f37651b17d01407c92c7d
|
[
"MIT"
] | null | null | null |
code/0217-containsDuplicate.py
|
RRRoger/LeetCodeExercise
|
0019a048fcfac9ac9e6f37651b17d01407c92c7d
|
[
"MIT"
] | null | null | null |
code/0217-containsDuplicate.py
|
RRRoger/LeetCodeExercise
|
0019a048fcfac9ac9e6f37651b17d01407c92c7d
|
[
"MIT"
] | null | null | null |
class Solution(object):
def isPowerOfTwo(self, n):
"""
:type n: int
:rtype: bool
"""
        # Left shift: probe successive powers of two with 2 << power
if n == 1:
return True
power = 0
flag = True
while flag:
val = 2 << power
if val == n:
return True
elif val > n:
return False
power += 1
if "__main__" == __name__:
solution = Solution()
n = 1025
res = solution.isPowerOfTwo(n)
print(res)
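An equivalent constant-time check, offered as an alternative rather than the author's solution: a positive power of two has exactly one set bit, so n & (n - 1) clears it to zero.

def is_power_of_two(n):
    # Single-set-bit test: 1024 -> True, 1025 -> False, 0 and negatives -> False
    return n > 0 and (n & (n - 1)) == 0

assert is_power_of_two(1024) and not is_power_of_two(1025)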
| 15.878788
| 34
| 0.412214
| 52
| 524
| 4
| 0.557692
| 0.096154
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030189
| 0.494275
| 524
| 33
| 35
| 15.878788
| 0.754717
| 0.055344
| 0
| 0.111111
| 0
| 0
| 0.017241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.277778
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e312d0f86ad81db6700f196a91af6d00bac33137
| 3,870
|
py
|
Python
|
app/discal/cogs/handler.py
|
Shirataki2/DisCalendar
|
cfb5ecad6c65911fbb041cbc585d86588de125f5
|
[
"MIT"
] | 6
|
2020-11-29T08:04:07.000Z
|
2021-05-07T11:05:10.000Z
|
app/discal/cogs/handler.py
|
Shirataki2/DisCalendar
|
cfb5ecad6c65911fbb041cbc585d86588de125f5
|
[
"MIT"
] | 139
|
2020-11-24T23:37:03.000Z
|
2022-03-30T00:18:09.000Z
|
app/discal/cogs/handler.py
|
Shirataki2/DisCalendar
|
cfb5ecad6c65911fbb041cbc585d86588de125f5
|
[
"MIT"
] | 1
|
2021-02-01T15:07:17.000Z
|
2021-02-01T15:07:17.000Z
|
import asyncio
import json
import discord
from discord.ext import commands, tasks
from discal.bot import Bot
from datetime import datetime, timedelta
from discal.logger import get_module_logger
logger = get_module_logger(__name__)
class Handler(commands.Cog):
def __init__(self, bot):
self.bot: Bot = bot
self.postloop.start()
def cog_unload(self):
self.postloop.cancel()
async def post_subtask(self, record):
setting = await self.bot.pool.fetchrow(
(
'SELECT * FROM event_settings WHERE '
'guild_id = $1;'
),
record['guild_id']
)
if setting is None:
return
guild = self.bot.get_guild(int(setting['guild_id']))
channel = guild.get_channel(int(setting['channel_id']))
notifications = [
json.loads(notification)
for notification in record['notifications']
]
notifications.append({'key': -1, 'num': 0, 'type': '分前'})
for notification in notifications:
minutes = int(notification['num'])
if notification['type'] == '時間前':
minutes *= 60
elif notification['type'] == '日前':
minutes *= 24 * 60
elif notification['type'] == '週間前':
minutes *= 7 * 24 * 60
start = record['start_at']
end = record['end_at']
if record['is_all_day']:
start = datetime(
start.year, start.month, start.day,
0, 0, 0, 0
)
end = datetime(
end.year, end.month, end.day,
0, 0, 0, 0
)
now_minus_1 = datetime.now() + timedelta(hours=9, minutes=minutes - 1)
now = datetime.now() + timedelta(hours=9, minutes=minutes)
            if now_minus_1 <= start < now:
embed = discord.Embed(color=int(record['color'][1:], 16))
embed.title = record['name']
if record['description']:
embed.description = record['description']
if notification['key'] == -1:
embed.set_author(name='以下の予定が開催されます')
else:
prefix = f'{notification["num"]}{notification["type"][:-1]}後に'
embed.set_author(name=f'{prefix}以下の予定が開催されます')
if record['is_all_day']:
if start == end:
v = f'{start.strftime("%Y/%m/%d")}'
else:
v = f'{start.strftime("%Y/%m/%d")} - {end.strftime("%Y/%m/%d")}'
else:
start_date = datetime(start.year, start.month, start.day)
end_date = datetime(end.year, end.month, end.day)
if start_date == end_date:
v = f'{start.strftime("%Y/%m/%d %H:%M")} - {end.strftime("%H:%M")}'
else:
v = f'{start.strftime("%Y/%m/%d %H:%M")} - {end.strftime("%Y/%m/%d %H:%M")}'
embed.add_field(name='日時', value=v, inline=False)
logger.info(f'Send Notification: {record}')
await channel.send(embed=embed)
@tasks.loop(minutes=1)
async def postloop(self):
records = await self.bot.pool.fetch(
(
'SELECT * FROM events WHERE '
'start_at >= $1;'
),
datetime.now()
)
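        # Fire-and-forget: schedule one notification subtask per upcoming
        # record; the gathered future is intentionally not awaited here.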
asyncio.gather(*[
self.post_subtask(record)
for record in records
], loop=self.bot.loop)
@postloop.before_loop
async def wait_ready(self):
logger.info('waiting...')
await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(Handler(bot))
| 36.168224
| 100
| 0.496382
| 420
| 3,870
| 4.466667
| 0.27381
| 0.026119
| 0.031983
| 0.035181
| 0.200959
| 0.170043
| 0.167377
| 0.046908
| 0.033049
| 0.033049
| 0
| 0.013969
| 0.371059
| 3,870
| 106
| 101
| 36.509434
| 0.756779
| 0
| 0
| 0.104167
| 0
| 0.03125
| 0.14677
| 0.058915
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.072917
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e312d4733d2d6ab5dadd53371794d5b4269ec969
| 2,738
|
py
|
Python
|
nids/enipcip/enip_cpf.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 4
|
2019-12-17T08:59:57.000Z
|
2022-01-09T19:52:27.000Z
|
nids/enipcip/enip_cpf.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 3
|
2020-08-13T16:05:46.000Z
|
2021-10-17T07:49:33.000Z
|
nids/enipcip/enip_cpf.py
|
Cyphysecurity/ICS-SDN-1
|
c04d9e7bb7ad945166e969e071a2f82fb5bd18bf
|
[
"MIT"
] | 4
|
2017-06-14T23:41:50.000Z
|
2021-03-01T18:54:03.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2015 David I. Urbina, david.urbina@utdallas.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Ethernet/IP Common Packet Format Scapy dissector."""
import struct
from scapy import all as scapy_all
from . import utils
class CPF_SequencedAddressItem(scapy_all.Packet):
name = "CPF_SequencedAddressItem"
fields_desc = [
scapy_all.LEIntField("connection_id", 0),
scapy_all.LEIntField("sequence_number", 0),
]
class CPF_AddressDataItem(scapy_all.Packet):
name = "CPF_AddressDataItem"
fields_desc = [
scapy_all.LEShortEnumField('type_id', 0, {
0x0000: "Null Address",
0x00a1: "Connection-based Address",
0x00b1: "Connected Transport Packet",
0x00b2: "Unconnected Message",
0x0100: "ListServices response",
0x8002: 'Sequenced Address Item',
}),
scapy_all.LEShortField("length", None),
]
def extract_padding(self, p):
return p[:self.length], p[self.length:]
def post_build(self, p, pay):
        if self.length is None and pay:
            pay_len = len(pay)  # renamed from the ambiguous single-letter `l`
            p = p[:2] + struct.pack("<H", pay_len) + p[4:]
return p + pay
class ENIP_CPF(scapy_all.Packet):
name = "ENIP_CPF"
fields_desc = [
utils.LEShortLenField("count", 2, count_of="items"),
scapy_all.PacketListField("items", [CPF_AddressDataItem('', 0, 0), CPF_AddressDataItem('', 0, 0)],
CPF_AddressDataItem, count_from=lambda p: p.count),
]
def extract_padding(self, p):
return '', p
scapy_all.bind_layers(CPF_AddressDataItem, CPF_SequencedAddressItem, type_id=0x8002)
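# Hypothetical usage sketch (editor addition; keyword-field construction is
# standard scapy, but this exact byte layout is untested):
# pkt = ENIP_CPF(items=[CPF_AddressDataItem(type_id=0x0000, length=0),
#                       CPF_AddressDataItem(type_id=0x00b2)])
# ENIP_CPF(str(pkt)).show()  # rebuild and dissect (Python 2 scapy)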
| 36.506667
| 106
| 0.685172
| 361
| 2,738
| 5.102493
| 0.479224
| 0.043431
| 0.022801
| 0.029316
| 0.085776
| 0.062975
| 0.031488
| 0
| 0
| 0
| 0
| 0.022482
| 0.220234
| 2,738
| 74
| 107
| 37
| 0.840281
| 0.429511
| 0
| 0.128205
| 0
| 0
| 0.151693
| 0.015625
| 0
| 0
| 0.027344
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0.051282
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e314ca5cb9348b5a95152247da6288de4e244796
| 1,103
|
py
|
Python
|
programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py
|
andriidem308/python_practice
|
85a0ebd6ecbecf63eaba170c8279f0a88600237a
|
[
"MIT"
] | 2
|
2020-01-27T11:58:54.000Z
|
2020-03-30T10:54:08.000Z
|
programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py
|
andriidem308/python_practice
|
85a0ebd6ecbecf63eaba170c8279f0a88600237a
|
[
"MIT"
] | null | null | null |
programming_hw_4s/tasks_with_eolymp_tags/t_7_6_(eolimp_5089).py
|
andriidem308/python_practice
|
85a0ebd6ecbecf63eaba170c8279f0a88600237a
|
[
"MIT"
] | null | null | null |
def merge_sort(arr):
if len(arr) > 1:
middle = len(arr) // 2
lefthalf = arr[:middle]
righthalf = arr[middle:]
merge_sort(lefthalf)
merge_sort(righthalf)
i = j = k = 0
while i < len(lefthalf) and j < len(righthalf):
if lefthalf[i] < righthalf[j]:
arr[k] = lefthalf[i]
i += 1
else:
arr[k] = righthalf[j]
j += 1
k += 1
while i < len(lefthalf):
arr[k] = lefthalf[i]
i += 1
k += 1
while j < len(righthalf):
arr[k] = righthalf[j]
j += 1
k += 1
def insertion_sort(arr, length):
for i in range(1, length):
item_to_insert = arr[i]
j = i - 1
while j >= 0 and arr[j] > item_to_insert:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = item_to_insert
n = int(input())
words = [''] * n
for i in range(n):
words[i] = input()
# merge_sort(words)
insertion_sort(words, n)
for w in words:
print(w)
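# Editor's note: both sorts reorder the list in place. insertion_sort is
# O(n^2) but quick on small or nearly-sorted input; merge_sort (left
# commented out above) is O(n log n) on any input.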
| 19.350877
| 55
| 0.44243
| 147
| 1,103
| 3.238095
| 0.217687
| 0.021008
| 0.018908
| 0.071429
| 0.138655
| 0.138655
| 0.07563
| 0.07563
| 0
| 0
| 0
| 0.025397
| 0.42883
| 1,103
| 56
| 56
| 19.696429
| 0.730159
| 0.015413
| 0
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0
| 0
| 0.051282
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e316f4ba8d78958af8ea71861f55f56a0c25786e
| 765
|
py
|
Python
|
Algorithms and Data Structures/sort/qks.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
Algorithms and Data Structures/sort/qks.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
Algorithms and Data Structures/sort/qks.py
|
ioyy900205/PyTorch_mess-around
|
90d255e17158699fd7902f7746b35fa18975112e
|
[
"MIT"
] | null | null | null |
'''
Date: 2021-08-10 17:17:35
LastEditors: Liuliang
LastEditTime: 2021-08-10 18:27:56
Description:
'''
import random
import sys
sys.path.append("..")
from bacic_module.random_int_list import random_int_list
def partition(nums, left, right):
tmp = nums[left]
while left < right:
while left<right and nums[right] >= tmp:
right -= 1
nums[left] = nums[right]
while left<right and nums[left] <= tmp:
left += 1
nums[right] = nums[left]
nums[left] = tmp
return left
def qks(nums, left, right):
if left < right:
mid = partition(nums,left,right)
qks(nums,left,mid-1)
qks(nums,mid+1,right)
c = random_int_list(0,10,10)
print(c)
qks(c, 0, len(c) - 1)  # sorts c in place; qks returns None, so don't bind it
print(c)
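# Editor's note: partition() always pivots on the leftmost element, so
# already-sorted input degrades quicksort to O(n^2); a random or
# median-of-three pivot choice avoids that worst case.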
| 21.25
| 56
| 0.605229
| 117
| 765
| 3.897436
| 0.350427
| 0.157895
| 0.085526
| 0.096491
| 0.114035
| 0.114035
| 0
| 0
| 0
| 0
| 0
| 0.068662
| 0.257516
| 765
| 35
| 57
| 21.857143
| 0.734155
| 0.122876
| 0
| 0.083333
| 0
| 0
| 0.003021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.125
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e318e94372f3438841131a8e520812b4b488dc1f
| 2,144
|
py
|
Python
|
Core/config/CYCEnv/run_json_CYC_envs.py
|
geoffroygivry/CyclopsVFX-Unity
|
6ab9ab122b6c3e6200e90d49a0c2bf774e53d985
|
[
"MIT"
] | 17
|
2017-06-27T04:14:42.000Z
|
2022-03-07T03:37:44.000Z
|
Core/config/CYCEnv/run_json_CYC_envs.py
|
geoffroygivry/Cyclops-VFX
|
6ab9ab122b6c3e6200e90d49a0c2bf774e53d985
|
[
"MIT"
] | 2
|
2017-06-14T04:17:51.000Z
|
2018-08-23T20:12:44.000Z
|
Core/config/CYCEnv/run_json_CYC_envs.py
|
geoffroygivry/CyclopsVFX-Unity
|
6ab9ab122b6c3e6200e90d49a0c2bf774e53d985
|
[
"MIT"
] | 2
|
2019-03-18T06:18:33.000Z
|
2019-08-14T21:07:53.000Z
|
import os
import json
def create_json_CYC_envs(root_dir):
DI_ROOT = root_dir
CYC_ROOT = "%s/CyclopsVFX" % DI_ROOT
DATA_FILENAME = os.path.join(CYC_ROOT, "CYC_envs.json")
CYC_HYDRA_PATH = "%s/Hydra" % (CYC_ROOT)
CYC_HYDRA_CACHE = "%s/Hydra/cache" % (CYC_ROOT)
CYC_CORE_PATH = "%s/Core/config/" % (CYC_ROOT)
CYC_NUKE_ENV = "%s/Core/config/NukeEnv" % (CYC_ROOT)
CYC_MAYA_ENV = "%s/Core/config/MayaEnv" % (CYC_ROOT)
CYC_RV_ENV = "%s/Core/config/RVEnv" % (CYC_ROOT)
CYC_MARI_ENV = "%s/Core/config/MariEnv" % (CYC_ROOT)
CYC_3DE_ENV = "%s/Core/config/3DeEnv" % (CYC_ROOT)
CYC_CLARISSE_ENV = "%s/Core/config/ClarisseEnv" % (CYC_ROOT)
CYC_SHOW_ENV = "%s/Core/config/ShowEnv" % (CYC_ROOT)
CYC_POLYPHEMUS_PATH = "%s/Apps/Polyphemus" % (CYC_ROOT)
CYC_STEROPES_PATH = "%s/Apps/Steropes" % (CYC_ROOT)
CYC_ENGINE_NUKE = "%s/Apps/Engines/Nuke" % (CYC_ROOT)
CYC_ICON = "%s/icons" % (CYC_ROOT)
NUKE_PATH = CYC_NUKE_ENV
SHOW_PATH = os.path.join(DI_ROOT, "jobs")
with open(DATA_FILENAME, mode='w') as feedsjson:
CYC_envs = {
"CYC_envs": {
"DI_ROOT": DI_ROOT,
"CYC_ROOT": CYC_ROOT,
"CYC_HYDRA_PATH": CYC_HYDRA_PATH,
"CYC_HYDRA_CACHE": CYC_HYDRA_CACHE,
"CYC_CORE_PATH": CYC_CORE_PATH,
"CYC_NUKE_ENV": CYC_NUKE_ENV,
"CYC_MAYA_ENV": CYC_MAYA_ENV,
"CYC_RV_ENV": CYC_RV_ENV,
"CYC_MARI_ENV": CYC_MARI_ENV,
"CYC_3DE_ENV": CYC_3DE_ENV,
"CYC_CLARISSE_ENV": CYC_CLARISSE_ENV,
"CYC_SHOW_ENV": CYC_SHOW_ENV,
"CYC_POLYPHEMUS_PATH": CYC_POLYPHEMUS_PATH,
"CYC_STEROPES_PATH": CYC_STEROPES_PATH,
"CYC_ENGINE_NUKE": CYC_ENGINE_NUKE,
"CYC_ICON": CYC_ICON,
"NUKE_PATH": NUKE_PATH,
"SHOW_PATH": SHOW_PATH
}
}
json.dump(CYC_envs, feedsjson, indent=4, sort_keys=True)
create_json_CYC_envs("/home/geoff/Dropbox")
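# Editor's note: this module writes CYC_envs.json as an import-time side
# effect; the Dropbox path above is the author's example root directory.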
| 39.703704
| 65
| 0.58722
| 288
| 2,144
| 3.930556
| 0.201389
| 0.111307
| 0.141343
| 0.086572
| 0.174028
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003287
| 0.290578
| 2,144
| 53
| 66
| 40.45283
| 0.74096
| 0
| 0
| 0
| 0
| 0
| 0.253945
| 0.064562
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.042553
| 0
| 0.06383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e31af962393b8a7c27bf698791ef898144c732f5
| 4,143
|
py
|
Python
|
test/unit/api/test_api_safety.py
|
technocreep/FEDOT
|
c11f19d1d231bd9c1d96d6e39d14697a028f6272
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/api/test_api_safety.py
|
technocreep/FEDOT
|
c11f19d1d231bd9c1d96d6e39d14697a028f6272
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/api/test_api_safety.py
|
technocreep/FEDOT
|
c11f19d1d231bd9c1d96d6e39d14697a028f6272
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from fedot.api.api_utils.api_data import ApiDataProcessor
from fedot.api.api_utils.api_data_analyser import DataAnalyser
from fedot.api.main import Fedot
from fedot.core.data.data import InputData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import TaskTypesEnum, Task
from fedot.preprocessing.preprocessing import DataPreprocessor
from test.unit.api.test_main_api import composer_params
def get_data_analyser_with_specific_params(max_size=18, max_cat_cardinality=5):
""" Create a DataAnalyser object with small max dataset size and small max cardinality for categorical features"""
safety_module = DataAnalyser(safe_mode=True)
preprocessor = ApiDataProcessor(Task(TaskTypesEnum.classification))
safety_module.max_size = max_size
safety_module.max_cat_cardinality = max_cat_cardinality
return safety_module, preprocessor
def get_small_cat_data():
""" Generate tabular data with categorical features."""
features = np.array([["a", "qq", 0.5],
["b", "pp", 1],
["c", np.nan, 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3],
["d", "oo", 3]], dtype=object)
target = np.array([0, 0, 0, 0, 1, 1, 1, 1])
input_data = InputData(idx=np.arange(features.shape[0]),
features=features, target=target,
data_type=DataTypesEnum.table,
task=Task(TaskTypesEnum.classification))
input_data = DataPreprocessor().obligatory_prepare_for_fit(input_data)
return input_data
def test_safety_label_correct():
"""
Check if cutting and label encoding is used for pseudo large data with categorical features with high cardinality
"""
api_safety, api_preprocessor = get_data_analyser_with_specific_params()
data = get_small_cat_data()
recs = api_safety.give_recommendation(data)
api_preprocessor.accept_and_apply_recommendations(data, recs)
assert data.features.shape[0] * data.features.shape[1] <= api_safety.max_size
assert data.features.shape[1] == 3
assert data.features[0, 0] != 'a'
def test_no_safety_needed_correct():
"""
Check if oneHot encoding is used for small data with small cardinality of categorical features
"""
api_safety, api_preprocessor = get_data_analyser_with_specific_params(max_size=100, max_cat_cardinality=100)
data = get_small_cat_data()
recs = api_safety.give_recommendation(data)
api_preprocessor.accept_and_apply_recommendations(data, recs)
assert data.features.shape[0] * data.features.shape[1] == 24
assert data.features.shape[1] == 3
assert data.features[0, 0] == 'a'
def test_api_fit_predict_with_pseudo_large_dataset_with_label_correct():
"""
Test if safe mode in API cut large data and use LabelEncoder for features with high cardinality
"""
model = Fedot(problem="classification",
composer_params=composer_params)
model.data_analyser.max_cat_cardinality = 5
model.data_analyser.max_size = 18
data = get_small_cat_data()
pipeline = model.fit(features=data, predefined_model='auto')
pipeline.predict(data)
model.predict(features=data)
    # there should be only tree-like models + data operations
assert len(model.params.api_params['available_operations']) == 6
assert 'logit' not in model.params.api_params['available_operations']
def test_api_fit_predict_with_pseudo_large_dataset_with_onehot_correct():
"""
Test if safe mode in API use OneHotEncoder with small data with small cardinality
"""
model = Fedot(problem="classification",
composer_params=composer_params)
model.data_analyser.max_size = 1000
data = get_small_cat_data()
model.fit(features=data, predefined_model='auto')
model.predict(features=data)
# there should be all light models + data operations
assert 'logit' in model.params.api_params['available_operations']
| 42.71134
| 118
| 0.69901
| 540
| 4,143
| 5.125926
| 0.240741
| 0.034682
| 0.039017
| 0.027095
| 0.457731
| 0.416908
| 0.402818
| 0.30672
| 0.28974
| 0.28974
| 0
| 0.014916
| 0.207096
| 4,143
| 96
| 119
| 43.15625
| 0.827702
| 0.156891
| 0
| 0.30303
| 0
| 0
| 0.038023
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.257576
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e31bbe934af2c97028c0e66dc59a02ae268f0c31
| 7,765
|
py
|
Python
|
parallelpy/parallelpy.py
|
krober/parallelpy
|
356fa0b75d3de2fa695b2fd64f0a53555f6bf55f
|
[
"MIT"
] | null | null | null |
parallelpy/parallelpy.py
|
krober/parallelpy
|
356fa0b75d3de2fa695b2fd64f0a53555f6bf55f
|
[
"MIT"
] | 1
|
2018-08-26T03:01:18.000Z
|
2018-08-26T03:01:18.000Z
|
parallelpy/parallelpy.py
|
krober/parallelpy
|
356fa0b75d3de2fa695b2fd64f0a53555f6bf55f
|
[
"MIT"
] | null | null | null |
from multiprocessing import cpu_count, Manager, Process
from time import sleep
class Parallelizer:
def __init__(
self,
*,
target: 'function',
args: list,
enable_results: bool,
auto_proc_count: bool,
max_proc_count: int):
"""
Constructor, no positional args needed, all named args required
:param target: function: target for multiple processes
:param args: list: args to be passed to each instance of target.
Target function must accept an individual arg as its first param
(though can be tuple/dict/any encapsulating data structure).
:param enable_results: bool: enables/disables a managed proxylist
to hold data from the target function. Disable if you only need
the target func to run and do not need it to modify/persist data.
If enabled, passes managed proxylist to target func, therefore
target func must accept the list as its second param.
:param auto_proc_count: bool: True=let class determine number of
processes to use - calculates based on number of cores installed
and number of operations to be performed.
False=use max_proc_count number of processes.
:param max_proc_count: int: max number of processes to be spawned
simultaneously
"""
self.target = target
self.args = args
self.enable_results = enable_results
self.__proc_count = 0
self.__cpu_count = cpu_count()
self.__iterations = len(args)
self.__processes = []
self.__incoming = 0
self.__running = 0
self.__finished = [False for _ in range(len(args))]
self.__set_proc_count(auto_proc_count, max_proc_count)
def run(self):
"""
Runs the target function, manages core/process count/activity
:return: list: results, unpackaged from manager.list proxy.
Recommended to enclose results in target function in tuples
or other data structures before appending to the proxy list
to avoid race conditions.
"""
if self.enable_results:
return self.__run_managed()
else:
self.__run_unmanaged()
def __run_managed(self):
"""
Configures process manager and runs procs
:return: List: converted from ProxyList
"""
with Manager() as manager:
results = manager.list()
self.__generate_procs(results)
self.__run_procs()
self.__finalize_procs()
results = list(results)
return results
def __run_unmanaged(self):
"""
Runs data-unmanaged procs - for when you just want to run in
parallel and don't need 'return' data
:return: nothing
"""
self.__generate_procs()
self.__run_procs()
self.__finalize_procs()
def __run_procs(self):
"""
Runs processes, prints self on exception and re-raises exception
:return: nothing
"""
try:
while self.__incoming < self.__iterations:
# sleep reduces the CPU impact of this 'manager loop'
sleep(1 / 100)
self.__mark_finished_procs()
self.__spawn_available_procs()
except Exception as e:
print(self)
raise e
def __set_proc_count_auto(self, max_procs: int):
"""
Calculates optimal proc_count to reduce ram usage, but also
reduce wait time when only a single thread may be running
:param max_procs: int: max procs to allow simultaneously
:return: None
"""
if (self.__iterations <= self.__cpu_count
and self.__iterations <= max_procs):
self.__proc_count = self.__iterations
elif max_procs <= self.__cpu_count:
self.__proc_count = max_procs
else:
self.__proc_count = self.__cpu_count
for i in range(self.__cpu_count, max_procs + 1):
if self.__iterations % i == 0:
self.__proc_count = i
break
print(f'Using {self.__proc_count} processes')
def __set_proc_count_manual(self, count: int):
"""
Manually set the proc count. Use with care when using
very large counts. Higher count = higher ram usage.
:param count: int: number of procs to run simultaneously
:return: None
"""
self.__proc_count = count
    def __validate_proc_count(self, count: int):
        """
        Throws ValueError if count is not a positive integer
        :return: None
        """
        # Type-check first so that e.g. count=0.5 gets the integer error
        # rather than the misleading '> 0' one.
        if isinstance(count, bool) or not isinstance(count, int):
            raise ValueError('Number of processes must be an integer')
        elif count < 1:
            raise ValueError('Number of processes must be > 0')
def __set_proc_count(self, auto_proc_count: bool, max_proc_count: int):
"""
Sets proc count based on auto_procs true/false
:param auto_proc_count: bool: use auto proc count?
:param max_proc_count: int: max num of procs to run simultaneously
:return: None
"""
self.__validate_proc_count(max_proc_count)
if auto_proc_count:
self.__set_proc_count_auto(max_proc_count)
else:
self.__set_proc_count_manual(max_proc_count)
def __generate_procs(self, managed_results=None):
"""
Generates a list of procs ready for starting
:param managed_results: proxy manager.list: to store
data from target func
:return: None
"""
if managed_results is not None:
for arg in self.args:
self.__processes.append(Process(
target=self.target,
args=(arg, managed_results)
))
else:
for arg in self.args:
self.__processes.append(Process(
target=self.target,
args=(arg,)
))
def __spawn_available_procs(self):
"""
Spawns procs if the number of currently running procs is
less than the number of max_procs defined
:return: None
"""
if self.__running < self.__proc_count:
self.__processes[self.__incoming].start()
self.__incoming += 1
self.__running += 1
def __mark_finished_procs(self):
"""
Checks currently running procs for status, marks finished
:return: None
"""
for i in range(self.__incoming):
if not self.__processes[i].is_alive():
if not self.__finished[i]:
self.__running -= 1
self.__finished[i] = True
def __finalize_procs(self):
"""
Finalizes procs/waits on remaining running procs
:return: None
"""
        for process in self.__processes:
            process.join()
self.__mark_finished_procs()
def __str__(self):
stats = f'\n' \
f'Target function: {self.target.__name__}\n' \
f'Number of iters: {self.__iterations}\n' \
f'Number of threads: {self.__proc_count}\n' \
f'Number of procs: {len(self.__processes)}\n' \
f'Current incoming: {self.__incoming}\n' \
f'Current running: {self.__running}\n' \
f'Current finished: {sum(self.__finished)}' \
f'\n'
return stats
def __repr__(self):
return self.__str__()
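# --- Hypothetical usage sketch (editor addition, not part of the original
# --- module): squares 0..7 across up to 4 worker processes. ---
def _demo_square(n, results):
    # Tuple-wrap the result, per the run() docstring, to avoid race conditions.
    results.append((n, n * n))

if __name__ == '__main__':
    demo = Parallelizer(target=_demo_square, args=list(range(8)),
                        enable_results=True, auto_proc_count=True,
                        max_proc_count=4)
    print(demo.run())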
| 34.665179
| 79
| 0.582228
| 903
| 7,765
| 4.710963
| 0.241418
| 0.076164
| 0.025388
| 0.015985
| 0.143394
| 0.106723
| 0.082276
| 0.06441
| 0.030559
| 0.030559
| 0
| 0.002946
| 0.344237
| 7,765
| 223
| 80
| 34.820628
| 0.832482
| 0.330071
| 0
| 0.157895
| 0
| 0
| 0.08708
| 0.024662
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131579
| false
| 0
| 0.017544
| 0.008772
| 0.192982
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e31d9fd874884c64a5cfd7e556213a44724536fb
| 9,507
|
py
|
Python
|
deanslist/deanslist.py
|
upeducationnetwork/deanslist-python
|
226eda2580055427119397bc28e7976f019d7301
|
[
"MIT"
] | null | null | null |
deanslist/deanslist.py
|
upeducationnetwork/deanslist-python
|
226eda2580055427119397bc28e7976f019d7301
|
[
"MIT"
] | 2
|
2016-05-16T19:54:26.000Z
|
2016-05-20T12:02:20.000Z
|
deanslist/deanslist.py
|
upeducationnetwork/deanslist-python
|
226eda2580055427119397bc28e7976f019d7301
|
[
"MIT"
] | null | null | null |
__author__ = 'rknight'
import os
import csv
import logging
import datetime
from requests_futures.sessions import FuturesSession
def dl(reports, dlkeys):
# Primary call
# Send requests
allreports = dlrequest(reports=reports, dlkeys=dlkeys)
# Write results
for outreport in allreports.keys():
# Points and incidents require unique parsing
if outreport == 'points':
writepoints('points.csv', report=allreports[outreport])
elif outreport == 'coaching':
writecoaching('coaching.csv', report=allreports[outreport])
elif outreport == 'coaching_evidence':
writeevidence('coaching_evidence.csv', report=allreports[outreport])
elif outreport == 'incidents':
writeincidents(report=allreports[outreport])
else:
# Merge the schools into a single list
dat = []
for school in allreports[outreport]['data']:
dat.extend(school['data'])
writefile('{0}.csv'.format(outreport), dataset=dat, rewrite=allreports[outreport]['write'])
def dlrequest(reports, dlkeys):
'''
Primary function to get data for a range of dates
Returns a dict. Structure should be:
{'outname': {'data': [all the data for this report with one list item per school],
'write': whether to write or append},
'second outname': {'data': [all the data for this report with one list item per key],
'write': whether to write or append},
etc
}
'''
session = FuturesSession(max_workers=10)
allreports = {}
futures = []
# This is run in background once the download is completed
def bg_call(sess, resp, outname):
if resp.status_code == 200:
dat = resp.json()
allreports[outname]['data'].append(dat)
else:
logging.warning('Response code {0} for {1}'.format(resp.status_code, resp.url))
# Throw the requests at Deanslist
for ireport in reports:
outname = ireport['outname']
url = ireport['reporturl']
allreports[outname] = {'data': [], 'write': ireport.get('rewrite', 'w')}
for dlkey in dlkeys:
futures.append(session.get(url,
params={'sdt': ireport.get('pulldate', ''),
'edt': ireport.get('enddate', ''),
'apikey': dlkey},
background_callback=lambda sess, resp, outname=outname: bg_call(sess, resp, outname)))
# Parse errors in the results
for f in futures:
try:
f.result()
        except Exception:
            # Future.exception is a method; the original formatted the bound
            # method object instead of the exception itself.
            logging.warning('{0}'.format(f.exception()))
            continue
return allreports
def dlall(outname, reporturl, startat, dlkeys, endat='', max_workers=5):
# Get all data for large datasets by sending a separate request for each week of data
one_week = datetime.timedelta(days=7)
one_day = datetime.timedelta(days=1)
try:
sdt = datetime.datetime.strptime(startat, '%Y-%m-%d').date()
except ValueError:
raise ValueError("Incorrect data format for startat, should be YYYY-MM-DD")
if endat != '':
try:
endat = datetime.datetime.strptime(endat, '%Y-%m-%d').date()
except ValueError:
raise ValueError("Incorrect data format for endat, should be YYYY-MM-DD")
else:
endat = datetime.date.today()
edt = sdt + one_week
alldat = []
session = FuturesSession(max_workers=max_workers)
while edt < endat + one_week:
# outname_date = outname + "/" + outname + "_Week_" + edt.strftime("%Y-%m-%d")
dat = dlrequest_single(reporturl=reporturl, sdt=sdt, edt=edt, dlkeys=dlkeys, session=session)
alldat.extend(dat)
sdt = edt + one_day
edt = edt + one_week
# Write to hard drive
if len(alldat) > 0:
writefile('{0}.csv'.format(outname), dataset=alldat, rewrite='w')
def dlrequest_single(reporturl, sdt, edt, dlkeys, session=None):
    """
    Request and write a single report for all schools for a date range
    """
    # A default-argument session would be created once at import time, so the
    # shared session is built lazily instead.
    if session is None:
        session = FuturesSession(max_workers=5)
    alldat = []
    futures = []
    url = reporturl
# Throw the requests at Deanslist
for dlkey in dlkeys:
futures.append(session.get(url,
params={'sdt': sdt,
'edt': edt,
'apikey': dlkey}))
# Parse errors in the results
for f in futures:
try:
response = f.result()
        except MemoryError:
            logging.warning('Memory Error.')
            # Skip this future: `response` is unbound when result() raised.
            continue
        if response.status_code != 200:
logging.warning('Response code {0} for {1}'.format(response.status_code, response.url))
continue
# Append results
dat = response.json()
alldat.extend(dat['data'])
return alldat
def writefile(outname, dataset, headers=None, rewrite='a'):
"""
Utility to write results to file
"""
if len(dataset) == 0:
logging.warning('No data for {0}'.format(outname))
return
# Make default headers
if not headers:
headers = sorted(list(dataset[0].keys()))
# Flag to write headers if its the first time
exists = os.path.isfile(outname)
# Write output
with open(outname, rewrite, encoding='utf-8') as file:
outfile = csv.DictWriter(file, headers, lineterminator='\n')
if not exists or rewrite == 'w':
outfile.writeheader()
for row in dataset:
outfile.writerow(row)
def writepoints(outname, report):
# Parse and write points
    if not report['data']:  # the original membership test on a list never matched
logging.warning('No points data')
return
points = []
# Flatten
for dat in report['data']:
for row in dat['Students']:
for item in row['Terms']:
item['StudentID'] = row['StudentID']
item['StudentSchoolID'] = row['StudentSchoolID']
item['SchoolID'] = dat['SchoolID']
try:
item['StartDate'] = item['StartDate']['date']
item['EndDate'] = item['EndDate']['date']
except:
pass
points.append(item)
# Write
writefile(outname, dataset=points, rewrite=report['write'])
# Parse & write the incidents module, which has a unique json structure
def writeincidents(report):
incidents = []
penalties = []
actions = []
custfields = []
# All possible ids
inc_id_list = ['IncidentID', 'SchoolID', 'StudentID', 'StudentFirst', 'StudentLast',
'StudentSchoolID', 'GradeLevelShort', 'HomeroomName', 'Infraction', 'Location', 'ReportedDetails']
for school in report['data']:
for idat in school['data']:
# grab ids in this report
inc_id = {this_id: idat[this_id] for this_id in inc_id_list}
# Flatten
for timefield in ['CreateTS', 'UpdateTS', 'IssueTS', 'ReviewTS', 'CloseTS', 'ReturnDate']:
try:
idat[timefield] = idat.pop(timefield)['date']
except:
idat[timefield] = ''
# Actions
act_list = idat.pop('Actions')
            idat['NumActions'] = len(act_list)  # per-incident count, not the running total
for iact in act_list:
iact.update(inc_id)
actions.append(iact)
# Penalties
pen_list = idat.pop('Penalties')
            idat['NumPenalties'] = len(pen_list)  # per-incident count, not the running total
for ipen in pen_list:
ipen.update(inc_id)
penalties.append(ipen)
# Custom fields (not currently used)
if 'Custom_Fields' in idat:
cust_list = idat.pop('Custom_Fields')
for field in cust_list:
if field['StringValue'] == 'Y':
custfields.append({'IncidentID': inc_id['IncidentID'], 'SpecialCase': field['FieldName']})
# Incidents
incidents.append(idat)
# Export
exportdict = {'incidents': incidents, 'incidents-penalties': penalties, 'incidents-actions': actions, 'incidents-custfields': custfields}
for key in exportdict:
writefile('{0}.csv'.format(key), dataset=exportdict[key], rewrite='w')
def writecoaching(outname, report):
# Flatten
coaching = []
for school in report['data']:
for observation in school['data']:
for timefield in ['DebriefDate', 'ReviewDate', 'LessonDate']:
try:
observation[timefield] = observation.pop(timefield)['date']
except:
observation[timefield] = ''
feedbackitems = observation.pop('FeedbackItems')
for feedbackitem in feedbackitems:
feedbackitem.update(observation)
coaching.append(feedbackitem)
writefile(outname, dataset=coaching, rewrite=report['write'])
return coaching
def writeevidence(outname, report):
# Flatten
coaching = []
for school in report['data']:
for observation in school['data']:
for timefield in ['EvidenceDate']:
try:
observation[timefield] = observation.pop(timefield)['date']
except:
observation[timefield] = ''
coaching.append(observation)
writefile(outname, dataset=coaching, rewrite=report['write'])
| 31.376238
| 141
| 0.577154
| 1,004
| 9,507
| 5.416335
| 0.253984
| 0.01416
| 0.011033
| 0.011033
| 0.230599
| 0.216992
| 0.184627
| 0.148584
| 0.134976
| 0.134976
| 0
| 0.003795
| 0.307037
| 9,507
| 302
| 142
| 31.480132
| 0.821645
| 0.137372
| 0
| 0.244565
| 0
| 0
| 0.127094
| 0.002606
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054348
| false
| 0.005435
| 0.027174
| 0
| 0.108696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e31da554e9612910aa7b87468de6e4101ac08273
| 7,210
|
py
|
Python
|
anchore_engine/services/policy_engine/api/models/image.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/api/models/image.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/api/models/image.py
|
roachmd/anchore-engine
|
521d6796778139a95f51542670714205c2735a81
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from anchore_engine.services.policy_engine.api.models.base_model_ import Model
from anchore_engine.services.policy_engine.api import util
class Image(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, digest=None, user_id=None, state=None, distro_namespace=None, created_at=None, last_modified=None, tags=None): # noqa: E501
"""Image - a model defined in Swagger
:param id: The id of this Image. # noqa: E501
:type id: str
:param digest: The digest of this Image. # noqa: E501
:type digest: str
:param user_id: The user_id of this Image. # noqa: E501
:type user_id: str
:param state: The state of this Image. # noqa: E501
:type state: str
:param distro_namespace: The distro_namespace of this Image. # noqa: E501
:type distro_namespace: str
:param created_at: The created_at of this Image. # noqa: E501
:type created_at: datetime
:param last_modified: The last_modified of this Image. # noqa: E501
:type last_modified: datetime
:param tags: The tags of this Image. # noqa: E501
:type tags: List[str]
"""
self.swagger_types = {
'id': str,
'digest': str,
'user_id': str,
'state': str,
'distro_namespace': str,
'created_at': datetime,
'last_modified': datetime,
'tags': List[str]
}
self.attribute_map = {
'id': 'id',
'digest': 'digest',
'user_id': 'user_id',
'state': 'state',
'distro_namespace': 'distro_namespace',
'created_at': 'created_at',
'last_modified': 'last_modified',
'tags': 'tags'
}
self._id = id
self._digest = digest
self._user_id = user_id
self._state = state
self._distro_namespace = distro_namespace
self._created_at = created_at
self._last_modified = last_modified
self._tags = tags
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Image of this Image. # noqa: E501
:rtype: Image
"""
return util.deserialize_model(dikt, cls)
@property
def id(self):
"""Gets the id of this Image.
:return: The id of this Image.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Image.
:param id: The id of this Image.
:type id: str
"""
self._id = id
@property
def digest(self):
"""Gets the digest of this Image.
:return: The digest of this Image.
:rtype: str
"""
return self._digest
@digest.setter
def digest(self, digest):
"""Sets the digest of this Image.
:param digest: The digest of this Image.
:type digest: str
"""
self._digest = digest
@property
def user_id(self):
"""Gets the user_id of this Image.
:return: The user_id of this Image.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this Image.
:param user_id: The user_id of this Image.
:type user_id: str
"""
self._user_id = user_id
@property
def state(self):
"""Gets the state of this Image.
State of the image in the policy evaluation system # noqa: E501
:return: The state of this Image.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this Image.
State of the image in the policy evaluation system # noqa: E501
:param state: The state of this Image.
:type state: str
"""
allowed_values = ["failed", "initializing", "analyzing", "analyzed"] # noqa: E501
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def distro_namespace(self):
"""Gets the distro_namespace of this Image.
The namespace identifier for this image for purposes of CVE matches, etc # noqa: E501
:return: The distro_namespace of this Image.
:rtype: str
"""
return self._distro_namespace
@distro_namespace.setter
def distro_namespace(self, distro_namespace):
"""Sets the distro_namespace of this Image.
The namespace identifier for this image for purposes of CVE matches, etc # noqa: E501
:param distro_namespace: The distro_namespace of this Image.
:type distro_namespace: str
"""
self._distro_namespace = distro_namespace
@property
def created_at(self):
"""Gets the created_at of this Image.
The timestamp on when this image record was created, not the image itself # noqa: E501
:return: The created_at of this Image.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Image.
The timestamp on when this image record was created, not the image itself # noqa: E501
:param created_at: The created_at of this Image.
:type created_at: datetime
"""
self._created_at = created_at
@property
def last_modified(self):
"""Gets the last_modified of this Image.
Time the image record in this service was last updated # noqa: E501
:return: The last_modified of this Image.
:rtype: datetime
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""Sets the last_modified of this Image.
Time the image record in this service was last updated # noqa: E501
:param last_modified: The last_modified of this Image.
:type last_modified: datetime
"""
self._last_modified = last_modified
@property
def tags(self):
"""Gets the tags of this Image.
List of tags currently applied to the image. Updated by new tag events. Similarly scoped by the user_id # noqa: E501
:return: The tags of this Image.
:rtype: List[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this Image.
List of tags currently applied to the image. Updated by new tag events. Similarly scoped by the user_id # noqa: E501
:param tags: The tags of this Image.
:type tags: List[str]
"""
self._tags = tags
| 27.414449
| 156
| 0.59251
| 919
| 7,210
| 4.500544
| 0.130577
| 0.097921
| 0.109043
| 0.031431
| 0.577369
| 0.469778
| 0.423356
| 0.307544
| 0.292553
| 0.215184
| 0
| 0.014754
| 0.323162
| 7,210
| 262
| 157
| 27.519084
| 0.832787
| 0.460749
| 0
| 0.26087
| 0
| 0
| 0.088113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.195652
| false
| 0
| 0.054348
| 0
| 0.358696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e32283e627f56eef0ab47dab2fb3694cb482ef8d
| 231
|
py
|
Python
|
hdc-utility/model/Formation.py
|
YSRKEN/HDC_React2
|
cba48a0563caef629169644254742f688a0e1ec7
|
[
"MIT"
] | null | null | null |
hdc-utility/model/Formation.py
|
YSRKEN/HDC_React2
|
cba48a0563caef629169644254742f688a0e1ec7
|
[
"MIT"
] | 13
|
2020-09-04T23:25:20.000Z
|
2022-02-18T01:52:33.000Z
|
hdc-utility/model/Formation.py
|
YSRKEN/HDC_React2
|
cba48a0563caef629169644254742f688a0e1ec7
|
[
"MIT"
] | null | null | null |
from enum import Enum
class Formation(Enum):
LINE_AHEAD = 0 # 単縦陣
DOUBLE_LINE = 1 # 複縦陣
DIAMOND = 2 # 輪形陣
ECHELON = 3 # 梯形陣
LINE_ABREAST = 4 # 単横陣
FORMATION_3 = 5 # 第3陣形(第三警戒航行序列(輪形陣))
| 21
| 43
| 0.562771
| 31
| 231
| 4.064516
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053691
| 0.354978
| 231
| 10
| 44
| 23.1
| 0.791946
| 0.168831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e323be496777a0e952195a0a60b4f2ae474d9dd5
| 849
|
py
|
Python
|
bisection.py
|
Raijeku/Optimizacion
|
b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b
|
[
"Apache-2.0"
] | null | null | null |
bisection.py
|
Raijeku/Optimizacion
|
b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b
|
[
"Apache-2.0"
] | null | null | null |
bisection.py
|
Raijeku/Optimizacion
|
b06c302c3edbdb3a2a2b378b0c53baaf9fe69c2b
|
[
"Apache-2.0"
] | null | null | null |
from sympy import Symbol, parse_expr
import pandas as pd
def bisection(xl, xu, tolerance, function):
x = Symbol('x')
f = parse_expr(function)
iteration = 0
data = pd.DataFrame(columns=['iteration','xl','xu','xr','f(xl)','f(xu)','f(xr)','f(xl)f(xr)','error'])
while abs(xu-xl)>=tolerance:
xr = (xl + xu)/2
fxl = f.subs(x, xl)
fxu = f.subs(x, xu)
fxr = f.subs(x, xr)
data = data.append(pd.DataFrame({'iteration':[iteration], 'xl':[xl], 'xu':[xu], 'xr':[xr], 'f(xl)':[fxl], 'f(xu)':[fxu], 'f(xr)':[fxr], 'f(xl)f(xr)':[fxl*fxr], 'error':[abs(xu-xl)]}), ignore_index = True)
        if fxl*fxr < 0:
            xu = xr
        elif fxl*fxr > 0:
            xl = xr
        else:
            # f(xr) (or f(xl)) is exactly zero: the root was hit, so stop
            # instead of looping forever with an unchanged bracket.
            break
        iteration += 1
data.set_index('iteration', inplace=True)
return data
print(bisection(10, 50, 0.01, '3*x**2 - 120*x + 100'))
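# Editor's note: the bracket halves each iteration, so the loop runs about
# ceil(log2((xu - xl) / tolerance)) times -- here ceil(log2(40 / 0.01)) = 12.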
| 30.321429
| 212
| 0.522968
| 134
| 849
| 3.291045
| 0.358209
| 0.036281
| 0.034014
| 0.027211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.246172
| 849
| 28
| 213
| 30.321429
| 0.657813
| 0
| 0
| 0
| 0
| 0
| 0.141176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.190476
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e324c2b47225b873ec4b37a7708b700104f77b26
| 3,684
|
py
|
Python
|
subt/ros/base/src/motor_controller.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 12
|
2017-02-16T10:22:59.000Z
|
2022-03-20T05:48:06.000Z
|
subt/ros/base/src/motor_controller.py
|
m3d/osgar_archive_2020
|
556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e
|
[
"MIT"
] | 618
|
2016-08-30T04:46:12.000Z
|
2022-03-25T16:03:10.000Z
|
subt/ros/base/src/motor_controller.py
|
robotika/osgar
|
6f4f584d5553ab62c08a1c7bb493fefdc9033173
|
[
"MIT"
] | 11
|
2016-08-27T20:02:55.000Z
|
2022-03-07T08:53:53.000Z
|
from pid import PID
import pdb
#for anonymous objects
Object = lambda **kwargs: type("Object", (), kwargs)
class MotorController:
def __init__(self, wheelBase,numberOfMotors):
self.pidControllerFrontLeft = PID()
self.pidControllerFrontRight = PID()
self.pidControllerRearLeft = PID()
self.pidControllerRearRight = PID()
self.wheelBase = wheelBase
self.numberOfMotors = numberOfMotors
self.lastForwardSpeed = 0
def update(self,cmd_vel,actualWheelSpeed):
#desiredSpeedFrontLeft = 0
#desiredSpeedFrontRight = 0
#desiredSpeedRearLeft = 0
#desiredSpeedRearRight = 0
        # Call the factory -- binding the bare lambda would make desiredSpeed
        # and newWheelSpeed aliases of one object, so their attributes collide.
        desiredSpeed = Object()
if self.numberOfMotors == 1:
desiredSpeed.frontLeft = cmd_vel.linear.x
desiredSpeed.frontRight = 0
desiredSpeed.rearLeft = 0
desiredSpeed.rearRight =0
else:
desiredSpeed.frontLeft = cmd_vel.linear.x - cmd_vel.angular.z * self.wheelBase / 2
desiredSpeed.frontRight = cmd_vel.linear.x + cmd_vel.angular.z * self.wheelBase / 2
desiredSpeed.rearLeft = desiredSpeed.frontLeft
desiredSpeed.rearRight = desiredSpeed.frontRight
        newWheelSpeed = Object()
if desiredSpeed.frontLeft == 0 and\
desiredSpeed.frontRight == 0 and\
desiredSpeed.rearLeft == 0 and\
desiredSpeed.rearRight == 0:
#robot wants to stop now
newWheelSpeed.frontLeft = self.pidControllerFrontLeft.stop()
newWheelSpeed.frontRight = self.pidControllerFrontRight.stop()
newWheelSpeed.rearLeft = self.pidControllerRearLeft.stop()
newWheelSpeed.rearRight = self.pidControllerRearRight.stop()
elif (cmd_vel.linear.x > 0 and self.lastForwardSpeed < 0) or \
(cmd_vel.linear.x < 0 and self.lastForwardSpeed > 0):
#robot wants to change direction -> stop first.
newWheelSpeed.frontLeft = self.pidControllerFrontLeft.stop()
newWheelSpeed.frontRight = self.pidControllerFrontRight.stop()
newWheelSpeed.rearLeft = self.pidControllerRearLeft.stop()
newWheelSpeed.rearRight = self.pidControllerRearRight.stop()
else:
newWheelSpeed.frontLeft = self.pidControllerFrontLeft.update(desiredSpeed.frontLeft,actualWheelSpeed.frontLeft)
newWheelSpeed.frontRight = self.pidControllerFrontRight.update(desiredSpeed.frontRight,actualWheelSpeed.frontRight)
newWheelSpeed.rearLeft = self.pidControllerRearLeft.update(desiredSpeed.rearLeft,actualWheelSpeed.rearLeft)
newWheelSpeed.rearRight = self.pidControllerRearRight.update(desiredSpeed.rearRight,actualWheelSpeed.rearRight)
"""
print "FL:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.frontLeft,actualWheelSpeed.frontLeft,newWheelSpeed.frontLeft)
print "FR:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.frontRight,actualWheelSpeed.frontRight,newWheelSpeed.frontRight)
print "RL:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.rearLeft,actualWheelSpeed.rearLeft,newWheelSpeed.rearLeft)
print "RR:\tdesired=%lf;\tactual=%lf;\tnew=%lf" % (desiredSpeed.rearRight,actualWheelSpeed.rearRight,newWheelSpeed.rearRight)
"""
self.lastForwardSpeed = cmd_vel.linear.x
"""
newWheelSpeed.frontLeft = 0
newWheelSpeed.frontRight = 0
newWheelSpeed.rearLeft = 0
newWheelSpeed.rearRight = 0
"""
return newWheelSpeed
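# --- Hypothetical usage sketch (editor addition; the cmd_vel layout mimics a
# --- geometry_msgs/Twist message, and wheel speeds are plain floats) ---
# cmd_vel = Object(linear=Object(x=0.5), angular=Object(z=0.1))
# wheels = Object(frontLeft=0.0, frontRight=0.0, rearLeft=0.0, rearRight=0.0)
# controller = MotorController(wheelBase=0.4, numberOfMotors=4)
# new_speeds = controller.update(cmd_vel, wheels)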
| 47.844156
| 140
| 0.659609
| 316
| 3,684
| 7.648734
| 0.212025
| 0.022342
| 0.029789
| 0.032271
| 0.462971
| 0.331403
| 0.308647
| 0.247414
| 0.247414
| 0.21597
| 0
| 0.00833
| 0.250543
| 3,684
| 76
| 141
| 48.473684
| 0.867077
| 0.051574
| 0
| 0.217391
| 0
| 0
| 0.002177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e325abcd58eea788430716963a4dc7047047719c
| 4,931
|
py
|
Python
|
shiftscheduler/gui/barebone.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | 2
|
2020-04-16T17:03:56.000Z
|
2021-04-08T17:23:21.000Z
|
shiftscheduler/gui/barebone.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | null | null | null |
shiftscheduler/gui/barebone.py
|
c-rainbow/nurse-scheduling
|
8537c875e46772700499a89dec3a30a796434fe0
|
[
"MIT"
] | 1
|
2020-05-04T18:03:59.000Z
|
2020-05-04T18:03:59.000Z
|
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import scrolledtext
from tkinter import ttk
import tkcalendar as tkc
from shiftscheduler.data_types import data_types
from shiftscheduler.excel import output as excel_output
from shiftscheduler.gui import constants
from shiftscheduler.gui import util
from shiftscheduler.i18n import gettext
_ = gettext.GetTextFn('gui/barebone')
LOCALE_CODE = gettext.GetLanguageCode()
DATE_PATTERN = _('y/m/d')
# TkInter frame for getting barebone Excel file
class BareboneExcelFrame(ttk.Frame):
def __init__(self, master, *args, **kwargs):
super().__init__(master, *args, **kwargs)
util.SetGridWeights(self, column_weights=(1, 2))
self.createLeftFrame()
self.createRightFrame()
# Create left side of the frame
def createLeftFrame(self):
left_frame = ttk.Frame(self)
util.SetGrid(left_frame, 0, 0)
util.SetGridWeights(left_frame, row_weights=(1, 9))
label = ttk.Label(left_frame, text=_('Please enter name of workers'))
util.SetGrid(label, 0, 0) #, sticky=ttk.W) # For some reason, ttk.NSEW does not work
#self.names_text_area = ttk.Text(left_frame)
self.names_text_area = scrolledtext.ScrolledText(left_frame)
util.SetGrid(self.names_text_area, 1, 0)
# Create right side of the frame
def createRightFrame(self):
right_frame = ttk.Frame(self)
util.SetGrid(right_frame, 0, 1)
util.SetGridWeights(right_frame, row_weights=(1, 1, 1, 1, 1, 5, 1))
# Start date widgets
start_date_label = ttk.Label(right_frame, text=_('Start Date'))
util.SetGrid(start_date_label, 0, 0)
self.start_cal = tkc.DateEntry(
right_frame, year=2020, month=5, day=1, date_pattern=DATE_PATTERN, locale=LOCALE_CODE)
util.SetGrid(self.start_cal, 1, 0)
# End date widgets
end_date_label = ttk.Label(right_frame, text=_('End Date'))
util.SetGrid(end_date_label, 2, 0)
self.end_cal = tkc.DateEntry(
right_frame, year=2020, month=5, day=31, date_pattern=DATE_PATTERN, locale=LOCALE_CODE)
util.SetGrid(self.end_cal, 3, 0)
# Instruction label
instruction = """
사용 방법
1.간호사 이름을 한줄씩 적어주세요
2.일정의 시작-끝 날짜를 지정합니다
3."엑셀 파일 받기"를 눌러 파일을 저장합니다
4."날짜별 설정" 시트에서 필요 인원을 입력합니다
5."간호사별 설정"에서 근무일수를 입력합니다
6."일정표"에서 기존에 정해진 일정을 입력합니다
7."새 일정" 탭에서 다음 단계를 진행해 주세요
"""
instruction_label = ttk.Label(right_frame, text=instruction, justify=tk.LEFT)
util.SetGrid(instruction_label,5, 0)
# Download button
def callback_func():
error = self.validateValues()
if error:
messagebox.showerror(message=error)
return
filepath = filedialog.asksaveasfilename(
title=_('Save the barebone Excel file'), filetypes=constants.EXCEL_FILE_TYPE)
if filepath:
self.CreateExcel(filepath)
download_button = ttk.Button(
right_frame, text=_('Download barebone Excel'), command=callback_func)
util.SetGrid(download_button, 6, 0)
# Get values from GUI
def getValues(self):
text_area_value = self.names_text_area.get('1.0', 'end').strip()
names = text_area_value.split('\n')
# Filter out all empty names
names = [name.strip() for name in names if name and not name.isspace()]
start_date = self.start_cal.get_date()
end_date = self.end_cal.get_date()
return (names, start_date, end_date)
def validateValues(self):
names, start_date, end_date = self.getValues()
# No name input
if not names:
return _('Please enter names')
if start_date > end_date:
return _('The start date is after the end date')
# Check for duplicate names
nameset = set()
duplicates = set()
for name in names:
if name not in nameset:
nameset.add(name)
else:
duplicates.add(name)
if duplicates:
            return _('Duplicate names: {names}').format(names=', '.join(sorted(duplicates)))
return '' # No error
def CreateExcel(self, filepath):
names, start_date, end_date = self.getValues()
sw_config = data_types.SoftwareConfig(start_date=start_date, end_date=end_date, num_person=len(names))
person_configs = [
data_types.PersonConfig(name, None, None, None, None) for name in names]
barebone_schedule = data_types.TotalSchedule(
software_config=sw_config, person_configs=person_configs, date_configs=[],
assignment_dict=dict())
excel_output.FromTotalSchedule(barebone_schedule, filepath)
| 35.47482
| 110
| 0.636585
| 628
| 4,931
| 4.820064
| 0.307325
| 0.035679
| 0.025438
| 0.026429
| 0.164519
| 0.14635
| 0.105715
| 0.062768
| 0.062768
| 0.062768
| 0
| 0.015257
| 0.268911
| 4,931
| 138
| 111
| 35.731884
| 0.824411
| 0.075441
| 0
| 0.020408
| 0
| 0
| 0.101057
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.112245
| 0
| 0.255102
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e328edcf699e6d13889b75058d9c53daede11262
| 428
|
py
|
Python
|
play.py
|
Samitha156/100-days-of-coding
|
b47aff0f6d432945a20a5f95e2252cddb6cc5522
|
[
"MIT"
] | null | null | null |
play.py
|
Samitha156/100-days-of-coding
|
b47aff0f6d432945a20a5f95e2252cddb6cc5522
|
[
"MIT"
] | null | null | null |
play.py
|
Samitha156/100-days-of-coding
|
b47aff0f6d432945a20a5f95e2252cddb6cc5522
|
[
"MIT"
] | null | null | null |
def add(*args):
c = 0
for n in args:
c += n
return c
total = add(2, 5, 6, 5)  # renamed so the built-in sum() is not shadowed
print(total)
def calculate(**kwargs):
print(kwargs)
calculate(add=3, mul=5)
class Car:
def __init__(self, **kw):
# self.make = kw["make"]
# self.model = kw["model"]
self.make = kw.get("make")
self.make = kw.get("model")
my_car = Car(make="Nissan")
print(my_car.model)
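# Editor's note: kw.get("model") returns None when the key is absent, so the
# line above prints None unless a model= keyword is passed to Car().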
| 17.12
| 36
| 0.514019
| 64
| 428
| 3.34375
| 0.4375
| 0.11215
| 0.140187
| 0.121495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023973
| 0.317757
| 428
| 25
| 37
| 17.12
| 0.708904
| 0.109813
| 0
| 0
| 0
| 0
| 0.042254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0
| 0
| 0.3125
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e3294c6b906349f5541063a2b6f7ca5cb0e7e90b
| 21,406
|
py
|
Python
|
lib/simpleauth/handler.py
|
Bekt/tweetement
|
5cdb2e7db30a1600fbf522754c4917f8c9e377a6
|
[
"MIT"
] | 2
|
2015-02-18T17:31:58.000Z
|
2019-04-01T13:44:45.000Z
|
lib/simpleauth/handler.py
|
Bekt/tweetement
|
5cdb2e7db30a1600fbf522754c4917f8c9e377a6
|
[
"MIT"
] | 1
|
2015-01-26T03:58:19.000Z
|
2015-01-26T03:58:19.000Z
|
lib/simpleauth/handler.py
|
Bekt/tweetement
|
5cdb2e7db30a1600fbf522754c4917f8c9e377a6
|
[
"MIT"
] | 1
|
2021-05-04T21:15:53.000Z
|
2021-05-04T21:15:53.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
import logging
from urllib import urlencode
import urlparse
# for CSRF state tokens
import time
import base64
# Get available json parser
try:
# should be the fastest on App Engine py27.
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
# at this point ImportError will be raised
# if none of the above could be imported
# it's a OAuth 1.0 spec even though the lib is called oauth2
import oauth2 as oauth1
# users module is needed for OpenID authentication.
from google.appengine.api import urlfetch, users
from webapp2_extras import security
__all__ = ['SimpleAuthHandler',
'Error',
'UnknownAuthMethodError',
'AuthProviderResponseError',
'InvalidCSRFTokenError',
'InvalidOAuthRequestToken',
'InvalidOpenIDUserError']
OAUTH1 = 'oauth1'
OAUTH2 = 'oauth2'
OPENID = 'openid'
class Error(Exception):
"""Base error class for this module"""
pass
class UnknownAuthMethodError(Error):
"""Raised when there's no method to call for a specific auth type"""
pass
class AuthProviderResponseError(Error):
"""Error coming from a provider"""
pass
class InvalidCSRFTokenError(Error):
"""Currently used only in OAuth 2.0 with CSRF protection enabled"""
pass
class InvalidOAuthRequestToken(Error):
"""OAuth1 request token -related error"""
pass
class InvalidOpenIDUserError(Error):
"""Error during OpenID auth callback"""
pass
class SimpleAuthHandler(object):
"""A mixin to be used with a real request handler,
e.g. webapp2.RequestHandler. See README for getting started and
a usage example, or look through the code. It really is simple.
See README for docs on authentication flows.
"""
PROVIDERS = {
# OAuth 2.0 providers
'google': (OAUTH2,
'https://accounts.google.com/o/oauth2/auth?{0}',
'https://accounts.google.com/o/oauth2/token'),
'googleplus': (OAUTH2,
'https://accounts.google.com/o/oauth2/auth?{0}',
'https://accounts.google.com/o/oauth2/token'),
'windows_live': (OAUTH2,
'https://login.live.com/oauth20_authorize.srf?{0}',
'https://login.live.com/oauth20_token.srf'),
'facebook': (OAUTH2,
'https://www.facebook.com/dialog/oauth?{0}',
'https://graph.facebook.com/oauth/access_token'),
'linkedin2': (OAUTH2,
'https://www.linkedin.com/uas/oauth2/authorization?{0}',
'https://www.linkedin.com/uas/oauth2/accessToken'),
'foursquare': (OAUTH2,
'https://foursquare.com/oauth2/authenticate?{0}',
'https://foursquare.com/oauth2/access_token'),
# OAuth 1.0a providers
'linkedin': (OAUTH1, {
'request': 'https://api.linkedin.com/uas/oauth/requestToken',
'auth': 'https://www.linkedin.com/uas/oauth/authenticate?{0}'
}, 'https://api.linkedin.com/uas/oauth/accessToken'),
'twitter': (OAUTH1, {
'request': 'https://api.twitter.com/oauth/request_token',
'auth': 'https://api.twitter.com/oauth/authenticate?{0}'
}, 'https://api.twitter.com/oauth/access_token'),
# OpenID
'openid': ('openid', None)
}
TOKEN_RESPONSE_PARSERS = {
'google': '_json_parser',
'googleplus': '_json_parser',
'windows_live': '_json_parser',
'foursquare': '_json_parser',
'facebook': '_query_string_parser',
'linkedin': '_query_string_parser',
'linkedin2': '_json_parser',
'twitter': '_query_string_parser'
}
  # Set this to True in your handler if you want to use
  # the 'state' param during the authorization phase to guard against
  # cross-site request forgery (CSRF)
#
# CSRF protection assumes there's self.session method on the handler
# instance. See BaseRequestHandler in example/handlers.py for sample usage.
OAUTH2_CSRF_STATE = False
OAUTH2_CSRF_STATE_PARAM = 'csrf'
OAUTH2_CSRF_SESSION_PARAM = 'oauth2_state'
OAUTH2_CSRF_TOKEN_TIMEOUT = 3600 # 1 hour
# This will form the actual state parameter, e.g. token:timestamp
# You don't normally need to override it.
OAUTH2_CSRF_DELIMITER = ':'
# Extra params passed to OAuth2 init handler are stored in the state
# under this name.
OAUTH2_STATE_EXTRA_PARAM = 'extra'
def _simple_auth(self, provider=None):
"""Dispatcher of auth init requests, e.g.
GET /auth/PROVIDER
Calls _<authtype>_init() method, where <authtype> is
oauth2, oauth1 or openid (defined in PROVIDERS dict).
May raise one of the exceptions defined at the beginning
of the module. See README for details on error handling.
"""
extra = None
if self.request is not None and self.request.params is not None:
extra = self.request.params.items()
cfg = self.PROVIDERS.get(provider, (None,))
meth = self._auth_method(cfg[0], 'init')
    # We don't respond directly from here; the specific methods are in charge
    # of redirecting the user to an auth endpoint
meth(provider, cfg[1], extra)
def _auth_callback(self, provider=None):
"""Dispatcher of callbacks from auth providers, e.g.
/auth/PROVIDER/callback?params=...
Calls _<authtype>_callback() method, where <authtype> is
oauth2, oauth1 or openid (defined in PROVIDERS dict).
May raise one of the exceptions defined at the beginning
of the module. See README for details on error handling.
"""
cfg = self.PROVIDERS.get(provider, (None,))
meth = self._auth_method(cfg[0], 'callback')
# Get user profile data and their access token
result = meth(provider, *cfg[-1:])
user_data, auth_info = result[0], result[1]
extra = None
if len(result) > 2:
extra = result[2]
# The rest should be implemented by the actual app
self._on_signin(user_data, auth_info, provider, extra=extra)
def _auth_method(self, auth_type, step):
"""Constructs proper method name and returns a callable.
Args:
auth_type: string, One of 'oauth2', 'oauth1' or 'openid'
step: string, Phase of the auth flow. Either 'init' or 'callback'
Raises UnknownAuthMethodError if expected method doesn't exist on the
handler instance processing the request.
"""
method = '_%s_%s' % (auth_type, step)
try:
return getattr(self, method)
except AttributeError:
raise UnknownAuthMethodError(method)
def _oauth2_init(self, provider, auth_url, extra=None):
"""Initiates OAuth 2.0 web flow"""
key, secret, scope = self._get_consumer_info_for(provider)
callback_url = self._callback_uri_for(provider)
optional_params = self._get_optional_params_for(provider)
params = {
'response_type': 'code',
'client_id': key,
'redirect_uri': callback_url
}
if isinstance(optional_params, dict):
params.update(optional_params)
if scope:
params.update(scope=scope)
state_params = {}
if self.OAUTH2_CSRF_STATE:
csrf_token = self._generate_csrf_token()
state_params[self.OAUTH2_CSRF_STATE_PARAM] = csrf_token
self.session[self.OAUTH2_CSRF_SESSION_PARAM] = csrf_token
if extra is not None:
state_params[self.OAUTH2_STATE_EXTRA_PARAM] = extra
if len(state_params):
params.update(state=json.dumps(state_params))
target_url = auth_url.format(urlencode(params))
logging.debug('Redirecting user to %s', target_url)
self.redirect(target_url)
def _oauth2_callback(self, provider, access_token_url):
"""Step 2 of OAuth 2.0, whenever the user accepts or denies access."""
error = self.request.get('error')
if error:
raise AuthProviderResponseError(error, provider)
code = self.request.get('code')
callback_url = self._callback_uri_for(provider)
client_id, client_secret, scope = self._get_consumer_info_for(provider)
json_state = self.request.get('state')
logging.debug(json_state)
state = json.loads(json_state)
if self.OAUTH2_CSRF_STATE:
_expected = self.session.pop(self.OAUTH2_CSRF_SESSION_PARAM, '')
_actual = state[self.OAUTH2_CSRF_STATE_PARAM]
# If _expected is '' it won't validate anyway.
if not self._validate_csrf_token(_expected, _actual):
raise InvalidCSRFTokenError(
'[%s] vs [%s]' % (_expected, _actual), provider)
extra = state.get(self.OAUTH2_STATE_EXTRA_PARAM, None)
payload = {
'code': code,
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': callback_url,
'grant_type': 'authorization_code'
}
resp = urlfetch.fetch(
url=access_token_url,
payload=urlencode(payload),
method=urlfetch.POST,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
_parser = getattr(self, self.TOKEN_RESPONSE_PARSERS[provider])
_fetcher = getattr(self, '_get_%s_user_info' % provider)
auth_info = _parser(resp.content)
user_data = _fetcher(auth_info, key=client_id, secret=client_secret)
return user_data, auth_info, extra
def _oauth1_init(self, provider, auth_urls, extra=None):
"""Initiates OAuth 1.0 dance"""
key, secret = self._get_consumer_info_for(provider)
callback_url = self._callback_uri_for(provider)
optional_params = self._get_optional_params_for(provider)
token_request_url = auth_urls.get('request', None)
auth_url = auth_urls.get('auth', None)
_parser = getattr(self, self.TOKEN_RESPONSE_PARSERS[provider], None)
# make a request_token request
client = self._oauth1_client(consumer_key=key, consumer_secret=secret)
resp, content = client.request(auth_urls['request'], "POST",
body=urlencode(
{'oauth_callback': callback_url}))
if resp.status != 200:
raise AuthProviderResponseError(
'%s (status: %d)' % (content, resp.status), provider)
# parse token request response
request_token = _parser(content)
if not request_token.get('oauth_token', None):
raise AuthProviderResponseError(
"Couldn't get a request token from %s" % str(request_token), provider)
params = {
'oauth_token': request_token.get('oauth_token', None),
'oauth_callback': callback_url
}
if isinstance(optional_params, dict):
params.update(optional_params)
target_url = auth_urls['auth'].format(urlencode(params))
logging.debug('Redirecting user to %s', target_url)
    # save the request token for later use in the callback step
self.session['req_token'] = request_token
self.redirect(target_url)
def _oauth1_callback(self, provider, access_token_url):
"""Third step of OAuth 1.0 dance."""
request_token = self.session.pop('req_token', None)
if not request_token:
raise InvalidOAuthRequestToken(
"No request token in user session", provider)
verifier = self.request.get('oauth_verifier')
if not verifier:
raise AuthProviderResponseError(
"No OAuth verifier was provided", provider)
consumer_key, consumer_secret = self._get_consumer_info_for(provider)
token = oauth1.Token(request_token['oauth_token'],
request_token['oauth_token_secret'])
token.set_verifier(verifier)
client = self._oauth1_client(token, consumer_key, consumer_secret)
resp, content = client.request(access_token_url, "POST")
_parser = getattr(self, self.TOKEN_RESPONSE_PARSERS[provider])
_fetcher = getattr(self, '_get_%s_user_info' % provider)
auth_info = _parser(content)
user_data = _fetcher(auth_info, key=consumer_key, secret=consumer_secret)
return (user_data, auth_info)
def _openid_init(self, provider='openid', identity=None, extra=None):
"""Initiates OpenID dance using App Engine users module API."""
identity_url = identity or self.request.get('identity_url')
callback_url = self._callback_uri_for(provider)
target_url = users.create_login_url(
dest_url=callback_url, federated_identity=identity_url)
logging.debug('Redirecting user to %s', target_url)
self.redirect(target_url)
def _openid_callback(self, provider='openid', _identity=None):
"""Being called back by an OpenID provider
after the user has been authenticated.
"""
user = users.get_current_user()
if not user or not user.federated_identity():
raise InvalidOpenIDUserError(user, provider)
uinfo = {
'id': user.federated_identity(),
'nickname': user.nickname(),
'email': user.email()
}
return (uinfo, {'provider': user.federated_provider()})
#
# callbacks and consumer key/secrets
#
def _callback_uri_for(self, provider):
"""Returns a callback URL for a 2nd step of the auth process.
Override this with something like:
self.uri_for('auth_callback', provider=provider, _full=True)
"""
return None
def _get_consumer_info_for(self, provider):
"""Returns a (key, secret, desired_scopes) tuple.
Defaults to None. You should redefine this method and return real values.
For OAuth 2.0 it should be a 3 elements tuple:
(client_ID, client_secret, scopes)
OAuth 1.0 doesn't have scope so this should return just a
(consumer_key, consumer_secret) tuple.
OpenID needs neither scope nor key/secret, so this method is never called
for OpenID authentication.
See README for more info on scopes and where to get consumer/client
key/secrets.
"""
return (None, None, None)
def _get_optional_params_for(self, provider):
"""Returns optional parameters to send to provider on init
Defaults to None.
If you want to send optional parameter, redefine this method.
This should return a dictionary of parameter names and
values as defined by the provider.
"""
return None
#
# user profile/info
#
def _get_google_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currenly logging in user.
Google API endpoint:
https://www.googleapis.com/oauth2/v3/userinfo
"""
logging.warn('Google userinfo endpoint is deprecated. '
'Use Google+ API (googleplus provider): '
'https://developers.google.com/+/api/auth-migration#timetable')
resp = self._oauth2_request(
'https://www.googleapis.com/oauth2/v3/userinfo?{0}',
auth_info['access_token'])
data = json.loads(resp)
if 'id' not in data and 'sub' in data:
data['id'] = data['sub']
return data
def _get_googleplus_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currenly logging in user.
Google+ API endpoint:
https://www.googleapis.com/plus/v1/people/me
"""
resp = self._oauth2_request(
'https://www.googleapis.com/plus/v1/people/me?{0}',
auth_info['access_token'])
return json.loads(resp)
def _get_windows_live_user_info(self, auth_info, key=None, secret=None):
"""Windows Live API user profile endpoint.
https://apis.live.net/v5.0/me
Profile picture:
https://apis.live.net/v5.0/USER_ID/picture
"""
resp = self._oauth2_request('https://apis.live.net/v5.0/me?{0}',
auth_info['access_token'])
uinfo = json.loads(resp)
avurl = 'https://apis.live.net/v5.0/{0}/picture'.format(uinfo['id'])
uinfo.update(avatar_url=avurl)
return uinfo
def _get_facebook_user_info(self, auth_info, key=None, secret=None):
"""Facebook Graph API endpoint.
https://graph.facebook.com/me
"""
resp = self._oauth2_request('https://graph.facebook.com/me?{0}',
auth_info['access_token'])
return json.loads(resp)
def _get_foursquare_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currenly logging in user.
foursquare API endpoint:
https://api.foursquare.com/v2/users/self
"""
resp = self._oauth2_request(
'https://api.foursquare.com/v2/users/self?{0}&v=20130204',
        auth_info['access_token'], 'oauth_token')
data = json.loads(resp)
if data['meta']['code'] != 200:
logging.error(data['meta']['errorDetail'])
return data['response'].get('user')
def _get_linkedin_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currently logging in linkedin user.
LinkedIn user profile API endpoint:
http://api.linkedin.com/v1/people/~
or
http://api.linkedin.com/v1/people/~:<fields>
where <fields> is something like
(id,first-name,last-name,picture-url,public-profile-url,headline)
LinkedIn OAuth 1.0a is deprecated. Use LinkedIn with OAuth 2.0
"""
# TODO: remove LinkedIn OAuth 1.0a in the next release.
logging.warn('LinkedIn OAuth 1.0a is deprecated. '
'Use LinkedIn with OAuth 2.0: '
'https://developer.linkedin.com/documents/authentication')
token = oauth1.Token(key=auth_info['oauth_token'],
secret=auth_info['oauth_token_secret'])
client = self._oauth1_client(token, key, secret)
fields = 'id,first-name,last-name,picture-url,public-profile-url,headline'
url = 'http://api.linkedin.com/v1/people/~:(%s)' % fields
resp, content = client.request(url)
return self._parse_xml_user_info(content)
def _get_linkedin2_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of currently logging in linkedin user.
LinkedIn user profile API endpoint:
http://api.linkedin.com/v1/people/~
or
http://api.linkedin.com/v1/people/~:<fields>
where <fields> is something like
(id,first-name,last-name,picture-url,public-profile-url,headline)
"""
fields = 'id,first-name,last-name,picture-url,public-profile-url,headline'
url = 'https://api.linkedin.com/v1/people/~:(%s)?{0}' % fields
resp = self._oauth2_request(url, auth_info['access_token'],
token_param='oauth2_access_token')
return self._parse_xml_user_info(resp)
def _parse_xml_user_info(self, content):
try:
# lxml is one of the third party libs available on App Engine out of the
# box. See example/app.yaml for more info.
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
person = etree.fromstring(content)
uinfo = {}
for e in person:
uinfo.setdefault(e.tag, e.text)
return uinfo
def _get_twitter_user_info(self, auth_info, key=None, secret=None):
"""Returns a dict of twitter user using
https://api.twitter.com/1.1/account/verify_credentials.json
"""
token = oauth1.Token(key=auth_info['oauth_token'],
secret=auth_info['oauth_token_secret'])
client = self._oauth1_client(token, key, secret)
resp, content = client.request(
'https://api.twitter.com/1.1/account/verify_credentials.json')
uinfo = json.loads(content)
uinfo.setdefault('link', 'http://twitter.com/%s' % uinfo['screen_name'])
return uinfo
#
# aux methods
#
def _oauth1_client(self, token=None, consumer_key=None,
consumer_secret=None):
"""Returns OAuth 1.0 client that is capable of signing requests."""
args = [oauth1.Consumer(key=consumer_key, secret=consumer_secret)]
if token:
args.append(token)
return oauth1.Client(*args)
def _oauth2_request(self, url, token, token_param='access_token'):
"""Makes an HTTP request with OAuth 2.0 access token using App Engine
URLfetch API.
"""
target_url = url.format(urlencode({token_param:token}))
return urlfetch.fetch(target_url).content
def _query_string_parser(self, body):
"""Parses response body of an access token request query and returns
the result in JSON format.
Facebook, LinkedIn and Twitter respond with a query string, not JSON.
"""
return dict(urlparse.parse_qsl(body))
def _json_parser(self, body):
"""Parses body string into JSON dict"""
return json.loads(body)
def _generate_csrf_token(self, _time=None):
"""Creates a new random token that can be safely used as a URL param.
Token would normally be stored in a user session and passed as 'state'
parameter during OAuth 2.0 authorization step.
"""
now = str(_time or long(time.time()))
secret = security.generate_random_string(30, pool=security.ASCII_PRINTABLE)
token = self.OAUTH2_CSRF_DELIMITER.join([secret, now])
return base64.urlsafe_b64encode(token)
def _validate_csrf_token(self, expected, actual):
"""Validates expected token against the actual.
Args:
expected: String, existing token. Normally stored in a user session.
actual: String, token provided via 'state' param.
"""
if expected != actual:
return False
try:
decoded = base64.urlsafe_b64decode(expected.encode('ascii'))
token_key, token_time = decoded.rsplit(self.OAUTH2_CSRF_DELIMITER, 1)
token_time = long(token_time)
if not token_key:
return False
except (TypeError, ValueError, UnicodeDecodeError):
return False
now = long(time.time())
timeout = now - token_time > self.OAUTH2_CSRF_TOKEN_TIMEOUT
if timeout:
logging.error("CSRF token timeout (issued at %d)", token_time)
return not timeout
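# A minimal usage sketch for the mixin above, assuming a webapp2 app with
# webapp2_extras sessions configured; the route name, client id/secret and
# scope below are illustrative placeholders, not values from the library.
import webapp2
from webapp2_extras import sessions

class AuthHandler(webapp2.RequestHandler, SimpleAuthHandler):
  OAUTH2_CSRF_STATE = True

  @webapp2.cached_property
  def session(self):
    # CSRF protection expects a session mapping on the handler instance.
    return sessions.get_store(request=self.request).get_session()

  def _callback_uri_for(self, provider):
    return self.uri_for('auth_callback', provider=provider, _full=True)

  def _get_consumer_info_for(self, provider):
    # (client_id, client_secret, scope) for OAuth 2.0 providers.
    return ('my-client-id', 'my-client-secret', 'email')

  def _on_signin(self, user_data, auth_info, provider, extra=None):
    # App-specific: persist the user, then redirect somewhere useful.
    self.redirect('/')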
| 34.525806
| 80
| 0.679996
| 2,805
| 21,406
| 5.021034
| 0.160428
| 0.014769
| 0.00781
| 0.009088
| 0.350398
| 0.283158
| 0.24148
| 0.218759
| 0.203777
| 0.198523
| 0
| 0.011983
| 0.208633
| 21,406
| 619
| 81
| 34.581583
| 0.819421
| 0.274035
| 0
| 0.227405
| 0
| 0.005831
| 0.197837
| 0.018228
| 0
| 0
| 0
| 0.001616
| 0
| 1
| 0.078717
| false
| 0.017493
| 0.055394
| 0
| 0.250729
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e331235f5a65953d372c517da81e56d9c43aa850
| 2,652
|
py
|
Python
|
scenegraph/pddlgym_planners/lapkt.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | 1
|
2022-01-30T22:06:57.000Z
|
2022-01-30T22:06:57.000Z
|
scenegraph/pddlgym_planners/lapkt.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
scenegraph/pddlgym_planners/lapkt.py
|
taskography/3dscenegraph-dev
|
2c261241230fbea1f1c687ff793478248f25c02c
|
[
"MIT"
] | null | null | null |
"""LAPKT-BFWS
https://github.com/nirlipo/BFWS-public
"""
import re
import os
import sys
import subprocess
import tempfile
from pddlgym_planners.pddl_planner import PDDLPlanner
from pddlgym_planners.planner import PlanningFailure
import numpy as np
from utils import FilesInCommonTempDirectory
DOCKER_IMAGE = 'khodeir/bfws:latest'
class LAPKTBFWS(PDDLPlanner):
def __init__(self):
super().__init__()
print("Instantiating LAPKT-BFWS")
        self.install_bfws()
    def install_bfws(self):
        subprocess.check_call(f'docker pull {DOCKER_IMAGE}', shell=True, stdout=subprocess.DEVNULL)
def plan_from_pddl(self, dom_file, prob_file, horizon=np.inf, timeout=10, remove_files=False):
self.tmpdir = FilesInCommonTempDirectory(dom_file, prob_file)
(dom_file, prob_file) = self.tmpdir.new_fpaths
return super().plan_from_pddl(dom_file, prob_file, horizon=horizon, timeout=timeout, remove_files=remove_files)
def _get_cmd_str(self, dom_file, prob_file, timeout):
timeout_cmd = "gtimeout" if sys.platform == "darwin" else "timeout"
probdom_dir = os.path.dirname(dom_file)
dom_fname = os.path.basename(dom_file)
prob_fname = os.path.basename(prob_file)
assert probdom_dir == os.path.dirname(prob_file), "Files must be in the same directory"
cmd_str = f"docker run --privileged -it -v {probdom_dir}:/problem -w /problem {DOCKER_IMAGE} {timeout_cmd} {timeout} bfws --domain /problem/{dom_fname} --problem /problem/{prob_fname} --output /problem/bfws.plan --BFWS-f5 1"
return cmd_str
def _output_to_plan(self, output):
try:
self._statistics["num_node_expansions"] = int(re.search('nodes expanded during search: (\d+)', output.lower()).group(1))
self._statistics["total_time"] = self._statistics["search_time"] = float(re.search('total time: ([0-9.]+)', output.lower()).group(1))
self._statistics["plan_cost"] = float(re.search('plan found with cost: ([0-9.]+)', output.lower()).group(1))
except:
raise PlanningFailure("Failure parsing output of bfws")
try:
plan_fpath = os.path.join(self.tmpdir.dirname, 'bfws.plan')
with open(plan_fpath, 'r') as f:
plan_output = f.read()
self.tmpdir.cleanup()
plan = re.findall(r"^\(([^)]+)\)", plan_output.lower(), re.M)
assert plan
self._statistics["plan_length"] = len(plan)
return plan
        except Exception:
raise PlanningFailure("Plan not found with BFWS! Error: {}".format(output))
def _cleanup(self):
pass
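# A hedged usage sketch, assuming Docker is available and the PDDL files
# exist; the file paths are illustrative.
if __name__ == "__main__":
    planner = LAPKTBFWS()
    plan = planner.plan_from_pddl("domain.pddl", "problem.pddl", timeout=60)
    print(plan)
    print(planner._statistics)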
| 42.774194
| 232
| 0.667044
| 344
| 2,652
| 4.93314
| 0.389535
| 0.028874
| 0.038892
| 0.044196
| 0.115498
| 0.04891
| 0
| 0
| 0
| 0
| 0
| 0.005218
| 0.205128
| 2,652
| 61
| 233
| 43.47541
| 0.79981
| 0.018477
| 0
| 0.081633
| 0
| 0.020408
| 0.219569
| 0.016564
| 0
| 0
| 0
| 0
| 0.040816
| 1
| 0.122449
| false
| 0.020408
| 0.183673
| 0
| 0.387755
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e33575c4ac98eb7bd72db9483692a67e2a8b1c0f
| 1,914
|
py
|
Python
|
Create Network Zones.py
|
Tosatsu/okta-python-scripts
|
bca5ff89b8fc2381ccab08de971f65505ed0cda5
|
[
"MIT"
] | 1
|
2021-04-09T09:46:31.000Z
|
2021-04-09T09:46:31.000Z
|
Create Network Zones.py
|
Tosatsu/okta-python-scripts
|
bca5ff89b8fc2381ccab08de971f65505ed0cda5
|
[
"MIT"
] | null | null | null |
Create Network Zones.py
|
Tosatsu/okta-python-scripts
|
bca5ff89b8fc2381ccab08de971f65505ed0cda5
|
[
"MIT"
] | 1
|
2021-04-12T11:27:13.000Z
|
2021-04-12T11:27:13.000Z
|
import requests
import json
import Data # data container, replace with your own
orgName = Data.orgName # replace with your own
apiKey = Data.apiKey # provide your own API token
api_token = "SSWS " + apiKey
headers = {'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': api_token}
def CreateZone(data):
    # orgName must include the full Okta subdomain (e.g. "example.okta")
    # so this resolves to https://example.okta.com/api/v1/zones
    createZoneUrl = "https://" + orgName + ".com/api/v1/zones"
response = requests.post(createZoneUrl, headers=headers, data=data)
responseJSON = json.dumps(response.json())
responseData = json.loads(responseJSON)
if "errorCode" in responseJSON:
print(responseData)
return "Error"
else:
print(responseData)
return responseData
def CreateZones():
for x in range(1, 100):
        zone = {
"type": "IP",
"id": "null",
"name": "newNetworkZone" + str(x),
"status": "ACTIVE",
"created": "null",
"lastUpdated": "null",
"gateways": [
{
"type": "CIDR",
"value": "1.2.3.4/24"
},
{
"type": "CIDR",
"value": "2.3.4.5/24"
}
],
"proxies": [
{
"type": "CIDR",
"value": "2.2.3.4/24"
},
{
"type": "CIDR",
"value": "3.3.4.5/24"
}
]
}
        CreateZone(json.dumps(zone))
if __name__ == "__main__":
CreateZones()
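# A hedged companion sketch for verifying the result: listing zones uses the
# same base URL and headers as CreateZone above (per Okta's Zones API).
def ListZones():
    listZoneUrl = "https://" + orgName + ".com/api/v1/zones"
    response = requests.get(listZoneUrl, headers=headers)
    for zone in response.json():
        print(zone.get("name"), zone.get("status"))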
| 29.90625
| 71
| 0.405434
| 153
| 1,914
| 5.006536
| 0.464052
| 0.041775
| 0.067885
| 0.046997
| 0.046997
| 0.046997
| 0.046997
| 0
| 0
| 0
| 0
| 0.029087
| 0.479101
| 1,914
| 64
| 72
| 29.90625
| 0.739218
| 0.044932
| 0
| 0.107143
| 0
| 0
| 0.158904
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.107143
| 0
| 0.178571
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e33639a848594d63e324d70460cacf9ae086d33c
| 959
|
py
|
Python
|
simulador_de_dado.py
|
lucianoferreirasa/PythonProjects
|
c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5
|
[
"MIT"
] | null | null | null |
simulador_de_dado.py
|
lucianoferreirasa/PythonProjects
|
c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5
|
[
"MIT"
] | null | null | null |
simulador_de_dado.py
|
lucianoferreirasa/PythonProjects
|
c26a16bcbd61bd0563bc4f7d4dc0dd3593bd95e5
|
[
"MIT"
] | null | null | null |
import random
import PySimpleGUI as sg
class SimuladorDeDado:
def __init__(self):
self.valor_minimo = 1
self.valor_maximo = 6
self.layout = [
[sg.Text("Jogar o dado?")],
[sg.Button("Sim"),sg.Button("Não")]
]
def Iniciar(self):
self.janela = sg.Window("Simulador de Dado",layout=self.layout)
self.eventos, self.valores = self.janela.Read()
        try:
            if self.eventos == "Sim" or self.eventos == "s":
                self.GerarValorDoDado()
            elif self.eventos == "Não" or self.eventos == "n":
                print("Agradecemos a sua participação!")
            else:
                print("Favor digitar sim (s) ou não (n)!")
        except Exception:
            print("Ocorreu um erro ao receber sua resposta!")
def GerarValorDoDado(self):
print(random.randint(self.valor_minimo,self.valor_maximo))
simulador = SimuladorDeDado()
simulador.Iniciar()
| 29.96875
| 71
| 0.577685
| 110
| 959
| 4.963636
| 0.5
| 0.100733
| 0.054945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002972
| 0.298227
| 959
| 31
| 72
| 30.935484
| 0.808321
| 0
| 0
| 0
| 0
| 0
| 0.154327
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.230769
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e3370f6e006d93026ba5320fad4727621e81fc92
| 1,712
|
py
|
Python
|
src/geometry/linear_algebra.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/geometry/linear_algebra.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
src/geometry/linear_algebra.py
|
seahrh/coding-interview
|
517d19e7e88c02acec4aa6336bc20206ce3f1897
|
[
"MIT"
] | null | null | null |
import math
from typing import List, Iterable, Union
Numeric = Union[int, float]
def magnitude(p: Iterable[Numeric]) -> float:
res: float = 0
for component in p:
res += component ** 2
res = math.sqrt(res)
return res
def vdot(p: List[Numeric], q: List[Numeric]) -> float:
"""Vector dot product."""
if len(p) == 0:
raise ValueError("p must not be None or empty")
if len(q) == 0:
raise ValueError("q must not be None or empty")
if len(p) != len(q):
raise ValueError("vectors p and q must have the same dimension")
res: float = 0
for i in range(len(p)):
res += p[i] * q[i]
return res
def full(rows: int, columns: int, fill: Numeric = 0) -> List[List[float]]:
"""Return a new array of given shape and type, filled with fill_value."""
return [[fill] * columns for _ in range(rows)]
def transpose(mat: List[List[Numeric]]) -> List[List[float]]:
res: List[List[float]] = full(rows=len(mat[0]), columns=len(mat))
for i in range(len(mat[0])):
for j in range(len(mat)):
res[i][j] = mat[j][i]
return res
def dot(p: List[List[Numeric]], q: List[List[Numeric]]) -> List[List[float]]:
"""Matrix dot product."""
p_shape = len(p), len(p[0])
q_shape = len(q), len(q[0])
if p_shape[1] != q_shape[0]:
raise ValueError("number of columns in p must equal the number of rows in q")
res: List[List[float]] = full(rows=p_shape[0], columns=q_shape[1])
for i in range(p_shape[0]):
for j in range(q_shape[1]):
for k in range(p_shape[1]):
res[i][j] += p[i][k] * q[k][j]
return res
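# A quick worked example of the helpers above; expected values are shown in
# the comments.
if __name__ == "__main__":
    p = [[1, 2, 3], [4, 5, 6]]
    q = [[7, 8], [9, 10], [11, 12]]
    print(magnitude([3, 4]))     # 5.0
    print(vdot([1, 2], [3, 4]))  # 11
    print(transpose(p))          # [[1, 4], [2, 5], [3, 6]]
    print(dot(p, q))             # [[58, 64], [139, 154]]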
| 31.703704
| 86
| 0.567757
| 275
| 1,712
| 3.494545
| 0.24
| 0.066597
| 0.067638
| 0.034339
| 0.21436
| 0.16025
| 0.052029
| 0.052029
| 0
| 0
| 0
| 0.01371
| 0.275701
| 1,712
| 53
| 87
| 32.301887
| 0.76129
| 0.0625
| 0
| 0.153846
| 0
| 0
| 0.100911
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.051282
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e337db10027ece0f941b1295bc94ad1a0ed34904
| 4,179
|
py
|
Python
|
arrow/forwarder/views.py
|
AkhilGKrishnan/arrow
|
bbd35faa5011c642cdcf218b180b48dd7ef39ef6
|
[
"MIT"
] | null | null | null |
arrow/forwarder/views.py
|
AkhilGKrishnan/arrow
|
bbd35faa5011c642cdcf218b180b48dd7ef39ef6
|
[
"MIT"
] | null | null | null |
arrow/forwarder/views.py
|
AkhilGKrishnan/arrow
|
bbd35faa5011c642cdcf218b180b48dd7ef39ef6
|
[
"MIT"
] | 3
|
2019-01-07T17:07:16.000Z
|
2021-01-09T13:01:40.000Z
|
from django.views.generic.edit import CreateView, FormMixin
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django import forms
from django.urls import reverse
from reportlab.pdfgen import canvas
from django.http import HttpResponse
from forwarder.models import Application, Hierarchy
class ForwardForm(forms.Form):
pass
class ApplicationCreate(CreateView):
model = Application
template_name = 'forwarder/application_create.html'
fields = ['type', 'other']
success_url = "/listApplication"
def form_valid(self, form):
form.instance.applicant = self.request.user
return super(ApplicationCreate, self).form_valid(form)
class ListApplicationView(ListView):
template_name = 'forwarder/list.html'
def get_queryset(self):
user = self.request.user
hierarchies = Hierarchy.objects.all()
#TODO: Generalize
if(user.designation == 'st'):
qs = Application.objects.filter(applicant=user)
elif(user.designation == 'tu'):
#for hr in hierarchies:
# qs += Application.objects.filter(applicant__branch=user.branch)
qs = Application.objects.filter(applicant__branch=user.branch, hierarchy_level=0)
elif(user.designation == 'ho'):
#for hr in hierarchies:
# qs += Application.objects.filter(applicant__branch=user.branch)
qs = Application.objects.filter(applicant__branch=user.branch, hierarchy_level=1)
else:
qs = Application.objects.filter(hierarchy_level=2)
return qs
class ApplicationDetailView(FormMixin, DetailView):
model = Application
template_name = 'forwarder/detail.html'
form_class = ForwardForm
def get_context_data(self, **kwargs):
context = super(ApplicationDetailView, self).get_context_data(**kwargs)
context['form'] = self.get_form()
return context
def get_success_url(self):
return reverse("list-application")
def post(self, request, *args, **kwargs):
#if not request.user.is_authenticated:
# return HttpResponseForbidden()
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
# Here, we would record the user's interest using the message
# passed in form.cleaned_data['message']
if '_forward' in self.request.POST:
self.object.hierarchy_level += 1
self.object.save()
if '_reject' in self.request.POST:
self.object.hierarchy_level -= 1
self.object.save()
return super(ApplicationDetailView, self).form_valid(form)
def pdf_dl(request, pk):
# Create the HttpResponse object with the appropriate PDF headers.
application = Application.objects.get(pk=pk)
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % (application)
# Create the PDF object, using the response object as its "file."
p = canvas.Canvas(response)
# Draw things on the PDF. Here's where the PDF generation happens.
# See the ReportLab documentation for the full list of functionality.
p.drawString(100, 800, "Name : " + application.applicant.name)
p.drawString(100, 780, "Admission no : " + str(application.applicant.admn_no))
p.drawString(100, 760, "Department : " + application.applicant.branch)
p.drawString(100, 740, "Semester : " + str(application.applicant.semester))
p.drawString(100, 720, "Parent name : " + application.applicant.parent_name)
if application.type == "OTH":
p.drawString(100, 700, "Application type : " + application.other())
else:
p.drawString(100, 700, "Application type : " + application.get_type_display())
p.drawString(100, 680, "Recommended by HOD of " + application.applicant.branch)
# Close the PDF object cleanly, and we're done.
p.showPage()
p.save()
return response
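# A hedged wiring sketch for the views above, which would normally live in the
# app's urls.py; the URL patterns are illustrative, though the name
# 'list-application' (used by get_success_url) must match.
# from django.urls import path
# from forwarder import views
#
# urlpatterns = [
#     path('apply/', views.ApplicationCreate.as_view(), name='create-application'),
#     path('listApplication/', views.ListApplicationView.as_view(), name='list-application'),
#     path('application/<int:pk>/', views.ApplicationDetailView.as_view(), name='application-detail'),
#     path('application/<int:pk>/pdf/', views.pdf_dl, name='pdf-dl'),
# ]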
| 36.025862
| 93
| 0.675042
| 489
| 4,179
| 5.674847
| 0.312883
| 0.031712
| 0.04036
| 0.056216
| 0.221622
| 0.167928
| 0.167928
| 0.136937
| 0.136937
| 0.136937
| 0
| 0.016268
| 0.220388
| 4,179
| 115
| 94
| 36.33913
| 0.835482
| 0.160804
| 0
| 0.144737
| 0
| 0
| 0.093043
| 0.015459
| 0
| 0
| 0
| 0.008696
| 0
| 1
| 0.092105
| false
| 0.026316
| 0.105263
| 0.013158
| 0.460526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e3390f43d3793bc787b6b52cd5f2cc575976a36e
| 4,793
|
py
|
Python
|
caption_feats_generation_scripts/full_vid_data_loader.py
|
Alterith/Dense_Video_Captioning_Feature_Extraction_Model_Choice
|
65d0f2d26698cc8f7a5ffb564936113e2bbec201
|
[
"MIT"
] | 1
|
2021-04-21T12:39:07.000Z
|
2021-04-21T12:39:07.000Z
|
caption_feats_generation_scripts/full_vid_data_loader.py
|
Alterith/masters_code
|
65d0f2d26698cc8f7a5ffb564936113e2bbec201
|
[
"MIT"
] | null | null | null |
caption_feats_generation_scripts/full_vid_data_loader.py
|
Alterith/masters_code
|
65d0f2d26698cc8f7a5ffb564936113e2bbec201
|
[
"MIT"
] | null | null | null |
import h5py
# torch imports
import torch
from torch.utils.data import Dataset
# generic imports
import os
import sys
import numpy as np
import random
import pandas as pd
import cv2
from decord import VideoReader
from decord import cpu, gpu
from matplotlib import pyplot as plt
import gc
# create data loader
class video_dataset(Dataset):
def __init__(self, data_dir, split, temporal_depth, patch_width, patch_height, dataset_name, stride=None, stride_idx=None):
print(data_dir)
# list of classes
self.vids = os.listdir(os.path.join(data_dir, split))
# list of the video file directories in each class folder
self.flattened_data_dir = [os.path.join(os.path.join(data_dir, split),v) for v in self.vids]
if stride is not None and stride_idx is not None:
try:
if stride*(stride_idx+1) <= len(self.flattened_data_dir):
self.flattened_data_dir = self.flattened_data_dir[stride*stride_idx:stride*(stride_idx+1)]
else:
self.flattened_data_dir = self.flattened_data_dir[stride*stride_idx:]
except Exception as e:
print("Dataloader out of range")
quit()
# train, test, val
self.split = split
# number of consecutive frames
self.temporal_depth = temporal_depth
# dimension of patch selected
self.patch_width = patch_width
self.patch_height = patch_height
    # data augmentation transforms
def transform(self, vid, split):
total_frames = int(len(vid))
print(total_frames)
if total_frames > 7200:
return torch.zeros(901, 1, 1, 1)
vid_width = vid[0].shape[1]
vid_height = vid[0].shape[0]
start_frame = random.randint(0, (total_frames - self.temporal_depth - 1))
patch_start_width = random.randint(0, 171 - self.patch_width - 1)
patch_start_height = random.randint(0, 128 - self.patch_height - 1)
clips = []
# the prob of flipping a video
flip_prob = random.random()
# frame iterator / stride index
stride = 0
stride_index = self.temporal_depth
        # obtain the temporal_depth number of consecutive frames
inter_method_idx = 0 #random.randint(0,4)
inter_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_NEAREST, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
while(stride*stride_index + self.temporal_depth < total_frames):
imgs = []
start_frame = stride*stride_index
for i in range(start_frame, start_frame + self.temporal_depth):
frame = vid[i]
# frame = frame.astype()
frame = frame.asnumpy()
frame = frame.astype(np.float32)
frame = np.asarray(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# print(frame)
# plt.imshow(frame)
# plt.show()
# quit()
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (112, 112), interpolation = inter_methods[inter_method_idx]) # remove this or move it up
cv2.normalize(frame, frame, 0, 1, cv2.NORM_MINMAX)
imgs.append(frame)
stride = stride + 1
clips.append(imgs)
clips = np.asarray(clips, dtype=np.float32)
clips = clips.astype(np.float32)
clips = np.moveaxis(clips, 4, 1)
clips = torch.from_numpy(clips)
return clips
def __len__(self):
return len(self.flattened_data_dir)
def __getitem__(self, idx):
if idx < 0:
return torch.zeros(1, 1, 1, 1), self.flattened_data_dir[idx]
result = False
vid = None
# idx = 3456
# deal with corrupted videos in list or videos which are just too long for us to process
while not result:
try:
vid = VideoReader(self.flattened_data_dir[idx])
if(int(len(vid))>self.temporal_depth):
result = True
else:
#idx = random.randint(0, len(self.flattened_data_dir)-1)
del vid
gc.collect()
return torch.zeros(901, 1, 1, 1), -1
            except Exception:
#idx = random.randint(0, len(self.flattened_data_dir)-1)
del vid
gc.collect()
return torch.zeros(901, 1, 1, 1), -1
frames = self.transform(vid, self.split)
# vid.close()
del vid
gc.collect()
return frames, self.flattened_data_dir[idx]
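# A minimal loading sketch, assuming a <data_dir>/<split>/<video> directory
# layout; the path and clip parameters are illustrative.
if __name__ == "__main__":
    dataset = video_dataset("/data/videos", "train", temporal_depth=16,
                            patch_width=112, patch_height=112,
                            dataset_name="activitynet")
    loader = torch.utils.data.DataLoader(dataset, batch_size=1)
    for clips, path in loader:
        print(path, clips.shape)
        break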
| 30.335443
| 130
| 0.580638
| 596
| 4,793
| 4.505034
| 0.276846
| 0.041713
| 0.075978
| 0.089385
| 0.222719
| 0.158659
| 0.142272
| 0.107263
| 0.099814
| 0.099814
| 0
| 0.029072
| 0.332568
| 4,793
| 157
| 131
| 30.528662
| 0.810253
| 0.150428
| 0
| 0.133333
| 0
| 0
| 0.005685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.144444
| 0.011111
| 0.277778
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e339d61b7c0a81fbe079a184470ec5bdef08b9e1
| 1,583
|
py
|
Python
|
sklearn_baseline.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
sklearn_baseline.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
sklearn_baseline.py
|
Shinkai125/KerasForTextClassfication
|
ed3d04c5c58d1dfb3f79b83ba704dd486616f0e4
|
[
"MIT"
] | null | null | null |
"""
@file: sklearn_method.py
@time: 2020-12-09 17:38:38
"""
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
myfont = fm.FontProperties(fname='SimHei.ttf')  # set a font that can render CJK labels
train_data = pd.read_csv('chnsenticorp/train.tsv', sep='\t')
tfidf = TfidfVectorizer(norm='l2', ngram_range=(1, 2))
features = tfidf.fit_transform(train_data.text_a)
labels = train_data.label
print(features.shape)
models = [
RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
LinearSVC(),
MultinomialNB(),
LogisticRegression(random_state=0, solver='liblinear'),
]
CV = 10
entries = []
for model in tqdm(models):
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='f1', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
results = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'f1'])
sns.boxplot(x='model_name', y='f1', data=results)
sns.stripplot(x='model_name', y='f1', data=results,
size=8, jitter=True, edgecolor="gray", linewidth=2)
plt.show()
print(results.groupby('model_name').f1.mean())
| 31.66
| 78
| 0.753001
| 222
| 1,583
| 5.193694
| 0.531532
| 0.057242
| 0.02255
| 0.027754
| 0.041631
| 0.041631
| 0.041631
| 0
| 0
| 0
| 0
| 0.023022
| 0.12192
| 1,583
| 49
| 79
| 32.306122
| 0.806475
| 0.036008
| 0
| 0
| 0
| 0
| 0.06917
| 0.014493
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.297297
| 0
| 0.297297
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e33bc5cbc72c8153bc963c853fb7e883e19b21c8
| 2,087
|
py
|
Python
|
handypackages/gallery/tests.py
|
roundium/handypackages
|
b8a0e4952644144b31168f9a4ac8e743933d87c7
|
[
"MIT"
] | 1
|
2019-07-31T11:40:06.000Z
|
2019-07-31T11:40:06.000Z
|
handypackages/gallery/tests.py
|
roundium/handypackages
|
b8a0e4952644144b31168f9a4ac8e743933d87c7
|
[
"MIT"
] | 10
|
2020-02-12T01:16:25.000Z
|
2021-06-10T18:42:24.000Z
|
handypackages/gallery/tests.py
|
roundium/handypackages
|
b8a0e4952644144b31168f9a4ac8e743933d87c7
|
[
"MIT"
] | 1
|
2019-07-31T11:40:18.000Z
|
2019-07-31T11:40:18.000Z
|
import tempfile
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from filer.models import Image
from handypackages.tag.models import Tag
from .models import Gallery
class TestGalleryModels(TestCase):
def setUp(self):
        # make a temp folder because we want the files deleted after the test,
        # and this code does that automatically
settings.MEDIA_ROOT = tempfile.mkdtemp()
self.user = User.objects.create_superuser('test_user', '', 'testing')
self.tags = [
Tag.objects.create(value='django'),
Tag.objects.create(value='python'),
Tag.objects.create(value='test'),
Tag.objects.create(value='app'),
]
with open('./handypackages/test_requirements/test_upload_image.jpg',
'rb') as image:
test_image = SimpleUploadedFile(
'test_upload_image.jpg',
image.read(),
content_type='image/jpeg'
)
image = Image(
owner=self.user,
file=test_image,
original_filename='gallery_image_file',
name='test_Gallery_image'
)
image.save()
gallery = Gallery(
title='gallery test title',
text='gallery test',
image=image,
)
gallery.save()
gallery.tags.add(*self.tags)
self.gallery = gallery
def test_gallery_model(self):
self.assertEqual(
str(self.gallery),
'gallery test title',
            '__str__ in gallery model has an issue!'
)
self.assertEqual(
self.gallery.__unicode__(),
'gallery test title',
            '__unicode__ in gallery model has an issue!'
)
self.assertEqual(
set(Tag.objects.all()),
set(self.gallery.gallery_tags),
'gallery gallery_tags method does not work!'
)
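# Run with the standard Django test runner (project layout assumed):
#   python manage.py test handypackages.gallery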
| 29.394366
| 77
| 0.577384
| 218
| 2,087
| 5.380734
| 0.394495
| 0.055413
| 0.054561
| 0.071611
| 0.068201
| 0.068201
| 0.068201
| 0.068201
| 0
| 0
| 0
| 0
| 0.330139
| 2,087
| 70
| 78
| 29.814286
| 0.839056
| 0.04552
| 0
| 0.089286
| 0
| 0
| 0.175465
| 0.03821
| 0
| 0
| 0
| 0
| 0.053571
| 1
| 0.035714
| false
| 0
| 0.142857
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e33ec5c64b5732e244db6498e5c0817ede88b3d0
| 1,650
|
py
|
Python
|
make_high_indel.py
|
wckdouglas/ngs_qc_plot
|
b279905f9e30d1cf547cda5f51cc77e8a134ce99
|
[
"MIT"
] | null | null | null |
make_high_indel.py
|
wckdouglas/ngs_qc_plot
|
b279905f9e30d1cf547cda5f51cc77e8a134ce99
|
[
"MIT"
] | null | null | null |
make_high_indel.py
|
wckdouglas/ngs_qc_plot
|
b279905f9e30d1cf547cda5f51cc77e8a134ce99
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pandas as pd
import os
import sys
import numpy as np
if len(sys.argv) != 3:
    sys.exit('[usage] python %s <repeat_index table> <indel cutoff>' % sys.argv[0])
ref_table = sys.argv[1]
indel_cut_off = int(sys.argv[2])
for gdf in pd.read_csv(ref_table, sep='\t', chunksize = 10000):
for contig, contig_df in gdf.groupby('contig'):
df = contig_df\
.assign(indel_index = lambda d: d.negative_index + d.positive_index) \
.query('indel_index >= %i ' %indel_cut_off)
count = 0
for i, base in df.iterrows():
if base['negative_index'] == base['indel_index']:
start = base['start']
mononucleotide = base['fwd_base']
indel_index = base['indel_index']
taken_base = 1
elif taken_base != indel_index and base['fwd_base'] == mononucleotide:
taken_base += 1
elif taken_base == indel_index:
assert base['positive_index'] == indel_index and base['fwd_base'] == mononucleotide,'Wrong parsing'
end = base['start']
line = '{contig}\t{start}\t{end}\tIndel{id}\t{indel_index}\t+\t{mononucleotide}' \
.format(contig = base['contig'],
start = start,
end = end,
id = count,
indel_index = indel_index,
mononucleotide = mononucleotide)
print(line, file= sys.stdout)
count += 1
else:
print(base)
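# Example invocation (file names illustrative); emits BED-like lines on stdout:
#   python make_high_indel.py repeat_index.tsv 6 > high_indel_regions.bed
# Note: the parser assumes rows within a contig arrive in positional order,
# starting at the beginning of a mononucleotide run.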
| 36.666667
| 115
| 0.512121
| 187
| 1,650
| 4.347594
| 0.374332
| 0.135301
| 0.086101
| 0.04674
| 0.162362
| 0.162362
| 0.162362
| 0.081181
| 0
| 0
| 0
| 0.011617
| 0.373939
| 1,650
| 44
| 116
| 37.5
| 0.775411
| 0.009697
| 0
| 0
| 0
| 0.027778
| 0.15493
| 0.043478
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e340a47d3057e0a84cad2effe274acb95c936bc5
| 662
|
py
|
Python
|
LeetCodeSolutions/python/474_Ones_and_Zeroes.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | 1
|
2017-03-27T13:38:37.000Z
|
2017-03-27T13:38:37.000Z
|
LeetCodeSolutions/python/474_Ones_and_Zeroes.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
LeetCodeSolutions/python/474_Ones_and_Zeroes.py
|
ChuanleiGuo/AlgorithmsPlayground
|
90b6287b742c8bfd3797540c408d679be2821a40
|
[
"MIT"
] | null | null | null |
class Solution(object):
def findMaxForm(self, strs, m, n):
"""
:type strs: List[str]
:type m: int
:type n: int
:rtype: int
"""
dp = [[0] * (n + 1) for _ in range(m + 1)]
def counts(s):
return sum(1 for c in s if c == '0'), \
sum(1 for c in s if c == '1')
for num_zero, num_one in [counts(s) for s in strs]:
for i in range(m, -1, -1):
for j in range(n, -1, -1):
if i >= num_zero and j >= num_one:
dp[i][j] = max(dp[i][j], dp[i - num_zero][j - num_one] + 1)
return dp[m][n]
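# A quick check against the classic example: with m=5 zeroes and n=3 ones,
# the largest subset is {"10", "0001", "1", "0"}, i.e. 4 strings.
print(Solution().findMaxForm(["10", "0001", "111001", "1", "0"], 5, 3))  # 4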
| 30.090909
| 83
| 0.413897
| 105
| 662
| 2.542857
| 0.314286
| 0.074906
| 0.059925
| 0.067416
| 0.104869
| 0.104869
| 0.104869
| 0.104869
| 0
| 0
| 0
| 0.031915
| 0.432024
| 662
| 21
| 84
| 31.52381
| 0.678191
| 0.089124
| 0
| 0
| 0
| 0
| 0.003604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0.083333
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e34633ea0534cf1b5136a4ecb84b248d7c202e57
| 416
|
py
|
Python
|
#103 - Ficha do Jogador.py
|
Lucas-HMSC/curso-python3
|
b6506d508107c9a43993a7b5795ee39fc3b7c79d
|
[
"MIT"
] | null | null | null |
#103 - Ficha do Jogador.py
|
Lucas-HMSC/curso-python3
|
b6506d508107c9a43993a7b5795ee39fc3b7c79d
|
[
"MIT"
] | null | null | null |
#103 - Ficha do Jogador.py
|
Lucas-HMSC/curso-python3
|
b6506d508107c9a43993a7b5795ee39fc3b7c79d
|
[
"MIT"
] | null | null | null |
def ficha(nome, gols):
    if gols.isnumeric():
        gols = int(gols)
    else:
        gols = 0
    if nome.strip() == '':
        nome = '<desconhecido>'
    print(f'O jogador {nome} fez {gols} gol(s) no campeonato.')
print('='*30)
nome = str(input('Nome do Jogador: '))
gols = str(input('Número de Gols: '))
ficha(nome, gols)
| 24.470588
| 67
| 0.560096
| 58
| 416
| 4.017241
| 0.448276
| 0.077253
| 0.111588
| 0.120172
| 0.351931
| 0.351931
| 0.351931
| 0.351931
| 0.351931
| 0.351931
| 0
| 0.009772
| 0.262019
| 416
| 16
| 68
| 26
| 0.749186
| 0
| 0
| 0.285714
| 0
| 0
| 0.350962
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.071429
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e347e8efaaade3a7b28a992e4961e185b12004e3
| 2,079
|
py
|
Python
|
app/business_layers/presentation.py
|
martireg/bmat
|
b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c
|
[
"MIT"
] | null | null | null |
app/business_layers/presentation.py
|
martireg/bmat
|
b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c
|
[
"MIT"
] | null | null | null |
app/business_layers/presentation.py
|
martireg/bmat
|
b5ccd6dcd1edd1e90fa07cb0ef4006b909018a4c
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from fastapi import APIRouter, UploadFile, File, Depends, HTTPException
from pydantic import create_model
from starlette.responses import StreamingResponse
from app.business_layers.domain import Work
from app.business_layers.repository import WorkRepository
from app.business_layers.use_cases import (
bulk_upload_works_use_case,
get_work_use_case,
list_works_use_case,
)
from app.db.mongodb import get_client
from app.utils.csv_manipulation import process_csv, stream_csv_from_dicts
async def get_db():
return await get_client()
work_router = APIRouter()
# Model Fields are defined by either a tuple of the form (<type>, <default value>) or a default value
model_fields = {k: (v, ...) for k, v in Work.__annotations__.items()}
WorkModel = create_model("WorkModel", **model_fields)
@work_router.post("/upload_file", response_model=List[WorkModel])
async def upload_csv(file: UploadFile = File(...), db=Depends(get_db)) -> List[Dict]:
csv = process_csv(await file.read())
works = await bulk_upload_works_use_case(WorkRepository(db), csv)
return [work.to_dict() for work in works]
@work_router.get("/csv")
async def fetch_csv(db=Depends(get_db)) -> StreamingResponse:
    works = await list_works(db)
    if not works:
        raise HTTPException(status_code=500, detail="There are no works available")
response = StreamingResponse(
stream_csv_from_dicts(works, separator=",", keys=works[0].keys()),
media_type="text/csv",
)
response.headers["Content-Disposition"] = "attachment; filename=export.csv"
return response
@work_router.get("/work/{iswc}", response_model=WorkModel)
async def get_work(iswc: str, db=Depends(get_db)):
work = await get_work_use_case(WorkRepository(db), iswc)
if not work:
raise HTTPException(status_code=404, detail="Item not found")
return work.to_dict()
@work_router.get("/works", response_model=List[WorkModel])
async def list_works(db=Depends(get_db)):
return [work.to_dict() for work in await list_works_use_case(WorkRepository(db))]
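# A minimal mounting sketch, assuming the usual FastAPI entry point; serving
# the app with uvicorn is illustrative.
from fastapi import FastAPI

app = FastAPI()
app.include_router(work_router)
# uvicorn app.business_layers.presentation:app --reload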
| 34.65
| 101
| 0.746032
| 299
| 2,079
| 4.959866
| 0.324415
| 0.028321
| 0.032367
| 0.037761
| 0.138908
| 0.079568
| 0.033715
| 0
| 0
| 0
| 0
| 0.003928
| 0.142857
| 2,079
| 59
| 102
| 35.237288
| 0.828283
| 0.047619
| 0
| 0
| 0
| 0
| 0.072801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.209302
| 0
| 0.348837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e349722dbbb7eaf1a0dc75722c25f01806dbcca5
| 3,632
|
py
|
Python
|
language/serene/boolq_tfds.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 1,199
|
2018-10-16T01:30:18.000Z
|
2022-03-31T21:05:24.000Z
|
language/serene/boolq_tfds.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 116
|
2018-10-18T03:31:46.000Z
|
2022-03-24T13:40:50.000Z
|
language/serene/boolq_tfds.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 303
|
2018-10-22T12:35:12.000Z
|
2022-03-27T17:38:17.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TF Dataset for BoolQ in same format as Fever TFDS."""
import json
from language.serene import constants
from language.serene import util
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
class BoolQClaims(tfds.core.GeneratorBasedBuilder):
"""TFDS for treating boolq as fact verification."""
VERSION = tfds.core.Version('0.1.0')
def __init__(self,
*,
boolq_train_path,
boolq_dev_path,
data_dir=None,
config=None,
version=None):
super().__init__(data_dir=data_dir, config=config, version=version)
self._boolq_train_path = boolq_train_path
self._boolq_dev_path = boolq_dev_path
def _generate_examples(self, boolq_filepath, fold):
boolq_claims = util.read_jsonlines(boolq_filepath)
for idx, claim in enumerate(boolq_claims):
example_id = f'{fold}-{idx}'
example = {
'example_id':
example_id,
'claim_text':
claim['question'],
'evidence_text':
claim['passage'],
'wikipedia_url':
claim['title'],
'sentence_id':
'0',
# This is effectively gold evidence
'evidence_label':
constants.MATCHING,
'claim_label':
constants.SUPPORTS if claim['answer'] else constants.REFUTES,
'metadata':
json.dumps({})
}
yield example_id, example
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
'example_id':
tf.string,
'metadata':
tf.string,
'claim_text':
tfds.features.Text(),
'evidence_text':
tfds.features.Text(),
'wikipedia_url':
tfds.features.Text(),
'sentence_id':
tfds.features.Text(),
'evidence_label':
tfds.features.ClassLabel(
names=constants.EVIDENCE_MATCHING_CLASSES),
'claim_label':
tfds.features.ClassLabel(names=constants.FEVER_CLASSES)
}),
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
'boolq_filepath': self._boolq_train_path,
'fold': 'train',
},
num_shards=100,
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'boolq_filepath': self._boolq_dev_path,
'fold': 'dev',
},
)
]
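# A hedged usage sketch following the standard tfds builder API; the file
# paths and data_dir are illustrative.
if __name__ == '__main__':
  builder = BoolQClaims(
      boolq_train_path='boolq/train.jsonl',
      boolq_dev_path='boolq/dev.jsonl',
      data_dir='/tmp/tfds')
  builder.download_and_prepare()
  train_ds = builder.as_dataset(split='train')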
| 32.141593
| 75
| 0.593337
| 398
| 3,632
| 5.233668
| 0.444724
| 0.046087
| 0.026884
| 0.025924
| 0.12482
| 0.102736
| 0
| 0
| 0
| 0
| 0
| 0.007232
| 0.314703
| 3,632
| 112
| 76
| 32.428571
| 0.82965
| 0.257159
| 0
| 0.320988
| 0
| 0
| 0.100638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0.012346
| 0.061728
| 0.012346
| 0.160494
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e34cc6ddd23022672aee1685f571b987ab87c815
| 936
|
py
|
Python
|
services/viewcounts/utils.py
|
RyanFleck/AuxilliaryWebsiteServices
|
bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378
|
[
"MIT"
] | 1
|
2020-11-11T20:20:42.000Z
|
2020-11-11T20:20:42.000Z
|
services/viewcounts/utils.py
|
RyanFleck/AuxilliaryWebsiteServices
|
bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378
|
[
"MIT"
] | 17
|
2020-11-09T19:04:04.000Z
|
2022-03-01T18:08:42.000Z
|
services/viewcounts/utils.py
|
RyanFleck/AuxilliaryWebsiteServices
|
bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378
|
[
"MIT"
] | null | null | null |
from slugify import slugify
from services.viewcounts.models import PageViewsModel
def get_page_views(url: str):
"""Returns the number of views for a given page object."""
# Pre-processing checks: Client should not pass full or partial URL.
if not url.startswith("/"):
raise Exception("Partial URL detected, only POST the page path.")
if ("http" in url) or ("localhost" in url):
raise Exception("Full URL detected, only POST the page path.")
# Boil down url to slug/path:
path = url_to_path(url)
print(f"User is at {path}")
# Creates a new object if none exists.
page, created = PageViewsModel.objects.get_or_create(path=path)
# Add a view to the model
if not created:
page.views = page.views + 1
page.save()
return page.views
def url_to_path(url: str):
"""Converts an incoming url into a path-slug."""
return slugify(url, max_length=199)
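# A hedged sketch of a caller: a Django view that records a hit for a posted
# path; the "path" field name is illustrative.
from django.http import JsonResponse

def record_view(request):
    views = get_page_views(request.POST["path"])
    return JsonResponse({"views": views})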
| 28.363636
| 73
| 0.672009
| 141
| 936
| 4.397163
| 0.503546
| 0.058065
| 0.048387
| 0.06129
| 0.096774
| 0.096774
| 0.096774
| 0
| 0
| 0
| 0
| 0.005548
| 0.229701
| 936
| 32
| 74
| 29.25
| 0.854369
| 0.269231
| 0
| 0
| 0
| 0
| 0.179104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e34da2a39a4311b17cd41e029318a815155da9e9
| 9,875
|
py
|
Python
|
bin/gaussian_process_samples.py
|
ltiao/videos
|
ba371078d107da5a4c726a957b31a29bb157664d
|
[
"MIT"
] | null | null | null |
bin/gaussian_process_samples.py
|
ltiao/videos
|
ba371078d107da5a4c726a957b31a29bb157664d
|
[
"MIT"
] | null | null | null |
bin/gaussian_process_samples.py
|
ltiao/videos
|
ba371078d107da5a4c726a957b31a29bb157664d
|
[
"MIT"
] | null | null | null |
import numpy as np  # added: used throughout; previously only available via the star import below
import tensorflow as tf
import tensorflow_probability as tfp

from scipy.stats import expon

from videos.linalg import safe_cholesky

from manim import *

# shortcuts
tfd = tfp.distributions
kernels = tfp.math.psd_kernels


def default_float():
    return "float64"


class State:

    def __init__(self, kernel, x_grid, xa, xb_tracker, ci=.95):
        self.kernel = kernel
        self.x_grid = x_grid  # shape (K, 1)
        self.xa = xa  # shape ()
        self.xb_tracker = xb_tracker
        self.ci = ci
        # cholesky decomposition of gram matrix over grid points; shape (K, K)
        self.scale_grid = safe_cholesky(self.kernel.matrix(x_grid, x_grid))

    def index_points(self):
        return np.vstack([self.xa, self.xb_tracker.get_value()])  # shape (2, 1)

    def scale(self):
        xs = self.index_points()  # shape (2, 1)
        Ks = self.kernel.matrix(xs, xs)  # shape (2, 2)
        Ks_grid = self.kernel.matrix(self.x_grid, xs)  # shape (K, 2)
        K_col = tf.concat([Ks_grid, Ks], axis=0)  # shape (K+2, 2)
        L = tfp.math.cholesky_concat(self.scale_grid, K_col)  # shape (K+2, K+2)
        return tf.linalg.LinearOperatorLowerTriangular(L)

    def _ellipse_parametric(self, t):
        xs = self.index_points()  # shape (2, 1)
        Ks = self.kernel.matrix(xs, xs)  # shape (2, 2)
        # compute the confidence region using the inverse cdf of the
        # chi-squared distribution with 2 degrees of freedom (which is an
        # exponential distribution with scale 2)
        s = expon(scale=2.).ppf(q=self.ci)
        w, v = tf.linalg.eigh(Ks)
        U = tf.sqrt(s * w) * v
        z = tf.stack((tf.cos(t), tf.sin(t)), axis=-1)
        a = tf.matmul(U, tf.expand_dims(z, axis=-1)).numpy()
        return (*a, 0)

    def plot_ellipse(self, ax):
        return ax.plot_parametric_curve(self._ellipse_parametric,
                                        t_range=(0, TAU),
                                        fill_opacity=.25) \
            .set_color(TEAL)


class SampleTrajectory:

    def __init__(self, state, theta_tracker, random_state):
        m = len(state.x_grid)
        self.u = random_state.randn(m+2)
        self.v = random_state.randn(m+2)
        self.state = state
        self.theta_tracker = theta_tracker

    def __call__(self, theta):
        v_norm = np.linalg.norm(self.v, axis=None, ord=2)
        v_normed = np.true_divide(self.v, v_norm)
        c = np.sum(self.u * v_normed, axis=None)
        t = self.u - c * v_normed
        t_norm = np.linalg.norm(t, ord=2, axis=None)
        t_normed = np.true_divide(t, t_norm)
        # rotate in the plane spanned by v and the component of u orthogonal
        # to v, so theta = 0 (and 2*pi) recovers v exactly
        eps = v_norm * (v_normed * np.cos(theta) + t_normed * np.sin(theta))
        return self.state.scale().matmul(tf.expand_dims(eps, axis=-1)).numpy()

    def make_updater(self, ax, color, make_line_graph_fn):
        def updater(m):
            foo = self(self.theta_tracker.get_value())
            y_values = foo[:-2]
            return m.become(make_line_graph_fn(ax, self.state.x_grid, y_values, color))
        return updater

    def dot_updater(self, ax):
        def updater(m):
            foo = self(self.theta_tracker.get_value())
            y1, y2 = foo[-2:]
            return m.move_to(ax.coords_to_point(y1, y2))
        return updater

    def make_xa_updater(self, ax):
        def updater(m):
            foo = self(self.theta_tracker.get_value())
            x = self.state.xa
            y = foo[-2]
            return m.move_to(ax.coords_to_point(x, y))
        return updater

    def make_xb_updater(self, ax):
        def updater(m):
            foo = self(self.theta_tracker.get_value())
            x = self.state.xb_tracker.get_value()
            y = foo[-1]
            return m.move_to(ax.coords_to_point(x, y))
        return updater

    def tset(self, ya, yb, ax, z_index, color):
        return ax.get_lines_to_point(ax.coords_to_point(ya, yb), color=color) \
            .set_z_index(z_index)

    def make_lines_updater(self, ax, z_index, color):
        def updater(m):
            foo = self(self.theta_tracker.get_value())
            ya, yb = foo[-2:]
            return m.become(self.tset(ya, yb, ax, z_index, color))
        return updater


class GaussianProcessSamples(Scene):

    def make_line_graph(self, ax, x, y, color):
        x_values = x.squeeze(axis=-1)
        return ax.plot_line_graph(x_values=x_values,
                                  y_values=y,
                                  add_vertex_dots=False,
                                  line_color=color,
                                  # vertex_dot_style=dict(fill_color=color,
                                  #                       fill_opacity=0.8),
                                  stroke_opacity=0.9)

    def construct(self):
        # self.camera.background_color = WHITE
        seed = 23
        random_state = np.random.RandomState(seed)

        # colors = [BLUE, TEAL, GREEN, GOLD, RED, MAROON, PURPLE]
        colors = [RED, GREEN, BLUE]
        n_samples = len(colors)
        n_index_points = 512  # nbr of index points
        n_foo = 2

        y_min, y_max, y_step = -3.2, 3.2, .8
        x_min, x_max, x_step = -.1, 1., .1

        X_grid = np.linspace(x_min, x_max, n_index_points).reshape(-1, 1)
        # X_foo = random_state.uniform(low=x_min, high=x_max, size=(n_foo, 1))
        xa = 0.7
        xb = xa - 0.2
        # x2 = random_state.uniform(low=x_min, high=x_max)

        # kernel_cls = kernels.MaternFiveHalves
        kernel_cls = kernels.ExponentiatedQuadratic
        amplitude = 1.
        length_scale = .1
        kernel = kernel_cls(
            amplitude=tf.constant(amplitude, dtype=default_float()),
            length_scale=tf.constant(length_scale, dtype=default_float())
        )

        # angle
        theta = 0.

        ax1 = Axes(
            x_range=[x_min, x_max, x_step],
            y_range=[y_min, y_max, y_step],
            x_length=7.,
            y_length=4.,
            tips=False,
        )
        ax2 = Axes(
            x_range=[y_min, y_max, y_step],
            y_range=[y_min, y_max, y_step],
            x_length=4.,
            y_length=4.,
            tips=False,
        )
        axes = VGroup(ax1, ax2).arrange(RIGHT, buff=LARGE_BUFF)

        ax1_label = ax1.get_axis_labels(y_label=r"f(x)")
        ax2_label = ax2.get_axis_labels(x_label=r"f(x_1)", y_label=r"f(x_2)")
        labels = VGroup(ax1_label, ax2_label)

        xb_tracker = ValueTracker(xb)
        length_scale_tracker = ValueTracker(length_scale)
        theta_tracker = ValueTracker(theta)

        state = State(kernel, X_grid, xa, xb_tracker)

        curve = state.plot_ellipse(ax2)
        curve.add_updater(lambda m: m.become(state.plot_ellipse(ax2)))

        graphs = VGroup()
        lines = VGroup()
        dots = VGroup()

        for i, color in enumerate(colors):
            traj = SampleTrajectory(state, theta_tracker, random_state)
            foo = traj(theta_tracker.get_value())
            *y_values, ya, yb = foo

            graph = self.make_line_graph(ax1, X_grid, y_values, color) \
                .set_z_index(i+1)
            graph.add_updater(traj.make_updater(ax1, color, self.make_line_graph))
            graphs.add(graph)

            dot_xa = Dot(ax1.coords_to_point(xa, ya),
                         fill_color=color, fill_opacity=0.9, stroke_width=1.5) \
                .set_z_index(i+1)
            dot_xa.add_updater(traj.make_xa_updater(ax1))

            dot_xb = Dot(ax1.coords_to_point(xb_tracker.get_value(), yb),
                         fill_color=color, fill_opacity=0.9, stroke_width=1.5) \
                .set_z_index(i+1)
            dot_xb.add_updater(traj.make_xb_updater(ax1))

            dot = Dot(ax2.coords_to_point(ya, yb),
                      fill_color=color, stroke_width=1.5) \
                .set_z_index(curve.z_index+i+1)
            dot.add_updater(traj.dot_updater(ax2))

            line = traj.tset(ya, yb, ax2, z_index=curve.z_index+i+1, color=color)
            line.add_updater(traj.make_lines_updater(ax2, z_index=curve.z_index+i+1, color=color))

            dots.add(dot, dot_xa, dot_xb)
            lines.add(line)

        line_a = ax1.get_vertical_line(ax1.coords_to_point(xa, .75 * y_min))
        line_b = ax1.get_vertical_line(ax1.coords_to_point(xb_tracker.get_value(), .75 * y_max))
        line_b.add_updater(lambda m: m.become(ax1.get_vertical_line(ax1.coords_to_point(xb_tracker.get_value(), .75 * y_max))))
        lines.add(line_a, line_b)

        label_a = MathTex("x_1").next_to(line_a, DOWN)
        label_b = MathTex("x_2").next_to(line_b, UP)
        label_b.add_updater(lambda m: m.next_to(line_b, UP))
        labels.add(label_a, label_b)

        logo = Text("@louistiao", font="Open Sans", font_size=20, color=BLUE_D).to_corner(DR)

        self.add(logo, axes, labels, graphs, dots, curve, lines)

        rotations = 1
        frequency = 1

        self.play(xb_tracker.animate.set_value(xa - 0.45))
        self.wait()
        self.animate_samples(theta_tracker, rotations, frequency)
        self.wait()

        self.next_section()

        self.play(xb_tracker.animate.set_value(xa + 0.2))
        self.wait()
        self.animate_samples(theta_tracker, rotations, frequency)
        self.wait()

        # self.next_section()
        # self.play(xb_tracker.animate.set_value(xa + .015))
        # self.wait()
        # self.animate_samples(theta_tracker, rotations, frequency)
        # self.wait()

        # self.next_section()
        # self.play(xb_tracker.animate.set_value(xb))
        # self.wait()
        # self.animate_samples(theta_tracker, rotations, frequency)
        # self.wait()

    def animate_samples(self, tracker, rotations, frequency,
                        rate_func=rate_functions.linear):
        self.play(tracker.animate.increment_value(rotations * TAU),
                  rate_func=rate_func, run_time=rotations / frequency)
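The theta update in SampleTrajectory.__call__ is a rotation in the plane spanned by v and the component of u orthogonal to v, so the latent noise vector keeps the norm of v for every theta and returns to v after a full turn; that is what lets each sample curve animate as a seamless loop. A standalone numpy check of both properties (a sketch, independent of the scene above):

import numpy as np

rng = np.random.RandomState(23)
u, v = rng.randn(514), rng.randn(514)

def eps(theta):
    v_norm = np.linalg.norm(v)
    v_hat = v / v_norm
    t = u - (u @ v_hat) * v_hat          # part of u orthogonal to v
    t_hat = t / np.linalg.norm(t)
    return v_norm * (v_hat * np.cos(theta) + t_hat * np.sin(theta))

assert np.allclose(eps(0.0), v)          # the loop starts at v ...
assert np.allclose(eps(2 * np.pi), v)    # ... and closes after one turn
for theta in np.linspace(0, 2 * np.pi, 7):
    # the norm is preserved at every angle
    assert np.isclose(np.linalg.norm(eps(theta)), np.linalg.norm(v))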
| 35.142349
| 127
| 0.582278
| 1,388
| 9,875
| 3.898415
| 0.177954
| 0.033266
| 0.030493
| 0.022177
| 0.377195
| 0.326187
| 0.2798
| 0.262244
| 0.249861
| 0.231011
| 0
| 0.020948
| 0.299038
| 9,875
| 280
| 128
| 35.267857
| 0.760763
| 0.098228
| 0
| 0.194872
| 0
| 0
| 0.005411
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112821
| false
| 0
| 0.025641
| 0.020513
| 0.246154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e34e43e9e1aa6f169f4e3ce01d35a03a886c9108
| 932
|
py
|
Python
|
app/api/v2/models/base_models.py
|
erick-maina/Questioner_API
|
0ffad203fd525e22b52e861ce574803a844cc3b3
|
[
"MIT"
] | null | null | null |
app/api/v2/models/base_models.py
|
erick-maina/Questioner_API
|
0ffad203fd525e22b52e861ce574803a844cc3b3
|
[
"MIT"
] | 7
|
2019-01-15T12:23:59.000Z
|
2019-01-20T17:32:45.000Z
|
app/api/v2/models/base_models.py
|
erick-maina/Questioner_API
|
0ffad203fd525e22b52e861ce574803a844cc3b3
|
[
"MIT"
] | null | null | null |
"""
This module defines the base model and associated functions
"""
from flask import Flask, jsonify
from psycopg2.extras import RealDictCursor
from ....database import db_con
class BaseModels(object):
"""
This class encapsulates the functions of the base model
that will be shared across all other models
"""
def __init__(self, tablename):
"""Initializes the database"""
self.table = tablename
self.connect = db_con()
self.cur = self.connect.cursor(cursor_factory=RealDictCursor)
def check_exists(self, key, value):
"""Checks where a particular item exists within the
database given the table name, column name(key) and
the value to be checked"""
query = """SELECT * FROM {} WHERE {} = {};""".format(
self.table, key, value)
self.cur.execute(query)
result = self.cur.fetchall()
return len(result) > 0
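check_exists still interpolates the table and column names into the statement; with psycopg2 those identifiers can be composed safely as well, via psycopg2.sql. A minimal sketch of the same query built that way (conn is assumed to be an open psycopg2 connection):

from psycopg2 import sql

def check_exists(conn, table, key, value):
    query = sql.SQL("SELECT 1 FROM {} WHERE {} = %s LIMIT 1;").format(
        sql.Identifier(table),  # quoted as identifiers, not as values
        sql.Identifier(key),
    )
    with conn.cursor() as cur:
        cur.execute(query, (value,))  # the value is bound, never spliced in
        return cur.fetchone() is not None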
| 30.064516
| 69
| 0.648069
| 115
| 932
| 5.182609
| 0.573913
| 0.035235
| 0.040268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002874
| 0.253219
| 932
| 30
| 70
| 31.066667
| 0.853448
| 0.332618
| 0
| 0
| 0
| 0
| 0.054482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.214286
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e350ce9086d7c563b5e1154ba5f38a8024e85d87
| 779
|
py
|
Python
|
inconnu/traits/traitcommon.py
|
tiltowait/inconnu
|
6cca5fed520899d159537701b695c94222d8dc45
|
[
"MIT"
] | 4
|
2021-09-06T20:18:13.000Z
|
2022-02-05T17:08:44.000Z
|
inconnu/traits/traitcommon.py
|
tiltowait/inconnu
|
6cca5fed520899d159537701b695c94222d8dc45
|
[
"MIT"
] | 7
|
2021-09-13T00:46:57.000Z
|
2022-01-11T06:38:50.000Z
|
inconnu/traits/traitcommon.py
|
tiltowait/inconnu
|
6cca5fed520899d159537701b695c94222d8dc45
|
[
"MIT"
] | 2
|
2021-11-27T22:24:53.000Z
|
2022-03-16T21:05:00.000Z
|
"""traits/traitcommon.py - Common functionality across trait operations."""
import re
from ..constants import UNIVERSAL_TRAITS
VALID_TRAIT_PATTERN = re.compile(r"^[A-z_]+$")
def validate_trait_names(*traits):
"""
Raises a ValueError if a trait doesn't exist and a SyntaxError
if the syntax is bad.
"""
for trait in traits:
if (trait_len := len(trait)) > 20:
raise ValueError(f"`{trait}` is too long by {trait_len - 20} characters.")
if trait.lower() in UNIVERSAL_TRAITS:
raise SyntaxError(f"`{trait}` is a reserved trait and cannot be added/updated/deleted.")
if VALID_TRAIT_PATTERN.match(trait) is None:
raise SyntaxError(f"Traits can only have letters and underscores. Received `{trait}`.")
| 33.869565
| 100
| 0.671374
| 107
| 779
| 4.785047
| 0.551402
| 0.041016
| 0.066406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006568
| 0.218229
| 779
| 22
| 101
| 35.409091
| 0.834154
| 0.197689
| 0
| 0
| 0
| 0
| 0.321667
| 0.036667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e355aa3c3d4c58a325cb59719ca07b7c1a10df4b
| 2,293
|
py
|
Python
|
docker_ml_templates/simple_batch_model/container/src/tests/test_model.py
|
MadMedian/ubik
|
d8dabf0a26db1e35c653b23facb5045f2ae7bf0d
|
[
"Apache-2.0"
] | null | null | null |
docker_ml_templates/simple_batch_model/container/src/tests/test_model.py
|
MadMedian/ubik
|
d8dabf0a26db1e35c653b23facb5045f2ae7bf0d
|
[
"Apache-2.0"
] | null | null | null |
docker_ml_templates/simple_batch_model/container/src/tests/test_model.py
|
MadMedian/ubik
|
d8dabf0a26db1e35c653b23facb5045f2ae7bf0d
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest

import numpy as np
from sklearn.model_selection import train_test_split

from ..model import RandomForestWithFeatureSelection


def create_dataset(n_rows=1000, n_feats=10, pos_loc=2.0, neg_loc=0.0,
                   pos_scale=3.0, neg_scale=3.0, random_state=1):
    np.random.seed(random_state)
    X_pos = np.random.normal(pos_loc, pos_scale, size=(n_rows, n_feats))
    X_neg = np.random.normal(neg_loc, neg_scale, size=(n_rows, n_feats))
    X = np.vstack([X_pos, X_neg])
    y = np.concatenate([np.ones(n_rows), np.zeros(n_rows)])
    return X, y


class MyTestCase(unittest.TestCase):

    def setUp(self):
        self.random_state = 1
        self.filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'model.joblib')

    def test_model(self):
        unit = RandomForestWithFeatureSelection(random_state=self.random_state, n_estimators=10, top_k=8)
        X, y = create_dataset()
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=self.random_state)
        unit.train(X_train, y_train)
        train_score = unit.score(X_train, y_train)
        test_score = unit.score(X_test, y_test)
        self.assertGreater(train_score['precision_score'], 0.95)
        self.assertGreater(test_score['precision_score'], 0.75)
        for score_train, score_test in zip(train_score.values(), test_score.values()):
            self.assertGreater(score_train, score_test)

    def test_save_load(self):
        model = RandomForestWithFeatureSelection(random_state=self.random_state, n_estimators=5, top_k=6)
        X, y = create_dataset()
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=self.random_state)
        model = model.train(X_train, y_train)
        model.save(self.filepath)
        unit = RandomForestWithFeatureSelection.load(self.filepath)
        train_score = unit.score(X_train, y_train)
        test_score = unit.score(X_test, y_test)
        self.assertGreater(train_score['precision_score'], 0.9)
        self.assertGreater(test_score['precision_score'], 0.7)

    def tearDown(self):
        if os.path.exists(self.filepath):
            os.remove(self.filepath)


if __name__ == '__main__':
    unittest.main()
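create_dataset builds a balanced two-class problem from a pair of Gaussian blobs whose means differ by pos_loc - neg_loc; a standalone sketch of the construction and the shapes the tests rely on:

import numpy as np

np.random.seed(1)
X_pos = np.random.normal(2.0, 3.0, size=(1000, 10))  # positive-class blob
X_neg = np.random.normal(0.0, 3.0, size=(1000, 10))  # negative-class blob
X = np.vstack([X_pos, X_neg])
y = np.concatenate([np.ones(1000), np.zeros(1000)])

print(X.shape, y.shape)  # (2000, 10) (2000,)
print(y.mean())          # 0.5 -- the classes are exactly balanced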
| 39.534483
| 113
| 0.69167
| 339
| 2,293
| 4.383481
| 0.238938
| 0.081427
| 0.050471
| 0.056528
| 0.462988
| 0.440108
| 0.440108
| 0.356662
| 0.263795
| 0.263795
| 0
| 0.019978
| 0.192324
| 2,293
| 57
| 114
| 40.22807
| 0.782397
| 0
| 0
| 0.177778
| 0
| 0
| 0.034889
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.288889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e357ec80e01c5cb1d929b33f8d9bbb4379d90eae
| 43,051
|
py
|
Python
|
DataAnalysis.py
|
ben-dent/Contract-Cheating-Analysis
|
28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df
|
[
"MIT"
] | null | null | null |
DataAnalysis.py
|
ben-dent/Contract-Cheating-Analysis
|
28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df
|
[
"MIT"
] | null | null | null |
DataAnalysis.py
|
ben-dent/Contract-Cheating-Analysis
|
28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt

plt.rcdefaults()
import csv
import sqlite3 as lite
from calendar import monthrange
from datetime import datetime, date, timedelta
from datetimerange import DateTimeRange
import numpy as np
import pycountry_convert as pc
from dateutil.relativedelta import relativedelta
from forex_python.converter import CurrencyRates, RatesNotAvailableError
import random
import pandas as pd
DATABASE_NAME = 'JobDetails.db'
con = lite.connect(DATABASE_NAME)
cur = con.cursor()
bidNames = ["Bid ID", "Job ID", "Country", "User", "Price", "Currency"]
jobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost",
"Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Year", "Week",
"Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment", "Category Type Two", "Possible Months"]
reviewJobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost",
"Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Date Scraped",
"Time Ago", "Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment",
"Possible Years", "Category Type Two", "Possible Months"]
profileNames = ["Profile ID", "Username", "Number Of Reviews", "Average Review", "Hourly Rate",
"Earnings Percentage",
"Country"]
qualificationNames = ["Qualification ID", "Qualification Type", "User", "Qualification Name", "Extra Information"]
reviewNames = ["Review ID", "Project URL", "Profile", "Score", "Amount Paid", "Currency", "Converted Currency",
"Date Scraped", "Date", "Country", "Notes", "Date Range", "Possible Months", "Possible Years"]
winnerNames = ["Job ID", "Job URL", "Username", "Profile URL"]
names = {"Bids": bidNames, "Jobs": jobNames, "JobsHourly": jobNames, "ReviewJobs": reviewJobNames, "Profiles": profileNames,
"Qualifications": qualificationNames, "Reviews": reviewNames, "Winners": winnerNames}
# Converts the currency to USD at the historic rate
def convertCurrency(currency, amount, date):
c = CurrencyRates()
dollarAmount = c.get_rate(currency, 'USD', date) * float(amount)
dollarAmount = '%.2f' % dollarAmount
split = dollarAmount.split('.')
if (int(split[1]) == 0):
return split[0]
return (dollarAmount)
def convertCurrencyWithYear(currency, amount, week, year):
week = str(year) + "-W" + str(week)
startDate = datetime.strptime(week + '-1', "%Y-W%W-%w")
endDate = startDate + relativedelta(weeks=1)
return getAverage(currency, startDate, endDate, amount)
def daterange(startDate, endDate):
for n in range(int((endDate - startDate).days)):
yield startDate + timedelta(n)
def getAverage(currency, startDate, endDate, amount):
c = CurrencyRates()
total = 0
n = 0
for singleDate in daterange(startDate, endDate):
total += c.get_rate(currency, 'USD', singleDate)
n += 1
average = total / n
dollarAmount = average * float(amount)
dollarAmount = '%.2f' % dollarAmount
split = dollarAmount.split('.')
if (int(split[1]) == 0):
return split[0]
return (dollarAmount)
def calculateWeeklyAverage(currency, amount, weeksAgo):
today = date.today()
newDay = (today + relativedelta(weeks=-weeksAgo))
week = newDay.isocalendar()[1]
# startDate = datetime.strptime(str(week) + '-1', "%Y-W%W-%w")
startDate = newDay
endDate = startDate + relativedelta(weeks=1)
return getAverage(currency, startDate, endDate, amount)
def calculateMonthlyAverage(currency, amount, monthsAgo):
today = date.today()
newDay = (today + relativedelta(months=-monthsAgo))
month = newDay.month
year = newDay.year
startDate = date(year, month, 1)
endDate = date(year, month, monthrange(year, month)[1])
return getAverage(currency, startDate, endDate, amount)
def calculateYearlyAverage(currency, amount, year):
startDate = date(year, 1, 1)
endDate = date(year + 1, 1, 1)
return getAverage(currency, startDate, endDate, amount)
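# Note on the week arithmetic used above: strptime's %W/%w directives
# resolve a (year, week) pair to that week's Monday, e.g.
#   datetime.strptime("2021-W05" + "-1", "%Y-W%W-%w")  -> 2021-02-01 (a Monday)
#   ... + relativedelta(weeks=1)                       -> 2021-02-08
# so getAverage then averages the exchange rate over exactly seven days.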
# Retrieves saved details to plot
def plotFromDatabase():
cur.execute('SELECT Country FROM Bids')
results = cur.fetchall()
countries = {}
for item in results:
country = item[0]
n = 1
if (countries.get(country) != None):
n = countries.get(country) + 1
countries.update({country: n})
plotBarChartsOfBidderCountries(countries)
# Generates multiple windows of bar charts to display the countries of bidders - grouped by continent
def plotBarChartsOfBidderCountries(countryValues):
# Dictionary containing continent codes and continent names
continents = {
'AN': 'Antarctica',
'NA': 'North America',
'EU': 'Europe',
'SA': 'South America',
'AS': 'Asia',
'OC': 'Oceania',
'AF': 'Africa'
}
# Dictionary that will hold the data for each country
countryData = {
'AN': [[], []],
'NA': [[], []],
'EU': [[], []],
'SA': [[], []],
'AS': [[], []],
'OC': [[], []],
'AF': [[], []]
}
continentPlotData = {
'Antarctica': 0,
'North America': 0,
'Europe': 0,
'South America': 0,
'Asia': 0,
'Oceania': 0,
'Africa': 0
}
# Gets all the countries and the number of bidders from each country
countries = list(countryValues.keys())
values = list(countryValues.values())
# Populating the countryData dictionary with the data from the countries and values lists
# Grouped by continent
for i in range(len(countries)):
country = countries[i]
if country == 'Lao Peoples Democratic Republic':
country = "Lao People's Democratic Republic"
elif country == "Cote DIvoire":
country = "Cote D'Ivoire"
try:
country_code = pc.country_name_to_country_alpha2(country, cn_name_format="default")
except KeyError:
continue
try:
continent_code = pc.country_alpha2_to_continent_code(country_code)
except KeyError:
continue
# continent_code = pc.country_alpha2_to_continent_code(country_code)
valuesFromContinent = countryData.get(continent_code)
if valuesFromContinent is None:
continue
continentCountries = valuesFromContinent[0]
continentCountries.append(country)
continentValues = valuesFromContinent[1]
continentValues.append(values[i])
countryData.update({continent_code: [continentCountries, continentValues]})
continentNames = list(countryData.keys())
# Plots a graph for each continent
for name in continentNames:
data = countryData.get(name)
if (data != [[], []]):
countries = sorted(data[0])
values = []
for country in countries:
if country == "Lao People's Democratic Republic":
country = "Lao Peoples Democratic Republic"
elif country == "Cote D'Ivoire":
country = "Cote DIvoire"
values.append(countryValues.get(country))
nameOfContinent = continents.get(name)
# countryValues.get() can return None for the re-mapped country names,
# so skip those entries rather than silently dropping the whole sum
continentPlotData.update({nameOfContinent: sum(v for v in values if v is not None)})
yPos = np.arange(len(countries))
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title("Countries of bidders")
plt.xticks(yPos, countries, rotation='vertical')
ax.bar(yPos, values, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
continent_name = continents.get(name)
plt.title(continent_name)
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
imageName = "image" + ''.join(char for char in continent_name if char.isalnum()) + ".png"
plt.savefig(imageName, bbox_inches='tight', dpi=100)
yPos = np.arange(len(continentPlotData))
vals = list(continentPlotData.values())
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title("Continents")
ax.bar(yPos, vals, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
plt.xticks(yPos, sorted(list(continentPlotData.keys())), rotation='vertical')
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
plt.title("Continents")
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.savefig("imageContinents", bbox_inches='tight', dpi=100)
plt.show(block=False)
def plotComparison(data, title):
yPos = np.arange(len(data))
vals = list(data.values())
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title(title)
ax.bar(yPos, vals, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
if title == 'Categories':
vals = [1,2,3,4,5,'Not Categorised']
else:
vals = sorted(list(data.keys()))
plt.xticks(yPos, vals)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
plt.title(title)
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.savefig("image" + title, bbox_inches='tight', dpi=100)
plt.show(block=False)
def plotAllCategories(data):
labels = list(data.keys())
values = list(data.values())
yPos = np.arange(1)
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
title = 'All Categories'
fig.canvas.set_window_title(title)
ax.bar(yPos, values, align='center', alpha=0.5)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
ax.set_ylim(bottom=0)
plt.xticks(yPos, labels)
plt.ylabel('Number')
plt.title('All Categories')
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.show()
def plotSingleType(data, type):
head = list(data.keys())[0]
values = data.get(head)
yPos = np.arange(1)
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
title = ''
if type in ['Tags', 'Category', 'Range', 'Keyword']:
title = type
else:
title = 'Countries of '
if (type == 'Bids'):
title += 'Bidders'
else:
title += type
fig.canvas.set_window_title(title)
# plt.xticks(yPos, [head], rotation='vertical')
ax.bar(yPos, values, align='center', alpha=0.5)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
ax.set_ylim(bottom=0)
ax.xaxis.set_visible(False)
plt.ylabel('Number')
plt.title(head)
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.show()
def doAverages():
cur.execute('SELECT JobID, AverageBidCost FROM Jobs')
jobs = cur.fetchall()
con.commit()
for job in jobs:
jobID = job[0]
cost = job[1]
if (cost == ''):
bidAverage = calcAverage(cur, jobID)
if (bidAverage == [-1, -1]):
bidAverage = "None"
else:
bidAverage = str(bidAverage[1]) + str(bidAverage[0])
update = "UPDATE Jobs SET AverageBidCost = ? WHERE JobID = ?"
cur.execute(update, [bidAverage, jobID])
con.commit()
cur.execute('SELECT JobID, AverageBidCost FROM ReviewJobs')
jobs = cur.fetchall()
con.commit()
for job in jobs:
jobID = job[0]
cost = job[1]
if (cost == ''):
bidAverage = calcAverage(cur, jobID)
if (bidAverage == [-1, -1]):
bidAverage = "None"
else:
bidAverage = str(bidAverage[1]) + str(bidAverage[0])
update = "UPDATE ReviewJobs SET AverageBidCost = ? WHERE JobID = ?"
cur.execute(update, [bidAverage, jobID])
con.commit()
def calcAverage(cur, jobID):
average = 0.0
n = 0
select = "SELECT Price FROM Bids WHERE JobID = ?"
cur.execute(select, [jobID])
prices = cur.fetchall()
for price in prices:
givenAmount = price[0]
price = float(''.join(c for c in givenAmount if c.isnumeric() or c == '.'))
n += 1
average += price
try:
result = average / n
except ZeroDivisionError:
return [-1, -1]
symbol = givenAmount[0]
return [float('%.2f' % result), symbol]
# Saving values from the database to CSV files
def saveAllDataToCSV():
cur.execute("SELECT name FROM sqlite_master WHERE type = 'table'")
con.commit()
tables = [each[0] for each in cur.fetchall()]
saveToCSV(tables, '*', None, None)
def saveToCSV(tables, columns, filter, name):
for table in tables:
query = "SELECT " + columns + " FROM " + table
if filter is not None:
query += " WHERE " + filter
cur.execute(query)
data = []
for item in cur.fetchall():
data.append(list(item))
con.commit()
if name is None:
file = table + ".csv"
else:
if (table == "ReviewJobs"):
outputTable = "Review Jobs"
else:
outputTable = table
file = name.split('.')[0] + ' - ' + outputTable + '.csv'
columnNames = names.get(table)
if len(data) > 0:
data.insert(0, columnNames)
data.insert(1, [])
for i in range(len(data)):
line = data[i]
if (i == 0):
open(file, 'w').close()
with open(file, 'a', newline='') as fp:
a = csv.writer(fp, delimiter=',')
line = [line]
a.writerows(line)
def countDateRange(start, end):
givenRange = DateTimeRange(start, end)
tables = ['Jobs', 'ReviewJobs']
for table in tables:
data = []
query = 'SELECT * FROM ' + table
cur.execute(query)
results = [list(each) for each in cur.fetchall()]
for job in results:
dateRange = job[15]
d = [each.lstrip().rstrip() for each in dateRange.split("-")]
s = d[0].split("/")
startFormat = str(int(s[2]) + 2000) + "/" + s[1] + "/" + s[0]
inRange = False
if len(d) > 1:
e = d[1].split("/")
endFormat = str(int(e[2]) + 2000) + "/" + e[1] + "/" + e[0]
tableRange = DateTimeRange(startFormat, endFormat)
for day in tableRange.range(relativedelta(days=1)):
if day in givenRange:
inRange = True
else:
inRange = startFormat in givenRange
if inRange:
data.append(job)
return len(data)
def saveDateRange(start, end):
givenRange = DateTimeRange(start, end)
tables = ['Jobs', 'ReviewJobs']
for table in tables:
data = []
query = 'SELECT * FROM ' + table
cur.execute(query)
results = [list(each) for each in cur.fetchall()]
for job in results:
dateRange = job[15]
d = [each.lstrip().rstrip() for each in dateRange.split("-")]
s = d[0].split("/")
startFormat = str(int(s[2]) + 2000) + "/" + s[1] + "/" + s[0]
inRange = False
if len(d) > 1:
e = d[1].split("/")
endFormat = str(int(e[2]) + 2000) + "/" + e[1] + "/" + e[0]
tableRange = DateTimeRange(startFormat, endFormat)
for day in tableRange.range(relativedelta(days=1)):
if day in givenRange:
inRange = True
else:
inRange = startFormat in givenRange
if inRange:
data.append(job)
columnNames = names.get(table)
file = "Date Range for " + table + " from " + start.replace("/", "-") + " to " + end.replace("/", "-") + ".csv"
if len(data) > 0:
data.insert(0, columnNames)
data.insert(1, [])
for i in range(len(data)):
line = data[i]
if (i == 0):
open(file, 'w+').close()
with open(file, 'a', newline='') as fp:
a = csv.writer(fp, delimiter=',')
line = [line]
a.writerows(line)
def scoreProjects(constant, doPrint):
positive, negative = getKeywords()
positiveCopy = []
for word in positive:
positiveCopy.append(word)
new = ''.join(c + '.' for c in word if c.isalpha())
positiveCopy.append(new[:-1])
positive = positiveCopy
ratio = (len(positive) * constant) / len(negative)
cur.execute('SELECT JobID, Title, Description FROM Jobs')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
if doPrint:
print("Job Score " + str(i + 1) + "/" + str(len(results) + 1))
job = results[i]
jID = job[0]
title = job[1].lower()
description = job[2].lower()
posMatches = ""
negMatches = ""
numPositive = 0
numNegative = 0
for keyword in positive:
numPositive += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in posMatches):
posMatches += (", " + keyword)
for keyword in negative:
numNegative += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in negMatches):
negMatches += (", " + keyword)
try:
# numNegative *= ratio
# l = (numPositive * ratio)
# score = round((numPositive / (numPositive + numNegative)) * 100)
score = max(0, round((((numPositive * 100) - (ratio * numNegative)) / (numPositive + numNegative))))
except ZeroDivisionError:
score = -1
p = posMatches.split(",")
b = ""
for j in range(len(p)):
if (j > 0):
b += p[j]
if (j != len(p) - 1):
b += ", "
posMatches = b.lstrip()
n = negMatches.split(",")
b = ""
for j in range(len(n)):
if (j > 0):
b += n[j]
if (j != len(n) - 1):
b += ", "
negMatches = b.lstrip()
query = "UPDATE Jobs SET Score = " + str(score) + \
", PositiveMatches = '" + str(posMatches) + "', NegativeMatches = '" + str(
negMatches) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
cur.execute('SELECT JobID, Title, Description FROM ReviewJobs')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
if doPrint:
print("Review Job Score " + str(i + 1) + "/" + str(len(results) + 1))
job = results[i]
jID = job[0]
title = job[1].lower()
description = job[2].lower()
posMatches = ""
negMatches = ""
numPositive = 0
numNegative = 0
for keyword in positive:
numPositive += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in posMatches):
posMatches += (", " + keyword)
for keyword in negative:
numNegative += (len(title.split(keyword)) - 1) + (len(description.split(keyword)) - 1)
if (len(title.split(keyword)) > 1) or (len(description.split(keyword)) > 1):
if (keyword not in negMatches):
negMatches += (", " + keyword)
try:
# numNegative *= ratio
# l = (numPositive * ratio)
# score = round((numPositive / (numPositive + numNegative)) * 100)
score = max(0, round((((numPositive * 100) - (ratio * numNegative)) / (numPositive + numNegative))))
except ZeroDivisionError:
score = -1
p = posMatches.split(",")
b = ""
for i in range(len(p)):
if (i > 0):
b += p[i]
if (i != len(p) - 1):
b += ", "
posMatches = b.lstrip()
n = negMatches.split(",")
b = ""
for i in range(len(n)):
if (i > 0):
b += n[i]
if (i != len(n) - 1):
b += ", "
negMatches = b.lstrip()
query = "UPDATE ReviewJobs SET Score = " + str(score) + \
", PositiveMatches = '" + str(posMatches) + "', NegativeMatches = '" + str(
negMatches) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
def getKeywords():
positive = []
negative = []
for line in open('positiveKeywords.txt'):
if (len(line) > 1):
word = line.rstrip('\n')
positive.append(word)
for line in open('negativeKeywords.txt'):
if (len(line) > 1):
word = line.rstrip('\n')
negative.append(word)
return [keyword.lower() for keyword in positive], [keyword.lower() for keyword in negative]
def jobsInDateRange(start, end):
givenRange = DateTimeRange(start, end)
tables = ['Jobs', 'ReviewJobs']
data = []
for table in tables:
query = 'SELECT DateRange, JobID, CountryOfWinner FROM ' + table
cur.execute(query)
results = [list(each) for each in cur.fetchall()]
for job in results:
dateRange = job[0]
d = [each.lstrip().rstrip() for each in dateRange.split("-")]
s = d[0].split("/")
startFormat = str(int(s[2]) + 2000) + "/" + s[1] + "/" + s[0]
inRange = False
endFormat = ''
if len(d) > 1:
e = d[1].split("/")
endFormat = str(int(e[2]) + 2000) + "/" + e[1] + "/" + e[0]
tableRange = DateTimeRange(startFormat, endFormat)
for day in tableRange.range(relativedelta(days=1)):
if day in givenRange:
inRange = True
else:
inRange = startFormat in givenRange
if inRange:
data.append([job[1], job[2]])
return data
def conversions():
cur.execute("SELECT ReviewID, AmountPaid, Currency, Date FROM Reviews WHERE ConvertedCurrency = 'None' or ConvertedCurrency = '' AND AmountPaid != ''")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Review " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
id = r[0]
value = r[1]
amount = ""
if (value != 'SEALED'):
try:
amount = float(''.join(c for c in value if c.isnumeric() or c == '.'))
except ValueError:
# nothing numeric in the scraped value; treat it as missing
amount = "None"
else:
amount = "None"
currency = r[2]
dateOff = r[3]
timeSplit = dateOff.split()
timeFrame = timeSplit[1]
timeAmount = int(timeSplit[0])
convertedCurrency = "None"
if amount != "None":
if ((timeFrame == 'month') or (timeFrame == 'months')):
convertedCurrency = calculateMonthlyAverage(currency, amount, timeAmount)
elif ((timeFrame == 'week') or (timeFrame == 'weeks')):
convertedCurrency = calculateWeeklyAverage(currency, amount, timeAmount)
elif ((timeFrame == 'year') or (timeFrame == 'years')):
convertedCurrency = calculateYearlyAverage(currency, amount,
date.today().year - timeAmount)
elif ((timeFrame == 'day') or (timeFrame == 'days')):
dateToConvert = date.today() - relativedelta(days=timeAmount)
convertedCurrency = convertCurrency(currency, amount, dateToConvert)
convertedCurrency = "$" + str(convertedCurrency)
query = "UPDATE Reviews SET ConvertedCurrency = '" + str(convertedCurrency) + "' WHERE ReviewID = " + str(id)
cur.execute(query)
con.commit()
def jobConversions():
cur.execute(
"SELECT JobID, FinalCost, Currency, Year, Week FROM Jobs WHERE (ConvertedFinalCost = 'None' or ConvertedFinalCost = '') AND FinalCost != 'None'")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
id = r[0]
value = r[1]
if (value != 'None'):
amount = float(''.join(c for c in value if c.isnumeric() or c == '.'))
else:
amount = "None"
currency = r[2]
year = r[3]
week = r[4]
convertedCurrency = "None"
if amount != "None":
# convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# success = False
# while not success:
try:
convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# convertedCurrency = "$" + str(convertedCurrency)
except RatesNotAvailableError:
convertedCurrency = "Unavailable"
query = "UPDATE Jobs SET ConvertedFinalCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
id)
cur.execute(query)
con.commit()
reviewJobConversions()
def jobAvConversions():
cur.execute(
"SELECT JobID, AverageBidCost, Currency, Year, Week FROM Jobs")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
id = r[0]
value = r[1]
if (value != 'None'):
amount = float(''.join(c for c in value if c.isnumeric() or c == '.'))
else:
amount = "None"
currency = r[2]
year = r[3]
week = r[4]
convertedCurrency = "None"
if amount != "None":
# convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# success = False
# while not success:
try:
convertedCurrency = convertCurrencyWithYear(currency, amount, week, year)
# convertedCurrency = "$" + str(convertedCurrency)
except RatesNotAvailableError:
convertedCurrency = "Unavailable"
query = "UPDATE Jobs SET AverageBidCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
id)
cur.execute(query)
con.commit()
reviewAvJobConversions()
def reviewAvJobConversions():
cur.execute(
"SELECT JobID, AverageBidCost, Currency, TimeAgo FROM ReviewJobs")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Review Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
timeSplit = r[3].split()
timeFrame = timeSplit[1]
timeAmount = int(timeSplit[0])
currency = r[2]
finalCost = r[1]
convertedCurrency = ""
jID = r[0]
if (finalCost != "None"):
valuePaid = float(''.join(c for c in finalCost if c.isnumeric() or c == '.'))
if ((timeFrame == 'month') or (timeFrame == 'months')):
convertedCurrency = calculateMonthlyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'week') or (timeFrame == 'weeks')):
convertedCurrency = calculateWeeklyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'year') or (timeFrame == 'years')):
convertedCurrency = calculateYearlyAverage(currency, valuePaid,
date.today().year - timeAmount)
elif ((timeFrame == 'day') or (timeFrame == 'days')):
dateToConvert = date.today() - relativedelta(days=timeAmount)
convertedCurrency = convertCurrency(currency, valuePaid, dateToConvert)
convertedCurrency = "$" + str(convertedCurrency)
query = "UPDATE ReviewJobs SET AverageBidCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
def reviewJobConversions():
cur.execute(
"SELECT JobID, FinalCost, Currency, TimeAgo FROM ReviewJobs WHERE (ConvertedFinalCost = 'None' or ConvertedFinalCost = '' or ConvertedFinalCost = '$') AND FinalCost != 'None'")
res = cur.fetchall()
results = []
for result in res:
results.append(list(result))
for i in range(len(results)):
print("Review Job " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
timeSplit = r[3].split()
timeFrame = timeSplit[1]
timeAmount = int(timeSplit[0])
currency = r[2]
finalCost = r[1]
convertedCurrency = ""
jID = r[0]
if (finalCost != "None"):
valuePaid = float(''.join(c for c in finalCost if c.isnumeric() or c == '.'))
if ((timeFrame == 'month') or (timeFrame == 'months')):
convertedCurrency = calculateMonthlyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'week') or (timeFrame == 'weeks')):
convertedCurrency = calculateWeeklyAverage(currency, valuePaid, timeAmount)
elif ((timeFrame == 'year') or (timeFrame == 'years')):
convertedCurrency = calculateYearlyAverage(currency, valuePaid,
date.today().year - timeAmount)
elif ((timeFrame == 'day') or (timeFrame == 'days')):
dateToConvert = date.today() - relativedelta(days=timeAmount)
convertedCurrency = convertCurrency(currency, valuePaid, dateToConvert)
convertedCurrency = "$" + str(convertedCurrency)
query = "UPDATE ReviewJobs SET ConvertedFinalCost = '" + str(convertedCurrency) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
def calcDateRange(time):
today = date.today()
split = time.split()
timeFrame = split[1]
timeAmount = int(split[0])
if (timeFrame == "day") or (timeFrame == "days"):
newDate = today + relativedelta(days=-timeAmount)
timeRange = newDate.strftime("%d/%m/%y")
return timeRange
if (timeFrame == "hour") or (timeFrame == "hours"):
startDate = today + relativedelta(days=-1)
endDate = today
if (timeFrame == "week") or (timeFrame == "weeks"):
newDate = today + relativedelta(weeks=-timeAmount)
while (newDate.weekday() != 0):
newDate += relativedelta(days=-1)
startDate = newDate
endDate = startDate + relativedelta(days=6)
elif (timeFrame == "month") or (timeFrame == "months"):
newDate = today + relativedelta(months=-timeAmount)
year = newDate.year
month = newDate.month
startDate = date(year, month, 1)
endDate = date(year, month, monthrange(year, month)[1])
elif (timeFrame == "year") or (timeFrame == "years"):
newDate = today + relativedelta(years=-timeAmount)
startDate = date(newDate.year, 1, 1)
endDate = date(newDate.year, 12, 31)
return (startDate.strftime("%d/%m/%y") + " - " + endDate.strftime("%d/%m/%y"))
def getDateRanges():
today = date.today()
cur.execute('SELECT JobID, TimeAgo FROM ReviewJobs WHERE DateRange IS NULL')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
print("Review Job Date " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
# timeSplit = r[1].split()
# timeFrame = timeSplit[1]
# timeAmount = int(timeSplit[0])
jID = r[0]
timeRange = calcDateRange(r[1])
query = "UPDATE ReviewJobs SET DateRange = '" + str(timeRange) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
cur.execute('SELECT ReviewID, Date FROM Reviews WHERE DateRange IS NULL')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
print("Review Date " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
# timeSplit = r[1].split()
# timeFrame = timeSplit[1]
# timeAmount = int(timeSplit[0])
jID = r[0]
timeRange = ""
timeRange = calcDateRange(r[1])
query = "UPDATE Reviews SET DateRange = '" + str(timeRange) + "' WHERE ReviewID = " + str(
jID)
cur.execute(query)
con.commit()
cur.execute('SELECT JobID, Year, Week FROM Jobs')
res = cur.fetchall()
results = []
for r in res:
results.append(list(r))
for i in range(len(results)):
print("Job Date " + str(i + 1) + "/" + str(len(results) + 1))
r = results[i]
year = r[1]
jobWeek = r[2]
jID = r[0]
week = str(year) + "-W" + str(jobWeek)
startDate = datetime.strptime(week + '-1', "%Y-W%W-%w")
endDate = startDate + relativedelta(weeks=1)
timeRange = startDate.strftime("%d/%m/%y") + " - " + endDate.strftime("%d/%m/%y")
query = "UPDATE Jobs SET DateRange = '" + str(timeRange) + "' WHERE JobID = " + str(
jID)
cur.execute(query)
con.commit()
def optimiseConstant():
low = 9
high = 17
averageDistance = 1000
constant = random.randrange(low, high + 1)
iteration = 1
ranges = {1: [0, 20], 2: [20, 40], 3: [40, 60], 4: [60, 80], 5: [80, 100]}
while ((averageDistance >= 5) and (iteration < 10000)):
print("Iteration number: " + str(iteration) + " - Constant = " + str(constant))
tooBig = 0
tooSmall = 0
scoreProjects(constant, False)
averageDistances = []
for i in range(1, 6):
totalDistance = 0
n = 0
query = 'SELECT Score FROM ReviewJobs WHERE Category = ' + str(i)
cur.execute(query)
results = [r[0] for r in cur.fetchall()]
scoreRange = ranges.get(i)
lower = scoreRange[0]
upper = scoreRange[1]
for result in results:
n += 1
if (result != -1):
if ((result >= lower) and (result <= upper)):
distance = 0
elif (result > upper):
distance = result - upper
tooBig += 1
else:
distance = lower - result
tooSmall += 1
# distance = min(abs(result - lower), abs(result - upper))
totalDistance += distance
averageDistances.append(totalDistance / n)
averageDistance = sum(averageDistances) / 5
print("Average Distance: " + str(averageDistance) + "\n")
if (averageDistance >= 5):
if (tooBig > tooSmall):
constant += 0.0125
else:
constant -= 0.0125
iteration += 1
print(constant)
def plotYears(type):
cur.execute('SELECT DISTINCT(Year) FROM Jobs ORDER BY Year')
years = [each[0] for each in cur.fetchall()]
cur.execute('SELECT PossibleYears FROM ReviewJobs')
results = [each[0] for each in cur.fetchall()]
for result in results:
ys = [int(each.lstrip().rstrip()) for each in result.split(',')]
years += [each for each in ys if each not in years]
years = sorted(years)
data = {}
for year in years:
num = 0
if type == 'Projects':
query = "SELECT COUNT(JobID) FROM Jobs WHERE Year = " + str(year)
cur.execute(query)
num = cur.fetchone()[0]
query = "SELECT COUNT(JobID) FROM ReviewJobs WHERE PossibleYears LIKE '%" + str(year) + "%'"
cur.execute(query)
num += cur.fetchone()[0]
elif type == 'Bidders':
query = "SELECT COUNT(DISTINCT(User)) FROM Bids WHERE JobID IN (SELECT JobID FROM Jobs WHERE Year = " + str(year) + ")"
cur.execute(query)
num = cur.fetchone()[0]
query = "SELECT COUNT(DISTINCT(User)) FROM Bids WHERE JobID IN (SELECT JobID FROM ReviewJobs WHERE PossibleYears LIKE '%" + str(
year) + "%')"
cur.execute(query)
num += cur.fetchone()[0]
data.update({year: num})
yPos = np.arange(len(data))
vals = []
for year in sorted(list(data.keys())):
vals.append(data.get(year))
fig, ax = plt.subplots(1, 1, sharex=True, sharey=True)
fig.canvas.set_window_title(type + ' By Year')
ax.bar(yPos, vals, align='center', alpha=0.5)
ax.set_ylim(bottom=0)
plt.xticks(yPos, years)
ax.yaxis.set_major_locator(plt.MaxNLocator(20, integer=True))
plt.ylabel('Number')
plt.title(type + ' By Year')
# Resizing the graphs to fit in the window
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
plt.savefig("image" + type + ' By Year', bbox_inches='tight', dpi=100)
plt.show(block=False)
def possibleYears():
cur.execute('SELECT JobID, DateRange FROM ReviewJobs')
results = [list(each) for each in cur.fetchall()]
for pair in results:
jID = pair[0]
dateRange = pair[1]
years = ''
split = dateRange.split()
startYear = 2000 + int(split[0].split('/')[-1])
endYear = 2000 + int(split[-1].split('/')[-1])
for year in range(startYear, endYear):
years += str(year) + ", "
years += str(endYear)
query = "UPDATE ReviewJobs SET PossibleYears = '" + years + "' WHERE JobID = " + str(jID)
cur.execute(query)
con.commit()
def avConversions():
data = pd.read_csv('savedIDs.txt', delimiter="\t")
saved = [each[0] for each in data.values]
jobsDf = pd.read_csv('jobsAv.txt', delimiter="\t")
jobsToSave = [pair for pair in jobsDf.values if pair[0] not in saved]
a = open('savedIDs.txt', 'at')
for i in range(len(jobsToSave)):
pair = jobsToSave[i]
print("Job " + str(i + 1) + "/" + str(len(jobsToSave)))
jID = pair[0]
av = pair[1]
if av != 'None':
cur.execute("SELECT DateRange FROM Jobs WHERE JobID = " + str(jID))
dateRange = cur.fetchone()[0]
split = dateRange.split()
startSplit = split[0].split('/')
startDate = date(2000 + int(startSplit[2]), int(startSplit[1]), int(startSplit[0]))
endSplit = split[2].split('/')
endDate = date(2000 + int(endSplit[2]), int(endSplit[1]), int(endSplit[0]))
cur.execute('SELECT Currency FROM Jobs WHERE JobID = ' + str(jID))
currency = cur.fetchone()[0]
av = getAverage(currency, startDate, endDate, av)
cur.execute('SELECT NumberOfBidders FROM Jobs WHERE JobID = ' + str(jID))
numBids = cur.fetchone()[0]
if numBids != 0:
av = '%.2f' % (float(av) / int(numBids))
else:
av = 0
cur.execute("UPDATE Jobs SET AverageBidCost = " + str(av) + " WHERE JobID = " + str(jID))
else:
cur.execute("UPDATE Jobs SET AverageBidCost = 'None' WHERE JobID = " + str(jID))
a.write(str(jID) + "\n")
con.commit()
reviewJobsDf = pd.read_csv('reviewJobsAv.txt', delimiter="\t")
reviewJobstoSave = [pair for pair in reviewJobsDf if pair[0] not in saved]
for i in range(len(reviewJobstoSave)):
pair = reviewJobstoSave[i]
print("Job " + str(i + 1) + "/" + str(len(reviewJobstoSave)))
jID = pair[0]
av = pair[1]
if av != 'None':
cur.execute("SELECT DateRange FROM ReviewJobs WHERE JobID = " + str(jID))
dateRange = cur.fetchone()[0]
split = dateRange.split()
startSplit = split[0].split('/')
startDate = date(2000 + int(startSplit[2]), int(startSplit[1]), int(startSplit[0]))
endSplit = split[2].split('/')
endDate = date(2000 + int(endSplit[2]), int(endSplit[1]), int(endSplit[0]))
cur.execute("SELECT Currency FROM ReviewJobs WHERE JobID = " + str(jID))
currency = cur.fetchone()[0]
av = getAverage(currency, startDate, endDate, av)
cur.execute("SELECT NumberOfBidders FROM ReviewJobs WHERE JobID = " + str(jID))
numBids = cur.fetchone()[0]
av = '%.2f' % (av / numBids)
cur.execute("UPDATE ReviewJobs SET AverageBidCost = " + str(av) + ' WHERE JobID = ' + str(jID))
a.write(str(jID) + "\n")
con.commit()
# def doExtras():
# # doAverages()
# # jobConversions()
# # reviewJobConversions()
# # conversions()
# # getDateRanges()
# # possibleYears()
# # plotYears('Projects')
# doExtras()
# avConversions()
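scoreProjects, getDateRanges and the conversion helpers above all splice values into UPDATE statements with string concatenation, so a matched keyword containing an apostrophe would break the SQL. sqlite3 supports ? placeholders, so the same update can be written safely. A minimal sketch against the same JobDetails.db schema:

import sqlite3

con = sqlite3.connect('JobDetails.db')
cur = con.cursor()

def update_score(job_id, score, pos_matches, neg_matches):
    # sqlite3 binds the values itself, so quotes inside the match
    # strings cannot terminate the statement early
    cur.execute(
        "UPDATE Jobs SET Score = ?, PositiveMatches = ?, NegativeMatches = ? "
        "WHERE JobID = ?",
        (score, pos_matches, neg_matches, job_id),
    )
    con.commit()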
| 31.016571
| 184
| 0.551207
| 4,617
| 43,051
| 5.119558
| 0.10548
| 0.02073
| 0.016246
| 0.008377
| 0.646063
| 0.610653
| 0.573254
| 0.55887
| 0.547066
| 0.526632
| 0
| 0.01628
| 0.312281
| 43,051
| 1,388
| 185
| 31.016571
| 0.782079
| 0.044575
| 0
| 0.568769
| 0
| 0.004137
| 0.13025
| 0.001023
| 0.001034
| 0
| 0
| 0
| 0
| 1
| 0.033092
| false
| 0
| 0.01241
| 0
| 0.061013
| 0.015512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e359de552a30d24e6371b5e1ad922405353576ab
| 1,733
|
py
|
Python
|
deploy_flask_plotly/app.py
|
mohamedsaadmoustafa/Arabic_Dialect_Classification
|
a13e92ddaa8fda5afcc40d1ce97946174f9a4674
|
[
"BSD-3-Clause"
] | null | null | null |
deploy_flask_plotly/app.py
|
mohamedsaadmoustafa/Arabic_Dialect_Classification
|
a13e92ddaa8fda5afcc40d1ce97946174f9a4674
|
[
"BSD-3-Clause"
] | null | null | null |
deploy_flask_plotly/app.py
|
mohamedsaadmoustafa/Arabic_Dialect_Classification
|
a13e92ddaa8fda5afcc40d1ce97946174f9a4674
|
[
"BSD-3-Clause"
] | 1
|
2022-03-14T19:41:57.000Z
|
2022-03-14T19:41:57.000Z
|
from flask import Flask, render_template, request, jsonify
import numpy as np
import pickle
import sys
import json
import re

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False

target_names = [
    'AE', 'BH', 'DZ',
    'EG', 'IQ', 'JO',
    'KW', 'LB', 'LY',
    'MA', 'OM', 'PL',
    'QA', 'SA', 'SD',
    'SY', 'TN', 'YE'
]

arabic_dialects = {
    'AE': 'لهجة اماراتية', 'BH': 'لهجة بحرينية', 'DZ': 'لهجة جزائرية', 'EG': 'لهجة مصرية', 'IQ': 'لهجة عراقية',
    'JO': 'لهجة أردنية', 'KW': 'لهجة كويتية', 'LB': 'لهجة لبنانية', 'LY': 'لهجة ليبية', 'MA': 'لهجة مغربية',
    'OM': 'لهجة عمانية', 'PL': 'لهجة فلسطينية', 'QA': 'لهجة قطرية', 'SA': 'لهجة سعودية', 'SD': 'لهجة سودانية',
    'SY': 'لهجة سورية', 'TN': 'لهجة تونسية', 'YE': 'لهجة يمنية'
}


def model(text):
    print(text, file=sys.stderr)
    filename = 'model.sav'
    loaded_model = pickle.load(open(filename, 'rb'))
    pred = loaded_model.predict([text])
    pred_p = (loaded_model.predict_proba([text])[0] * 100).round(2)  # %
    return arabic_dialects[target_names[pred[0]]], pred_p


@app.route('/')
def home():
    return render_template('home.html')


@app.route('/api')
def predict():
    text_input = request.args.get('text', '')
    text_input = re.sub(r'[0-9a-zA-Z?]', '', text_input)  # remove english letters and numbers
    if text_input == "":
        return "null"
    predict, predict_p = model(text_input)
    return jsonify(
        {
            'predict': json.dumps(predict, ensure_ascii=False),
            'predict_p': predict_p.tolist(),
        }
    )


if __name__ == '__main__':
    app.run(debug=True)
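model() re-reads model.sav from disk on every request; loading the pickled model once at startup is the usual Flask pattern. A sketch of that variant (same filename, and reusing the target_names / arabic_dialects mappings defined above):

import pickle

# Load the pickled classifier once at import time instead of per request.
with open('model.sav', 'rb') as f:
    LOADED_MODEL = pickle.load(f)

def model(text):
    pred = LOADED_MODEL.predict([text])
    pred_p = (LOADED_MODEL.predict_proba([text])[0] * 100).round(2)
    return arabic_dialects[target_names[pred[0]]], pred_p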
| 27.507937
| 112
| 0.562031
| 226
| 1,733
| 4.154867
| 0.50885
| 0.047923
| 0.038339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007675
| 0.248125
| 1,733
| 63
| 113
| 27.507937
| 0.71297
| 0.028275
| 0
| 0
| 0
| 0
| 0.219271
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.12766
| 0
| 0.255319
| 0.042553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e359e595d02499d12ce9088ccf34ac138ffada36
| 384
|
py
|
Python
|
2018/2/hash.py
|
octonion/adventofcode
|
132e8bf0c9bc0ad64a0e12e22170177df4947e37
|
[
"MIT"
] | 1
|
2019-01-10T09:43:34.000Z
|
2019-01-10T09:43:34.000Z
|
2018/2/hash.py
|
octonion/adventofcode
|
132e8bf0c9bc0ad64a0e12e22170177df4947e37
|
[
"MIT"
] | null | null | null |
2018/2/hash.py
|
octonion/adventofcode
|
132e8bf0c9bc0ad64a0e12e22170177df4947e37
|
[
"MIT"
] | null | null | null |
data = [i.strip() for i in open("input.txt").readlines()]

two = 0
three = 0
for code in data:
    counts = {}
    for i in range(0, len(code)):
        if code[i] in counts.keys():
            counts[code[i]] += 1
        else:
            counts[code[i]] = 1
    if (2 in counts.values()):
        two += 1
    if (3 in counts.values()):
        three += 1

print(two*three)
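The per-string character tally is exactly what collections.Counter provides; an equivalent version of the same checksum (same input file):

from collections import Counter

data = [line.strip() for line in open("input.txt")]

# A string counts once towards `two` if any character appears exactly
# twice, and once towards `three` if any appears exactly three times.
two = sum(1 for code in data if 2 in Counter(code).values())
three = sum(1 for code in data if 3 in Counter(code).values())

print(two * three)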
| 21.333333
| 57
| 0.492188
| 58
| 384
| 3.258621
| 0.413793
| 0.047619
| 0.063492
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.34375
| 384
| 17
| 58
| 22.588235
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.023438
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e35b469d7625c1fa8f422ae121b4eaab1ed606da
| 10,171
|
py
|
Python
|
origin/app_bb_modifier.py
|
nukeguys/myutil
|
65d0aff36ec45bffbd2e52fea0fabfbabd5609b1
|
[
"Apache-2.0"
] | null | null | null |
origin/app_bb_modifier.py
|
nukeguys/myutil
|
65d0aff36ec45bffbd2e52fea0fabfbabd5609b1
|
[
"Apache-2.0"
] | null | null | null |
origin/app_bb_modifier.py
|
nukeguys/myutil
|
65d0aff36ec45bffbd2e52fea0fabfbabd5609b1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import sys
import os
import io
from orderedset import OrderedSet
from shell import Shell
import logpath as LogPath
VERSION = '1.1'
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
current_path = os.getcwd()
current_meta = ''
if current_path.endswith('meta-signage') == True:
current_meta = 'signage'
elif current_path.endswith('meta-commercial') == True:
current_meta = 'commercial'
elif current_path.endswith('meta-id') == True:
current_meta = 'id'
else:
print('You should execute this file in [%smeta-id, meta-commercial, meta-signage%s] path' % (WARNING, ENDC))
exit()
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.items())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
class AbstractVerifyCommit:
    def __init__(self, _appName, _tagName):
        self.appName = _appName
        self.tagName = _tagName
        if len(self.appName) == 0:
            self.appName = self.inputString('app name')
        if len(self.tagName) == 0:
            self.tagName = self.inputString('tag name')
        self.tagName = self.tagName.strip()
        print('AppName : %s, TagName : %s' % (self.appName, self.tagName))

    def inputString(self, title):
        sys.stdout.write(title + ' : ')
        ret = input()
        if len(ret) == 0:
            sys.exit()
        return ret

    def process(self):
        if not self.getBBFileName() or not self.getBBFileDir():
            return
        self.changeDir()
        self.writeWebOsVersion()
        self.fileReadAndWrite()
        self.fileRemoveAndRename()
        self.changeDirBack()

    def changeDir(self):
        os.chdir(self.dirPath)

    def getShowRefTag(self):
        bashCommands = [
            "git ls-remote --tags ssh://we.lge.com:29425/id/app/%s.git | grep '%s$' | awk {'print $1'}",
            "git ls-remote --tags ssh://wall.lge.com:29448/app/%s.git | grep '%s$' | awk {'print $1'}",
            "git ls-remote --tags ssh://we.lge.com:29425/id/webos-pro/%s.git | grep '%s$' | awk {'print $1'}",
            "git ls-remote --tags ssh://we.lge.com:29425/id/gpro/starfish/%s.git | grep '%s$' | awk {'print $1'}",
            "git ls-remote --tags ssh://we.lge.com:29425/id/module/%s.git | grep '%s$' | awk {'print $1'}",
            "git ls-remote --tags ssh://we.lge.com:29425/id/gpro/webos-pro/%s.git | grep '%s$' | awk {'print $1'}",
            "git ls-remote --tags ssh://wall.lge.com:29448/webos-pro/%s.git | grep '%s$' | awk {'print $1'}",
            "git ls-remote --tags ssh://we.lge.com:29425/id/service/%s.git | grep '%s$' | awk {'print $1'}"
        ]
        err_msg = ""
        changeAppName = self.appName
        if self.appName == 'configd-data':
            changeAppName = 'configd-data-starfish'
        for bashCommand in bashCommands:
            bashCommand = bashCommand % (changeAppName, self.tagName)
            str_output, str_err = Shell.execute(bashCommand)
            if str_err == "":
                return str_output.replace('\n', '')
            err_msg = str_err
        # No remote had the tag: report the last collected error and stop.
        print('%s%s \n%s!!!!!!!%s' % (WARNING, bashCommand, err_msg, ENDC))
        sys.exit()

    def getBBFileName(self):
        # Raw string so the shell receives the literal '\;' find terminator.
        bashCommand = r"find . -type f -name '%s.b*' -exec basename {} \; " % (self.appName)
        str_output, str_err = Shell.execute(bashCommand)
        if str_err != "":
            print('%s%s Error : %s!!!!!!!%s' % (WARNING, bashCommand, str_err, ENDC))
            return False
        self.bbfilename = str_output.replace('\n', '')
        return True

    def getBBFileDir(self):
        bashCommand = r"find . -type f -name '%s.b*' -exec dirname {} \; " % (self.appName)
        str_output, str_err = Shell.execute(bashCommand)
        if str_err != "":
            print('%s%s Error : %s!!!!!!!%s' % (WARNING, bashCommand, str_err, ENDC))
            return False
        self.dirPath = str_output.replace('\n', '')
        return True

    def writeWebOsVersion(self):
        self.fileContents['WEBOS_VERSION'] = self.tagName.split('/')[-1] + '_' + self.getShowRefTag()

    def fileReadAndWrite(self):
        with open(self.bbfilename, 'r') as readF, open('my_' + self.bbfilename, 'w') as writeF:
            isWritten = False
            for x in readF.readlines():
                for key in self.fileContents:
                    if x.startswith(key):
                        key_content = x.replace(key, '').replace('=', '').replace('"', '').split()[0]
                        version = key_content.split('-')[0]
                        writeF.write(key + ' = "' + version + '-' + self.fileContents[key] + '"\n')
                        isWritten = True
                        break
                if not isWritten:
                    writeF.write(x)
                isWritten = False

    def fileRemoveAndRename(self):
        Shell.execute('rm %s' % self.bbfilename)
        Shell.execute('mv my_%s %s' % (self.bbfilename, self.bbfilename))

    def changeDirBack(self):
        os.chdir(current_path)

class App(AbstractVerifyCommit):
    fileContents = {'WEBOS_VERSION': ''}

    def __init__(self, appName, tagName):
        AbstractVerifyCommit.__init__(self, appName, tagName)

class InputHelper:
    def process(self, listLogs):
        print('%sVersion%s : %s' % (OKBLUE, ENDC, VERSION))
        print('If you have any questions, please inquire at %s%shyeonsub.jung@lge.com%s' % (UNDERLINE, BOLD, ENDC))
        for log in listLogs:
            App(log.name, 'submissions/' + log.tag).process()
        #sys.stdout.write('Multiple Apps?[Y/N] : ')
        #multipleApps=input()
        #if(multipleApps == 'Y' or multipleApps == 'y'):
        #    print('')
        #else:
        #    App().process()

class Log:
    list_name = []
    list_tag = []
    IDX = enum('RELEASE_NOTES', 'DETAILED_NOTES', 'TESTING_PERFORMED', 'QA_NOTES', 'ISSUES_ADDRESSED')
    list_title = [
        ':Release Notes:',
        ':Detailed Notes:',
        ':Testing Performed:',
        ':QA Notes:',
        ':Issues Addressed:'
    ]

    def __init__(self):
        self.name = ''
        self.tag = ''
        self.items = [[], [], [], [], []]

    def process(self, filename):
        with open(filename, 'r') as readF:
            idx = -1
            for strX in readF.readlines():
                if len(self.name) == 0:
                    # First line is 'name=tag'; strip the trailing newline so
                    # the tag is usable in titles and git refs later on.
                    self.name = strX.split('=')[0].strip()
                    self.tag = strX.split('=')[1].strip()
                    self.list_name.append(self.name)
                    self.list_tag.append(self.tag)
                elif len(self.list_title) > (idx + 1) and strX.startswith(self.list_title[idx + 1]):
                    idx = idx + 1
                else:
                    if idx == Log.IDX.QA_NOTES or idx == Log.IDX.ISSUES_ADDRESSED:
                        # strip() so lines like 'NA\n' / 'None\n' are skipped too
                        if strX.strip().lower() == 'na' or strX.strip().lower() == 'none':
                            continue
                    if idx != -1:
                        self.items[idx].append(strX)

    def printAll(self):
        print('name : %s, tag : %s' % (self.name, self.tag))
        for item in self.items:
            for content in item:
                print(content)
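# Expected .log layout, inferred from Log.process above (illustrative names):
#
#   com.webos.app.example=example_tag
#   :Release Notes:
#   ...
#   :Detailed Notes:
#   ...
#   :Testing Performed:
#   ...
#   :QA Notes:
#   NA
#   :Issues Addressed:
#   None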
class GeneratorLog:
    def __init__(self):
        GeneratorLog.topdir = LogPath.getAppLogPath()
        if GeneratorLog.topdir == '':
            print('You must make log first!')
            sys.exit()
        self.listLogs = []

    def printLog(self, logfile):
        with open(logfile, 'r') as readF:
            for i in readF:
                sys.stdout.write(i)

    def makeLog(self):
        logfile = self.topdir + 'commit_log'
        with open(logfile, 'w') as writeF:
            writeF.write(self.makeTitle() + '\n')
            for i in range(0, len(Log.list_title)):
                self.makeNotes(Log.list_title[i], i, writeF)
        #self.printLog(logfile)

    def makeNotes(self, title, idx, writeF):
        writeF.write(title + '\n')
        items = []
        for log in self.listLogs:
            if idx == Log.IDX.DETAILED_NOTES:
                items.append('[' + log.name[log.name.rfind('.') + 1:] + ']\n')
            for item in log.items[idx]:
                items.append(item)
        #if idx > Log.IDX.DETAILED_NOTES:
        #    items = list(OrderedSet(items))
        for item in items:
            writeF.write(item)

    def getDuplicateTitle(self):
        # Longest common prefix shared by all app names.
        chars = list(self.listLogs[0].list_name[0])
        for name in self.listLogs[0].list_name[1:]:
            i = 0
            for char in list(name):
                if len(chars) <= i or chars[i] != char:
                    return ''.join(chars[:i])
                i += 1
        return ''

    def makeTitle(self):
        duplicateTitles = self.getDuplicateTitle()
        containsDuplicateTitle = len(duplicateTitles) > 0
        title_str = io.StringIO()
        if containsDuplicateTitle:
            title_str.write(duplicateTitles + '{')
        for name, tag in zip(self.listLogs[0].list_name, self.listLogs[0].list_tag):
            title_str.write(name[len(duplicateTitles):] + '=' + tag + ',')
        title = title_str.getvalue()
        title = title[:-1]  # drop the trailing comma
        return title + '}' if containsDuplicateTitle else title

    def parseLog(self):
        for root, dirs, files in os.walk(self.topdir, topdown=False):
            for name in files:
                if name.lower().endswith('.log'):
                    filename = os.path.join(root, name)
                    log = Log()
                    log.process(filename)
                    #log.printAll()
                    self.listLogs.append(log)

if __name__ == '__main__':
    generator = GeneratorLog()
    generator.parseLog()
    generator.makeLog()
    InputHelper().process(generator.listLogs)
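# A minimal sketch of the Shell helper this script relies on; the real class is
# defined earlier in the file and may differ. Assumption: Shell.execute runs a
# command through the shell and returns decoded (stdout, stderr) strings.
#
#     import subprocess
#
#     class Shell:
#         @staticmethod
#         def execute(bashCommand):
#             proc = subprocess.Popen(bashCommand, shell=True,
#                                     stdout=subprocess.PIPE,
#                                     stderr=subprocess.PIPE)
#             out, err = proc.communicate()
#             return out.decode(), err.decode()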
| 39.730469
| 125
| 0.534756
| 1,155
| 10,171
| 4.627706
| 0.193074
| 0.022638
| 0.016464
| 0.022451
| 0.240225
| 0.200187
| 0.200187
| 0.16913
| 0.16913
| 0.156408
| 0
| 0.016525
| 0.3158
| 10,171
| 255
| 126
| 39.886275
| 0.751545
| 0.026153
| 0
| 0.101322
| 0
| 0.044053
| 0.169497
| 0.037801
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105727
| false
| 0
| 0.026432
| 0
| 0.22467
| 0.088106
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e35b94ca7170c796ccad0fbd61ea4ee542cd52e0
| 3,514
|
py
|
Python
|
gamebike/controlmapbits.py
|
johnlpage/gamebike
|
429736d0238dca2961763f2a33d8e4e72ed97364
|
[
"Apache-2.0"
] | null | null | null |
gamebike/controlmapbits.py
|
johnlpage/gamebike
|
429736d0238dca2961763f2a33d8e4e72ed97364
|
[
"Apache-2.0"
] | null | null | null |
gamebike/controlmapbits.py
|
johnlpage/gamebike
|
429736d0238dca2961763f2a33d8e4e72ed97364
|
[
"Apache-2.0"
] | null | null | null |
# These were used when I was trying to map between controllers -
# to map a gamepad to a wheel - but I ended up using a driver for that instead.
# 2021 comment (What did I mean there?)
GAMEPAD_TRIANGLE = (0, 0x08)
GAMEPAD_CIRCLE = (0, 0x04)
GAMEPAD_CROSS = (0, 0x02)
GAMEPAD_SQUARE = (0, 0x01)
GAMEPAD_DPAD_MASK = 0x0F
GAMEPAD_DPAD_NONE = (2, 0x0F)
GAMEPAD_DPAD_U = (2, 0x00)
GAMEPAD_DPAD_R = (2, 0x02)
GAMEPAD_DPAD_D = (2, 0x04)
GAMEPAD_DPAD_L = (2, 0x06)
GAMEPAD_PSMENU = (1, 0x10)
GAMEPAD_SELECT = (1, 0x01)
GAMEPAD_START = (1, 0x02)
GAMEPAD_LJOY_BUTTON = (1, 0x04)
GAMEPAD_RJOY_BUTTON = (1, 0x08)
GAMEPAD_L1 = (0, 0x10)
GAMEPAD_R1 = (0, 0x20)
GAMEPAD_L2 = (0, 0x40)
GAMEPAD_R2 = (0, 0x80)
GAMEPAD_RTRIGGER = 18
GAMEPAD_LTRIGGER = 17
# These are Bytes not Bits
GAMEPAD_LJOY_X = 3
GAMEPAD_LJOY_Y = 4
GAMEPAD_RJOY_X = 5
GAMEPAD_RJOY_Y = 6
CLICKER_BUTTONS = 2
CLICKER_LEFT = [0x4B]
CLICKER_RIGHT = [0x4E]
CLICKER_UP = [0x05]
CLICKER_DOWN = [0x3E, 0x29] # Toggles
STEER_MIN = 0x0000
STEER_MAX = 0x3FFF
STEER_MID = 0x1FFF
WHEEL_NEUTRAL = [0x08, 0x00, 0x00, 0x5E, 0x00, 0x20, 0x7F, 0xFF]
WHEEL_TRIANGLE = (0, 0x80)
WHEEL_CIRCLE = (0, 0x40)
WHEEL_CROSS = (0, 0x10)
WHEEL_SQUARE = (0, 0x20)
WHEEL_DPAD_MASK = 0x0F
WHEEL_DPAD_NONE = (0, 0x08)
WHEEL_DPAD_U = (0, 0x00)
WHEEL_DPAD_R = (0, 0x02)
WHEEL_DPAD_D = (0, 0x04)
WHEEL_DPAD_L = (0, 0x06)
WHEEL_RPADDLE = (1, 0x01)
WHEEL_LPADDLE = (1, 0x02)
WHEEL_L1 = (1, 0x80)
WHEEL_L2 = (1, 0x08)
WHEEL_R1 = (1, 0x40)
WHEEL_R2 = (1, 0x04)
WHEEL_SELECT = (1, 0x10)
WHEEL_START = (1, 0x20)
WHEEL_PSMENU = (2, 0x08)
WHEEL_GEARUP = (2, 0x01)
WHEEL_GEARDOWN = (2, 0x02)
WHEEL_BACK = (2, 0x04)
WHEEL_ADJUST_CLOCKWISE = (2, 0x10)
WHEEL_ADJUST_ANTICLOCKWISE = (2, 0x20)
WHEEL_PLUS = (2, 0x80)
WHEEL_MINUS = (2, 0x40)
# Bytes
WHEEL_WHEEL_HIGHBYTE = 5
WHEEL_WHEEL_LOWBYTE = 4 # 0000-EFF3 But 0000 is extreme
WHEEL_ACCELERATEBYTE = 6 # 0-FF 0 IS DOWN
WHEEL_BRAKEBYTE = 7 # 0-FF 0 IS DOWN
# Each mapping is (FromByte, FromBit) -> (ToByte, ToBit).
# The wheel has dedicated gear buttons and a shifter that aren't on the controller.
# Stick click is not used in TDU2 at all, so we use that for the gears.
BUTTON_MAPPINGS = [
    (GAMEPAD_TRIANGLE, WHEEL_TRIANGLE),
    (GAMEPAD_CIRCLE, WHEEL_CIRCLE),
    (GAMEPAD_SQUARE, WHEEL_SQUARE),
    (GAMEPAD_CROSS, WHEEL_CROSS),
    (GAMEPAD_R1, WHEEL_R2),
    (GAMEPAD_L1, WHEEL_L2),
    (GAMEPAD_PSMENU, WHEEL_PSMENU),
    (GAMEPAD_START, WHEEL_START),
    (GAMEPAD_SELECT, WHEEL_SELECT),
    (GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN),
    (GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP),
]
# These made it work in the PS3 menu screen (note the cross/square targets are
# swapped relative to BUTTON_MAPPINGS).
XMB_BUTTON_MAPPINGS = [
    (GAMEPAD_TRIANGLE, WHEEL_TRIANGLE),
    (GAMEPAD_CIRCLE, WHEEL_CIRCLE),
    (GAMEPAD_CROSS, WHEEL_SQUARE),
    (GAMEPAD_SQUARE, WHEEL_CROSS),
    (GAMEPAD_R1, WHEEL_R2),
    (GAMEPAD_L1, WHEEL_L2),
    (GAMEPAD_PSMENU, WHEEL_PSMENU),
    (GAMEPAD_START, WHEEL_START),
    (GAMEPAD_SELECT, WHEEL_SELECT),
    (GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN),
    (GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP),
]
DPAD_MAPPINGS = [
    (GAMEPAD_DPAD_NONE, WHEEL_DPAD_NONE),
    (GAMEPAD_DPAD_U, WHEEL_DPAD_U),
    (GAMEPAD_DPAD_D, WHEEL_DPAD_D),
    (GAMEPAD_DPAD_L, WHEEL_DPAD_L),
    (GAMEPAD_DPAD_R, WHEEL_DPAD_R),
]
STEAM_BUTTON_MAPPINGS = [
    WHEEL_CROSS, WHEEL_CIRCLE, WHEEL_TRIANGLE, WHEEL_SQUARE,
    WHEEL_START, WHEEL_PSMENU, WHEEL_SELECT,
    WHEEL_GEARUP, WHEEL_GEARDOWN, WHEEL_L1, WHEEL_R1
]
STEAM_BUTTONS2_MAPPINGS = [WHEEL_LPADDLE, WHEEL_RPADDLE, WHEEL_PLUS, WHEEL_MINUS]
STEAM_DPAD_MAPPINGS = [WHEEL_DPAD_U, WHEEL_DPAD_L, WHEEL_DPAD_D, WHEEL_DPAD_R]
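# A minimal sketch (not part of the original file) of how a (byte, bit) table
# like BUTTON_MAPPINGS could be applied to translate one HID report into
# another. `gamepad_report` and `wheel_report` are hypothetical mutable byte
# buffers (e.g. bytearray) laid out as the constants above describe.
def apply_button_mappings(gamepad_report, wheel_report, mappings=BUTTON_MAPPINGS):
    for (src_byte, src_bit), (dst_byte, dst_bit) in mappings:
        if gamepad_report[src_byte] & src_bit:   # source button pressed
            wheel_report[dst_byte] |= dst_bit    # set the target bit
        else:
            wheel_report[dst_byte] &= ~dst_bit   # clear the target bit
    return wheel_report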
| 24.746479
| 79
| 0.726807
| 538
| 3,514
| 4.399628
| 0.280669
| 0.057034
| 0.021546
| 0.00507
| 0.213773
| 0.205323
| 0.205323
| 0.205323
| 0.205323
| 0.205323
| 0
| 0.090971
| 0.164769
| 3,514
| 142
| 80
| 24.746479
| 0.715503
| 0.132328
| 0
| 0.173077
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082345
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e360941b07ce2d49e4d682a79c218a27dc642b96
| 1,696
|
py
|
Python
|
tests/test_init.py
|
nuvolos-cloud/resolos
|
0918066cab7b11ef04ae005f3e052b14a65ded68
|
[
"MIT"
] | 1
|
2021-11-30T06:47:24.000Z
|
2021-11-30T06:47:24.000Z
|
tests/test_init.py
|
nuvolos-cloud/resolos
|
0918066cab7b11ef04ae005f3e052b14a65ded68
|
[
"MIT"
] | 1
|
2021-04-08T12:56:39.000Z
|
2021-04-08T12:56:39.000Z
|
tests/test_init.py
|
nuvolos-cloud/resolos
|
0918066cab7b11ef04ae005f3e052b14a65ded68
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from click.testing import CliRunner
from resolos.interface import res, res_run
from resolos.shell import run_shell_cmd
from tests.common import verify_result
import logging
logger = logging.getLogger(__name__)
def test_init_empty():
    runner = CliRunner()
    with runner.isolated_filesystem() as fs:
        verify_result(runner.invoke(res, ["-v", "debug", "init", "-y"]))

def test_init_from_archive():
    runner = CliRunner()
    with runner.isolated_filesystem() as fs:
        project_folder = Path(fs)
        run_shell_cmd("which python")
        verify_result(
            runner.invoke(
                res,
                [
                    "-v",
                    "debug",
                    "init",
                    "-u",
                    "https://resolos.s3.eu-central-1.amazonaws.com/examples/v0.3.0/data_with_pandas.tar.gz",
                ],
            )
        )
        assert (project_folder / "README.md").exists()
        assert (project_folder / "process_dataset.py").exists()
        assert (project_folder / "var_spx_monthly.csv").exists()
        assert not (project_folder / "var_spx_monthly_mean.csv").exists()
        output = verify_result(
            runner.invoke(
                res_run,
                ["which python; python process_dataset.py"],
            )
        )
        assert "Written the mean of the columns to var_spx_monthly_mean.csv" in output
        assert (project_folder / "README.md").exists()
        assert (project_folder / "process_dataset.py").exists()
        assert (project_folder / "var_spx_monthly.csv").exists()
        assert (project_folder / "var_spx_monthly_mean.csv").exists()
| 34.612245
| 108
| 0.595519
| 193
| 1,696
| 4.984456
| 0.393782
| 0.121622
| 0.138254
| 0.129938
| 0.556133
| 0.507277
| 0.507277
| 0.507277
| 0.251559
| 0.251559
| 0
| 0.004174
| 0.293632
| 1,696
| 48
| 109
| 35.333333
| 0.798831
| 0
| 0
| 0.27907
| 0
| 0.023256
| 0.212854
| 0.042453
| 0
| 0
| 0
| 0
| 0.209302
| 1
| 0.046512
| false
| 0
| 0.139535
| 0
| 0.186047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e36654005301b9cf41913be091e578a74c259424
| 1,669
|
py
|
Python
|
pythonapm/collector/test_reqhandler.py
|
nextapm/pythonapm
|
ddd8ad374e4f268516fc81f0bf710206565b737e
|
[
"FTL"
] | null | null | null |
pythonapm/collector/test_reqhandler.py
|
nextapm/pythonapm
|
ddd8ad374e4f268516fc81f0bf710206565b737e
|
[
"FTL"
] | null | null | null |
pythonapm/collector/test_reqhandler.py
|
nextapm/pythonapm
|
ddd8ad374e4f268516fc81f0bf710206565b737e
|
[
"FTL"
] | null | null | null |
import unittest
import json
import requests
from unittest import mock
from .reqhandler import send_req
from pythonapm.agent import Agent
from pythonapm import constants
class Resp:
    def __init__(self):
        self.data = json.dumps({'txn': 'success'})

    def json(self):
        return json.loads(self.data)
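# Note (added): Resp is a minimal stand-in for requests.Response; judging by the
# assertions below, send_req only calls .json() on the object returned by
# requests.post, so that appears to be all it needs to fake.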
class ReqhandlerTest(unittest.TestCase):
    def setUp(self):
        self.agent = Agent()
        self.agent.config.license_key = 'key'
        self.agent.config.project_id = 'id'
        self.agent.config.print_payload = True

    @mock.patch('pythonapm.collector.reqhandler.get_agent')
    @mock.patch('pythonapm.collector.reqhandler.agentlogger')
    @mock.patch('pythonapm.collector.reqhandler.requests')
    def test_send_req(self, mock_requests, mock_logger, mock_agent):
        mock_agent.return_value = self.agent
        payload = {'txn': 'txn_data'}
        payload_str = json.dumps(payload)
        response_data = {'txn': 'success'}
        complete_url = f'{constants.collector_domain}/api/agent?licenseKey=key&projectId=id'
        mock_requests.post.return_value = Resp()
        send_req('/api/agent', payload)
        mock_requests.post.assert_called_with(complete_url, data=payload_str, headers={'content-type': 'application/json'})
        mock_logger.info.assert_called_with(f'response for /api/agent request :{json.dumps(response_data)}')
        self.assertEqual(mock_logger.info.call_count, 3)
        self.assertListEqual(mock_logger.info.mock_calls, [
            mock.call(f'sending request to {constants.collector_domain}/api/agent'),
            mock.call(f'payload :{payload_str}'),
            mock.call('response for /api/agent request :{"txn": "success"}'),
        ])
| 37.931818
| 233
| 0.711803
| 216
| 1,669
| 5.319444
| 0.319444
| 0.039164
| 0.039164
| 0.070496
| 0.197563
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000714
| 0.161174
| 1,669
| 43
| 234
| 38.813953
| 0.82
| 0
| 0
| 0
| 0
| 0
| 0.270546
| 0.15177
| 0
| 0
| 0
| 0
| 0.121212
| 1
| 0.121212
| false
| 0
| 0.212121
| 0.030303
| 0.424242
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e369c6172f4572b618818bae53263220f4153bc2
| 934
|
py
|
Python
|
util/label.py
|
bluehackmaster/bl-api-objdetect
|
dc8b514e62346904c3b9ab7e88987461721dd6b0
|
[
"Apache-2.0"
] | null | null | null |
util/label.py
|
bluehackmaster/bl-api-objdetect
|
dc8b514e62346904c3b9ab7e88987461721dd6b0
|
[
"Apache-2.0"
] | 16
|
2020-01-28T21:56:54.000Z
|
2022-03-11T23:15:09.000Z
|
util/label.py
|
bluehackmaster/bl-api-objdetect
|
dc8b514e62346904c3b9ab7e88987461721dd6b0
|
[
"Apache-2.0"
] | 1
|
2017-10-17T04:52:08.000Z
|
2017-10-17T04:52:08.000Z
|
import logging

def convert_class_to_code(label_map,
                          max_num_classes,
                          use_display_name=True):
    categories = []
    list_of_ids_already_added = []
    if not label_map:
        label_id_offset = 1
        for class_id in range(max_num_classes):
            categories.append({
                'id': class_id + label_id_offset,
                'name': 'category_{}'.format(class_id + label_id_offset)
            })
        return categories
    for item in label_map.item:
        if not 0 < item.id <= max_num_classes:
            logging.info('Ignore item %d since it falls outside of requested '
                         'label range.', item.id)
            continue
        if use_display_name and item.HasField('display_name'):
            name = item.display_name
        else:
            name = item.name
        if item.id not in list_of_ids_already_added:
            list_of_ids_already_added.append(item.id)
            categories.append({'id': item.id, 'name': name})
    return categories
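# Illustrative behaviour (added note): with no label map the function
# synthesizes placeholder categories, e.g.
#   convert_class_to_code(None, 2)
#   -> [{'id': 1, 'name': 'category_1'}, {'id': 2, 'name': 'category_2'}]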
| 33.357143
| 72
| 0.635974
| 128
| 934
| 4.328125
| 0.367188
| 0.054152
| 0.070397
| 0.086643
| 0.185921
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002941
| 0.271949
| 934
| 27
| 73
| 34.592593
| 0.811765
| 0
| 0
| 0.076923
| 0
| 0
| 0.105038
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e36ae367550f66dd2b4a1cdb03a10bf47b3c6b9c
| 5,694
|
py
|
Python
|
hdp_api/routes/__init__.py
|
CedricCazinHC/HyperAPI
|
789419b95679faf550a57773b9cc57107b2b8504
|
[
"BSD-3-Clause"
] | null | null | null |
hdp_api/routes/__init__.py
|
CedricCazinHC/HyperAPI
|
789419b95679faf550a57773b9cc57107b2b8504
|
[
"BSD-3-Clause"
] | null | null | null |
hdp_api/routes/__init__.py
|
CedricCazinHC/HyperAPI
|
789419b95679faf550a57773b9cc57107b2b8504
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import ABCMeta, abstractproperty, abstractmethod
import inspect
import random
import re
import time
from requests.exceptions import HTTPError
class RoutePathInvalidException(Exception):
    def __init__(self, name, value, path, validator):
        self.path = path
        self.name = name
        self.value = value
        self.validator = validator

    def __str__(self):
        return 'Route path invalid : {}={} ({})\n\t{}'.format(self.name, self.value, self.validator.__class__.__name__, self.path)
class ValidatorObjectID(object):
    """(str) A 24 hex digit MongoDB ObjectID."""
    @staticmethod
    def __call__(value):
        return re.match('[0-9a-z]{24}', '{}'.format(value)) is not None

    @staticmethod
    def getRandom():
        return ''.join(random.choices('0123456789abcdef', k=24))

class ValidatorAny(object):
    """(any) Any object except None and empty string."""
    @staticmethod
    def __call__(value):
        if value is None:
            return False
        if isinstance(value, str) and not value.strip():
            return False
        return True

    @staticmethod
    def getRandom():
        return ''.join(random.choices('0123456789abcdef', k=24))

class ValidatorInt(object):
    """(int) An Integer Value."""
    @staticmethod
    def __call__(value):
        return isinstance(value, int)

    @staticmethod
    def getRandom():
        return random.randint(0, 100)
class Route(object):
    # Python 2 style metaclass declaration kept from the original; under
    # Python 3 this attribute has no effect, so abstractness is not enforced.
    __metaclass__ = ABCMeta
    GET = "GET"
    POST = "POST"
    _path_keys = {}
    VALIDATOR_OBJECTID = ValidatorObjectID()
    VALIDATOR_ANY = ValidatorAny()
    VALIDATOR_INT = ValidatorInt()

    @abstractproperty
    def name(self):
        """The Route key (not name) as defined in the API schema"""
        return "Route Name"

    @abstractproperty
    def httpMethod(self):
        """The Route http method as defined in the API schema"""
        return "http Method"

    @abstractproperty
    def path(self):
        """The Route path as defined in the API schema"""
        return "Route Path"
    def __init__(self, session, watcher=None):
        self.session = session
        self._watcher = watcher

    def __call__(self, **kwargs):
        formatter = dict.fromkeys(self._path_keys)
        for _path_key, _validator in self._path_keys.items():
            _value = kwargs.pop(_path_key, None)
            if not _validator(_value):
                raise RoutePathInvalidException(_path_key, _value, self.path, _validator)
            formatter[_path_key] = _value
        _path = self.path if self.path[0] != '/' else self.path[1:]
        _path = _path.format(**formatter)
        if self._watcher:
            # With a watcher attached, log the call and its outcome.
            self._watcher(str(self), kwargs.pop('info', 'call'))
            try:
                _result = self.session.request(self.httpMethod, _path, **kwargs)
                self._watcher(str(self), '200')
                return _result
            except HTTPError as HE:
                self._watcher(str(self), str(HE.response))
                raise
        return self.session.request(self.httpMethod, _path, **kwargs)
    def call_when(self, condition=lambda x: True, call=lambda x: None, step=1, timeout=500, **kwargs):
        _remaining = timeout
        if self._watcher:
            kwargs['info'] = 'call'
        while _remaining > 0:
            _remaining = _remaining - step
            time.sleep(step)
            _res = self.__call__(**kwargs)
            if condition(_res):
                return call(_res)
            elif kwargs.get('info', None) == 'call':
                kwargs['info'] = 'retry'
        if self._watcher:
            self._watcher(str(self), 'timeout')
        return None

    def wait_until(self, condition=lambda x: True, step=1, timeout=60, **kwargs):
        _remaining = timeout
        if self._watcher:
            kwargs['info'] = 'call'
        while _remaining > 0:
            _remaining = _remaining - step
            time.sleep(step)
            _res = self.__call__(**kwargs)
            if condition(_res):
                return _res
            elif kwargs.get('info', None) == 'call':
                kwargs['info'] = 'retry'
        if self._watcher:
            self._watcher(str(self), 'timeout')
        return None
    @property
    def help(self):
        msg = 'Route {} [{}]'.format(self.name, self.httpMethod)
        msg += '\n{}'.format(self.path)
        for _k, _v in self._path_keys.items():
            msg += '\n{:>20} : {}'.format(_k, _v.__doc__)
        msg += '\n'
        print(msg)

    def __repr__(self):
        return '{} <{}> {}:{}'.format(self.__class__.__name__, id(self), self.httpMethod, self.path)

    def __str__(self):
        return '{: >4}:{}'.format(self.httpMethod, self.path)
class Resource(object):
    __metaclass__ = ABCMeta

    @abstractproperty
    def name(self):
        """The resource name as defined in the API schema"""
        return "Resource Name"

    def __init__(self, session, watcher=None):
        self.session = session
        self._routes = {}
        for _route in (_m[1] for _m in inspect.getmembers(self.__class__) if inspect.isclass(_m[1]) and issubclass(_m[1], Route)):
            _routeInstance = _route(session, watcher=watcher)
            _routeName = _route.__name__.lower().replace('_', '')
            self.__setattr__(_routeName, _routeInstance)
            self._routes[_routeName] = _routeInstance

    def __iter__(self):
        for _r in self._routes.values():
            yield _r

    @property
    def help(self):
        for _r in self._routes.values():
            _r.help

    def __repr__(self):
        return '{} <{}>'.format(self.__class__.__name__, id(self))
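# A minimal sketch (not part of the original module) of how a concrete resource
# and route would be declared and called; 'Projects', 'GetProject', the path and
# the session object are hypothetical names, not the real API schema.
class Projects(Resource):
    name = 'Projects'

    class GetProject(Route):
        name = 'getProject'
        httpMethod = Route.GET
        path = '/projects/{project_ID}'
        _path_keys = {'project_ID': Route.VALIDATOR_OBJECTID}

# projects = Projects(session)
# projects.getproject(project_ID='0123456789abcdef01234567')  # validated, then requested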
| 31.114754
| 131
| 0.592378
| 634
| 5,694
| 5.020505
| 0.225552
| 0.03016
| 0.020421
| 0.028275
| 0.404964
| 0.340245
| 0.340245
| 0.278668
| 0.24568
| 0.24568
| 0
| 0.013248
| 0.284159
| 5,694
| 182
| 132
| 31.285714
| 0.767664
| 0.053565
| 0
| 0.457746
| 0
| 0
| 0.050813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161972
| false
| 0
| 0.042254
| 0.06338
| 0.457746
| 0.007042
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e36c689a2b24e54549cda9f00830211a35aefafa
| 5,141
|
py
|
Python
|
source.py
|
Sakshisingh05/ClockChian
|
21ce1005c83b003a9fc62203d03c50b3e8f70793
|
[
"MIT"
] | null | null | null |
source.py
|
Sakshisingh05/ClockChian
|
21ce1005c83b003a9fc62203d03c50b3e8f70793
|
[
"MIT"
] | null | null | null |
source.py
|
Sakshisingh05/ClockChian
|
21ce1005c83b003a9fc62203d03c50b3e8f70793
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import render_template, redirect, url_for
from flask import request
import blockChain
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
    # print(request.method)
    if request.method == 'POST':
        text = request.form['text']
        if len(text) < 1:
            return redirect(url_for('index'))
        try:
            make_proof = request.form['make_proof']
        except Exception:
            make_proof = False
        blockChain.write_block(text, make_proof)
        return redirect(url_for('index'))
    return render_template('index.html')

@app.route('/check', methods=['POST'])
def integrity():
    results = blockChain.check_blocks_integrity()
    if request.method == 'POST':
        return render_template('index.html', results=results)
    return render_template('index.html')

@app.route('/mining', methods=['POST'])
def mining():
    if request.method == 'POST':
        max_index = int(blockChain.get_next_block())
        for i in range(2, max_index):
            blockChain.get_POW(i)
        return render_template('index.html', querry=max_index)
    return render_template('index.html')

if __name__ == '__main__':
    app.run(debug=True)
import hashlib
import json
import os
from time import time

BLOCKCHAIN_DIR = os.curdir + '/blocks/'

def check_blocks_integrity():
    result = list()
    cur_proof = -1
    for i in range(2, int(get_next_block())):
        prev_index = str(i - 1)
        cur_index = str(i)
        tmp = {'block': '', 'result': '', 'proof': ''}
        try:
            file_dict = json.load(open(BLOCKCHAIN_DIR + cur_index + '.json'))
            cur_hash = file_dict['prev_hash']
            cur_proof = file_dict['proof']
        except Exception as e:
            print(e)
        try:
            prev_hash = hashlib.sha256(open(BLOCKCHAIN_DIR + prev_index + '.json', 'rb').read()).hexdigest()
        except Exception as e:
            print(e)
        tmp['block'] = prev_index
        tmp['proof'] = cur_proof
        if cur_hash == prev_hash:
            tmp['result'] = 'ok'
        else:
            tmp['result'] = 'error'
        result.append(tmp)
    return result

def check_block(index):
    cur_index = str(index)
    prev_index = str(int(index) - 1)
    cur_proof = -1
    cur_hash = 0
    prev_hash = 0
    tmp = {'block': '', 'result': '', 'proof': ''}
    try:
        file_dict = json.load(open(BLOCKCHAIN_DIR + cur_index + '.json'))
        cur_hash = file_dict['prev_hash']
        cur_proof = file_dict['proof']
    except Exception as e:
        print(e)
    try:
        prev_hash = hashlib.sha256(open(BLOCKCHAIN_DIR + prev_index + '.json', 'rb').read()).hexdigest()
    except Exception as e:
        print(e)
    tmp['block'] = prev_index
    tmp['proof'] = cur_proof
    if cur_hash == prev_hash:
        tmp['result'] = 'ok'
    else:
        tmp['result'] = 'error'
    return tmp

def get_hash(file_name):
    file_name = str(file_name)
    if not file_name.endswith('.json'):
        file_name += '.json'
    try:
        with open(BLOCKCHAIN_DIR + file_name, 'rb') as file:
            return hashlib.sha256(file.read()).hexdigest()
    except Exception as e:
        print('File "' + file_name + '" does not exist!\n', e)

def get_next_block():
    files = os.listdir(BLOCKCHAIN_DIR)
    index_list = [int(file.split('.')[0]) for file in files]
    cur_index = sorted(index_list)[-1]
    next_index = cur_index + 1
    return str(next_index)

def is_valid_proof(last_proof, proof, difficulty):
    guess = f'{last_proof}{proof}'.encode()
    guess_hash = hashlib.sha256(guess).hexdigest()
    return guess_hash[:difficulty] == '0' * difficulty
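# Added note: with difficulty=1 the hash must lead with a single '0' hex digit,
# so on average 16 candidate proofs are tried; each extra difficulty level
# multiplies the expected work by 16, since one more uniform hex digit must be '0'.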
def get_POW(file_name, difficulty=1):
    # POW - proof of work
    file_name = str(file_name)
    if file_name.endswith('.json'):
        file_name = int(file_name.split('.')[0])
    else:
        file_name = int(file_name)
    last_proof = json.load(open(BLOCKCHAIN_DIR + str(file_name - 1) + '.json'))['proof']
    proof = 0
    while is_valid_proof(last_proof, proof, difficulty) is False:
        proof += 1
    cur_block = json.load(open(BLOCKCHAIN_DIR + str(file_name) + '.json'))
    cur_block['proof'] = proof
    cur_block['prev_hash'] = get_hash(str(file_name - 1))
    with open(BLOCKCHAIN_DIR + str(file_name) + '.json', 'w') as file:
        json.dump(cur_block, file, indent=4, ensure_ascii=False)

def write_block(text, make_proof=False):
    cur_index = get_next_block()
    prev_index = str(int(cur_index) - 1)
    prev_block_hash = get_hash(prev_index)
    data = {'text': text,
            'prev_hash': prev_block_hash,
            'timestamp': time(),
            'proof': -1,
            'index': cur_index
            }
    with open(BLOCKCHAIN_DIR + cur_index + '.json', 'w') as file:
        json.dump(data, file, indent=4, ensure_ascii=False)
    if make_proof is True:
        get_POW(str(cur_index))
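# Added note: a freshly written block file, e.g. blocks/5.json, ends up shaped
# roughly like (illustrative values):
#   {"text": "...", "prev_hash": "<sha256 of blocks/4.json>",
#    "timestamp": 1650000000.0, "proof": -1, "index": "5"}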
if __name__ == '__main__':
    # for i in range(10):
    #     write_block(str(i), True)
    for i in range(2, 10):
        print(check_block(str(i)))
    print(check_blocks_integrity())
| 29.545977
| 108
| 0.606497
| 682
| 5,141
| 4.334311
| 0.16129
| 0.051421
| 0.051759
| 0.042287
| 0.479364
| 0.406631
| 0.305819
| 0.23207
| 0.207713
| 0.207713
| 0
| 0.010419
| 0.253258
| 5,141
| 173
| 109
| 29.716763
| 0.759573
| 0.016923
| 0
| 0.371429
| 0
| 0
| 0.082591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.057143
| 0
| 0.214286
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e36e0bc7f72121825603a719d4feff88206f860b
| 5,668
|
py
|
Python
|
download_pinterest_images.py
|
BrunoKrinski/pinterest_download_tools
|
c804f83bc97c418ea44f1d179ad9864e90631fe5
|
[
"MIT"
] | 1
|
2022-03-07T04:38:26.000Z
|
2022-03-07T04:38:26.000Z
|
download_pinterest_images.py
|
BrunoKrinski/pinterest_download_tools
|
c804f83bc97c418ea44f1d179ad9864e90631fe5
|
[
"MIT"
] | null | null | null |
download_pinterest_images.py
|
BrunoKrinski/pinterest_download_tools
|
c804f83bc97c418ea44f1d179ad9864e90631fe5
|
[
"MIT"
] | null | null | null |
import os
import wget
import time
import argparse
import subprocess
import geckodriver_autoinstaller
import chromedriver_autoinstaller
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import FirefoxOptions
from selenium.webdriver import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium_stealth import stealth

#def execute_with_retry(method, max_attempts):
#    e = None
#    for i in range(0, max_attempts):
#        try:
#            return method()
#        except Exception as e:
#            print(e)
#            time.sleep(1)
#    if e is not None:
#        raise e
def download_images(urls, dpath):
    urls = list(set(urls))
    print('\nDownloading images...')
    log_file.write('Downloading images...\n')
    icont = 0
    for url in urls:
        try:
            wget.download(url, out=dpath)
            icont += 1
        except Exception:
            print('\nCould not download the image: ' + url)
            images_err.write('Could not download the image: ' + url + '\n')
    #os.system("rm *\(1\)*")
    #os.system("rm images/*\(1\)*")
    #os.system("Get-ChildItem -recurse | Where-Object {$_.Name -match 'images/ \(1\)'} | Remove-Item")
    # Raw string so the regex escapes reach PowerShell intact.
    subprocess.run(["powershell", "-Command",
                    r"Get-ChildItem -recurse -Path images | Where-Object {$_.Name -match '\(1\)'} | Remove-Item"])

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, dest='user', action='store',
                        required=True, help='Windows or Linux user.')
    parser.add_argument('--link', type=str, dest='link', action='store',
                        help='Url to a pinterest folder.')
    parser.add_argument('--list', type=str, dest='url_list', action='store',
                        help='Path to a txt file with a list of urls.')
    return parser.parse_args()
if __name__ == '__main__':
    args = get_args()
    user = args.user
    link = args.link
    url_list = args.url_list
    if link is None:
        if url_list is None:
            print('Please enter an url or an url file!')
            exit()
        links = open(url_list, 'r').read().splitlines()
    else:
        links = [link]
    log_file = open('log.txt', 'w')
    images_err = open('images_err.txt', 'w')
    #geckodriver_autoinstaller.install()
    chromedriver_autoinstaller.install()
    options = webdriver.ChromeOptions()
    options.add_argument("--start-maximized")
    options.add_argument("--user-data-dir=C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(user))
    driver = webdriver.Chrome(options=options)
    images_folder = 'images'
    print('Creating folder ' + images_folder + '...!')
    log_file.write('Creating folder ' + images_folder + '...!\n')
    os.makedirs(images_folder, exist_ok=True)
    num_links = len(links)
    cont = 0
    for link in links:
        dpath = 'images/' + str(cont).zfill(4)
        os.mkdir(dpath)
        print('\nDownloading ' + str(cont) + '/' + str(num_links) + '...')
        log_file.write('Downloading ' + str(cont) + '/' + \
                       str(num_links) + '...\n')
        cont += 1
        print('Accessing pinterest link: ' + link)
        log_file.write('Accessing pinterest link: ' + link + '\n')
        try:
            driver.get(link)
            print('Link successfully accessed!')
            log_file.write('Link successfully accessed!\n')
        except TimeoutException as e:
            print('Could not access the link: ' + link)
            log_file.write('Could not access the link: ' + link + '\n')
            #exit()
        print('Waiting for page load...')
        log_file.write('Waiting for page load...\n')
        time.sleep(10)
        last_height = driver.execute_script("return document.body.scrollHeight")
        urls = []
        len_urls = 0
        change_times = 0
        scroll_times = 0
        print('Searching images... It can take a long time!')
        log_file.write('Searching images... It can take a long time!\n')
        cont_images = 0
        while True:
            # Selenium 4 removed find_elements_by_tag_name; By.TAG_NAME is the
            # current equivalent (the original used the Selenium 3 helper).
            link_tags = driver.find_elements(By.TAG_NAME, 'img')
            for tag in link_tags:
                try:
                    url = tag.get_attribute('srcset')
                    url = url.split(' ')
                    if len(url) == 8:
                        url = url[6]
                        urls.append(url)
                except Exception:
                    continue
            driver.execute_script("window.scrollBy(0, 50);")
            scroll_times += 1
            if scroll_times == 50:
                cont_images += len(urls)
                download_images(urls, dpath)
                urls = []
                new_height = driver.execute_script("return document.body.scrollHeight")
                if new_height == last_height or cont_images > 20000:
                    break
                else:
                    last_height = new_height
                    scroll_times = 0
    log_file.close()
    images_err.close()
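# Example invocation (illustrative; the Chrome profile path above assumes Windows):
#   python download_pinterest_images.py --user Alice --link https://www.pinterest.com/someone/some-board/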
| 35.873418
| 114
| 0.569689
| 636
| 5,668
| 4.946541
| 0.308176
| 0.049587
| 0.060076
| 0.034329
| 0.211697
| 0.158932
| 0.143039
| 0.109981
| 0.054037
| 0.054037
| 0
| 0.007965
| 0.313338
| 5,668
| 157
| 115
| 36.101911
| 0.80036
| 0.07657
| 0
| 0.141667
| 0
| 0.016667
| 0.197155
| 0.023508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.166667
| 0
| 0.191667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e3727e0484521064be92f2b66e6c5b9dd289ef54
| 897
|
py
|
Python
|
kinsumer/helpers.py
|
ungikim/kinsumer
|
01bd9626d985bc3c239b979f0d98094f78cc102f
|
[
"MIT"
] | 5
|
2018-03-09T05:16:38.000Z
|
2021-11-12T11:56:18.000Z
|
kinsumer/helpers.py
|
ungikim/kinsumer
|
01bd9626d985bc3c239b979f0d98094f78cc102f
|
[
"MIT"
] | 2
|
2017-10-16T06:38:28.000Z
|
2017-10-18T08:05:37.000Z
|
kinsumer/helpers.py
|
balancehero/kinsumer
|
01bd9626d985bc3c239b979f0d98094f78cc102f
|
[
"MIT"
] | 1
|
2017-10-18T08:15:28.000Z
|
2017-10-18T08:15:28.000Z
|
""":mod:`kinsumer.helpers` --- Implements various helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from threading import RLock
_missing = object()
class locked_cached_property(object):
    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func
        self.lock = RLock()

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        with self.lock:
            value = obj.__dict__.get(self.__name__, _missing)
            if value is _missing:
                value = self.func(obj)
                obj.__dict__[self.__name__] = value
            return value

def reraise(tp, value, tb=None):
    if value.__traceback__ is not tb:
        raise value.with_traceback(tb)
    raise value
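# Illustrative usage (added, not part of the original module): the wrapped
# method runs once under the lock and the result is cached on the instance,
# so later attribute reads skip the computation.
#
#     class Consumer(object):
#         @locked_cached_property
#         def client(self):
#             return make_client()   # hypothetical expensive constructor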
| 27.181818
| 61
| 0.570792
| 103
| 897
| 4.407767
| 0.38835
| 0.052863
| 0.052863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.276477
| 897
| 32
| 62
| 28.03125
| 0.699538
| 0.124861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.045455
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|