hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
da0cc012c8071ddd102f587a464226bdf7578158 | 1,381 | py | Python | app.py | dhairyaostwal/bankingo | cc148940a9d4ae60d80acdc2e3c90a01a8a99c46 | ["MIT"] | 2 | 2021-12-11T02:32:35.000Z | 2021-12-12T08:42:41.000Z | app.py | dhairyaostwal/bankingo | cc148940a9d4ae60d80acdc2e3c90a01a8a99c46 | ["MIT"] | null | null | null | app.py | dhairyaostwal/bankingo | cc148940a9d4ae60d80acdc2e3c90a01a8a99c46 | ["MIT"] | null | null | null | from flask import Flask, render_template, request
import pickle

app = Flask(__name__)
userInput = []


@app.route("/", methods=["GET", "POST"])
def hello():
    userInput.clear()
    if request.method == "POST":
        variance = request.form.get("variance")
        skewness = request.form.get("skewness")
        curtosis = request.form.get("curtosis")
        entropy = request.form.get("entropy")
        userInput.append(variance)
        userInput.append(skewness)
        userInput.append(curtosis)
        userInput.append(entropy)
        # converting string to float values
        for i in range(len(userInput)):
            userInput[i] = float(userInput[i])
        print("User input: ", userInput)
        # testing our pickle file
        with open('pickleOutput2', 'rb') as f:
            mp = pickle.load(f)
        pickle_test = mp.predict([userInput])
        print("Predicted Output: ", pickle_test)
        if pickle_test[0] == 1:
            return render_template("trueBundle.html")
        else:
            return render_template("falseBundle.html")
    return render_template("index.html")
@app.route("/verified/")
def verified():
return render_template("trueBundle.html")
@app.route("/not-verified/")
def notVerified():
return render_template("falseBundle.html")
if __name__ == '__main__':
app.debug = True
app.run() | 26.557692 | 54 | 0.620565 | 152 | 1,381 | 5.5 | 0.447368 | 0.100478 | 0.119617 | 0.07177 | 0.165072 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002882 | 0.246198 | 1,381 | 52 | 55 | 26.557692 | 0.800192 | 0.041274 | 0 | 0.108108 | 0 | 0 | 0.145234 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.054054 | 0.054054 | 0.27027 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0d025051a5ed1885fbb8e49bb40af12912744c | 3,210 | py | Python | AlarmTimer.py | amjith/PyAlarmTimer | f664daa42d9ec70fc7ac512ce71868c703e8a011 | ["MIT"] | 2 | 2015-01-13T00:36:29.000Z | 2015-04-12T19:17:32.000Z | AlarmTimer.py | amjith/PyAlarmTimer | f664daa42d9ec70fc7ac512ce71868c703e8a011 | ["MIT"] | 1 | 2015-01-12T23:02:28.000Z | 2015-01-13T00:36:26.000Z | AlarmTimer.py | amjith/PyAlarmTimer | f664daa42d9ec70fc7ac512ce71868c703e8a011 | ["MIT"] | null | null | null | import sys
from PyQt4 import QtCore, QtGui
from itertools import cycle
from Resources.LcdNumber_ui import Ui_Form


class AlarmTimer(QtGui.QMainWindow):
    def __init__(self, timer_values, parent=None):
        QtGui.QWidget.__init__(self, parent)
        QtGui.QMainWindow.__init__(self, None, QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        # Initialize member variables
        self.color_names = ["Normal", "Yellow"]
        self.color_idx = 1
        self.updateTimers(timer_values)
        self.cur_timer = self.timer_iter.next()  # Current timer value
        self.snooze_time = 1 * 60
        self.show()
        self.oneSecondCounter = 0
        self.timerPause = False
        # Start a timer for 250ms and call showTimer()
        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.showTimer)
        timer.start(250)

    def showTimer(self):
        if self.timerPause:
            return
        text = "%d:%02d" % (self.cur_timer / 60, self.cur_timer % 60)
        self.ui.lcdNumber.display(text)
        if (self.cur_timer == 0):
            self.color_idx = 3 - self.color_idx
            self.show()
            self.setStyleSheet("QWidget { background-color: %s }" % self.color_names[self.color_idx - 1])
        elif self.oneSecondCounter == 3:
            self.cur_timer -= 1
            self.oneSecondCounter = 0
        else:
            self.oneSecondCounter += 1

    def updateTimers(self, timer_list):
        self.alarm_times = timer_list
        self.timer_iter = cycle(self.alarm_times)  # An iterator that cycles through the list

    def pauseTimer(self):
        self.timerPause = not self.timerPause

    def resetTimer(self):  # Reset the timer back to the head of the list
        self.timer_iter = cycle(self.alarm_times)
        self.cur_timer = self.timer_iter.next()

    def mouseReleaseEvent(self, event):
        button = event.button()
        if button == 2:
            self.hide()
            if (self.cur_timer == 0):
                self.cur_timer = self.snooze_time  # Start the timer with snooze value if the cur_timer has expired
        elif button == 1:  # left click
            if (self.cur_timer == 0):  # blinking timer should be closed on a left click
                self.cur_timer = self.timer_iter.next()
                self.setStyleSheet("QWidget { background-color: Normal }")

    def mousePressEvent(self, event):
        button = event.button()
        if button == 1:
            self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()

    def mouseMoveEvent(self, event):
        if event.buttons() != QtCore.Qt.LeftButton:  # not left click
            return
        self.move(event.globalPos() - self.dragPosition)


def Str2Num(str_list):
    num = []
    for str in str_list:
        try:
            num.append(int(str))
        except ValueError:
            num.append(float(str))
    return num
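
# Example invocation (assumed from the argv parsing below; not part of the original file):
#   python AlarmTimer.py 1500 300   -> cycles between a 25-minute and a 5-minute countdown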
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
timerList = Str2Num(sys.argv[1:])
myapp = AlarmTimer(timerList)
myapp.show()
sys.exit(app.exec_())
| 33.092784 | 121 | 0.610592 | 385 | 3,210 | 4.942857 | 0.348052 | 0.046243 | 0.063058 | 0.033631 | 0.20494 | 0.139254 | 0.119285 | 0.037835 | 0 | 0 | 0 | 0.014442 | 0.288162 | 3,210 | 96 | 122 | 33.4375 | 0.818381 | 0.098131 | 0 | 0.213333 | 0 | 0 | 0.03294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.053333 | 0 | 0.226667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0dfc1a5ddc1f3fd9cff38b3e12d87c2cfff865 | 3,200 | py | Python | backend/handlers/graphql/resolvers/quota.py | al-indigo/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | ["Apache-2.0"] | null | null | null | backend/handlers/graphql/resolvers/quota.py | al-indigo/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | ["Apache-2.0"] | 8 | 2017-10-11T13:26:10.000Z | 2021-12-13T20:27:52.000Z | backend/handlers/graphql/resolvers/quota.py | ispras/vmemperor | 80eb6d47d839a4736eb6f9d2fcfad35f0a7b3bb1 | ["Apache-2.0"] | 4 | 2017-07-27T12:25:42.000Z | 2018-01-28T02:06:26.000Z | from graphql import ResolveInfo
from rethinkdb.errors import ReqlNonExistenceError

from handlers.graphql.graphql_handler import ContextProtocol
from handlers.graphql.types.pool import Quota
from handlers.graphql.utils.query import resolve_from_root
import constants.re as re
from utils.quota import check_vdi_size, check_memory, check_vcpu_count, check_vm_count, get_used_vdi_size, \
    get_used_memory, get_used_vcpu_count, get_used_vm_count
from utils.user import user_entities, get_user_object


def resolve_quotas(root, info, **args):
    from xenadapter import Pool
    ctx: ContextProtocol = info.context
    if ctx.user_authenticator.is_admin():
        return re.db.table(Pool.quotas_table_name).coerce_to('array').run()
    else:
        return re.db.table(Pool.quotas_table_name).get_all(*user_entities(ctx.user_authenticator)).coerce_to('array').run()


def get_item(user):
    from xenadapter import Pool
    result = re.db.table(Pool.quotas_table_name).get(user).run()
    if result:
        return result
    else:
        user_object = get_user_object(user)
        if user_object:
            result = {key: None for key in Quota._meta.fields.keys()}
            result.update({
                "user_id": user
            })
            return result
        else:
            raise ValueError(f"No such user: {user}")


def resolve_quota(root, info, user):
    ctx: ContextProtocol = info.context
    if not ctx.user_authenticator.is_admin():
        if user not in user_entities(ctx.user_authenticator):
            raise ValueError(f"Access denied: Not a member of an entity: {user}")
    return get_item(user)


def resolve_quota_left(root, info: ResolveInfo, user):
    ctx: ContextProtocol = info.context
    if not ctx.user_authenticator.is_admin() and user not in user_entities(ctx.user_authenticator):
        raise ValueError(f"Access denied: Not a member of an entity: {user}")
    fields = [item.name.value for item in info.field_asts[0].selection_set.selections]
    result = {}
    if 'vdiSize' in fields:
        result['vdi_size'] = check_vdi_size(user)
    if 'memory' in fields:
        result['memory'] = check_memory(user)
    if 'vcpuCount' in fields:
        result['vcpu_count'] = check_vcpu_count(user)
    if 'vmCount' in fields:
        result['vm_count'] = check_vm_count(user)
    if 'user' in fields:
        result['user_id'] = user
    return result
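
# Illustrative result shape (assumed; keys mirror the checks above, values are placeholders):
#   resolve_quota_left(root, info, user='alice') could yield
#   {'vdi_size': ..., 'memory': ..., 'vcpu_count': ..., 'vm_count': ..., 'user_id': 'alice'}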

def resolve_quota_usage(root, info: ResolveInfo, user):
    ctx: ContextProtocol = info.context
    if not ctx.user_authenticator.is_admin() and user not in user_entities(ctx.user_authenticator):
        raise ValueError(f"Access denied: Not a member of an entity: {user}")
    fields = [item.name.value for item in info.field_asts[0].selection_set.selections]
    result = {}
    if 'vdiSize' in fields:
        result['vdi_size'] = get_used_vdi_size(user)
    if 'memory' in fields:
        result['memory'] = get_used_memory(user)
    if 'vcpuCount' in fields:
        result['vcpu_count'] = get_used_vcpu_count(user)
    if 'vmCount' in fields:
        result['vm_count'] = get_used_vm_count(user)
    if 'user' in fields:
        result['user_id'] = user
    return result
| 32 | 123 | 0.690938 | 446 | 3,200 | 4.748879 | 0.201794 | 0.037771 | 0.0661 | 0.054769 | 0.621341 | 0.556185 | 0.556185 | 0.556185 | 0.508026 | 0.429651 | 0 | 0.000792 | 0.210625 | 3,200 | 99 | 124 | 32.323232 | 0.837688 | 0 | 0 | 0.478873 | 0 | 0 | 0.101753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070423 | false | 0 | 0.140845 | 0 | 0.309859 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0e334dd350b538cbe6369f8de20266f08cd7ab | 23,670 | py | Python | webapp/creators/parse_eml.py | PASTAplus/umbra | 25f179801ab86d6506759b19849de1f7a8bf9e8d | ["Apache-2.0"] | null | null | null | webapp/creators/parse_eml.py | PASTAplus/umbra | 25f179801ab86d6506759b19849de1f7a8bf9e8d | ["Apache-2.0"] | null | null | null | webapp/creators/parse_eml.py | PASTAplus/umbra | 25f179801ab86d6506759b19849de1f7a8bf9e8d | ["Apache-2.0"] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
:Mod: propagate_names

:Synopsis: Parse EML files to collect information on the responsible parties, creating RESPONSIBLE_PARTIES_TEXT_FILE.

:Author:
    ide

:Created:
    6/1/21
"""

from enum import Enum, auto
import glob
import os
import pickle

import daiquiri
from flask import (
    Flask, Blueprint, jsonify, request, current_app
)
from recordclass import recordclass

from webapp.config import Config
import webapp.creators.db as db
import webapp.creators.nlp as nlp

from metapype.eml import names
from metapype.model.metapype_io import from_xml
from metapype.model.node import Node

logger = daiquiri.getLogger(Config.LOG_FILE)


def log_info(msg):
    app = Flask(__name__)
    with app.app_context():
        current_app.logger.info(msg)


def log_error(msg):
    app = Flask(__name__)
    with app.app_context():
        current_app.logger.error(msg)

class EMLTextComponents(Enum):
    DATASET_TITLE = auto(),
    DATASET_ABSTRACT = auto(),
    DATASET_KEYWORDS = auto(),
    DATATABLE_DESCRIPTIONS = auto(),
    DATASET_GEO_DESCRIPTIONS = auto(),
    METHOD_STEP_DESCRIPTIONS = auto(),
    PROJECT_TITLES = auto(),
    PROJECT_ABSTRACTS = auto(),
    RELATED_PROJECT_TITLES = auto(),
    RELATED_PROJECT_ABSTRACTS = auto()


ProjectText = recordclass(
    'ProjectText',
    'project_title project_abstract'
)

EMLText = recordclass(
    'EMLText',
    'dataset_title dataset_abstract dataset_keywords datatable_descriptions dataset_geographic_descriptions method_step_descriptions projects related_projects'
)

eml_text_by_pid = {}

def xml_to_json(filepath):
    cwd = os.getcwd()
    with open(filepath, 'r') as fp:
        xml = fp.read()
    try:
        return from_xml(xml)
    except Exception as err:
        print(f'Metapype failed to convert xml to json for file {filepath}. Error:{err}')
        return None


def parse_section(node):
    text = []
    if node.content:
        text.append(node.content)
        return text
    title = node.find_child(names.TITLE)
    if title and title.content:
        text.append(title.content)
    section = node.find_child(names.SECTION)
    if section:
        text.extend(parse_section(section))
        return text
    para = node.find_child(names.PARA)
    if para:
        text.extend(parse_para(para))
        return text
    return text


def parse_para(node):
    text = []
    if node.content:
        text.append(node.content)
        return text
    value = node.find_child(names.VALUE)
    if value and value.content:
        return [value.content]
    return text


def parse_text_type(node):
    text = []
    if node.content:
        text.append(node.content)
        return text
    section = node.find_child(names.SECTION)
    if section:
        return parse_section(section)
    para = node.find_child(names.PARA)
    if para:
        return parse_para(para)
    return text

def get_existing_eml_files():
    filelist = glob.glob(f'{Config.EML_FILES_PATH}/*.xml')
    return [os.path.basename(x) for x in filelist]


def get_dataset_title(eml_node):
    title_node = eml_node.find_single_node_by_path([names.DATASET, names.TITLE, names.VALUE])
    if not title_node:
        title_node = eml_node.find_single_node_by_path([names.DATASET, names.TITLE])
    return [title_node.content]


def get_dataset_abstract(eml_node):
    abstract_node = eml_node.find_single_node_by_path([names.DATASET, names.ABSTRACT, names.PARA])
    if not abstract_node:
        abstract_node = eml_node.find_single_node_by_path([names.DATASET, names.ABSTRACT, names.SECTION, names.PARA])
    if abstract_node:
        return parse_text_type(abstract_node)
    else:
        return []


def harvest_projects(eml_node):
    project_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT])
    project_text = get_project_text(project_nodes)
    related_project_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.RELATED_PROJECT])
    related_project_text = get_project_text(related_project_nodes)
    return project_text, related_project_text


def get_project_text(project_nodes):
    project_text = []
    for project_node in project_nodes:
        title = ''
        abstract = ''
        title_node = project_node.find_child(names.TITLE)
        if title_node:
            title = [title_node.content]
        abstract_node = project_node.find_child(names.ABSTRACT)
        if abstract_node:
            abstract = parse_text_type(abstract_node)
        project_text.append(ProjectText(
            project_title=title,
            project_abstract=abstract))
    return project_text


def get_project_titles(eml_node):
    project_titles = []
    title_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.TITLE])
    for title_node in title_nodes:
        if title_node.content:
            project_titles.append([title_node.content])
    return project_titles


def get_project_abstracts(eml_node):
    project_abstracts = []
    abstract_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.ABSTRACT, names.PARA])
    for abstract_node in abstract_nodes:
        project_abstracts.extend(parse_text_type(abstract_node))
    return project_abstracts


def get_keywords(eml_node):
    kw = []
    keyword_nodes = []
    eml_node.find_all_descendants(names.KEYWORD, keyword_nodes)
    for keyword_node in keyword_nodes:
        kw.append(keyword_node.content)
    return kw

def get_all_ranks(eml_node, rank):
    rank_nodes = []
    eml_node.find_all_descendants(names.TAXONRANKNAME, rank_nodes)
    found = set()
    for rank_node in rank_nodes:
        if rank_node.content.lower() == rank:
            parent = rank_node.parent
            rank_value = parent.find_child(names.TAXONRANKVALUE).content
            found.add(rank_value)
    return sorted(found)


def get_all_genera(eml_node):
    return get_all_ranks(eml_node, 'genus')


def get_all_species(eml_node):
    return get_all_ranks(eml_node, 'species')


def get_children(parent_node, child_name):
    children = []
    child_nodes = parent_node.find_all_children(child_name)
    for child_node in child_nodes:
        if child_node.content:
            children.append((child_name, child_node.content))
    return children


def get_person(rp_node):
    person = []
    individual_name_node = rp_node.find_child(names.INDIVIDUALNAME)
    if individual_name_node:
        person.extend(get_children(individual_name_node, names.SALUTATION))
        person.extend(get_children(individual_name_node, names.GIVENNAME))
        person.extend(get_children(individual_name_node, names.SURNAME))
    person.extend(get_children(rp_node, names.ORGANIZATIONNAME))
    person.extend(get_children(rp_node, names.POSITIONNAME))
    return person


def get_address(rp_node):
    address = []
    address_node = rp_node.find_child(names.ADDRESS)
    if address_node:
        address.extend(get_children(address_node, names.DELIVERYPOINT))
        address.extend(get_children(address_node, names.CITY))
        address.extend(get_children(address_node, names.ADMINISTRATIVEAREA))
        address.extend(get_children(address_node, names.POSTALCODE))
        address.extend(get_children(address_node, names.COUNTRY))
    return address


def get_responsible_party(rp_node):
    party = []
    party.extend(get_person(rp_node))
    party.extend(get_address(rp_node))
    party.extend(get_children(rp_node, names.PHONE))
    party.extend(get_children(rp_node, names.ELECTRONICMAILADDRESS))
    party.extend(get_children(rp_node, names.ONLINEURL))
    party.extend(get_children(rp_node, names.USERID))
    return party


def get_responsible_parties(pid, eml_node, path):
    rp_nodes = eml_node.find_all_nodes_by_path(path)
    parties = []
    for rp_node in rp_nodes:
        party = get_responsible_party(rp_node)
        parties.append((pid, path[-1], party))
    return parties


def get_creators(pid, eml_node):
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.CREATOR])


def get_contacts(pid, eml_node):
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.CONTACT])


def get_associated_parties(pid, eml_node):
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.ASSOCIATEDPARTY])


def get_metadata_providers(pid, eml_node):
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.METADATAPROVIDER])


def get_project_personnel(pid, eml_node):
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.PROJECT, names.PERSONNEL])


def get_related_project_personnel(pid, eml_node):
    return get_responsible_parties(pid, eml_node, [names.DATASET, names.PROJECT, names.RELATED_PROJECT, names.PERSONNEL])


def get_all_responsible_parties(pid, eml_node):
    responsible_parties = []
    responsible_parties.extend(get_creators(pid, eml_node))
    responsible_parties.extend(get_contacts(pid, eml_node))
    responsible_parties.extend(get_associated_parties(pid, eml_node))
    responsible_parties.extend(get_metadata_providers(pid, eml_node))
    responsible_parties.extend(get_project_personnel(pid, eml_node))
    responsible_parties.extend(get_related_project_personnel(pid, eml_node))
    return responsible_parties
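
# Illustrative tuple produced above (values invented; the shape follows get_responsible_parties):
#   ('knb-lter-xyz.1.1', 'creator', [('givenName', 'Jane'), ('surName', 'Doe'), ('city', 'Madison')])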

def get_data_table_descriptions(eml_node):
    data_table_descriptions = []
    description_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.DATATABLE, names.ENTITYDESCRIPTION])
    for description_node in description_nodes:
        data_table_descriptions.extend(parse_text_type(description_node))
    return data_table_descriptions


def get_method_step_descriptions(eml_node):
    method_step_descriptions = []
    description_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.METHODS,
                                                         names.METHODSTEP, names.DESCRIPTION])
    for description_node in description_nodes:
        method_step_descriptions.extend(parse_text_type(description_node))
    return method_step_descriptions


def get_all_titles_and_abstracts(eml_node):
    dataset_title = get_dataset_title(eml_node)
    dataset_abstract = get_dataset_abstract(eml_node)
    project_titles = []
    project_abstracts = []
    all_text = dataset_title[0] + " "
    if dataset_abstract:
        all_text += ' '.join(dataset_abstract)
    for title in project_titles:
        all_text += title[0] + " "
    for abstract in project_abstracts:
        all_text += ' '.join(abstract)
    return dataset_title, dataset_abstract, project_titles, project_abstracts, all_text


def get_dataset_geographic_descriptions(eml_node):
    geographic_descriptions = []
    geographic_description_nodes = eml_node.find_all_nodes_by_path([names.DATASET,
                                                                    names.COVERAGE,
                                                                    names.GEOGRAPHICCOVERAGE,
                                                                    names.GEOGRAPHICDESCRIPTION])
    for geographic_description_node in geographic_description_nodes:
        description = geographic_description_node.content
        if description:
            geographic_descriptions.append(description)
    return geographic_descriptions


def parse_eml_file(filename):
    pid = filename[:-4]
    filepath = f'{Config.EML_FILES_PATH}/{filename}'
    eml_node = xml_to_json(filepath)
    return pid, eml_node


def collect_responsible_parties(filename, added_package_ids=None, removed_package_ids=None, trace=False):
    if added_package_ids == [] and removed_package_ids == []:
        return
    responsible_parties = db.parse_responsible_parties_file(filename)
    db.prune_pids(responsible_parties, removed_package_ids)
    # write the existing responsible parties, minus the ones to be removed
    output_filename = f'{Config.EML_FILES_PATH}/{filename}'
    with open(output_filename, 'w') as output_file:
        for _, val in responsible_parties.items():
            for line in val:
                output_file.write(line)
                output_file.write('\n')
    # now, append the new responsible parties
    with open(output_filename, 'a') as output_file:
        filelist = get_existing_eml_files()
        if trace:
            log_info(f'len(filelist)={len(filelist)}')
        for index, filename in enumerate(filelist):
            pid = os.path.splitext(filename)[0]
            if added_package_ids and pid not in added_package_ids:
                continue
            pid, eml_node = parse_eml_file(filename)
            if eml_node:
                if trace:
                    log_info(f'  Adding {index} - {pid}')
                responsible_parties = get_all_responsible_parties(pid, eml_node)
                for responsible_party in responsible_parties:
                    output_file.write(str(responsible_party))
                    output_file.write('\n')
                output_file.flush()
                # We're done with the JSON model. Delete it so we don't run out of memory.
                Node.delete_node_instance(eml_node.id, True)

def collect_titles_and_abstracts(output_filename):
    with open(output_filename, 'w') as output_file:
        filelist = get_existing_eml_files()
        for index, filename in enumerate(filelist):
            # if filename.startswith('edi.'):  # TEMP
            pid = filename[:-4]
            filepath = f'{Config.EML_FILES_PATH}/{filename}'
            eml_node = xml_to_json(filepath)
            if not eml_node:
                continue
            dataset_title, dataset_abstract, project_titles, project_abstracts, all_text = get_all_titles_and_abstracts(eml_node)
            all_text = all_text.replace('\n', '')
            output_file.write(f'{pid}\n')
            output_file.write(f'{all_text}\n')


def collect_method_step_descriptions(output_filename):
    with open(output_filename, 'w') as output_file:
        filelist = get_existing_eml_files()
        for index, filename in enumerate(filelist):
            # if filename.startswith('edi.'):  # TEMP
            pid = filename[:-4]
            filepath = f'{Config.EML_FILES_PATH}/{filename}'
            eml_node = xml_to_json(filepath)
            if not eml_node:
                continue
            text = get_data_table_descriptions(eml_node)
            text = get_method_step_descriptions(eml_node)
            # all_text = all_text.replace('\n', '')
            # output_file.write(f'{pid}\n')
            # output_file.write(f'{all_text}\n')


def collect_text_for_scope(scope):
    text = []
    filelist = get_existing_eml_files()
    for index, filename in enumerate(filelist):
        if filename.startswith(scope):
            filepath = f'{Config.EML_FILES_PATH}/{filename}'
            eml_node = xml_to_json(filepath)
            if not eml_node:
                continue
            text1 = get_data_table_descriptions(eml_node)
            text2 = []  # get_method_step_descriptions(eml_node)
            *_, text3 = get_all_titles_and_abstracts(eml_node)
            text.append(' '.join(text1) + ' '.join(text2) + text3)
    return ' '.join(text)


def collect_text(pids):
    text = []
    for pid in pids:
        filename = pid + '.xml'
        filepath = f'{Config.EML_FILES_PATH}/{filename}'
        eml_node = xml_to_json(filepath)
        if not eml_node:
            continue
        text1 = []  # get_data_table_descriptions(eml_node)
        text2 = []  # get_method_step_descriptions(eml_node)
        *_, text3 = get_all_titles_and_abstracts(eml_node)
        text.append(' '.join(text1) + ' '.join(text2) + text3)
    return ' '.join(text)

def init_eml_text_by_pid():
    global eml_text_by_pid
    filename = 'eml_text_by_pid.pkl'
    filepath = f'{Config.DATA_FILES_PATH}/{filename}'
    try:
        with open(filepath, 'rb') as pf:
            eml_text_by_pid = pickle.load(pf)
            print(f'Init harvest EML text... count={len(eml_text_by_pid)}')
            return eml_text_by_pid
    except FileNotFoundError:
        pass


def save_eml_text_by_pid():
    global eml_text_by_pid
    filename = 'eml_text_by_pid.pkl'
    filepath = f'{Config.DATA_FILES_PATH}/{filename}'
    with open(filepath, 'wb') as pickle_file:
        pickle.dump(eml_text_by_pid, pickle_file)


def clean_projects(projects):
    cleaned = []
    for project in projects:
        project.project_title = clean_list(project.project_title)
        project.project_abstract = clean_list(project.project_abstract)
        cleaned.append(project)
    return cleaned


def clean_list(l):
    return [nlp.clean(s, remove_digits=True) for s in l]


def harvest_eml_text(pids=None):
    global eml_text_by_pid
    if not pids:
        pids = db.get_all_pids()
    init_eml_text_by_pid()
    count = len(eml_text_by_pid)
    for pid in pids:
        if eml_text_by_pid.get(pid):
            continue
        filename = pid + '.xml'
        filepath = f'{Config.EML_FILES_PATH}/{filename}'
        eml_node = xml_to_json(filepath)
        if not eml_node:
            continue
        dataset_title = get_dataset_title(eml_node)
        dataset_abstract = get_dataset_abstract(eml_node)
        dataset_keywords = get_keywords(eml_node)
        datatable_descriptions = get_data_table_descriptions(eml_node)
        dataset_geographic_descriptions = get_dataset_geographic_descriptions(eml_node)
        method_step_descriptions = get_method_step_descriptions(eml_node)
        projects, related_projects = harvest_projects(eml_node)
        eml_text_by_pid[pid] = EMLText(
            dataset_title=clean_list(dataset_title),
            dataset_abstract=clean_list(dataset_abstract),
            dataset_keywords=clean_list(dataset_keywords),
            datatable_descriptions=clean_list(datatable_descriptions),
            dataset_geographic_descriptions=clean_list(dataset_geographic_descriptions),
            method_step_descriptions=clean_list(method_step_descriptions),
            projects=clean_projects(projects),
            related_projects=clean_projects(related_projects)
        )
        count += 1
        if count % 100 == 0:
            print(f'Saving... count={count}')
            save_eml_text_by_pid()
    save_eml_text_by_pid()

def concat_project_text(projects, related_projects,
                        components=(EMLTextComponents.PROJECT_TITLES,
                                    EMLTextComponents.PROJECT_ABSTRACTS,
                                    EMLTextComponents.RELATED_PROJECT_TITLES,
                                    EMLTextComponents.RELATED_PROJECT_ABSTRACTS)):
    project_text = ''
    for project in projects:
        if EMLTextComponents.PROJECT_TITLES in components:
            project_text += ' '.join(project.project_title)
        if EMLTextComponents.PROJECT_ABSTRACTS in components:
            project_text += ' '.join(project.project_abstract)
    for related_project in related_projects:
        if EMLTextComponents.PROJECT_TITLES in components:
            project_text += ' '.join(related_project.project_title)
        if EMLTextComponents.PROJECT_ABSTRACTS in components:
            project_text += ' '.join(related_project.project_abstract)
    return project_text


def get_eml_text_as_string(pid, components=(EMLTextComponents.DATASET_TITLE,
                                            EMLTextComponents.DATASET_ABSTRACT,
                                            EMLTextComponents.DATASET_KEYWORDS,
                                            EMLTextComponents.DATATABLE_DESCRIPTIONS,
                                            EMLTextComponents.PROJECT_TITLES,
                                            EMLTextComponents.PROJECT_ABSTRACTS,
                                            EMLTextComponents.RELATED_PROJECT_TITLES,
                                            EMLTextComponents.RELATED_PROJECT_ABSTRACTS)):
    if not eml_text_by_pid:
        init_eml_text_by_pid()
    eml_string = ''
    eml_text = eml_text_by_pid.get((pid))
    if not eml_text:
        return ''
    if EMLTextComponents.DATASET_TITLE in components:
        eml_string += ' '.join(eml_text.dataset_title)
    if EMLTextComponents.DATASET_ABSTRACT in components:
        eml_string += ' '.join(eml_text.dataset_abstract)
    if EMLTextComponents.DATASET_KEYWORDS in components:
        eml_string += ' '.join(eml_text.dataset_keywords)
    if EMLTextComponents.DATATABLE_DESCRIPTIONS in components:
        eml_string += ' '.join(eml_text.datatable_descriptions)
    if EMLTextComponents.DATASET_GEO_DESCRIPTIONS in components:
        eml_string += ' '.join(eml_text.dataset_geographic_descriptions)
    if EMLTextComponents.METHOD_STEP_DESCRIPTIONS in components:
        eml_string += ' '.join(eml_text.method_step_descriptions)
    eml_string += concat_project_text(eml_text.projects,
                                      eml_text.related_projects,
                                      components)
    return eml_string


def get_eml_text_as_string_by_name(givenname, surname,
                                   components=(EMLTextComponents.DATASET_TITLE,
                                               EMLTextComponents.DATASET_ABSTRACT,
                                               EMLTextComponents.DATASET_KEYWORDS,
                                               EMLTextComponents.DATATABLE_DESCRIPTIONS,
                                               EMLTextComponents.PROJECT_TITLES,
                                               EMLTextComponents.PROJECT_ABSTRACTS,
                                               EMLTextComponents.RELATED_PROJECT_TITLES,
                                               EMLTextComponents.RELATED_PROJECT_ABSTRACTS)):
    if not eml_text_by_pid:
        init_eml_text_by_pid()
    pids = db.get_pids_by_name(givenname, surname)
    eml_string = ''
    for pid in pids:
        eml_string += get_eml_text_as_string(pid, components)
    return eml_string


def get_eml_keywords_by_name(givenname, surname):
    if not eml_text_by_pid:
        init_eml_text_by_pid()
    pids = db.get_pids_by_name(givenname, surname)
    keywords = []
    for pid in pids:
        eml_text = eml_text_by_pid.get((pid))
        if not eml_text:
            continue
        keywords.extend(eml_text.dataset_keywords)
    return keywords

if __name__ == '__main__':
    pass

    # collect_responsible_parties(f'{EML_FILES_PATH}/responsible_parties.txt')

    # harvest_eml_text()
    # raise ValueError
    #
    # from collections import Counter
    # givenname = 'Diana'
    # surname = 'Wall'
    # keywords = get_eml_keywords_by_name(givenname, surname)
    # counter = Counter(keywords)
    # highest = counter.most_common(20)
    #
    # text = get_eml_text_as_string_by_name(givenname, surname)
    # lemmas = nlp.lemmatize(text)
    # counter = Counter(lemmas)
    # highest = counter.most_common(30)

    # pids = db.get_all_pids()
    # harvest_eml_text(pids)

    # for pid in pids:
    #     eml_string = get_eml_text_as_string(pid)

    # text = collect_text_for_scope('knb-lter-sbc')
    # collect_method_step_descriptions('foo.txt')

    # filename = 'knb-lter-fce.1143.2.xml'
    # pid, eml_node = parse_eml_file(filename)
    # if eml_node:
    #     text1 = get_data_table_descriptions(eml_node)
    #     text2 = get_method_step_descriptions(eml_node)
    #     *_, text3 = get_all_titles_and_abstracts(eml_node)
    #     all_text = ' '.join(text1) + ' '.join(text1) + text3

    # collect_responsible_parties(f'{EML_FILES_PATH}/responsible_parties.txt')
    # collect_titles_and_abstracts(f'{EML_FILES_PATH}/titles_and_abstracts.txt')
| 35.170877 | 159 | 0.673722 | 2,855 | 23,670 | 5.239229 | 0.100525 | 0.041182 | 0.015644 | 0.020858 | 0.574007 | 0.518853 | 0.458484 | 0.391496 | 0.334336 | 0.324041 | 0 | 0.00245 | 0.241318 | 23,670 | 672 | 160 | 35.223214 | 0.830493 | 0.075792 | 0 | 0.323045 | 0 | 0 | 0.039316 | 0.021583 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098765 | false | 0.004115 | 0.026749 | 0.018519 | 0.257202 | 0.00823 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da0fb1e76df7bb263d04fdeb069e451fb04e547a | 2,756 | py | Python | pp/components/ring_single.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | ["MIT"] | 8 | 2020-08-25T11:25:18.000Z | 2022-03-27T11:32:11.000Z | pp/components/ring_single.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | ["MIT"] | null | null | null | pp/components/ring_single.py | flaport/gdsfactory | 1f2e844c1fe27b9c6340e2d51500fd3358fa16e5 | ["MIT"] | 1 | 2022-03-04T07:03:29.000Z | 2022-03-04T07:03:29.000Z | from typing import Callable
from pp.cell import cell
from pp.component import Component
from pp.components.bend_circular import bend_circular
from pp.components.coupler_ring import coupler_ring
from pp.components.waveguide import waveguide as waveguide_function
from pp.config import call_if_func
from pp.drc import assert_on_2nm_grid


@cell
def ring_single(
    wg_width: float = 0.5,
    gap: float = 0.2,
    bend_radius: float = 10.0,
    length_x: float = 4.0,
    length_y: float = 0.001,
    coupler: Callable = coupler_ring,
    waveguide: Callable = waveguide_function,
    bend: Callable = bend_circular,
    pins: bool = False,
) -> Component:
    """Single bus ring made of a ring coupler (cb: bottom)
    connected with two vertical waveguides (wl: left, wr: right),
    two bends (bl, br) and a horizontal waveguide (wt: top)

    Args:
        wg_width: waveguide width
        gap: gap between for coupler
        bend_radius: for the bend and coupler
        length_x: ring coupler length
        length_y: vertical waveguide length
        coupler: ring coupler function
        waveguide: waveguide function
        bend: bend function
        pins: add pins

    .. code::

          bl-wt-br
          |      |
          wl     wr  length_y
          |      |
          --==cb==--  gap

           length_x

    .. plot::
      :include-source:

      import pp

      c = pp.c.ring_single(wg_width=0.5, gap=0.2, length_x=4, length_y=0.1, bend_radius=5)
      pp.plotgds(c)

    """
    bend_radius = float(bend_radius)
    assert_on_2nm_grid(gap)

    coupler = call_if_func(
        coupler, gap=gap, wg_width=wg_width, bend_radius=bend_radius, length_x=length_x
    )
    waveguide_side = call_if_func(waveguide, width=wg_width, length=length_y)
    waveguide_top = call_if_func(waveguide, width=wg_width, length=length_x)
    bend_ref = bend(width=wg_width, radius=bend_radius) if callable(bend) else bend

    c = Component()
    cb = c << coupler
    wl = c << waveguide_side
    wr = c << waveguide_side
    bl = c << bend_ref
    br = c << bend_ref
    wt = c << waveguide_top

    wl.connect(port="E0", destination=cb.ports["N0"])
    bl.connect(port="N0", destination=wl.ports["W0"])
    wt.connect(port="W0", destination=bl.ports["W0"])
    br.connect(port="N0", destination=wt.ports["E0"])
    wr.connect(port="W0", destination=br.ports["W0"])
    wr.connect(port="E0", destination=cb.ports["N1"])  # just for netlist

    c.add_port("E0", port=cb.ports["E0"])
    c.add_port("W0", port=cb.ports["W0"])
    if pins:
        import pp  # needed here: pp is otherwise only imported under __main__

        pp.add_pins_to_references(c)
    return c


if __name__ == "__main__":
    import pp

    c = ring_single()
    cc = pp.add_pins(c)
    # print(c.settings)
    # print(c.get_settings())
    pp.show(cc)
| 27.56 | 90 | 0.645501 | 400 | 2,756 | 4.2575 | 0.25 | 0.032883 | 0.023488 | 0.017616 | 0.086905 | 0.086905 | 0.050499 | 0.050499 | 0.050499 | 0 | 0 | 0.018634 | 0.240566 | 2,756 | 99 | 91 | 27.838384 | 0.795031 | 0.286647 | 0 | 0 | 0 | 0 | 0.021552 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 1 | 0.019608 | false | 0 | 0.176471 | 0 | 0.215686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1152e98be68574744964ed8c665a43ee954229 | 13,297 | py | Python | src/services/db/oracle.py | daesnorey/PPRJ | f826eb194f895d13522f61a51a5100a5cdbead99 | ["Apache-2.0"] | null | null | null | src/services/db/oracle.py | daesnorey/PPRJ | f826eb194f895d13522f61a51a5100a5cdbead99 | ["Apache-2.0"] | null | null | null | src/services/db/oracle.py | daesnorey/PPRJ | f826eb194f895d13522f61a51a5100a5cdbead99 | ["Apache-2.0"] | null | null | null | """oracle.py.
db_connection.py file will contain the connection behaviour
to the database
"""
import traceback
import random
import copy

import cx_Oracle
import json

from src.objects.third import Third
from src.services.db.db_types import DbTypes


class Oracle(object):
    """Oracle class will handle the connection to the database."""

    def __init__(self):
        """Constructor."""
        self.__data_base = None
        self.__cursor = None

    def __open(self, debug=False):
        """Connect to the database."""
        username = 'pre_dnovoa'  # 'PPRJ'
        password = 'w27XYfj5'
        hostname = '127.0.0.1'
        servicename = 'XE'
        port = 1521
        dsn_tns = cx_Oracle.makedsn(hostname, port, servicename)
        if debug is True:
            print(dsn_tns)
        try:
            self.__data_base = cx_Oracle.connect(username, password, dsn_tns)
        except cx_Oracle.DatabaseError as e:
            error, = e.args
            if error.code == 1017:
                print('Please check your credentials.')
                # sys.exit()?
            else:
                print(e)
            # Very important part!
            raise
        # If the database connection succeeded create the cursor
        # we're going to use.
        self.__cursor = self.__data_base.cursor()

    def __close(self):
        if self.__data_base is not None:
            self.__data_base.close()
            self.__data_base = None
            self.__cursor = None

    def get_cursor(self):
        """Get cursor connection."""
        if self.__cursor is None:
            self.__open()
        return self.__cursor

    def execute(self, query, bindvars={}, commit=False, debug=False):
        """Execute query, return cursor."""
        __noramalizate = self.normalize_query(query, bindvars)
        __query = __noramalizate[0]
        __bindvars = __noramalizate[1]
        if debug:
            print(query, bindvars)
            print("*" * 10)
            print(__query, __bindvars)
        response = self.get_cursor().execute(__query, __bindvars)
        if commit is True:
            self.__data_base.commit()
        return response

    def normalize_query(self, query, bindvars):
        """Method normalize_query."""
        if not bindvars or "." not in query:
            return [query, bindvars]
        new_bindvars = {}
        for key in bindvars:
            value = bindvars[key]
            if DbTypes.exist(value):
                continue
            if "." in key:
                new_key = self.get_condition_key(key)
                new_bindvars[new_key] = value
                query = query.replace(":" + key, ":" + new_key)
            else:
                new_bindvars[key] = value
        return [query, new_bindvars]

    def get_condition_key(self, key):
        """Method get_condition_key."""
        dot = "."
        new_key = ""
        if dot in key:
            new_key = str(random.choice('abcdefghij'))
            new_key += str(random.randint(0, 1000))
            new_key += key.split(dot)[1]
        return new_key

    def get_join_select(self, fields=None, conditions=None,
                        join_fields=None, *table):
        """Method get_query.

        @param table: table name in database
        @param fields: dictionary which contain the fields to affect.
        @param condition: dictionary which contain the fields and
            values to filter
        """
        if not fields:
            fields = []
        if not conditions:
            conditions = {}
        if not join_fields:
            join_fields = {}
        __inst = self.get_join_instruction(fields, len(table), join_fields)
        __inst += self.get_conditions(1, conditions)
        query = __inst
        for number in range(len(table)):
            str_replace = ":table" + str(number)
            __table = table[number].replace("l__", "")
            __table = table[number].replace("r__", "")
            query = query.replace(str_replace, table[number])
        return query

    def get_join_instruction(self, fields, n_tables=1, join=None):
        """get_instruction.

        This method will evaluate the action and will return the right
        instruction
        """
        if not join:
            join = []
        __ini = "SELECT :fields FROM :table0"
        if n_tables > 1:
            for index in range(n_tables - 1):
                to_join = join[index]
                str_table = ":table" + str(index + 1)
                str_join = ""
                if to_join.startswith("l__"):
                    __ini += " LEFT JOIN "
                elif to_join.startswith("r__"):
                    __ini += " RIGHT JOIN "
                else:
                    __ini += " INNER JOIN "
                __ini += str_table
                print("to_join", to_join)
                for field in to_join:
                    print("field", field)
                    if str_join:
                        str_join += " AND "
                    str_join += str_table + "." + field
                    str_join += "= :table0." + field
                __ini += " ON " + str_join
        __inst = ""
        for field in fields:
            if __inst:
                __inst += ","
            __inst += field
        if not fields:
            __inst = "*"
        response = __ini.replace(":fields", __inst)
        return response

    def get_query(self, table, fields=None, conditions=None, action=1):
        """Method get_query.

        @param table: table name in database
        @param fields: dictionary which contain the fields to affect.
        @param condition: dictionary which contain the fields and values to
            filter
        @param action: 0=INSERT, 1=SELECT, 2=UPDATE, 3=DELETE
        """
        if not fields:
            fields = []
        if not conditions:
            conditions = {}
        __inst = self.get_instruction(action, fields)
        __inst += self.get_conditions(action, conditions)
        if action == 0:
            __inst += " returning :return_id INTO :new_id"
        query = __inst.replace(":table", table)
        return query
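
    # Illustrative output (table/field names invented for the example):
    #   get_query('users', fields={'name': 'x'}, action=0) ->
    #   "INSERT INTO users (name) VALUES (:name) returning :return_id INTO :new_id"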

    def get_instruction(self, action, fields):
        """get_instruction.

        This method will evaluate the action and will return the right
        instruction
        """
        __ini = ""
        if action == 0:
            __ini = "INSERT INTO :table (:fields) VALUES (:values)"
        elif action == 1:
            __ini = "SELECT :fields FROM :table"
        elif action == 2:
            __ini = "UPDATE :table SET :fields"
        elif action == 3:
            # a DELETE takes no field list, so no further processing is needed
            __ini = "DELETE FROM :table"
            return __ini
        __inst = ""
        __values = ""
        for field in fields:
            try:
                __type = fields[field].get("type")  # if isinstance(fields[field], dict) else None
            except:
                __type = None
            if __inst:
                __inst += ","
                __values += ","
            if action == 0:
                __inst += field
                __values += "TO_DATE(:{0}, 'yyyy-MM-dd')".format(field) if __type == "date" else ":{}".format(field)
            elif action == 2:
                __inst += "{0}= TO_DATE(:{0}, 'yyyy-MM-dd')".format(field) if __type == "date" else "{0}=:{0}".format(field)
            else:
                __inst += field
                __values += ":" + field
        if not fields and action == 1:
            __inst = "*"
        response = __ini.replace(":fields", __inst).replace(":values",
                                                            __values)
        return response

    def get_conditions(self, action, conditions):
        """Method get_conditions.

        This method will evaluate the action and the conditions.
        If the action is 0 or there are no conditions then it returns an empty
        string; otherwise it returns the right condition.
        """
        s_conditions = len(conditions)
        if action == 0 or s_conditions == 0:
            return ""
        __condition = " WHERE "
        __cond = ""
        for condition in conditions:
            try:
                __type = conditions[condition].get("type")
            except:
                __type = None
            __value = conditions[condition] if not __type else conditions[condition].get("value")
            if not isinstance(__value, list):
                __value = [__value]
            for __val in __value:
                if __cond:
                    __cond += " AND "
                if DbTypes.exist(__val):
                    __sentence = DbTypes.get_sentence(__val)
                    if '{}' in __sentence:
                        __cond += __sentence.format(condition)
                    else:
                        __cond += condition + " " + __sentence
                else:
                    __cond += "{0} = TO_DATE(:{0}, 'yyyy-MM-dd')".format(condition) if __type == "date" else "{0}=:{0}".format(condition)
        __condition += __cond
        return __condition

    def save(self, table, generic_object, name_id):
        """Method save.

        @attribute table
        @attribute generic_object
        @attribute name_id
        """
        __fields = copy.copy(generic_object)
        if name_id in __fields:
            del __fields[name_id]
            if isinstance(generic_object[name_id], dict):
                id_object = generic_object[name_id]['value']
            else:
                id_object = generic_object[name_id]
        else:
            id_object = -1
        response = {}
        try:
            response = dict(error=0, text="success")
            if id_object > 0:
                __condition = {name_id: id_object}
                __update_query = self.get_query(table, __fields, __condition,
                                                action=2)
                for field in generic_object: generic_object[field] = generic_object[field].get("value")
                print(__update_query)
                self.execute(__update_query, generic_object, True)
            else:
                newest_id_wrapper = self.get_cursor().var(cx_Oracle.NUMBER)
                __insert_query = self.get_query(table, fields=__fields, action=0)
                for field in __fields: __fields[field] = __fields[field].get("value")
                __fields["new_id"] = newest_id_wrapper
                __insert_query = __insert_query.replace(":return_id", name_id)
                print(__insert_query)
                self.execute(__insert_query, __fields, True, False)
                new_id = newest_id_wrapper.getvalue()
                response["id"] = int(new_id)
        except Exception as e:
            formatted_lines = traceback.format_exc().splitlines()
            print(formatted_lines[0])
            print(formatted_lines[-1])
            print(e)
            response = dict(error=1, text="There was an error saving", desc_error=formatted_lines[-1])
        return response

    def delete(self, table, conditions):
        """Method delete.

        @attribute table
        @attribute name_id
        @attribute id_object
        """
        condition_size = len(conditions)
        if condition_size == 0:
            return dict(error=2, text="Data incomplete at delete")
        __delete_query = self.get_query(table, conditions=conditions,
                                        action=3)
        response = {}
        try:
            self.execute(__delete_query, conditions, True)
            response = dict(error=0, text="success")
        except Exception:
            response = dict(error=2, text="There was an error deleting")
        return response

    def search(self, **options):
        table = options.get("table")
        if not table:
            raise Exception("fuck you")
        tmp = {}
        if isinstance(table, list):
            pass
        else:
            query = self.get_instruction(1, {}).replace(":table", table)
        fields = options.get("fields")
        conditions = options.get("conditions")
        class_object = options.get("class_object")
        for field in fields:
            nquery = "{} WHERE".format(query)
            for condition in conditions:
                if len(condition.strip()) == 0:
                    continue
                nquery += " LOWER({}) LIKE LOWER('%{}%') OR".format(field, condition)
            nquery = nquery.strip("OR").strip()
            response = self.execute(nquery, {}, debug=False)
            if not response:
                continue
            for row in response.fetchall():
                id = row[0]
                if not tmp.get(id):
                    tmp[id] = [row, 1]
                else:
                    tmp[id][1] += 1
        if class_object:
            result = []
            for key in tmp.keys():
                item = class_object(tmp[key][0], tmp[key][1])
                result.append(item)
            result.sort(key=lambda x: x.w, reverse=True)
        else:
            result = tmp
        return result
| 31.88729 | 137 | 0.518087 | 1,379 | 13,297 | 4.704859 | 0.166062 | 0.010789 | 0.012947 | 0.015413 | 0.183724 | 0.156134 | 0.120376 | 0.110512 | 0.083847 | 0.083847 | 0 | 0.009855 | 0.381891 | 13,297 | 416 | 138 | 31.963942 | 0.779535 | 0.106565 | 0 | 0.250883 | 0 | 0 | 0.063891 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053004 | false | 0.010601 | 0.024735 | 0 | 0.137809 | 0.045936 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da11fc5980e78cefaeb92357886c125f256182a0 | 439 | py | Python | exercicio_py/ex0007_progressao_aritmetica/main_v1.py | danielle8farias/Exercicios-Python-3 | f2fe9b6ca63536df1d83fd10162cfc04de36b830 | ["MIT"] | null | null | null | exercicio_py/ex0007_progressao_aritmetica/main_v1.py | danielle8farias/Exercicios-Python-3 | f2fe9b6ca63536df1d83fd10162cfc04de36b830 | ["MIT"] | null | null | null | exercicio_py/ex0007_progressao_aritmetica/main_v1.py | danielle8farias/Exercicios-Python-3 | f2fe9b6ca63536df1d83fd10162cfc04de36b830 | ["MIT"] | null | null | null | ########
# author: danielle8farias@gmail.com
# repository: https://github.com/danielle8farias
# Description: The user enters the first term of an arithmetic progression (AP) and its common difference. The program prints the first 10 terms of that AP.
########

A1 = int(input('Primeiro termo: '))
r = int(input('Razão: '))

i = 1
An = A1
while i < 11:
    print(f'{An}', end=' -> ')
    # arithmetic progression formula
    An = A1 + i*r
    # i = i + 1
    i += 1
print('FIM')
| 23.105263 | 114 | 0.610478 | 65 | 439 | 4.123077 | 0.676923 | 0.022388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037791 | 0.216401 | 439 | 18 | 115 | 24.388889 | 0.741279 | 0.537585 | 0 | 0 | 0 | 0 | 0.187845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da12532f996b1734f9456dcccabecc881b1e321b | 2,165 | py | Python | rns/viz.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | ["MIT"] | null | null | null | rns/viz.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | ["MIT"] | null | null | null | rns/viz.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | ["MIT"] | null | null | null | import io
import os

import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns

from rns.constant import W, H

# Plotter functions
PLOT_FUNCS = {}


def register_plotter(func):
    PLOT_FUNCS[func.__name__] = func

    def func_wrapper(images, **conv_kwargs):
        return func(images, **conv_kwargs)
    return func_wrapper


def plot(mode, vals, FLAGS, itr=0, save=True, return_buf=False, show=False):
    func = PLOT_FUNCS[mode]
    path = func(vals, FLAGS, itr=itr)
    buf = None
    if save:
        plt.savefig(path)
    if return_buf:
        buf = io.BytesIO()
        plt.savefig(buf)
        buf.seek(0)
    if show:
        plt.show()
    plt.close()
    return buf
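
# Illustrative call (assumed keys; 'contour' is registered below and reads X/Y/Z/state from vals):
#   plot('contour', {'X': X, 'Y': Y, 'Z': Z, 'state': state}, FLAGS, itr=10, save=True)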

@register_plotter
def arr(arr, FLAGS, itr=None):
    plt.imshow(arr, cmap='binary')


@register_plotter
def in_out_vae(vals, FLAGS, itr=0):
    vae_title = '{}-vae.png'.format(itr)
    os.makedirs(FLAGS['plot_path'], exist_ok=True)
    vae_path = os.path.join(FLAGS['plot_path'], vae_title)
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.imshow(vals['img1'])  # , cmap='binary')
    ax2.imshow(vals['img2'])  # , cmap='binary')
    return vae_path


@register_plotter
def contour(vals, FLAGS, itr=0):
    X, Y, Z, state = vals['X'], vals['Y'], vals['Z'], vals['state']
    prob_title = '{}-prob.png'.format(itr)
    os.makedirs(FLAGS['plot_path'], exist_ok=True)
    prob_path = os.path.join(FLAGS['plot_path'], prob_title)
    plt.contour(X, Y, Z[:, :, 0])
    plt.scatter(state[0, :, 0], state[0, :, 1])
    plt.title(prob_title)
    return prob_path


@register_plotter
def samples(vals, FLAGS, itr=0):
    samples = vals['samples']
    sample_title = '{}-sample.png'.format(itr)
    sample_path = os.path.join(FLAGS['plot_path'], sample_title)
    sns.jointplot(samples[:, 0, 0], samples[:, 0, 1], kind='hex', color='#4cb391', xlim=(-1.0, 1.0), ylim=(-1.0, 1.0))
    return sample_path


@register_plotter
def shapes(vals, FLAGS, itr=None):
    dg = vals['dg']
    ax = plt.gca(aspect='equal', xlim=W, ylim=H)
    rect = mpatches.Rectangle((0, 0), W, H, color='C0')
    ax.add_patch(rect)
    objs = dg.__next__()
    for o in objs['shapes']:
        o.plot(ax)
| 27.0625 | 112 | 0.639261 | 332 | 2,165 | 4.03012 | 0.298193 | 0.041854 | 0.053812 | 0.038864 | 0.168161 | 0.129297 | 0.129297 | 0.068759 | 0.068759 | 0.068759 | 0 | 0.021155 | 0.192148 | 2,165 | 79 | 113 | 27.405063 | 0.743854 | 0.022633 | 0 | 0.109375 | 0 | 0 | 0.063003 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.09375 | 0.015625 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da160107a31d4d4dd133e4fba3b2b4c6286bd983 | 2,955 | py | Python | pycurb/time_rule.py | azavea/PyCurb | 9492ca40b0639680b73aa7bdfcf9f744f9e75727 | ["Apache-2.0"] | null | null | null | pycurb/time_rule.py | azavea/PyCurb | 9492ca40b0639680b73aa7bdfcf9f744f9e75727 | ["Apache-2.0"] | 8 | 2020-09-30T17:15:50.000Z | 2020-10-23T21:00:53.000Z | pycurb/time_rule.py | azavea/PyCurb | 9492ca40b0639680b73aa7bdfcf9f744f9e75727 | ["Apache-2.0"] | null | null | null | from abc import ABC

from pycurb.utils import (parse_date, parse_day_of_month, parse_day_of_week,
                          parse_occurrence, parse_time)


class TimeRule(ABC):
    pass


class DaysOfWeek(TimeRule):
    def __init__(self, days, occurences_in_month=None):
        if isinstance(days, str):
            days = [days]
        self.days = [parse_day_of_week(day) for day in days]

        self.occurences_in_month = None
        if occurences_in_month:
            self.occurences_in_month = [
                parse_occurrence(o) for o in occurences_in_month
            ]

    @staticmethod
    def from_dict(d):
        return DaysOfWeek(d['days'])

    def to_dict(self):
        return {'days': self.days}


class DaysOfMonth(TimeRule):
    def __init__(self, days):
        if isinstance(days, str):
            days = [days]
        self.days = [parse_day_of_month(day) for day in days]

    @staticmethod
    def from_dict(d):
        return DaysOfMonth(d['days'])

    def to_dict(self):
        return {'days': self.days}


class DesignatedPeriod(TimeRule):
    def __init__(self, name, apply):
        self.name = name
        apply = apply.lower()
        self.apply = None
        if apply in ('except during', 'only during'):
            self.apply = apply

    @staticmethod
    def from_dict(d):
        return DesignatedPeriod(d['name'], d['apply'])

    def to_dict(self):
        d = {'name': self.name}
        if self.apply:
            d['apply'] = self.apply
        return d


class EffectiveDates(TimeRule):
    def __init__(self, date_from, date_to):
        self.date_from = parse_date(date_from)
        self.date_to = parse_date(date_to)

        self.year = False
        if len(date_from.split('-')) > 2 and len(date_to.split('-')) > 2:
            self.year = True

    @staticmethod
    def from_dict(d):
        return EffectiveDates(d['from'], d['to'])

    def to_dict(self):
        d = {
            'from': '{}-{}'.format(self.date_from.month, self.date_from.day),
            'to': '{}-{}'.format(self.date_to.month, self.date_to.day)
        }
        if self.year:
            d['from'] = '{}-'.format(self.date_from.year) + d['from']
            d['to'] = '{}-'.format(self.date_to.year) + d['to']
        return d


class TimeOfDay(TimeRule):
    def __init__(self, time_from, time_to):
        self.time_from = parse_time(time_from)
        self.time_to = parse_time(time_to)

    def is_equal(self, time_of_day):
        return self.to_dict() == time_of_day.to_dict()

    @staticmethod
    def from_dict(d):
        return TimeOfDay(d['from'], d['to'])

    def to_dict(self):
        st_h = str(self.time_from.hour).zfill(2)
        st_m = str(self.time_from.minute).zfill(2)
        en_h = str(self.time_to.hour).zfill(2)
        en_m = str(self.time_to.minute).zfill(2)
        return {
            'from': '{}:{}'.format(st_h, st_m),
            'to': '{}:{}'.format(en_h, en_m)
        }
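
# Illustrative round trip (assumed parse_time accepts "HH:MM" strings; values invented):
#   TimeOfDay.from_dict({'from': '08:00', 'to': '18:30'}).to_dict() -> {'from': '08:00', 'to': '18:30'}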
| 26.863636 | 77 | 0.575973 | 394 | 2,955 | 4.068528 | 0.154822 | 0.044916 | 0.046787 | 0.059264 | 0.361822 | 0.25577 | 0.1335 | 0.1335 | 0.107299 | 0.107299 | 0 | 0.002844 | 0.285956 | 2,955 | 109 | 78 | 27.110092 | 0.756872 | 0 | 0 | 0.256098 | 0 | 0 | 0.042301 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.195122 | false | 0.012195 | 0.02439 | 0.097561 | 0.426829 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da170ec47cebeb13d6c068d32835dcf9ac0425e1 | 2,653 | py | Python | amrlib/models/parse_gsii/vocabs.py | plandes/amrlib | c31f92f05a265362367eea85f512e54030860147 | ["MIT"] | 103 | 2020-09-04T07:21:09.000Z | 2022-03-31T23:06:41.000Z | amrlib/models/parse_gsii/vocabs.py | plandes/amrlib | c31f92f05a265362367eea85f512e54030860147 | ["MIT"] | 39 | 2020-09-03T14:26:22.000Z | 2022-03-08T20:18:59.000Z | amrlib/models/parse_gsii/vocabs.py | plandes/amrlib | c31f92f05a265362367eea85f512e54030860147 | ["MIT"] | 19 | 2020-09-30T12:15:08.000Z | 2022-02-18T18:15:31.000Z | import os
PAD, UNK, DUM, NIL, END, CLS = '<PAD>', '<UNK>', '<DUMMY>', '<NULL>', '<END>', '<CLS>'
# Note: for the function that saves the vocabs, see create_vocabs.py
def get_vocabs(vocab_dir):
vocabs = dict()
vocabs['tok'] = Vocab(os.path.join(vocab_dir, 'tok_vocab'), 5, [CLS])
vocabs['lem'] = Vocab(os.path.join(vocab_dir, 'lem_vocab'), 5, [CLS])
vocabs['pos'] = Vocab(os.path.join(vocab_dir, 'pos_vocab'), 5, [CLS])
vocabs['ner'] = Vocab(os.path.join(vocab_dir, 'ner_vocab'), 5, [CLS])
vocabs['predictable_concept'] = Vocab(os.path.join(vocab_dir, 'predictable_concept_vocab'), 5, [DUM, END])
vocabs['concept'] = Vocab(os.path.join(vocab_dir, 'concept_vocab'), 5, [DUM, END])
vocabs['rel'] = Vocab(os.path.join(vocab_dir, 'rel_vocab'), 50, [NIL])
vocabs['word_char'] = Vocab(os.path.join(vocab_dir, 'word_char_vocab'), 100, [CLS, END])
vocabs['concept_char'] = Vocab(os.path.join(vocab_dir, 'concept_char_vocab'), 100, [CLS, END])
return vocabs
class Vocab(object):
def __init__(self, filename, min_occur_cnt, specials = None):
idx2token = [PAD, UNK] + (specials if specials is not None else [])
self._priority = dict()
num_tot_tokens = 0
num_vocab_tokens = 0
with open(filename) as f:
lines = f.readlines()
for line in lines:
try:
token, cnt = line.rstrip('\n').split('\t')
cnt = int(cnt)
num_tot_tokens += cnt
            except ValueError:
                # a malformed line is reported and skipped, rather than falling
                # through with a stale token/cnt from the previous iteration
                print(line)
                continue
if cnt >= min_occur_cnt:
idx2token.append(token)
num_vocab_tokens += cnt
self._priority[token] = int(cnt)
self.coverage = num_vocab_tokens/num_tot_tokens
self._token2idx = dict(zip(idx2token, range(len(idx2token))))
self._idx2token = idx2token
self._padding_idx = self._token2idx[PAD]
self._unk_idx = self._token2idx[UNK]
def priority(self, x):
return self._priority.get(x, 0)
@property
def size(self):
return len(self._idx2token)
@property
def unk_idx(self):
return self._unk_idx
@property
def padding_idx(self):
return self._padding_idx
def idx2token(self, x):
if isinstance(x, list):
return [self.idx2token(i) for i in x]
return self._idx2token[x]
def token2idx(self, x):
if isinstance(x, list):
return [self.token2idx(i) for i in x]
return self._token2idx.get(x, self.unk_idx)
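if __name__ == '__main__':
    # minimal self-check (added): the two-column "token<TAB>count" file format
    # is inferred from __init__ above; everything else is standard library
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='_vocab', delete=False) as f:
        f.write('foo\t10\nbar\t3\nbaz\t1\n')
        path = f.name
    v = Vocab(path, min_occur_cnt=2, specials=[CLS])
    assert v.idx2token(v.token2idx(['foo', 'bar'])) == ['foo', 'bar']
    assert v.token2idx('unseen') == v.unk_idx
    print('size:', v.size, 'coverage: %.3f' % v.coverage)
    os.unlink(path)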
| 37.366197 | 110 | 0.574821 | 344 | 2,653 | 4.235465 | 0.264535 | 0.054907 | 0.067948 | 0.092656 | 0.289636 | 0.264928 | 0.156486 | 0.043926 | 0 | 0 | 0 | 0.017433 | 0.286468 | 2,653 | 70 | 111 | 37.9 | 0.752245 | 0.024878 | 0 | 0.087719 | 0 | 0 | 0.083172 | 0.009671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140351 | false | 0 | 0.017544 | 0.070175 | 0.333333 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1752aa56e3a9f32b692a7cdcc8c12c9105eaac | 1,139 | py | Python | MAR2020/MakingChange.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | MAR2020/MakingChange.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | MAR2020/MakingChange.py | dexterchan/DailyChallenge | 1f38dc3b22983835836a84d6281777d8e20fce7a | [
"Apache-2.0"
] | null | null | null | #Given a list of possible coins in cents, and an amount (in cents) n,
# return the minimum number of coins needed to create the amount n.
# If it is not possible to create the amount using the given coin denomination, return None.
#Here's an example and some starter code:
#ANalysis, sort the list of possible coins O(nlogn) from largest to smallest
#for each cent,
#divide amount by cent value = d, if d >= 1
# amt = amt - d*cent value
# store cent value to list
# iterate for next cent
# at end of list
# if amt > 0 , return None
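# Note (added): the greedy strategy above is only guaranteed to be optimal for
# canonical coin systems such as [1, 5, 10, 25]. With coins [1, 3, 4] and n = 6
# it would return 3 coins (4+1+1) even though 2 coins (3+3) suffice.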
def make_change(coins, n):
# Fill this in.
lst = []
coinsLst = sorted(coins, reverse=True)
amt = n
for c in coinsLst:
d = amt // c
amt = amt - d * c
for i in range(d):
lst.append(str(c))
if amt > 0:
return None
else:
        result = "%d coins (%s)" % (len(lst), " + ".join(lst))
return result
if __name__ == "__main__":
print(make_change([1, 5, 10, 25], 36))
# 3 coins (25 + 10 + 1)
print(make_change([1, 5, 10, 25], 30))
# 2 coins (25 + 5)
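    # added: amounts that cannot be formed return None
    print(make_change([5, 10], 3))
    # None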
print(make_change([1, 5, 10, 25], 27))
    # 3 coins (25 + 1 + 1) | 28.475 | 92 | 0.603161 | 191 | 1,139 | 3.534031 | 0.445026 | 0.059259 | 0.066667 | 0.071111 | 0.140741 | 0.093333 | 0.093333 | 0 | 0 | 0 | 0 | 0.050971 | 0.276558 | 1,139 | 40 | 93 | 28.475 | 0.768204 | 0.510975 | 0 | 0 | 0 | 0 | 0.04059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1ae3572abdad23c9e302bb355fe093cb9ac8e8 | 1,981 | py | Python | graphid/util/util_grabdata.py | Erotemic/graphid | 5d04c2eec609f135464a921ba03d9578fa6e22fd | [
"Apache-2.0"
] | 4 | 2019-03-04T02:49:26.000Z | 2021-10-06T00:51:13.000Z | graphid/util/util_grabdata.py | Erotemic/graphid | 5d04c2eec609f135464a921ba03d9578fa6e22fd | [
"Apache-2.0"
] | 1 | 2019-02-15T23:42:26.000Z | 2019-02-15T23:42:26.000Z | graphid/util/util_grabdata.py | Erotemic/graphid | 5d04c2eec609f135464a921ba03d9578fa6e22fd | [
"Apache-2.0"
] | null | null | null | import ubelt as ub
from os.path import exists # NOQA
TESTIMG_URL_DICT = {
'astro.png' : 'https://i.imgur.com/KXhKM72.png', # Use instead of
'carl.jpg' : 'http://i.imgur.com/flTHWFD.jpg',
'grace.jpg' : 'http://i.imgur.com/rgQyu7r.jpg',
'jeff.png' : 'http://i.imgur.com/l00rECD.png',
'ada2.jpg' : 'http://i.imgur.com/zHOpTCb.jpg',
'ada.jpg' : 'http://i.imgur.com/iXNf4Me.jpg',
'easy1.png' : 'http://i.imgur.com/Qqd0VNq.png',
'easy2.png' : 'http://i.imgur.com/BDP8MIu.png',
'easy3.png' : 'http://i.imgur.com/zBcm5mS.png',
'hard3.png' : 'http://i.imgur.com/ST91yBf.png',
'zebra.png' : 'http://i.imgur.com/58hbGcd.png',
'star.png' : 'http://i.imgur.com/d2FHuIU.png',
'patsy.jpg' : 'http://i.imgur.com/C1lNRfT.jpg',
}
def grab_test_imgpath(key='astro.png', allow_external=True, verbose=True):
"""
Gets paths to standard / fun test images.
    Downloads them if they don't exist.
Args:
key (str): one of the standard test images, e.g. astro.png, carl.jpg, ...
allow_external (bool): if True you can specify existing fpaths
Returns:
str: testimg_fpath - filepath to the downloaded or cached test image.
Example:
>>> testimg_fpath = grab_test_imgpath('carl.jpg')
>>> assert exists(testimg_fpath)
"""
if allow_external and key not in TESTIMG_URL_DICT:
testimg_fpath = key
if not exists(testimg_fpath):
raise AssertionError(
                'testimg_fpath={!r} not found, did you mean one of {!r}'.format(
                    testimg_fpath, sorted(TESTIMG_URL_DICT.keys())))
else:
testimg_fname = key
testimg_url = TESTIMG_URL_DICT[key]
testimg_fpath = ub.grabdata(testimg_url, fname=testimg_fname, verbose=verbose)
return testimg_fpath
if __name__ == '__main__':
"""
CommandLine:
python -m graphid.util.util_grabdata all
"""
import xdoctest
xdoctest.doctest_module(__file__)
| 34.754386 | 86 | 0.627461 | 273 | 1,981 | 4.399267 | 0.432234 | 0.064946 | 0.097419 | 0.129892 | 0.159867 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01297 | 0.221605 | 1,981 | 56 | 87 | 35.375 | 0.765888 | 0.218576 | 0 | 0 | 0 | 0 | 0.40268 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 1 | 0.03125 | false | 0 | 0.09375 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1d6a02eeb844897d0b4f2d15640a391973f96d | 1,971 | py | Python | ENotePadAlgorithm/strEncrypt/Morse.py | xioacd99/EnhancedNotePad | b95da1c4d957061ad60015f3b9ab5c445b5a1bc4 | [
"MIT"
] | null | null | null | ENotePadAlgorithm/strEncrypt/Morse.py | xioacd99/EnhancedNotePad | b95da1c4d957061ad60015f3b9ab5c445b5a1bc4 | [
"MIT"
] | null | null | null | ENotePadAlgorithm/strEncrypt/Morse.py | xioacd99/EnhancedNotePad | b95da1c4d957061ad60015f3b9ab5c445b5a1bc4 | [
"MIT"
] | null | null | null | # encode时会将非ANSII字符变为空格
# decode时会跳过非ANSII字符
# 摩斯电码加密的字符只有字符,数字,标点,不区分大小写
class MorseCoder:
def __init__(self):
        self.encode_alphabet = {"A": ".-", "B": "-...", "C": "-.-.", "D": "-..",  # encoding table
                                "E": ".", "F": "..-.", "G": "--.", "H": "....",
                                "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
                                "M": "--", "N": "-.", "O": "---", "P": ".--.",
                                "Q": "--.-", "R": ".-.", "S": "...", "T": "-",
                                "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
                                "Y": "-.--", "Z": "--..",
                                # Morse "1" is ".----"; ".---" would collide
                                # with "J" and make decoding ambiguous
                                "1": ".----", "2": "..---", "3": "...--", "4": "....-",
                                "5": ".....", "6": "-....", "7": "--...", "8": "---..",
                                "9": "----.", "0": "-----",
                                "(": ".--.-", "-": "-....-", "?": "..--..", "/": "-..-.",
                                ".": ".-.-.-", "@": ".--.-."
                                }
        # inverse table for decoding, used by decode() and get_decode_alphabet()
        self.decode_alphabet = {v: k for k, v in self.encode_alphabet.items()}
    def encode(self, plaintext):
        """Encode ASCII characters in plaintext to Morse code"""
charList = list(plaintext.upper())
morsecodeList = \
[self.encode_alphabet[char] if char in self.encode_alphabet.keys() else " " for char in charList]
return " ".join(morsecodeList)
def decode(self, morsecode):
morsecodeList = morsecode.split(" ")
charList = \
[self.decode_alphabet[char] if char in self.decode_alphabet.keys() else char for char in morsecodeList]
return "".join(charList)
def get_encode_alphabet(self):
return self.encode_alphabet
def get_decode_alphabet(self):
return self.decode_alphabet
def strEncrypt(self, msg):
return self.encode(msg)
if __name__ == '__main__':
test = MorseCoder()
result = test.strEncrypt('ABCD12345678')
print(result)
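    # round-trip check (added): decoding should recover the original input
    print(test.decode(result))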
| 41.0625 | 115 | 0.367326 | 152 | 1,971 | 4.611842 | 0.526316 | 0.071327 | 0.10271 | 0.051355 | 0.068474 | 0.068474 | 0 | 0 | 0 | 0 | 0 | 0.014377 | 0.364789 | 1,971 | 47 | 116 | 41.93617 | 0.545527 | 0.060883 | 0 | 0 | 0 | 0 | 0.124864 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0 | 0 | 0.085714 | 0.342857 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da1fedbc0ae28396d7b17794e490d8e258826958 | 4,800 | py | Python | Yolov3_deepsort/Badminton_Service/player.py | Haosam/BadmintonAI | 4a1e837109cd279fb7480b90b31003c259e063cf | [
"Apache-2.0"
] | null | null | null | Yolov3_deepsort/Badminton_Service/player.py | Haosam/BadmintonAI | 4a1e837109cd279fb7480b90b31003c259e063cf | [
"Apache-2.0"
] | null | null | null | Yolov3_deepsort/Badminton_Service/player.py | Haosam/BadmintonAI | 4a1e837109cd279fb7480b90b31003c259e063cf | [
"Apache-2.0"
] | null | null | null | from tkinter import *
import cv2
# Global Variables, can be translated to database if it becomes production
lcw = "Lee Chong Wei"
swh = "Son Wan Ho"
lyd = "Lee Yong Dae"
kgj = "Kim Gi Jung"
ksh = "Ko Sung Hyun"
yys = "Yo Yeon Seong"
csg = "Choi Sol Gyu"
wcl = "Wang Chi-Lin"
chl = "Chen Hung-Lin"
lcw_height = 1.72
swh_height = 1.77
lyd_height = 1.76
kkj_height = 1.79
ksh_height = 1.79
yys_height = 1.81
csg_height = 1.81
wcl_height = 1.86
chl_height = 1.77
################################################################################
player_names1 = ["Player 1",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names2 = ["Player 2",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names3 = ["Player 3",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_names4 = ["Player 4",lcw,swh,lyd,kgj,ksh,yys,csg,wcl,chl]
player_heights = [lcw_height,swh_height,lyd_height,kkj_height,ksh_height,yys_height,csg_height]
#################################################################################
def player_main():
print("If no player is present, please at least select None")
def callback1(selection):
global name_1, height_1
name_1 = selection
height_1 = playercheck(selection)
return(name_1, height_1)
def callback2(selection):
global name_2, height_2
name_2 = selection
height_2 = playercheck(selection)
    return(name_2, height_2)
def callback3(selection):
global name_3, height_3
name_3 = selection
height_3 = playercheck(selection)
return(name_3, height_3)
def callback4(selection):
global name_4, height_4
name_4 = selection
height_4 = playercheck(selection)
return(name_4, height_4)
def playercheck(selection):
if selection == "Lee Chong Wei":
return lcw_height
elif selection == "Son Wan Ho":
return swh_height
elif selection == "Lee Yong Dae":
        return lyd_height
elif selection == "Kim Gi Jung":
return kkj_height
elif selection == "Ko Sung Hyun":
return ksh_height
elif selection == "Yo Yeon Seong":
return yys_height
elif selection == "Choi Sol Gyu":
return csg_height
elif selection == "Wang Chi-Lin":
return wcl_height
elif selection == "Chen Hung-Lin":
return chl_height
    elif selection in ("None", "Select Player", "Player 1", "Player 2",
                       "Player 3", "Player 4"):
        # placeholder selections fall back to a neutral height of 1
        return 1
else:
return 1
def playerselection():
window = Tk()
window.geometry('400x400')
window.title("Player Selection")
label1 = Label(window, text="Player 1: ")
label1.config(width=10, font=('Helvetica', 10))
label2 = Label(window, text="Player 2: ")
label2.config(width=10, font=('Helvetica', 10))
label3 = Label(window, text="Player 3: ")
label3.config(width=10, font=('Helvetica', 10))
label4 = Label(window, text="Player 4: ")
label4.config(width=10, font=('Helvetica', 10))
label5 = Label(window, text="If no player is present,")
label6 = Label(window, text=", please at least select None")
label1.grid(row=0,column=0)
label2.grid(row=1,column=0)
label3.grid(row=2,column=0)
label4.grid(row=3,column=0)
label5.grid(row=8,column=0)
label6.grid(row=8,column=1)
clicked1 = StringVar()
clicked1.set("Select Player")
clicked2 = StringVar()
clicked2.set("Select Player")
clicked3 = StringVar()
clicked3.set("Select Player")
clicked4 = StringVar()
clicked4.set("Select Player")
drop1 = OptionMenu(window, clicked1, *player_names1, command=callback1)
drop1.config(width=20, font=('Helvetica', 10))
drop2 = OptionMenu(window, clicked2, *player_names2, command=callback2)
drop2.config(width=20, font=('Helvetica', 10))
drop3 = OptionMenu(window, clicked3, *player_names3, command=callback3)
drop3.config(width=20, font=('Helvetica', 10))
drop4 = OptionMenu(window, clicked4, *player_names4, command=callback4)
drop4.config(width=20, font=('Helvetica', 10))
drop1.grid(row=0,column=1)
drop2.grid(row=1,column=1)
drop3.grid(row=2,column=1)
drop4.grid(row=3,column=1)
labelTest1 = Label(text="", font=('Helvetica', 8), fg='red')
labelTest1.grid(row=4,column=1)
labelTest2 = Label(text="", font=('Helvetica', 8), fg='red')
labelTest2.grid(row=5,column=1)
labelTest3 = Label(text="", font=('Helvetica', 8 ), fg='red')
labelTest3.grid(row=6,column=1)
labelTest4 = Label(text="", font=('Helvetica', 8), fg='red')
labelTest4.grid(row=7,column=1)
window.mainloop()
playerselection()
return(name_1,height_1,name_2,height_2,name_3,height_3,name_4,height_4)
# print(name_1,height_1,",", name_2,height_2,",",name_3,height_3,",",name_4,height_4)
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
player_main()
print(name_1)
print(name_2)
print(name_3)
print(name_4) | 33.103448 | 96 | 0.660417 | 692 | 4,800 | 4.453757 | 0.208092 | 0.034069 | 0.049319 | 0.019468 | 0.27255 | 0.209604 | 0.136924 | 0.100584 | 0.073978 | 0.073978 | 0 | 0.055221 | 0.17 | 4,800 | 145 | 97 | 33.103448 | 0.718373 | 0.043333 | 0 | 0.047244 | 0 | 0 | 0.15056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055118 | false | 0 | 0.015748 | 0 | 0.15748 | 0.03937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da2374aced1b72eebcc58d79ed22779475feb324 | 4,749 | py | Python | scripts/install.py | discord-package-bot/discord-package-bot | 109603c57a668d75f6939e3c97aae72f2691640e | [
"MIT"
] | 1 | 2021-07-12T05:56:00.000Z | 2021-07-12T05:56:00.000Z | scripts/install.py | discord-package-bot/discord-package-bot | 109603c57a668d75f6939e3c97aae72f2691640e | [
"MIT"
] | null | null | null | scripts/install.py | discord-package-bot/discord-package-bot | 109603c57a668d75f6939e3c97aae72f2691640e | [
"MIT"
] | null | null | null | """
syntax: |
  install <package>
  install update:<package>
  install file:<file>
syntax_description: |
  package: The package to install. Prefixing it with update: updates the package instead.
  file: Path to an exported file.
---
Installs a package.
"""
import os
import re
import requests
import shlex
import shutil
import subprocess
import sys
import yaml
import zipfile
from colorama import Fore, Style # , Back
from .utils import command, token
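# Example invocations (added for illustration; the "dpb" entry point is
# inferred from the update hint this script prints for installed packages):
#   dpb install owner/repo
#   dpb install update:owner/repo
#   dpb install file:packages.txt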
def get_info(repo):
resp = None
repo_data = requests.get(
f"https://api.github.com/repos/{repo}",
headers={"authorization": token.github_token},
)
if repo_data.status_code != 200:
        print(Fore.RED + f"Package {repo} was not found." + Fore.RESET)
return False
resp = requests.get(f"https://raw.githubusercontent.com/{repo}/dpb/dpb.yml")
if resp.status_code == 200:
branch = "dpb"
else:
branch = repo_data.json()["default_branch"]
resp = requests.get(
f"https://raw.githubusercontent.com/{repo}/{branch}/dpb.yml"
)
if resp.status_code != 200:
        print(Fore.RED + f"Could not fetch information for {repo}." + Fore.RESET)
        return False
    print(Fore.GREEN + f"Fetched information for {repo}." + Fore.RESET)
    info = yaml.safe_load(resp.text)
    print(Fore.CYAN + f"Information for {repo}" + Fore.RESET)
    print(f"Name: {info['name']}")
    print(f"Author: {repo.split('/')[0]}")
info["branch"] = branch
return info
def download_repo(repo, info):
if os.path.exists("./savedata/delete-install-tmp"):
try:
subprocess.run(shlex.split("rm -rf ./.install-tmp"))
except PermissionError:
            print(Fore.RED + "Could not install: the extraction target is in use." + Fore.RESET)
sys.exit(1)
except FileNotFoundError:
os.unlink("./savedata/delete-install-tmp")
else:
os.unlink("./savedata/delete-install-tmp")
    print(Fore.LIGHTBLACK_EX + f"Downloading {info['name']}..." + Fore.RESET)
with requests.get(
f"https://github.com/{repo}/archive/refs/heads/{info['branch']}.zip",
stream=True,
) as r:
with open(".install-tmp.zip", "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
with zipfile.ZipFile(".install-tmp.zip") as existing_zip:
existing_zip.extractall(".install-tmp")
    print(Fore.LIGHTBLACK_EX + "Installing..." + Fore.RESET)
zip_dir = repo.split("/")[1] + "-" + info["branch"]
shutil.copytree(f"./.install-tmp/{zip_dir}", f"./packages/{repo.replace('/', '@')}")
    # .get() avoids a KeyError when dpb.yml has no "requirements" key
    if info.get("requirements") is not None and os.path.exists(
f"./.install-tmp/{zip_dir}/"
+ (info.get("requirements", None) or "dpb_requirements.txt")
):
with open(f"./.install-tmp/{zip_dir}/{info['requirements']}", "r") as f:
requirements = re.sub(r"#.*|\n{2,}", "", f.read())
with open("./savedata/package_requirements.txt", "a") as f:
f.write(f"#!==={repo}===!\n" + requirements.strip() + "\n")
subprocess.run(
shlex.split(command.pip + "install -r ./savedata/package_requirements.txt"),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
try:
subprocess.run(shlex.split("rm -rf ./.install-tmp .install-tmp.zip"))
except PermissionError:
with open("./savedata/delete-install-tmp", "w"):
pass
    print(Fore.GREEN + "Installation complete." + Fore.RESET)
def main():
if len(sys.argv) <= 2:
        repos = input("Enter the package(s) to install as owner/repo: ")
elif sys.argv[2].startswith("file:"):
try:
os.chdir("..")
with open(sys.argv[2][5:]) as f:
repos = re.sub(r"#.*|\n{2,}", "", f.read()).replace("\n", " ")
os.chdir(".main")
except FileNotFoundError:
            print(Fore.RED + "The file was not found." + Fore.RESET)
sys.exit(1)
else:
repos = " ".join(sys.argv[2:])
for repo in repos.split():
if os.path.exists(
f"./packages/{repo.replace('/', '@')}"
) and not repo.startswith("update:"):
with open(f"./packages/{repo.replace('/', '@')}/dpb.yml") as f:
info = yaml.safe_load(f)
print(
                f"{Fore.RED}Package {Style.BRIGHT}{info['name']} ({repo}){Style.NORMAL} is already installed.{Fore.RESET}\n"
                f"{Fore.CYAN}To update it, run {Style.BRIGHT}dpb install update:{repo}{Style.NORMAL}.{Fore.RESET}"
)
continue
if repo.startswith("update:"):
repo = repo.replace("update:", "")
subprocess.run(shlex.split(f"rm -rf ./packages/{repo.replace('/', '@')}"))
info = get_info(repo)
if info is False:
continue
download_repo(repo, info)
| 34.664234 | 116 | 0.579912 | 576 | 4,749 | 4.739583 | 0.28125 | 0.047619 | 0.028571 | 0.024908 | 0.18315 | 0.164469 | 0.106593 | 0.064469 | 0.064469 | 0 | 0 | 0.006683 | 0.243841 | 4,749 | 136 | 117 | 34.919118 | 0.752437 | 0.042114 | 0 | 0.159292 | 0 | 0.017699 | 0.299934 | 0.148646 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026549 | false | 0.00885 | 0.097345 | 0 | 0.150442 | 0.106195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da24c7b3d7ea12e45a63e2df57343289b27d952a | 1,565 | py | Python | rlkeras/utils/memory.py | will-hcau/rlkeras | 9cc36b238dae794197fcb8689a5a1ffa1c0a42c0 | [
"MIT"
] | null | null | null | rlkeras/utils/memory.py | will-hcau/rlkeras | 9cc36b238dae794197fcb8689a5a1ffa1c0a42c0 | [
"MIT"
] | null | null | null | rlkeras/utils/memory.py | will-hcau/rlkeras | 9cc36b238dae794197fcb8689a5a1ffa1c0a42c0 | [
"MIT"
] | null | null | null | from collections import deque
import numpy as np
import random
class RandomReplayBuffer(object):
"""Experience replay buffer that samples uniformly."""
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.buffer = deque(maxlen=buffer_size)
def __len__(self):
return len(self.buffer)
def append(self, state, action, reward, next_state, done):
""" Store transition into replay buffer "D"
Refering to the DQN paper (S, A, R, S t+1, terminate)
should be stored into a buffer with limited size.
When hitting the maximum size of buffer, the oldest
transition will be discard.
"""
self.buffer.append((state, action, reward, next_state, done))
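    # usage sketch (added):
    #   buf = RandomReplayBuffer(10000)
    #   buf.append(s, a, r, s_next, done)        # store one transition
    #   batch = buf.sample(32, num_of_step=3)    # 32 runs of 3 consecutive steps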
def sample(self, batch_size, num_of_step=1):
""" Sampling
            Uniformly sample a minibatch of batch_size entries; each entry
            holds num_of_step consecutive transitions (e.g. for n-step returns)
"""
sample_data = []
        # np.random.random_integers (inclusive bounds) is deprecated/removed in
        # modern NumPy; randint with an exclusive upper bound is equivalent here
        sample_indices = np.random.randint(0, len(self.buffer) - num_of_step + 1, size=batch_size)
for s in sample_indices:
n_state = []
n_action = []
n_reward = []
n_next_state = []
n_done = []
for n in range(num_of_step):
exp = self.buffer[s + n]
n_state.append(exp[0])
n_action.append(exp[1])
n_reward.append(exp[2])
n_next_state.append(exp[3])
n_done.append(exp[4])
sample_data.append((n_state, n_action, n_reward, n_next_state, n_done))
return sample_data | 28.454545 | 102 | 0.600639 | 207 | 1,565 | 4.318841 | 0.362319 | 0.0783 | 0.030201 | 0.044743 | 0.145414 | 0.145414 | 0.0783 | 0.0783 | 0.0783 | 0.0783 | 0 | 0.007353 | 0.304792 | 1,565 | 55 | 103 | 28.454545 | 0.814338 | 0.212141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.103448 | 0.034483 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da24d2c86b3410255d8a070349c1d9c6e890f449 | 6,335 | py | Python | PathPlanning/RRTStar/rrt_star.py | cmuehlbacher/PythonRobotics | c66fccc71c681387ff61b59554694b25399ca790 | [
"MIT"
] | 38 | 2019-12-08T12:26:04.000Z | 2022-03-06T11:29:08.000Z | PathPlanning/RRTStar/rrt_star.py | YoungGer/PythonRobotics | 9b8f2bd88a3d516d8deb473693661c1aea59fe68 | [
"MIT"
] | null | null | null | PathPlanning/RRTStar/rrt_star.py | YoungGer/PythonRobotics | 9b8f2bd88a3d516d8deb473693661c1aea59fe68 | [
"MIT"
] | 15 | 2020-02-12T15:57:28.000Z | 2021-08-28T07:39:18.000Z | """
Path planning Sample Code with RRT*
author: Atsushi Sakai(@Atsushi_twi)
"""
import copy
import math
import os
import sys
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.abspath(__file__)) +
"/../RRT/")
try:
from rrt import RRT
except ImportError:
raise
show_animation = True
class RRTStar(RRT):
"""
Class for RRT Star planning
"""
class Node:
def __init__(self, x, y):
self.x = x
self.y = y
self.cost = 0.0
self.parent = None
def __init__(self, start, goal, obstacle_list, rand_area,
expand_dis=0.5,
goal_sample_rate=20,
max_iter=500,
connect_circle_dist=50.0
):
super().__init__(start, goal, obstacle_list,
rand_area, expand_dis, goal_sample_rate, max_iter)
"""
Setting Parameter
start:Start Position [x,y]
goal:Goal Position [x,y]
obstacleList:obstacle Positions [[x,y,size],...]
randArea:Random Sampling Area [min,max]
"""
self.connect_circle_dist = connect_circle_dist
def planning(self, animation=True, search_until_maxiter=True):
"""
rrt star path planning
animation: flag for animation on or off
search_until_maxiter: search until max iteration for path improving or not
"""
self.node_list = [self.start]
for i in range(self.max_iter):
rnd = self.get_random_point()
nearest_ind = self.get_nearest_list_index(self.node_list, rnd)
new_node = self.steer(rnd, self.node_list[nearest_ind])
if self.check_collision(new_node, self.obstacleList):
near_inds = self.find_near_nodes(new_node)
new_node = self.choose_parent(new_node, near_inds)
if new_node:
self.node_list.append(new_node)
self.rewire(new_node, near_inds)
if animation and i % 5 == 0:
self.draw_graph(rnd)
if not search_until_maxiter and new_node: # check reaching the goal
d, _ = self.calc_distance_and_angle(new_node, self.end)
if d <= self.expand_dis:
return self.generate_final_course(len(self.node_list) - 1)
print("reached max iteration")
last_index = self.search_best_goal_node()
if last_index:
return self.generate_final_course(last_index)
return None
def choose_parent(self, new_node, near_inds):
if not near_inds:
return None
# search nearest cost in near_inds
costs = []
for i in near_inds:
d, theta = self.calc_distance_and_angle(self.node_list[i], new_node)
if self.check_collision_extend(self.node_list[i], theta, d):
costs.append(self.node_list[i].cost + d)
else:
costs.append(float("inf")) # the cost of collision node
min_cost = min(costs)
if min_cost == float("inf"):
print("There is no good path.(min_cost is inf)")
return None
new_node.cost = min_cost
min_ind = near_inds[costs.index(min_cost)]
new_node.parent = self.node_list[min_ind]
return new_node
def search_best_goal_node(self):
dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]
goal_inds = [dist_to_goal_list.index(i) for i in dist_to_goal_list if i <= self.expand_dis]
if not goal_inds:
return None
min_cost = min([self.node_list[i].cost for i in goal_inds])
for i in goal_inds:
if self.node_list[i].cost == min_cost:
return i
return None
def find_near_nodes(self, new_node):
nnode = len(self.node_list) + 1
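        # RRT* shrinking-ball rule: the rewiring radius decays roughly as
        # gamma * sqrt(log(n) / n), tightening as the tree grows
        # (Karaman & Frazzoli, 2011)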
r = self.connect_circle_dist * math.sqrt((math.log(nnode) / nnode))
dist_list = [(node.x - new_node.x) ** 2 +
(node.y - new_node.y) ** 2 for node in self.node_list]
near_inds = [dist_list.index(i) for i in dist_list if i <= r ** 2]
return near_inds
def rewire(self, new_node, near_inds):
for i in near_inds:
near_node = self.node_list[i]
d, theta = self.calc_distance_and_angle(near_node, new_node)
new_cost = new_node.cost + d
if near_node.cost > new_cost:
if self.check_collision_extend(near_node, theta, d):
near_node.parent = new_node
near_node.cost = new_cost
self.propagate_cost_to_leaves(new_node)
def propagate_cost_to_leaves(self, parent_node):
for node in self.node_list:
if node.parent == parent_node:
d, _ = self.calc_distance_and_angle(parent_node, node)
node.cost = parent_node.cost + d
self.propagate_cost_to_leaves(node)
def check_collision_extend(self, near_node, theta, d):
tmp_node = copy.deepcopy(near_node)
for i in range(int(d / self.expand_dis)):
tmp_node.x += self.expand_dis * math.cos(theta)
tmp_node.y += self.expand_dis * math.sin(theta)
if not self.check_collision(tmp_node, self.obstacleList):
return False
return True
def main():
print("Start " + __file__)
# ====Search Path with RRT====
obstacle_list = [
(5, 5, 1),
(3, 6, 2),
(3, 8, 2),
(3, 10, 2),
(7, 5, 2),
(9, 5, 2)
] # [x,y,size(radius)]
# Set Initial parameters
rrt = RRTStar(start=[0, 0],
goal=[10, 10],
rand_area=[-2, 15],
obstacle_list=obstacle_list)
path = rrt.planning(animation=show_animation, search_until_maxiter=False)
if path is None:
print("Cannot find path")
else:
print("found path!!")
# Draw final path
if show_animation:
rrt.draw_graph()
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.pause(0.01) # Need for Mac
plt.show()
if __name__ == '__main__':
main()
| 30.311005 | 99 | 0.572691 | 857 | 6,335 | 3.968495 | 0.199533 | 0.047339 | 0.056454 | 0.022934 | 0.206116 | 0.0788 | 0.051749 | 0.022346 | 0 | 0 | 0 | 0.011762 | 0.328966 | 6,335 | 208 | 100 | 30.456731 | 0.788285 | 0.06693 | 0 | 0.065693 | 0 | 0 | 0.020982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072993 | false | 0 | 0.051095 | 0 | 0.226277 | 0.036496 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da2652bdef2ea0254c65e10f3f8343f49c9b32ff | 745 | py | Python | test/commentProcessor_test.py | ponder-lab/GitHub-Issue-Mining | 5cff97bd2322894338c71f5ba7bd743e2e204a72 | [
"MIT"
] | 3 | 2021-04-18T04:07:35.000Z | 2021-12-25T06:35:32.000Z | test/commentProcessor_test.py | ponder-lab/GitHub-Issue-Classifier | 5cff97bd2322894338c71f5ba7bd743e2e204a72 | [
"MIT"
] | 4 | 2021-04-06T01:06:36.000Z | 2021-08-06T00:34:53.000Z | test/commentProcessor_test.py | ponder-lab/GitHub-Issue-Mining | 5cff97bd2322894338c71f5ba7bd743e2e204a72 | [
"MIT"
] | null | null | null | from utils.commentProcessor import processComment
TEST_CASES = [
{
"test": "Hello this is a pre processed string",
"expected_result": "hello pre processed string"
},
{
"test": "This string contains a screen name @y3pio tag",
"expected_result": "this string contains screen name SCREEN_NAME tag"
},
{
"test": "Testing this url string https://test.foo.com token",
"expected_result": "testing url string URL token"
},
{
"test": "> This line is a quote, should expect a single QUOTE token",
"expected_result": "QUOTE"
}
]
def test_comment_processor():
for TEST in TEST_CASES:
assert(processComment(TEST['test'])) == TEST['expected_result'] | 31.041667 | 77 | 0.628188 | 88 | 745 | 5.204545 | 0.431818 | 0.152838 | 0.078603 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001808 | 0.257718 | 745 | 24 | 78 | 31.041667 | 0.826401 | 0 | 0 | 0 | 0 | 0 | 0.524129 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.045455 | false | 0 | 0.045455 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da26ac275ef766fda1ea905a5a0277b1855e977b | 7,275 | py | Python | plus_reader/plus_highlighting.py | ShashkovS/plus_reader | e53a7af01ae480f7a63e33d01a0a99ea681e7fee | [
"MIT"
] | 3 | 2017-11-27T10:01:42.000Z | 2018-05-07T09:37:24.000Z | plus_reader/plus_highlighting.py | ShashkovS/plus_reader | e53a7af01ae480f7a63e33d01a0a99ea681e7fee | [
"MIT"
] | 5 | 2017-09-28T09:53:13.000Z | 2017-11-25T20:10:00.000Z | plus_reader/plus_highlighting.py | ShashkovS/plus_reader | e53a7af01ae480f7a63e33d01a0a99ea681e7fee | [
"MIT"
] | 2 | 2017-09-14T11:56:07.000Z | 2017-09-14T12:49:46.000Z | import logging
import sys
import traceback
import numpy as np
from PyQt5.QtGui import QPixmap, QPainter, QMouseEvent
from PyQt5.QtWidgets import QApplication, QWidget, QGridLayout, QMenu, QSlider, QLabel
from PyQt5.QtCore import Qt
sys._excepthook = sys.excepthook
def excepthook(excType, excValue, tracebackobj):
    traceback.print_exception(excType, excValue, tracebackobj)
sys.excepthook = excepthook
VIRTUAL_BORDER_WIDTH = 5
class Label(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent=parent)
self.page = self.parentWidget()
self.p = None
def setPixmap(self, p):
self.p = p
def paintEvent(self, event):
if self.p:
painter = QPainter(self)
painter.setRenderHint(QPainter.SmoothPixmapTransform)
painter.drawPixmap(self.rect(), self.p)
def contextMenuEvent(self, QContextMenuEvent):
cmenu = QMenu(self)
positionx = QContextMenuEvent.x()
positiony = QContextMenuEvent.y()
im_pos_x, im_pos_y = list(
map(int, self.page.image.window_coords_to_image_coords(positionx, positiony, self.width(), self.height())))
logging.info(str(positionx) + ' ' + str(positiony) + ' -> ' + str(im_pos_x) + ' ' + str(im_pos_y))
min_vline_dist = min(abs(im_pos_x - vl) for vl in self.page.image.coords_of_vert_lns) if self.page.image.coords_of_vert_lns\
else float('inf')
min_hline_dist = min(abs(im_pos_y - vl) for vl in self.page.image.coords_of_horiz_lns) if self.page.image.coords_of_horiz_lns\
else float('inf')
self._actions = []
self._actions_objects = []
if min_hline_dist <= VIRTUAL_BORDER_WIDTH * 3:
DelHorAction = cmenu.addAction('Delete Horizontal line here')
self._actions.append('DelHorAction')
self._actions_objects.append(DelHorAction)
else:
AddHorAction = cmenu.addAction('Add Horizontal line here')
self._actions.append('AddHorAction')
self._actions_objects.append(AddHorAction)
if min_vline_dist <= VIRTUAL_BORDER_WIDTH * 3:
DelVertAction = cmenu.addAction('Delete Vertical line here')
self._actions.append('DelVertAction')
self._actions_objects.append(DelVertAction)
else:
AddVertAction = cmenu.addAction('Add Vertical line here')
self._actions.append('AddVertAction')
self._actions_objects.append(AddVertAction)
action = cmenu.exec_(self.mapToGlobal(QContextMenuEvent.pos()))
if action:
selected_action_index = self._actions_objects.index(action)
selected_action = self._actions[selected_action_index]
logging.info(str(selected_action))
            # TODO: there are no working methods yet, so this piece is not
            # needed for now
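            # the labels collected in self._actions double as method names,
            # so the chosen menu entry is dispatched dynamically via getattr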
method = getattr(self, selected_action)
method((im_pos_x, im_pos_y))
def AddHorAction(self, coords):
        logging.info('ADD HORIZONTAL LINE')
        self.page.image.coords_of_horiz_lns.append(coords[1])  # TODO: insert with bisect
self.page.image.coords_of_horiz_lns.sort()
self.page.image.find_filled_cells()
self.page.image.initial_mark_filled_cells()
self.page.reload_image()
def DelHorAction(self, coords):
        logging.info('DELETE HORIZONTAL LINE')
min_dist = float('inf')
min_line = float('inf')
for i in self.page.image.coords_of_horiz_lns:
dist = abs(i - coords[1])
if dist < min_dist:
min_dist = dist
min_line = i
self.page.image.coords_of_horiz_lns.remove(min_line)
self.page.image.find_filled_cells()
self.page.image.initial_mark_filled_cells()
self.page.reload_image()
def DelVertAction(self, coords):
        logging.info('DELETE VERTICAL LINE')
min_dist = float('inf')
min_line = float('inf')
for i in self.page.image.coords_of_vert_lns:
dist = abs(i - coords[0])
if dist < min_dist:
min_dist = dist
min_line = i
self.page.image.coords_of_vert_lns.remove(min_line)
self.page.image.find_filled_cells()
self.page.image.initial_mark_filled_cells()
self.page.reload_image()
def AddVertAction(self, coords):
        logging.info('ADD VERTICAL LINE')
self.page.image.coords_of_vert_lns.append(coords[0])
self.page.image.coords_of_vert_lns.sort()
self.page.image.find_filled_cells()
self.page.image.initial_mark_filled_cells()
self.page.reload_image()
def mousePressEvent(self, a0: QMouseEvent):
button_pressed = a0.button()
cursor_pos_x = int(a0.x())
cursor_pos_y = int(a0.y())
logging.info(str(cursor_pos_x) + ' ' + str(cursor_pos_y))
if button_pressed == 1:
cell_pos = self.page.image.coord_to_cell(cursor_pos_x, cursor_pos_y, self.width(), self.height())
if cell_pos:
self.page.image.toggle_highlight_cell(*cell_pos)
self.page.reload_image()
class ScannedPageWidget(QWidget):
def __init__(self, image):
super(ScannedPageWidget, self).__init__()
self.image = image
self.initUi()
def reload_image(self, *, update=True):
self.qp.loadFromData(self.image.to_bin())
self.lb.setPixmap(self.qp)
if update:
self.lb.update()
def initUi(self):
self.lay = QGridLayout(self)
self.lay.setSpacing(10)
self.lay.setContentsMargins(0, 0, 0, 0)
self.slide = QSlider(Qt.Horizontal, self)
self.slide.setFocusPolicy(Qt.NoFocus)
self.slide.setTickInterval(5)
self.slide.setMaximum(255)
self.slide.setMinimum(0)
self.slide.setTickPosition(QSlider.TicksBelow)
self.slide.setTickInterval(5)
self.slide.setValue(self.image.black_threshold)
self.slide.valueChanged.connect(self.sliderchange)
self.slide.sliderReleased.connect(self.valuechange)
self.lb = Label(self)
self.qp = QPixmap()
self.reload_image(update=False)
self.slval = QLabel(str(self.slide.sliderPosition()))
self.lay.addWidget(QLabel('Change B/W Threshold'), 0, 0)
self.lay.addWidget(self.slide, 0, 2)
self.lay.addWidget(self.slval, 0, 9)
self.lay.addWidget(self.lb, 1, 0, 10, 10)
self.setLayout(self.lay)
def sliderchange(self):
self.slval.setText(str(self.slide.sliderPosition()))
def valuechange(self):
self.image.black_threshold = self.slide.sliderPosition()
self.image.bitmap_lines_filled_cells_and_marking()
self.reload_image()
def show(image):
app = QApplication(sys.argv)
_, _, screen_w, screen_h = app.primaryScreen().availableGeometry().getRect()
img_scale = max(image.W / screen_w, image.H / screen_h)
w_height, w_width = int(image.H / img_scale), int(image.W / img_scale),
w = ScannedPageWidget(image)
w.resize(w_width, w_height)
w.show()
app.exec_()
def feature_qt(image_cls):
show(image_cls)
return image_cls.filled_cells
if __name__ == '__main__':
pass
| 36.742424 | 134 | 0.650034 | 908 | 7,275 | 4.976872 | 0.227974 | 0.051339 | 0.066165 | 0.050454 | 0.315114 | 0.258022 | 0.193406 | 0.154016 | 0.147378 | 0.1341 | 0 | 0.007063 | 0.240962 | 7,275 | 197 | 135 | 36.928934 | 0.8113 | 0.011959 | 0 | 0.177914 | 0 | 0 | 0.038274 | 0 | 0 | 0 | 0 | 0.005076 | 0 | 1 | 0.104294 | false | 0.006135 | 0.042945 | 0 | 0.165644 | 0.006135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da27bfe1b6414a6b5de205fb3cd12650ba9370f4 | 22,206 | py | Python | dlrnapi_client/shell.py | softwarefactory-project/dlrnapi_client | ad21fe759597968c0f691b37dc681232dcd8f2aa | [
"Apache-2.0"
] | 1 | 2017-10-02T19:36:52.000Z | 2017-10-02T19:36:52.000Z | dlrnapi_client/shell.py | softwarefactory-project/dlrnapi_client | ad21fe759597968c0f691b37dc681232dcd8f2aa | [
"Apache-2.0"
] | 4 | 2018-07-16T20:14:58.000Z | 2022-02-04T07:03:03.000Z | dlrnapi_client/shell.py | softwarefactory-project/dlrnapi_client | ad21fe759597968c0f691b37dc681232dcd8f2aa | [
"Apache-2.0"
] | 1 | 2019-12-09T14:40:47.000Z | 2019-12-09T14:40:47.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import json
import os
import sys
import dlrnapi_client
from dlrnapi_client.rest import ApiException
# Helper class to allow us to convert API response objects into JSON for output
class ResponseEncoder(json.JSONEncoder):
def default(self, obj):
# All the API response objects have a "swagger_types" attribute
if hasattr(obj, 'swagger_types'):
return obj.to_dict()
# Use the default encoder for anything else
return json.JSONEncoder.default(self, obj)
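# e.g. json.dumps(api_response, cls=ResponseEncoder, indent=2), as in main()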
def get_last_tested_repo(api_instance, options):
params = dlrnapi_client.Params() # Params | The JSON params to post
params.max_age = options.max_age
if options.success:
params.success = str(options.success)
if options.component:
params.component = str(options.component)
params.job_id = options.job_id
params.sequential_mode = str(options.sequential)
params.previous_job_id = options.previous_job_id
try:
api_response = api_instance.api_last_tested_repo_get(params)
return api_response
except ApiException as e:
raise e
def post_last_tested_repo(api_instance, options):
params = dlrnapi_client.Params1() # Params1 | The JSON params to post
params.max_age = options.max_age
params.reporting_job_id = options.reporting_job_id
if options.success:
params.success = str(options.success)
if options.component:
params.component = str(options.component)
params.job_id = options.job_id
params.sequential_mode = str(options.sequential)
params.previous_job_id = options.previous_job_id
try:
api_response = api_instance.api_last_tested_repo_post(params)
return api_response
except ApiException as e:
raise e
def repo_status(api_instance, options):
params = dlrnapi_client.Params2() # Params2 | The JSON params to post
params.commit_hash = options.commit_hash
params.distro_hash = options.distro_hash
if options.success:
params.success = str(options.success)
if options.extended_hash and options.extended_hash != 'None':
params.extended_hash = options.extended_hash
try:
api_response = api_instance.api_repo_status_get(params)
return api_response
except ApiException as e:
raise e
def agg_status(api_instance, options):
params = dlrnapi_client.AggQuery() # AggQuery | The JSON params to post
params.aggregate_hash = options.agg_hash
if options.success:
params.success = str(options.success)
try:
api_response = api_instance.api_agg_status_get(params)
return api_response
except ApiException as e:
raise e
def repo_promote(api_instance, options):
params = dlrnapi_client.Promotion() # Promotion | The JSON params to post
params.commit_hash = options.commit_hash
params.distro_hash = options.distro_hash
if options.extended_hash != 'None':
params.extended_hash = options.extended_hash
else:
params.extended_hash = None
params.promote_name = options.promote_name
try:
api_response = api_instance.api_promote_post(params)
return api_response
except ApiException as e:
raise e
def repo_promote_batch(api_instance, options):
params = list()
hash_pairs = options.hash_pairs.split(',')
for pair in hash_pairs:
pair_list = pair.split('_')
commit_hash = pair_list[0]
distro_hash = pair_list[1]
if len(pair_list) > 2:
extended_hash = '_'.join(pair_list[2:])
else:
extended_hash = None
param = dlrnapi_client.Promotion()
param.commit_hash = commit_hash
param.distro_hash = distro_hash
if extended_hash == 'None':
param.extended_hash = None
else:
param.extended_hash = extended_hash
param.promote_name = options.promote_name
params.append(param)
try:
api_response = api_instance.api_promote_batch_post(params)
return api_response
except ApiException as e:
raise e
def get_promotions(api_instance, options):
params = dlrnapi_client.PromotionQuery() # PromotionQuery
if options.commit_hash:
params.commit_hash = options.commit_hash
if options.distro_hash:
params.distro_hash = options.distro_hash
if options.extended_hash and options.extended_hash != 'None':
params.extended_hash = options.extended_hash
if options.agg_hash:
params.aggregate_hash = options.agg_hash
if options.promote_name:
params.promote_name = options.promote_name
if options.offset:
params.offset = options.offset
if options.limit:
params.limit = options.limit
if options.component:
params.component = options.component
try:
api_response = api_instance.api_promotions_get(params)
return api_response
except ApiException as e:
raise e
def report_result(api_instance, options):
params = dlrnapi_client.Params3() # Params3 | The JSON params to post
params.job_id = options.job_id
params.commit_hash = options.commit_hash
params.distro_hash = options.distro_hash
params.aggregate_hash = options.agg_hash
params.success = str(options.success)
params.url = options.info_url
params.timestamp = options.timestamp
params.notes = options.notes
if options.extended_hash and options.extended_hash != 'None':
params.extended_hash = options.extended_hash
if (params.commit_hash and not params.distro_hash) or\
(not params.commit_hash and params.distro_hash):
raise Exception('Both --commit-hash and --distro-hash must be '
'specified together')
if params.aggregate_hash and (params.commit_hash or params.distro_hash):
raise Exception('--agg-hash is mutually exclusive with --commit-hash '
'and --distro-hash')
if (not params.aggregate_hash and not params.commit_hash and
not params.distro_hash):
raise Exception('Must specify either --agg-hash or --commit-hash and '
'--distro-hash')
try:
api_response = api_instance.api_report_result_post(params)
return api_response
except ApiException as e:
raise e
def import_commit(api_instance, options):
params = dlrnapi_client.ModelImport() # ModelImport | JSON params to post
params.repo_url = options.repo_url
try:
api_response = api_instance.api_remote_import_post(params)
return api_response
except ApiException as e:
raise e
def get_metrics_builds(api_instance, options):
    # MetricsRequest | JSON params to post
params = dlrnapi_client.MetricsRequest()
params.start_date = options.start_date
params.end_date = options.end_date
if options.package_name:
params.package_name = options.package_name
try:
api_response = api_instance.api_build_metrics_get(params)
return api_response
except ApiException as e:
raise e
command_funcs = {
'repo-get': get_last_tested_repo,
'repo-use': post_last_tested_repo,
'repo-status': repo_status,
'agg-status': agg_status,
'report-result': report_result,
'repo-promote': repo_promote,
'repo-promote-batch': repo_promote_batch,
'commit-import': import_commit,
'promotion-get': get_promotions,
'build-metrics': get_metrics_builds,
}
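# Example invocation (added for illustration; the URL is a placeholder for a
# reachable DLRN instance, the flags come from the argparse definitions below):
#   dlrnapi --url https://dlrn.example.com/api repo-get --max-age 24 \
#       --success true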
def main():
parser = argparse.ArgumentParser(prog='dlrnapi')
parser.add_argument('--url',
required=True,
help='URL to use')
parser.add_argument('--username', '-u',
help='username for authentication, defaults to '
'"DLRNAPI_USERNAME" environment variable if set',
default=os.getenv('DLRNAPI_USERNAME', None)
)
parser.add_argument('--password', '-p',
help='password for authentication, defaults to '
'"DLRNAPI_PASSWORD" environment variable if set',
default=os.getenv('DLRNAPI_PASSWORD', None)
)
subparsers = parser.add_subparsers(dest='command',
title='subcommands',
description='available subcommands')
# Subcommand repo-get
parser_last = subparsers.add_parser('repo-get',
help='Get last tested repo')
parser_last.add_argument('--max-age', type=int, default=0,
help='max_age')
parser_last.add_argument('--success', type=str, default=None,
help='Find repos with a successful/unsuccessful '
'vote, if true or false are specified')
parser_last.add_argument('--job-id', type=str, default=None,
help='Name of the CI that sent the vote. If not '
'set, no filter will be set on CI')
parser_last.add_argument('--sequential-mode', dest='sequential',
action='store_true',
help='Use the sequential mode algorithm. In this '
'case, return the last tested repo within '
'that timeframe for the CI job described by '
'--previous-job-id')
parser_last.set_defaults(sequential=False)
parser_last.add_argument('--previous-job-id', type=str, default=None,
help='If --sequential-mode is set, look for jobs'
' tested by this CI')
parser_last.add_argument('--component', type=str, default=None,
required=False,
help='Only search for repos related to '
'this component.')
# Subcommand repo-use
parser_use_last = subparsers.add_parser('repo-use',
help='Get the last tested repo '
'since a specific time '
'(optionally for a CI job), '
'and add an "in progress" '
'entry in the CI job table '
'for this.')
parser_use_last.add_argument('--max-age', type=int, default=0,
help='max_age')
parser_use_last.add_argument('--reporting-job-id', type=str, required=True,
help=' Name of the CI that will add the "in '
'progress" entry in the CI job table.')
parser_use_last.add_argument('--success', type=str, default=None,
help='Find repos with a successful/'
'unsuccessful vote, if true or false '
'are specified')
parser_use_last.add_argument('--job-id', type=str, default=None,
help='Name of the CI that sent the vote. If '
'not set, no filter will be set on CI')
parser_use_last.add_argument('--sequential-mode', dest='sequential',
action='store_true',
help='Use the sequential mode algorithm. In '
'this case, return the last tested repo '
'within that timeframe for the CI job '
'described by --previous-job-id')
parser_use_last.set_defaults(sequential=False)
parser_use_last.add_argument('--previous-job-id', type=str, default=None,
help='If --sequential-mode is true, look for '
'jobs tested by this CI')
parser_use_last.add_argument('--component', type=str, default=None,
required=False,
help='Only search for repos related to '
'this component.')
# Subcommand repo-status
parser_st = subparsers.add_parser('repo-status',
help='Get all the CI reports for a '
'specific repository.')
parser_st.add_argument('--commit-hash', type=str, required=True,
help='commit_hash of the repo to fetch '
'information for.')
parser_st.add_argument('--distro-hash', type=str, required=True,
help='distro_hash of the repo to fetch '
'information for.')
parser_st.add_argument('--extended-hash', type=str, required=False,
help='extended_hash of the repo to fetch '
'information for.')
parser_st.add_argument('--success', type=str, default=None,
help='If set to a value (true/false), only return '
'the CI reports with the specified vote. If '
'not set, return all CI reports.')
# Subcommand agg-status
parser_st = subparsers.add_parser('agg-status',
help='Get all the CI reports for a '
'specific aggregated repository.')
parser_st.add_argument('--agg-hash', type=str, required=True,
help='hash of the aggregated repo to fetch '
'information for.')
parser_st.add_argument('--success', type=str, default=None,
help='If set to a value (true/false), only return '
'the CI reports with the specified vote. If '
'not set, return all CI reports.')
# Subcommand report-result
parser_rep = subparsers.add_parser('report-result',
help='Report the result of a CI job')
parser_rep.add_argument('--job-id', type=str, required=True,
help='Name of the CI sending the vote')
parser_rep.add_argument('--commit-hash', type=str, required=False,
help='commit_hash of tested repo')
parser_rep.add_argument('--distro-hash', type=str, required=False,
help='distro_hash of tested repo')
parser_rep.add_argument('--extended-hash', type=str, required=False,
help='extended_hash of tested repo')
parser_rep.add_argument('--agg-hash', type=str, required=False,
help='hash of the tested aggregated repo. Note '
'that either --commit-hash and --distro-hash or'
' --agg-hash must be specified.')
parser_rep.add_argument('--info-url', type=str, required=True,
help='URL where to find additional information '
'from the CI execution')
parser_rep.add_argument('--timestamp', type=str, required=True,
help='Timestamp (in seconds since the epoch)')
parser_rep.add_argument('--success', type=str, required=True,
help='Was the CI execution successful? Set to '
'true or false.')
parser_rep.add_argument('--notes', type=str,
help='Additional notes')
# Subcommand promote
parser_prom = subparsers.add_parser('repo-promote',
help='Promote a repository')
parser_prom.add_argument('--commit-hash', type=str, required=True,
help='commit_hash of the repo to be promoted')
parser_prom.add_argument('--distro-hash', type=str, required=True,
help='distro_hash of the repo to be promoted')
parser_prom.add_argument('--extended-hash', type=str, required=False,
help='extended_hash of the repo to be promoted')
parser_prom.add_argument('--promote-name', type=str, required=True,
help='Name to be used for the promotion')
# Subcommand repo-promote-batch
parser_prom = subparsers.add_parser('repo-promote-batch',
help='Promote multiple repositories '
'at the same time, as an atomic '
'operation.')
parser_prom.add_argument('--hash-pairs', type=str, required=True,
help='commit_hash+distro_hash or '
'commit_hash+distro_hash+extended_hash of '
'the repos to be promoted, specified as a '
'comma-separated list of commit_distro or '
'commit_distro_extended hash groups. If no '
'extended hash is included, the latest '
'commit matching the commit and distro '
'hashes will be promoted.')
parser_prom.add_argument('--promote-name', type=str, required=True,
help='Name to be used for the promotion')
# Subcommand promotion-get
parser_promget = subparsers.add_parser('promotion-get',
help='Get information about '
'promotions')
parser_promget.add_argument('--commit-hash', type=str, required=False,
help='commit_hash of the repo to search '
'promotions for. Requires --distro-hash '
'if specified.')
parser_promget.add_argument('--distro-hash', type=str, required=False,
help='distro_hash of the repo to search '
'promotions for. Requires --commit-hash '
'if specified.')
parser_promget.add_argument('--extended-hash', type=str, required=False,
help='extended_hash of the repo to search '
'promotions for. Requires --commit-hash '
'and --distro-hash if specified.')
parser_promget.add_argument('--agg-hash', type=str, required=False,
help='hash of the tested aggregated repo.')
parser_promget.add_argument('--promote-name', type=str, required=False,
help='Filter results for this promotion name.')
parser_promget.add_argument('--offset', type=int, required=False,
help='Show results after this offset. Each '
'query will only return 100 entries by '
'default.')
parser_promget.add_argument('--limit', type=int, required=False,
help='Limit the results to the first limit '
'items')
parser_promget.add_argument('--component', type=str, required=False,
help='Only search for promotions related to '
'this component.')
# Subcommand commit-import
parser_imp = subparsers.add_parser('commit-import',
help='Import a commit built by another'
' instance')
parser_imp.add_argument('--repo-url', type=str, required=True,
help='Base repository URL for the remote repo '
'to import')
# Subcommand build-metrics
parser_metrics = subparsers.add_parser(
'build-metrics',
help='Fetch build metrics in a time period')
parser_metrics.add_argument(
'--start-date', type=str, required=True,
help='Start date for the query, in YYYY-MM-DD format')
parser_metrics.add_argument(
'--end-date', type=str, required=True,
help='End date for the query, in YYYY-MM-DD format')
parser_metrics.add_argument(
'--package-name', type=str, required=False,
help='If specified, only fetch metrics for this package name')
options, args = parser.parse_known_args(sys.argv[1:])
# create an instance of the API class
api_client = dlrnapi_client.ApiClient(host=options.url)
dlrnapi_client.configuration.username = options.username
dlrnapi_client.configuration.password = options.password
api_instance = dlrnapi_client.DefaultApi(api_client=api_client)
try:
api_response = command_funcs[options.command](api_instance, options)
print(json.dumps(api_response, cls=ResponseEncoder, indent=2,
sort_keys=True))
except ApiException as e:
# Handle 404 exceptions gracefully
if e.status == 404:
print("ERROR: Got error 404, probably endpoint %s is not available"
% options.url)
return 1
else:
raise
except Exception as e:
raise e
| 45.880165 | 79 | 0.577141 | 2,483 | 22,206 | 4.996778 | 0.125654 | 0.043443 | 0.035061 | 0.024502 | 0.610623 | 0.556541 | 0.497139 | 0.455307 | 0.416136 | 0.400983 | 0 | 0.002111 | 0.338737 | 22,206 | 483 | 80 | 45.975155 | 0.842822 | 0.058453 | 0 | 0.313433 | 0 | 0 | 0.231219 | 0.005031 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0.012438 | 0.034826 | 0 | 0.099502 | 0.007463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da27fd506b778e15d02b14a496203d5d175a39c3 | 1,051 | py | Python | python-mundo3/ex094.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | 1 | 2021-08-15T18:18:43.000Z | 2021-08-15T18:18:43.000Z | python-mundo3/ex094.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | null | null | null | python-mundo3/ex094.py | abm-astro/estudos-python | c0dcd71489e528d445efa25d4986bf2fd08f8fe6 | [
"MIT"
] | null | null | null | cadastro = dict()
pessoas = list()
soma = 0
while True:
cadastro.clear()
    cadastro['nome'] = str(input('Name: ')).capitalize()
    while True:
        cadastro['sexo'] = str(input('Sex [M/F]: ')).upper()
        if cadastro['sexo'] in 'MF':
            break
        print('ERROR! Enter only M or F!')
    cadastro['idade'] = int(input('Age: '))
soma += cadastro['idade']
pessoas.append(cadastro.copy())
while True:
        res = str(input('Continue? [Y/N] ')).upper()
        if res in ('Y', 'N'):
            break
        print('ERROR! Enter only Y or N!')
    if res == 'N':
break
print(20*'-=')
media = soma / len(pessoas)
print(f'- In total we have {len(pessoas)} registered people.')
print(f'- The average age is {media:5.2f} years.')
print('- The registered women were:', end=' ')
for m in pessoas:
if m['sexo'] in 'F':
print(f"{m['nome']}", end=" ; ")
print()
print('- People above the average age:')
for p in pessoas:
    if p['idade'] > media:
        print(' ')
        for k, v in p.items():
            print(f'{k} = {v}', end=' ')
print('\n>> FINISHED <<')
| 27.657895 | 61 | 0.586108 | 157 | 1,051 | 3.923567 | 0.407643 | 0.038961 | 0.055195 | 0.064935 | 0.084416 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005988 | 0.205519 | 1,051 | 37 | 62 | 28.405405 | 0.731737 | 0 | 0 | 0.162162 | 0 | 0 | 0.32921 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.324324 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da296592fb077c1bc7a27382f8604a31a8ab30e3 | 520 | py | Python | task2C.py | jfs60/Group-147-PartIA-Flood-Warning-System | 3fb52e3e028ec8e0b70ccb1cfc61bcf76b42f2c1 | [
"MIT"
] | null | null | null | task2C.py | jfs60/Group-147-PartIA-Flood-Warning-System | 3fb52e3e028ec8e0b70ccb1cfc61bcf76b42f2c1 | [
"MIT"
] | null | null | null | task2C.py | jfs60/Group-147-PartIA-Flood-Warning-System | 3fb52e3e028ec8e0b70ccb1cfc61bcf76b42f2c1 | [
"MIT"
] | 1 | 2022-02-06T06:45:15.000Z | 2022-02-06T06:45:15.000Z | from floodsystem.station import MonitoringStation
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level, stations_level_over_threshold
def run():
stations = build_station_list()
update_water_levels(stations)
highest = stations_highest_rel_level(stations, 9)  # avoid shadowing the built-in `list`
return highest
stations_Task_2C = run()
print(stations_Task_2C)
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
run() | 30.588235 | 87 | 0.775 | 67 | 520 | 5.58209 | 0.492537 | 0.120321 | 0.085562 | 0.117647 | 0.342246 | 0.176471 | 0 | 0 | 0 | 0 | 0 | 0.008989 | 0.144231 | 520 | 17 | 88 | 30.588235 | 0.831461 | 0 | 0 | 0 | 0 | 0 | 0.111324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da2e43a4657d302992b18fd2e6651b3dd93dac4f | 6,112 | py | Python | docs/examples/viz_emwave_animation.py | iamansoni/fury | 2e7971a176c2540e10a9a6da861097583d08cb4a | [
"BSD-3-Clause"
] | 149 | 2018-09-20T18:36:16.000Z | 2022-03-29T05:16:25.000Z | docs/examples/viz_emwave_animation.py | iamansoni/fury | 2e7971a176c2540e10a9a6da861097583d08cb4a | [
"BSD-3-Clause"
] | 523 | 2018-09-20T16:57:16.000Z | 2022-03-31T18:52:41.000Z | docs/examples/viz_emwave_animation.py | iamansoni/fury | 2e7971a176c2540e10a9a6da861097583d08cb4a | [
"BSD-3-Clause"
] | 150 | 2018-10-10T07:21:27.000Z | 2022-03-29T08:33:17.000Z | """
===============================================
Electromagnetic Wave Propagation Animation
===============================================
A linearly polarized sinusoidal electromagnetic wave, propagating in the
direction +x through a homogeneous, isotropic, dissipationless medium,
such as vacuum. The electric field (blue arrows) oscillates in the
±z-direction, and the orthogonal magnetic field (red arrows) oscillates in
phase with the electric field, but in the ±y-direction.
Function of the sinusoid used in the animation = sin(k*x - w*t + d)
Where, k:wavenumber, x:abscissa, w:angular frequency, t:time, d:phase angle
Importing necessary modules
"""
from fury import window, actor, utils, ui
import numpy as np
import itertools
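###############################################################################
# Added note (illustrative, not part of the original example): with the
# defaults below, wavelength = 2 gives wavenumber k = 2*pi/2 = pi, so
# sin(k*x - w*t + d) completes one full cycle every 2 units along x, and each
# timer tick advances the phase by angular_frq * incre_time.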
###############################################################################
# function that updates and returns the coordinates of the waves which are
# changing with time
def update_coordinates(wavenumber, ang_frq, time, phase_angle):
x = np.linspace(-3, 3, npoints)
y = np.sin(wavenumber*x - ang_frq*time + phase_angle)
z = np.array([0 for i in range(npoints)])
return x, y, z
###############################################################################
# Variable(s) and their description-
# npoints: For high quality rendering, keep the number of npoints high
# but kindly note that higher values for npoints will slow down the
# rendering process (default = 800)
# wavelength : wavelength of the wave (default = 2)
# wavenumber : 2*pi/wavelength
# time: time (default time i.e. time at beginning of the animation = 0)
# incre_time: value by which time is incremented for each call of
# timer_callback (default = 0.1)
# angular_frq: angular frequency (default = 0.1)
# phase_angle: phase angle (default = 0.002)
npoints = 800
wavelength = 2
wavenumber = 2*np.pi/wavelength
time = 0
incre_time = 0.1
angular_frq = 0.1
phase_angle = 0.002
###############################################################################
# Creating a scene object and configuring the camera's position
scene = window.Scene()
scene.set_camera(position=(-6, 5, -10), focal_point=(0.0, 0.0, 0.0),
view_up=(0.0, 0.0, 0.0))
showm = window.ShowManager(scene,
size=(800, 600), reset_camera=True,
order_transparent=True)
showm.initialize()
###############################################################################
# Creating a yellow colored arrow to show the direction of propagation of
# electromagnetic wave
centers = np.array([[3, 0, 0]])
directions = np.array([[-1, 0, 0]])
heights = np.array([6.4])
arrow_actor = actor.arrow(centers, directions, window.colors.yellow, heights,
resolution=20, tip_length=0.06, tip_radius=0.012,
shaft_radius=0.005)
scene.add(arrow_actor)
###############################################################################
# Creating point actor that renders the magnetic field
x = np.linspace(-3, 3, npoints)
y = np.sin(wavenumber*x - angular_frq*time + phase_angle)
z = np.array([0 for i in range(npoints)])
pts = np.array([(a, b, c) for (a, b, c) in zip(x, y, z)])
pts = [pts]
colors = window.colors.red
wave_actor1 = actor.line(pts, colors, linewidth=3)
scene.add(wave_actor1)
vertices = utils.vertices_from_actor(wave_actor1)
vcolors = utils.colors_from_actor(wave_actor1, 'colors')
no_vertices_per_point = len(vertices)/npoints
initial_vertices = vertices.copy() - \
np.repeat(pts, no_vertices_per_point, axis=0)
###############################################################################
# Creating point actor that renders the electric field
xx = np.linspace(-3, 3, npoints)
yy = np.array([0 for i in range(npoints)])
zz = np.sin(wavenumber*xx - angular_frq*time + phase_angle)
pts2 = np.array([(a, b, c) for (a, b, c) in zip(xx, yy, zz)])
pts2 = [pts2]
colors2 = window.colors.blue
wave_actor2 = actor.line(pts2, colors2, linewidth=3)
scene.add(wave_actor2)
vertices2 = utils.vertices_from_actor(wave_actor2)
vcolors2 = utils.colors_from_actor(wave_actor2, 'colors')
no_vertices_per_point2 = len(vertices2)/npoints
initial_vertices2 = vertices2.copy() - \
np.repeat(pts2, no_vertices_per_point2, axis=0)
###############################################################################
# Initializing text box to display the title of the animation
tb = ui.TextBlock2D(bold=True, position=(160, 90))
tb.message = "Electromagnetic Wave"
scene.add(tb)
###############################################################################
# end is used to decide when to end the animation
end = 300
###############################################################################
# Initializing counter
counter = itertools.count()
###############################################################################
# Coordinates to be plotted are changed every time timer_callback is called by
# using the update_coordinates function. The wave is rendered here.
def timer_callback(_obj, _event):
global pts, pts2, time, incre_time, angular_frq, phase_angle, wavenumber
time += incre_time
cnt = next(counter)
x, y, z = update_coordinates(wavenumber, angular_frq, time, phase_angle)
pts = np.array([(a, b, c) for (a, b, c) in zip(x, y, z)])
vertices[:] = initial_vertices + \
np.repeat(pts, no_vertices_per_point, axis=0)
utils.update_actor(wave_actor1)
xx, zz, yy = update_coordinates(wavenumber, angular_frq, time, phase_angle)
pts2 = np.array([(a, b, c) for (a, b, c) in zip(xx, yy, zz)])
vertices2[:] = initial_vertices2 + \
np.repeat(pts2, no_vertices_per_point2, axis=0)
utils.update_actor(wave_actor2)
showm.render()
# to end the animation
if cnt == end:
showm.exit()
###############################################################################
# Run every 25 milliseconds
showm.add_timer_callback(True, 25, timer_callback)
interactive = False
if interactive:
showm.start()
window.record(showm.scene, size=(800, 600), out_path="viz_emwave.png")
| 34.925714 | 79 | 0.59375 | 786 | 6,112 | 4.507634 | 0.301527 | 0.006774 | 0.006774 | 0.006774 | 0.248095 | 0.186847 | 0.154107 | 0.154107 | 0.117979 | 0.078465 | 0 | 0.026153 | 0.155432 | 6,112 | 174 | 80 | 35.126437 | 0.659822 | 0.310046 | 0 | 0.146341 | 0 | 0 | 0.013872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.036585 | 0 | 0.073171 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da2f601feb319bbef64c8038bd332c6cea544cb4 | 3,243 | py | Python | report_templates.py | averlarque/l1-zabbix-reporter | 5d8ea4d432b7b518f954d806a86fe5bcafca3f9d | [
"MIT"
] | 1 | 2017-03-27T02:59:09.000Z | 2017-03-27T02:59:09.000Z | report_templates.py | averlarque/l1-zabbix-reporter | 5d8ea4d432b7b518f954d806a86fe5bcafca3f9d | [
"MIT"
] | 1 | 2018-01-16T04:56:16.000Z | 2018-01-16T04:56:16.000Z | report_templates.py | averlarque/l1-zabbix-reporter | 5d8ea4d432b7b518f954d806a86fe5bcafca3f9d | [
"MIT"
] | 2 | 2016-11-24T07:06:51.000Z | 2019-11-16T15:12:54.000Z | from report_generator import *
class PeriodReport:
"""
Parent class for time-period reports
"""
def __init__(self, since, till, report_format='count', report_type='txt'):
# Define time limits
self.since = since
self.till = till
self.report_type = report_type
self.report_format = report_format
# Generate a title for a report
self.report_name = self.get_report_name(self.report_format + '_report_all')
# Selected according to report_format; child classes redefine choose_report_class()
self.report_class = self.choose_report_class()
# Generating the data for the report
self.report_data = self.report_class.generate_report_data()
# To actually produce the report, call self.generate_report()
def get_report_name(self, slug):
time_format = '%H.%M_%d%m%y'
since = self.since.strftime(time_format)
till = self.till.strftime(time_format)
time_alias = since + '-' + till
report_name = slug + '(' + time_alias + ')'
return report_name
def choose_report_class(self):
if self.report_format == 'count':
report_class = CountPeriodReport(self.since, self.till)
elif self.report_format == 'event':
report_class = EventPeriodReport(self.since, self.till)
else:
report_class = CountPeriodReport(self.since, self.till)
return report_class
def generate_report(self):
"""
Main reporting function
:return: None
"""
if self.report_type == 'txt':
self.report_class.create_txt_report(self.report_data, self.report_name)
elif self.report_type == 'html':
self.report_class.create_html_report(self.report_data, self.report_name)
else:
self.report_class.create_txt_report(self.report_data, self.report_name)
class ProjectPeriodReport(PeriodReport):
def __init__(self, since, till, project, report_format='count', report_type='txt'):
self.project = project
super().__init__(since, till, report_format=report_format, report_type=report_type)
# Redefines the report name according to this subclass's alias
self.report_name = self.get_report_name(self.report_format + '_' + self.project + '_project_report')
def choose_report_class(self):
if self.report_format == 'count':
report_class = ProjectCountPeriodReport(self.since, self.till, self.project)
elif self.report_format == 'event':
report_class = ProjectEventPeriodReport(self.since, self.till, self.project)
else:
report_class = ProjectCountPeriodReport(self.since, self.till, self.project)
return report_class
class ItemPeriodReport(PeriodReport):
def __init__(self, since, till, item, report_format='count', report_type='txt'):
self.item = item
super().__init__(since, till, report_format=report_format, report_type=report_type)
# Redefines the report name according to this subclass's alias
self.report_name = self.get_report_name(self.report_format + '_' + self.item + '_item_report')
def choose_report_class(self):
if self.report_format == 'count':
report_class = ItemCountPeriodReport(self.since, self.till, self.item)
elif self.report_format == 'event':
report_class = ItemEventPeriodReport(self.since, self.till, self.item)
self.report_name = 'event_' + self.report_name
else:
report_class = ItemCountPeriodReport(self.since, self.till, self.item)
return report_class
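# Example usage (illustrative; the concrete *CountPeriodReport and
# *EventPeriodReport classes come from report_generator, imported above):
#   from datetime import datetime, timedelta
#   till = datetime.now()
#   since = till - timedelta(days=1)
#   report = ProjectPeriodReport(since, till, 'billing',
#                                report_format='count', report_type='html')
#   report.generate_report()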
| 38.152941 | 102 | 0.755782 | 442 | 3,243 | 5.266968 | 0.171946 | 0.128866 | 0.055842 | 0.065722 | 0.592354 | 0.583763 | 0.507732 | 0.383591 | 0.383591 | 0.289948 | 0 | 0 | 0.136602 | 3,243 | 84 | 103 | 38.607143 | 0.831429 | 0.127351 | 0 | 0.440678 | 0 | 0 | 0.043556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.016949 | 0 | 0.271186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da378e0eafeab07a79465181d9fc51e82389ac0a | 6,674 | py | Python | officevideo/officevideo.py | introp-software/xblock-officevideo | 6e475df782a4b0a2d2d9f7e2e5b9bae441b56024 | [
"MIT"
] | 4 | 2020-02-09T09:39:06.000Z | 2021-12-30T09:50:57.000Z | officevideo/officevideo.py | acidburn0zzz/xblock-officevideo | 6e475df782a4b0a2d2d9f7e2e5b9bae441b56024 | [
"MIT"
] | null | null | null | officevideo/officevideo.py | acidburn0zzz/xblock-officevideo | 6e475df782a4b0a2d2d9f7e2e5b9bae441b56024 | [
"MIT"
] | 8 | 2019-11-02T21:34:20.000Z | 2021-12-30T09:50:59.000Z | """ Copyright (c) Microsoft Corporation. All Rights Reserved. """
""" Licensed under the MIT license. See LICENSE file on the project webpage for details. """
import textwrap
import pkg_resources
import urllib2
import mimetypes
import urlparse, requests, json
import xml.etree.ElementTree as ET
from xblock.core import XBlock
from xblock.fragment import Fragment
from xblock.fields import Scope, String
from django.conf import settings
from django.contrib.auth.models import User
from social.apps.django_app.utils import load_strategy
import logging
LOG = logging.getLogger(__name__)
import time
import re
from urlparse import parse_qs, urlsplit, urlunsplit
from urllib import urlencode
"""test url: https://wwedudemo17.sharepoint.com/portals/hub/_layouts/15/PointPublishing.aspx?app=video&p=p&chid=4fe89746-6fd9-4a2b-9a42-ea41c5853a53&vid=70113d75-9a34-494a-972d-dc498c12168f """
"""
<iframe width=640 height=360
src='https://wwedudemo17.sharepoint.com/portals/hub/_layouts/15/VideoEmbedHost.aspx?chId=4fe89746%2D6fd9%2D4a2b%2D9a42%2Dea41c5853a53&vId=70113d75%2D9a34%2D494a%2D972d%2Ddc498c12168f&width=640&height=360&autoPlay=false&showInfo=true' allowfullscreen></iframe>
"""
DEFAULT_VIDEO_URL = ('https://www.youtube.com/embed/uXsJ_9lQubc')
class OfficeVideoXBlock(XBlock):
EMBED_CODE_TEMPLATE = textwrap.dedent("""
<iframe
src="{}"
width="640"
height="360"
allowfullscreen>
</iframe>
""")
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
scope=Scope.settings,
default="OfficeVideo",
)
video_url = String(
display_name="Video URL",
help="Navigate to the video in your browser and ensure that it is accessible to your intended audience. Copy its URL or embed code and paste it into this field.",
scope=Scope.settings,
default=EMBED_CODE_TEMPLATE.format(DEFAULT_VIDEO_URL)
)
output_code = String(
display_name="Output Iframe Embed Code",
help="Copy the embed code into this field.",
scope=Scope.settings,
default=EMBED_CODE_TEMPLATE.format(DEFAULT_VIDEO_URL)
)
message = String(
display_name="video display status message",
help="Message to help students in case of errors.",
scope=Scope.settings,
default=""
)
message_display_state = String(
display_name="Whether to display the status message",
help="Determines whether to display the message to help students in case of errors.",
scope=Scope.settings,
default="block"
)
def resource_string(self, path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def student_view(self, context=None):
"""
The primary view of the OfficeVideoXBlock, shown to students
when viewing courses.
"""
embed_code = self.output_code
if embed_code == '':
embed_code = self.get_officevideo_embed_code(officevideo_url=self.video_url)
html = self.resource_string("static/html/officevideo.html")
frag = Fragment(html.format(embed_code=embed_code, message=self.message, message_display_state=self.message_display_state))
frag.add_css(self.resource_string("static/css/officevideo.css"))
frag.add_javascript(self.resource_string("static/js/src/officevideo.js"))
frag.initialize_js('OfficeVideoXBlock')
return frag
def studio_view(self, context=None):
"""
The primary view of the OfficeVideoXBlock, shown to teachers
when viewing courses.
"""
html = self.resource_string("static/html/officevideo_edit.html")
frag = Fragment(html.format(self=self))
frag.add_css(self.resource_string("static/css/officevideo.css"))
frag.add_javascript(self.resource_string("static/js/src/officevideo_edit.js"))
frag.initialize_js('OfficeVideoXBlock')
return frag
@XBlock.json_handler
def studio_submit(self, submissions, suffix=''): # pylint: disable=unused-argument
"""
Change the settings for this XBlock given by the Studio user
"""
if not isinstance(submissions, dict):
LOG.error("submissions object from Studio is not a dict - %r", submissions)
return {
'result': 'error'
}
self.display_name = submissions['display_name']
self.video_url = submissions['video_url']
# Check if the user has entered an embed code
embed_code_regex = '<iframe '
matched = re.match(embed_code_regex, self.video_url, re.IGNORECASE)
if matched is not None:
self.output_code = self.video_url
else:
self.output_code = ''
self.message = ""
self.message_display_state = "block"
return {'result': 'success'}
def get_officevideo_embed_code(self, officevideo_url):
embed_code = ''
try:
django_user_social = User.objects.get(id=self.xmodule_runtime.user_id).social_auth.get(provider='azuread-oauth2')
if int(django_user_social.extra_data['expires_on']) < int(time.time()):
django_user_social.refresh_token(load_strategy())
django_user_social = User.objects.get(id=self.xmodule_runtime.user_id).social_auth.get(provider='azuread-oauth2')
url = self.video_url
parsed = urlparse.urlparse(url)
query_params = urlparse.parse_qs(parsed.query)
resp = requests.get("https://" + parsed.netloc + "/portals/hub/_api/VideoService/Channels('" + query_params['chid'][0] + "')/Videos('" + query_params['vid'][0] + "')/GetVideoEmbedCode",
headers={'Authorization': 'Bearer ' + django_user_social.tokens,
'Content-Type': 'application/json;odata=verbose'})
root = ET.fromstring(resp._content)
embed_code = unicode(root.text, "utf-8")
except Exception:
embed_code = '<a target="_blank" href="'+ officevideo_url +'">Office 365 Video</a>'
return embed_code
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("OfficeVideoXBlock",
"""<vertical_demo>
<officevideo/>
<officevideo/>
</vertical_demo>
"""),
]
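# Note (illustrative): the flow above turns a PointPublishing.aspx portal URL
# (with chid/vid query parameters) into the matching VideoEmbedHost.aspx
# iframe by calling the SharePoint VideoService GetVideoEmbedCode endpoint.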
| 38.578035 | 284 | 0.657177 | 791 | 6,674 | 5.384324 | 0.353982 | 0.042263 | 0.025358 | 0.033811 | 0.251702 | 0.239493 | 0.239493 | 0.179385 | 0.156844 | 0.156844 | 0 | 0.026491 | 0.23644 | 6,674 | 172 | 285 | 38.802326 | 0.809262 | 0.067726 | 0 | 0.125 | 0 | 0.008333 | 0.234288 | 0.045556 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.141667 | 0 | 0.308333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da37fa1cf8aa9ced7ef291cee98575d2cbc3ace4 | 4,594 | py | Python | stn/task.py | anenriquez/STNU | a02a13730cc0f31521e01e186c158533479090f5 | [
"Unlicense"
] | null | null | null | stn/task.py | anenriquez/STNU | a02a13730cc0f31521e01e186c158533479090f5 | [
"Unlicense"
] | null | null | null | stn/task.py | anenriquez/STNU | a02a13730cc0f31521e01e186c158533479090f5 | [
"Unlicense"
] | null | null | null | from stn.utils.as_dict import AsDictMixin
class Edge(AsDictMixin):
def __init__(self, name, mean, variance, **kwargs):
self.name = name
self.mean = round(mean, 3)
self.variance = round(variance, 3)
self.standard_dev = round(variance ** 0.5, 3)
def __str__(self):
to_print = ""
to_print += "{}: N({}, {})".format(self.name, self.mean, self.standard_dev)
return to_print
def __sub__(self, other):
# Difference of two independent random variables
mean = self.mean - other.mean
variance = self.variance + other.variance
return mean, variance
def __add__(self, other):
# Addition of two independent random variables
mean = self.mean + other.mean
variance = self.variance + other.variance
return mean, variance
class Timepoint(AsDictMixin):
"""
r_earliest_time (float): earliest time relative to a ztp
r_latest_time (float): latest time relative to a ztp
"""
def __init__(self, name, r_earliest_time, r_latest_time, **kwargs):
self.name = name
self.r_earliest_time = round(r_earliest_time, 3)
self.r_latest_time = round(r_latest_time, 3)
def __str__(self):
to_print = ""
to_print += "{}: [{}, {}]".format(self.name, self.r_earliest_time, self.r_latest_time)
return to_print
class Task(AsDictMixin):
def __init__(self, task_id, timepoints, edges, pickup_action_id, delivery_action_id):
""" Constructor for the Task object
Args:
task_id (UUID): An instance of a UUID object
timepoints (list): list of timepoints (Timepoint)
edges (list): list of edges (Edge)
pickup_action_id (UUID): Action id of the pickup action
delivery_action_id (UUID): Action id of the delivery action
"""
self.task_id = task_id
self.timepoints = timepoints
self.edges = edges
self.pickup_action_id = pickup_action_id
self.delivery_action_id = delivery_action_id
def __str__(self):
to_print = ""
to_print += "{} \n".format(self.task_id)
to_print += "Timepoints: \n"
for timepoint in self.timepoints:
to_print += str(timepoint) + "\t"
to_print += "\n Edges: \n"
for edge in self.edges:
to_print += str(edge) + "\t"
to_print += "\n Pickup action:" + str(self.pickup_action_id)
to_print += "\n Delivery action:" + str(self.delivery_action_id)
return to_print
def get_timepoint(self, timepoint_name):
for timepoint in self.timepoints:
if timepoint.name == timepoint_name:
return timepoint
def get_edge(self, edge_name):
for edge in self.edges:
if edge.name == edge_name:
return edge
def update_timepoint(self, timepoint_name, r_earliest_time, r_latest_time=float('inf')):
in_list = False
for timepoint in self.timepoints:
if timepoint.name == timepoint_name:
in_list = True
timepoint.r_earliest_time = r_earliest_time
timepoint.r_latest_time = r_latest_time
if not in_list:
self.timepoints.append(Timepoint(timepoint_name, r_earliest_time, r_latest_time))
def update_edge(self, edge_name, mean, variance):
in_list = False
for edge in self.edges:
if edge.name == edge_name:
in_list = True
edge.mean = round(mean, 3)
edge.variance = round(variance, 3)
edge.standard_dev = round(variance ** 0.5, 3)
if not in_list:
self.edges.append(Edge(name=edge_name, mean=mean, variance=variance))
def to_dict(self):
dict_repr = super().to_dict()
timepoints = list()
edges = list()
for t in self.timepoints:
timepoints.append(t.to_dict())
for e in self.edges:
edges.append(e.to_dict())
dict_repr.update(timepoints=timepoints)
dict_repr.update(edges=edges)
return dict_repr
@classmethod
def to_attrs(cls, dict_repr):
attrs = super().to_attrs(dict_repr)
timepoints = list()
edges = list()
for t in attrs.get("timepoints"):
timepoints.append(Timepoint.from_dict(t))
for e in attrs.get("edges"):
edges.append(Edge.from_dict(e))
attrs.update(timepoints=timepoints)
attrs.update(edges=edges)
return attrs
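# Example usage (illustrative; the UUID values are placeholders created
# elsewhere):
#   start = Timepoint('start', r_earliest_time=0.0, r_latest_time=10.0)
#   pickup = Timepoint('pickup', r_earliest_time=2.0, r_latest_time=12.0)
#   travel = Edge('travel', mean=2.5, variance=0.4)
#   task = Task(task_id, [start, pickup], [travel],
#               pickup_action_id, delivery_action_id)
#   task.update_edge('travel', mean=3.0, variance=0.5)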
| 34.80303 | 94 | 0.60579 | 581 | 4,594 | 4.538726 | 0.137694 | 0.039818 | 0.044369 | 0.021236 | 0.372014 | 0.28176 | 0.265074 | 0.212363 | 0.174441 | 0.174441 | 0 | 0.003706 | 0.295168 | 4,594 | 131 | 95 | 35.068702 | 0.810686 | 0.110797 | 0 | 0.360825 | 0 | 0 | 0.028557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14433 | false | 0 | 0.010309 | 0 | 0.278351 | 0.154639 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da382dde5d81096600a758eea608666b31d3c7b7 | 2,959 | py | Python | src/bxgateway/messages/btc/data_btc_message.py | blockchain-development-resources/bxgateway | 761b5085f9c7c6527f0b9aaae06d2f70f3786db2 | [
"MIT"
] | 1 | 2021-11-26T07:49:24.000Z | 2021-11-26T07:49:24.000Z | src/bxgateway/messages/btc/data_btc_message.py | beepool/bxgateway | 761b5085f9c7c6527f0b9aaae06d2f70f3786db2 | [
"MIT"
] | null | null | null | src/bxgateway/messages/btc/data_btc_message.py | beepool/bxgateway | 761b5085f9c7c6527f0b9aaae06d2f70f3786db2 | [
"MIT"
] | 1 | 2021-09-06T02:10:08.000Z | 2021-09-06T02:10:08.000Z | import struct
from bxgateway.btc_constants import BTC_HDR_COMMON_OFF, BTC_SHA_HASH_LEN
from bxgateway.messages.btc.btc_message import BtcMessage
from bxgateway.messages.btc.btc_message_type import BtcMessageType
from bxgateway.messages.btc.btc_messages_util import btc_varint_to_int, pack_int_to_btc_varint
from bxgateway.utils.btc.btc_object_hash import BtcObjectHash
class DataBtcMessage(BtcMessage):
def __init__(self, magic=None, version=None, hashes=None, hash_stop=None, command=None, buf=None):
if hashes is None:
hashes = []
if buf is None:
buf = bytearray(BTC_HDR_COMMON_OFF + 9 + (len(hashes) + 1) * 32)
self.buf = buf
off = BTC_HDR_COMMON_OFF
struct.pack_into("<I", buf, off, version)
off += 4
off += pack_int_to_btc_varint(len(hashes), buf, off)
for hash_val in hashes:
buf[off:off + 32] = hash_val.get_big_endian()
off += 32
buf[off:off + 32] = hash_stop.get_big_endian()
off += 32
BtcMessage.__init__(self, magic, command, off - BTC_HDR_COMMON_OFF, buf)
else:
self.buf = buf
self._memoryview = memoryview(buf)
self._magic = self._command = self._payload_len = self._checksum = None
self._payload = None
self._version = self._hash_count = self._hashes = self._hash_stop = None
def version(self):
if self._version is None:
self._version, = struct.unpack_from("<I", self.buf, BTC_HDR_COMMON_OFF)
return self._version
def hash_count(self):
if self._hash_count is None:
off = BTC_HDR_COMMON_OFF + 4
self._hash_count, size = btc_varint_to_int(self.buf, off)
return self._hash_count
def __iter__(self):
off = BTC_HDR_COMMON_OFF + 4 # For the version field.
b_count, size = btc_varint_to_int(self.buf, off)
off += size
for i in range(b_count):
yield BtcObjectHash(buf=self.buf, offset=off, length=BTC_SHA_HASH_LEN)
off += 32
def hash_stop(self):
return BtcObjectHash(buf=self.buf, offset=BTC_HDR_COMMON_OFF + self.payload_len() - 32, length=BTC_SHA_HASH_LEN)
class GetHeadersBtcMessage(DataBtcMessage):
MESSAGE_TYPE = BtcMessageType.GET_HEADERS
def __init__(self, magic=None, version=None, hashes=None, hash_stop=None, buf=None):
if hashes is None:
hashes = []
super(GetHeadersBtcMessage, self).__init__(magic, version, hashes, hash_stop, self.MESSAGE_TYPE, buf)
class GetBlocksBtcMessage(DataBtcMessage):
MESSAGE_TYPE = BtcMessageType.GET_BLOCKS
def __init__(self, magic=None, version=None, hashes=None, hash_stop=None, buf=None):
if hashes is None:
hashes = []
super(GetBlocksBtcMessage, self).__init__(magic, version, hashes, hash_stop, self.MESSAGE_TYPE, buf)
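# Example usage (illustrative; `magic` is the network magic number and
# `locator_hash`/`stop_hash` are BtcObjectHash instances built elsewhere):
#   msg = GetBlocksBtcMessage(magic=magic, version=70015,
#                             hashes=[locator_hash], hash_stop=stop_hash)
#   for block_hash in msg:
#       print(block_hash)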
| 36.9875 | 120 | 0.657993 | 396 | 2,959 | 4.580808 | 0.184343 | 0.026461 | 0.052922 | 0.066152 | 0.481257 | 0.292172 | 0.233738 | 0.233738 | 0.216648 | 0.180265 | 0 | 0.00857 | 0.25076 | 2,959 | 79 | 121 | 37.455696 | 0.809653 | 0.007435 | 0 | 0.254237 | 0 | 0 | 0.001363 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.118644 | false | 0 | 0.101695 | 0.016949 | 0.355932 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da39df4b22e6c016cc25f0ba477072a025a6794f | 8,188 | py | Python | alexber/utils/_ymlparsers_extra.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | null | null | null | alexber/utils/_ymlparsers_extra.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | 8 | 2019-12-15T05:13:27.000Z | 2021-02-16T20:03:40.000Z | alexber/utils/_ymlparsers_extra.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | 2 | 2019-12-12T03:52:37.000Z | 2021-05-21T21:14:34.000Z | """
This module adapts its behavior depending on the availability of Python packages.
This module optionally depends on the ymlparsers module.
The format_template() method is used in the emails module.
Note: This module will work if you have only the standard Python library. You just can't change the delimiter values.
Note: The API and implementation of this module are unstable and can change without prior notice.
"""
import warnings
def format_template(template, **kwargs):
"""
This is the main method of this module.
Note: The API of this method is unstable and can change without prior notice.
The template is expected to be compatible with a Jinja2 one.
The current implementation makes the delimiters compatible with str.format() and uses it.
:param template: str, typically with {{my_variable}}
:param jinja2ctx: Jinja2 Environment that is consulted for the variable-name delimiters.
If it is not provided, HiYaPyCo.jinja2ctx is used. See ymlparsers.initConfig().
If that is also unavailable, defaults are used (see jinja2.defaults).
:param jinja2Lock: Lock used to atomically get variable_start_string and variable_end_string from jinja2ctx.
If it is not provided, HiYaPyCo.jinja2Lock is used. See ymlparsers.initConfig().
:return: formatted str
"""
if template is None:
return None
s = _convert_template_to_string_format(template, **kwargs)
ret = s.format(**kwargs)
return ret
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=r'.*?yaml*?', module=r'.*?ymlparsers.*?')
from . ymlparsers import HiYaPyCo
_isHiYaPyCoAvailable = True
except ImportError:
_isHiYaPyCoAvailable = False
_a1 = None
_a2 = None
try:
try:
from jinja2.defaults import VARIABLE_START_STRING as _a1, VARIABLE_END_STRING as _a2
_isJinja2DefaultAvailable = True
except ImportError:
try:
from jinja2.environment import VARIABLE_START_STRING as _a1, VARIABLE_END_STRING as _a2
_isJinja2DefaultAvailable = True
except ImportError:
_isJinja2DefaultAvailable = False
finally:
del _a1
del _a2
_VARIABLE_START_STRING = None
_VARIABLE_END_STRING = None
def _init_globals():
"""
This method is called during module import.
This method is idempotent.
"""
global _VARIABLE_START_STRING, _VARIABLE_END_STRING
if _isJinja2DefaultAvailable:
p1 = None
p2 = None
try:
from jinja2.defaults import VARIABLE_START_STRING as p1, VARIABLE_END_STRING as p2
except ImportError:
from jinja2.environment import VARIABLE_START_STRING as p1, VARIABLE_END_STRING as p2
if p1 is None or p2 is None:
raise ImportError('VARIABLE_START_STRING or VARIABLE_END_STRING are not defined')
_VARIABLE_START_STRING = p1
_VARIABLE_END_STRING = p2
else:
_VARIABLE_START_STRING = '{{'
_VARIABLE_END_STRING = '}}'
_init_globals()
def _normalize_var_name(text, start_del, end_del):
"""
Search for every (start_del, end_del) delimited variable name and replace '.' with '_' inside it.
:param text: str to normalize
:param start_del: delimiter that indicates start of variable name, typically {{
:param end_del: delimiter that indicates end of variable name, typically }}
:return: text with normalized variable names
"""
if start_del is None or start_del not in text or end_del not in text:
return text
first_ind = 0
len_end_del = len(end_del)
while True:
first_ind = text.find(start_del, first_ind)
if first_ind < 0:
break
last_ind = text.find(end_del, first_ind)
var = text[first_ind:last_ind+len_end_del]
var = var.replace('.', '_')
#text[first_ind:last_ind] = var
text = text[:first_ind]+var+text[last_ind+len_end_del:]
first_ind = last_ind+len_end_del
return text
def __convert_template_to_string_format(template, **kwargs):
"""
This is a utility method that makes the template usable with str.format().
:param template: str, typically with {{my_variable}}
:param default_start: Typically {{ but can be any other delimiter that points to start of the token variable.
:param default_end: Typically }} but can be any other delimiter that points to end of the token variable.
:return: template: str with {my_variable}
"""
if template is None:
return None
default_start = kwargs.pop('default_start', None)
default_end = kwargs.pop('default_end', None)
template = _normalize_var_name(template, default_start, default_end)
ret = template.replace(f'{default_start} ', f'{default_start}') \
.replace(f'{default_start}', '{') \
.replace(f' {default_end}', f'{default_end}') \
.replace(f'{default_end}', '}')
return ret
def _convert_template_to_string_format(template, **kwargs):
"""
This is a utility method that makes the template usable with str.format().
If both jinja2ctx and jinja2Lock are provided, they are used to determine the various delimiters
(jinja2Lock is used to read the values from jinja2ctx atomically).
If neither jinja2ctx nor jinja2Lock is provided, then:
if ymlparsers is usable (its third-party dependencies, one of which is jinja2, are available),
its jinja2ctx (Jinja2's Environment) will be consulted for the various delimiters;
otherwise, if jinja2 is available, its defaults will be used for constructing Jinja2's Environment
for the various delimiters;
otherwise, some sensible defaults (default values from some version of Jinja2) will be used.
You can't provide jinja2Lock without providing jinja2ctx (you can't provide your own jinja2Lock for HiYaPyCo.jinja2ctx).
You can provide jinja2ctx without jinja2Lock. Then you will give up atomicity for determining the various delimiters.
:param template: str, typically with {{my_variable}}
:param jinja2ctx: Jinja2 Environment that is consulted for the variable-name delimiters.
If it is not provided, HiYaPyCo.jinja2ctx is used. See ymlparsers.initConfig().
If that is also unavailable, defaults are used (see jinja2.defaults).
:param jinja2Lock: Lock used to atomically get variable_start_string and variable_end_string from jinja2ctx.
If it is not provided, HiYaPyCo.jinja2Lock is used. See ymlparsers.initConfig().
:return: template: str with {my_variable}
"""
if template is None:
return None
jinja2ctx = kwargs.pop('jinja2ctx', None)
jinja2Lock = kwargs.pop('jinja2Lock', None)
if _isHiYaPyCoAvailable and jinja2ctx is None and jinja2Lock is not None:
raise ValueError("You can't provide your jinja2Lock for HiYaPyCo.jinja2ctx")
if _isHiYaPyCoAvailable and jinja2ctx is None:
jinja2ctx = HiYaPyCo.jinja2ctx
jinja2Lock = HiYaPyCo.jinja2Lock #we should use HiYaPyCo.jinja2Lock for HiYaPyCo.jinja2ctx
#default_start, default_end
if jinja2ctx is None:
if jinja2Lock is None:
default_start = _VARIABLE_START_STRING
default_end = _VARIABLE_END_STRING
else:
with jinja2Lock:
default_start = _VARIABLE_START_STRING
default_end = _VARIABLE_END_STRING
else:
if _isHiYaPyCoAvailable and HiYaPyCo.jinja2ctx is not None and HiYaPyCo.jinja2Lock is None:
raise ValueError('HiYaPyCo.jinja2ctx is not None, but HiYaPyCo.jinja2Lock is None')
if jinja2Lock is None:
# jinja2ctx was provided, but jinja2Lock wasn't, it is ok
# (maybe jinja2ctx is local variable?)
default_start = jinja2ctx.variable_start_string
default_end = jinja2ctx.variable_end_string
else:
with jinja2Lock:
default_start = jinja2ctx.variable_start_string
default_end = jinja2ctx.variable_end_string
ret = __convert_template_to_string_format(template, default_start=default_start, default_end=default_end)
return ret
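# Example (illustrative):
#   format_template('Hello {{ user.name }}!', user_name='world')
#   # -> 'Hello world!' ('.' in variable names is normalized to '_')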
| 38.261682 | 120 | 0.69553 | 1,052 | 8,188 | 5.230989 | 0.173954 | 0.035435 | 0.05179 | 0.016355 | 0.491914 | 0.463747 | 0.396329 | 0.379793 | 0.343812 | 0.318735 | 0 | 0.014947 | 0.240107 | 8,188 | 213 | 121 | 38.441315 | 0.869495 | 0.447606 | 0 | 0.365385 | 0 | 0 | 0.080886 | 0.004895 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048077 | false | 0 | 0.105769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da3cdf32ec5c491bbbd379b8659c9adccf3080ca | 2,653 | py | Python | models/house_water_drain.py | susundberg/python-freecad-3dparts | 248e6f5eb4ce3d1921b3d4875e9c1d112f7b7498 | [
"MIT"
] | null | null | null | models/house_water_drain.py | susundberg/python-freecad-3dparts | 248e6f5eb4ce3d1921b3d4875e9c1d112f7b7498 | [
"MIT"
] | null | null | null | models/house_water_drain.py | susundberg/python-freecad-3dparts | 248e6f5eb4ce3d1921b3d4875e9c1d112f7b7498 | [
"MIT"
] | null | null | null | import supalib
TOLE=0.2
OUTSIZE=60.0
SIZE_CONST=25.0
SIZE_DROP=20.0
ANGLE_DROP=45.0
BASE_THICK=5.0
BASE_WIDE=20.0
PIPE_RAD=OUTSIZE/2.0 + TOLE
hole = supalib.create_cyl( radius=PIPE_RAD , size_z = OUTSIZE, place=(0, PIPE_RAD + 1.0, -OUTSIZE/2.0) )
outer_hole = supalib.create_cyl( radius=PIPE_RAD + 5.0 , size_z = OUTSIZE, place=(0, PIPE_RAD + 5.0, -OUTSIZE/2.0) )
tr1 = supalib.create_triangle( SIZE_DROP, BASE_THICK, BASE_WIDE/2.0 )
tr2 = supalib.create_triangle( SIZE_DROP, BASE_THICK, BASE_WIDE/2.0,rotate=(1,0,0,180),place=(0,+BASE_THICK,0) )
drop = supalib.create_union( (tr1, tr2) )
drop = supalib.relocate( drop, rotate=(0,1,0,90) )
drop = supalib.create_cut( drop, hole )
drop = supalib.relocate( drop, rotate=(1,0,0,30) )
drop = supalib.relocate( drop, place=(0,0,SIZE_CONST + SIZE_DROP ) )
drop = supalib.relocate( drop, place=(0,-9,-1) )
base = supalib.create_box( (BASE_WIDE,BASE_THICK,SIZE_CONST + 4.0), place = ( -BASE_WIDE/2.0,0.0,0.0) )
base = supalib.create_intersection( ( base, outer_hole ) )
base = supalib.create_union( ( base, drop ) )
base = supalib.create_cut( base, hole )
base.Label="house_drain"
holder_rad = PIPE_RAD + 0.5 + TOLE
HOLDER_SIZE=5.0
outer_hole2 = supalib.create_cyl( radius=holder_rad , size_z = HOLDER_SIZE, place=(0, 0, 0) )
outer_hole3 = supalib.create_cyl( radius=holder_rad + 1.0 , size_z = HOLDER_SIZE, place=(0, 0, 0) )
outer_holder = supalib.create_cut( outer_hole3, outer_hole2 )
outer_holder = supalib.relocate( outer_holder, place=(0,+holder_rad,0) )
outer_holder.Label="house_holder"
thight = supalib.create_box( (BASE_WIDE,BASE_THICK,10), place = ( -BASE_WIDE/2.0,0.0,0.0) )
thight = supalib.create_cut( thight, hole )
thight = supalib.create_intersection( ( thight, outer_hole ) )
thight = supalib.relocate( thight, rotate=(0,0,1,180), place=(0,2*holder_rad,0) )
thight.Label = "house_wedge"
parts = [ thight, outer_holder, base ]
for p in parts:
supalib.creta_mesh_from( p, save_to="/home/pauli/", version=3 )
#hole_app = supalib.create_box( (0.5,0.25 + TOLE,5.0) , place=(offset - 0.25, 5.0 - rad_size/2.0 - 2*TOLE, 2.5 ) )
#offset += rad_size + RADS[loop+1] + 2.0
#holes.append(hole)
#hole_adds.append( hole_app )
#holes = supalib.create_union( holes )
#hole_adds = supalib.create_union( hole_adds )
#box_bound = supalib.create_box( (offset, 10.0, 10 ) )
#box_bound = supalib.create_fillet( box_bound )
#box_bound = supalib.create_cut( box_bound, holes )
#box_bound = supalib.create_union( (box_bound,hole_adds) )
#box_bound.Label="Tool_holder"
#mesh = supalib.creta_mesh_from( box_bound, save_to="/home/pauli/", version=1 )
supalib.finish()
| 35.851351 | 119 | 0.710516 | 449 | 2,653 | 3.973274 | 0.167038 | 0.167601 | 0.013453 | 0.049327 | 0.33296 | 0.275785 | 0.20852 | 0.106502 | 0.106502 | 0.053812 | 0 | 0.058185 | 0.131926 | 2,653 | 73 | 120 | 36.342466 | 0.716457 | 0.223897 | 0 | 0 | 0 | 0 | 0.022483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.025 | 0 | 0.025 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da3d5161d87de56a9bc36edca5bba5b60b08bd39 | 6,556 | py | Python | dace/libraries/standard/nodes/gearbox.py | thobauma/dace | 668e4c49e476437e1ea3b272e9dbefca2b92d2e7 | [
"BSD-3-Clause"
] | null | null | null | dace/libraries/standard/nodes/gearbox.py | thobauma/dace | 668e4c49e476437e1ea3b272e9dbefca2b92d2e7 | [
"BSD-3-Clause"
] | null | null | null | dace/libraries/standard/nodes/gearbox.py | thobauma/dace | 668e4c49e476437e1ea3b272e9dbefca2b92d2e7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import collections
import copy
import dace
@dace.library.expansion
class ExpandGearbox(dace.transformation.ExpandTransformation):
environments = []
@staticmethod
def expansion(node: "Gearbox", parent_state: dace.SDFGState,
parent_sdfg: dace.SDFG):
(in_edge, in_desc, out_edge, out_desc, is_pack,
gear_factor) = node.validate(parent_sdfg, parent_state)
if is_pack:
vtype = out_desc.dtype
else:
vtype = in_desc.dtype
sdfg = dace.SDFG("gearbox")
in_desc_inner = copy.deepcopy(in_desc)
in_desc_inner.transient = False
sdfg.add_datadesc(in_edge.dst_conn, in_desc_inner)
out_desc_inner = copy.deepcopy(out_desc)
out_desc_inner.transient = False
sdfg.add_datadesc(out_edge.src_conn, out_desc_inner)
sdfg.add_array("gearbox_buffer", (1, ),
vtype,
storage=in_desc.storage,
transient=True)
state = sdfg.add_state("gearbox")
buffer_read = state.add_read("gearbox_buffer")
buffer_write = state.add_write("gearbox_buffer")
input_read = state.add_read(in_edge.dst_conn)
output_write = state.add_write(out_edge.src_conn)
iteration_space = {
"_gearbox_i": f"0:{node.size}",
"_gearbox_w": f"0:{gear_factor}"
}
entry, exit = state.add_map("gearbox",
iteration_space,
schedule=node.schedule)
tasklet = state.add_tasklet(
"gearbox", {
"val_in",
"buffer_in"
}, {
"val_out",
"buffer_out"
}, f"""\
wide = buffer_in
wide[_gearbox_w] = val_in
if _gearbox_w == {gear_factor} - 1:
val_out = wide
buffer_out = wide""" if is_pack else """\
wide = val_in if _gearbox_w == 0 else buffer_in
val_out = wide[_gearbox_w]
buffer_out = wide""")
state.add_memlet_path(input_read,
entry,
tasklet,
dst_conn="val_in",
memlet=dace.Memlet(f"{in_edge.dst_conn}[0]",
dynamic=not is_pack))
state.add_memlet_path(buffer_read,
entry,
tasklet,
dst_conn="buffer_in",
memlet=dace.Memlet(f"gearbox_buffer[0]"))
state.add_memlet_path(tasklet,
exit,
output_write,
src_conn="val_out",
memlet=dace.Memlet(f"{out_edge.src_conn}[0]",
dynamic=is_pack))
state.add_memlet_path(tasklet,
exit,
buffer_write,
src_conn="buffer_out",
memlet=dace.Memlet(f"gearbox_buffer[0]"))
return sdfg
@dace.library.node
class Gearbox(dace.sdfg.nodes.LibraryNode):
"""
Provides a library node that converts from a stream of type
vector(vector(dtype, w0)) to a stream of type vector(dtype, w1), or vice
versa. This is useful for achieving efficient memory reads on Xilinx FPGAs,
where modules accessing memories should always read or write 512-bit
vectors, which then potentially need to be narrowed down to the vector width
of the computational kernel.
The node expects to have a single input and a single output, where one end
is of type vector(vector(dtype, w0)), and the other is of type
vector(dtype, w1).
"""
implementations = {
"pure": ExpandGearbox,
}
default_implementation = "pure"
# Properties
size = dace.properties.SymbolicProperty(
desc="Number of wide vectors to convert to/from narrow vectors.",
default=0)
def __init__(self, size, name=None, schedule=None, **kwargs):
"""
:param size: Number of wide vectors to convert to/from narrow vectors.
For example, if converting n/16 reads (vector size 16) from
memory into n/4 elements (vector size 4), this parameter
should be set to n/16.
"""
super().__init__(name=name or "gearbox",
schedule=schedule or dace.ScheduleType.FPGA_Device,
**kwargs)
self.size = size
if schedule is not None:
self.schedule = schedule
def validate(self, sdfg: dace.SDFG, state: dace.SDFGState):
try:
size = dace.symbolic.evaluate(self.size, sdfg.constants)
if size < 1:
raise ValueError(f"Invalid size parameter for {self}: {size}")
except TypeError:
pass # Not a constant
in_edge = state.in_edges(self)
if len(in_edge) != 1:
raise ValueError(
f"Expected only one input edge, found {len(in_edge)} edges.")
out_edge = state.out_edges(self)
if len(out_edge) != 1:
raise ValueError(
f"Expected only one input edge, found {len(out_edge)} edges.")
in_edge = in_edge[0]
in_desc = sdfg.arrays[in_edge.data.data]
if not isinstance(in_desc, dace.data.Stream):
raise TypeError(
f"Expected input to be a stream, got {type(in_desc)}.")
out_edge = out_edge[0]
out_desc = sdfg.arrays[out_edge.data.data]
if not isinstance(out_desc, dace.data.Stream):
raise TypeError(
f"Expected input to be a stream, got {type(out_desc)}.")
# The type of one side must be a vector of the other
if (isinstance(in_desc.dtype, dace.vector)
and in_desc.dtype.base_type == out_desc.dtype):
is_pack = False # Is unpack
gear_factor = in_desc.dtype.veclen
elif (isinstance(out_desc.dtype, dace.vector)
and out_desc.dtype.base_type == in_desc.dtype):
is_pack = True
gear_factor = out_desc.dtype.veclen
else:
raise TypeError(
f"Cannot gearbox between {in_desc.dtype} and {out_desc.dtype}.")
return (in_edge, in_desc, out_edge, out_desc, is_pack, gear_factor)
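# Example (illustrative): unpack 512-bit memory reads into 128-bit kernel
# elements. With an input stream of vector(vector(float32, 4), 4) (16 floats
# per read) and an output stream of vector(float32, 4), the gear factor is 4,
# and `size` is the number of wide vectors (n // 16 for n float32 elements):
#   gearbox = Gearbox(size=n // 16, name="mem_to_kernel")
#   state.add_node(gearbox)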
| 39.257485 | 80 | 0.558115 | 783 | 6,556 | 4.466156 | 0.240102 | 0.027452 | 0.020589 | 0.020589 | 0.278524 | 0.212182 | 0.157278 | 0.117815 | 0.117815 | 0.117815 | 0 | 0.008975 | 0.354179 | 6,556 | 166 | 81 | 39.493976 | 0.816958 | 0.145363 | 0 | 0.130769 | 0 | 0 | 0.157703 | 0.007813 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023077 | false | 0.007692 | 0.023077 | 0 | 0.107692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da3fd113ae5463775113a2aa795b9fc22645ae0c | 5,662 | py | Python | reprlearn/data/samplers/kshot_sampler.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | [
"MIT"
] | null | null | null | reprlearn/data/samplers/kshot_sampler.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | [
"MIT"
] | null | null | null | reprlearn/data/samplers/kshot_sampler.py | cocoaaa/ReprLearn | 58dc682aa62dbd59201ccc55b9b26480ff3d6773 | [
"MIT"
] | null | null | null | from reprlearn.data.datasets.base import ImageDataset
from collections import defaultdict
from typing import Iterable, Optional, Callable, List, Dict, Tuple
import numpy as np
# ===============
# Returns a list of datapoints from the dataset so that
# the list contains the same number of datapoints per class (if possible)
# ===============
class KShotSampler():
def __init__(self, shuffle: bool = True) -> None:
"""Sampler that draws the same number of datapoints per class (best
effort) from a labelled image dataset.
Args
----
shuffle : bool (default True)
whether to shuffle the dataset indices before collecting datapoints
"""
self.shuffle = shuffle
def get_sample_inds_per_class(self,
dset: ImageDataset,
num_per_class: int,
shuffle: bool=True,
verify: bool=True,
):
"""Given the dataset of labelled images, return the indices for sampling
`num_per_class` number of images per class in the dataset's classes.
If shuffle, we shuffle the indices of the dset for each call to the iterator
Returns:
(List[int]) : indices to the datapts to sample for this iteration
"""
unique_classes = np.unique(dset.targets)
n_ways = len(unique_classes)
if num_per_class * n_ways > len(dset.targets):
raise ValueError(f"Cannot draw {num_per_class} datapoints for each of {n_ways} classes from {len(dset.targets)} datapoints")
inds = list(range(len(dset)))
if shuffle:
np.random.shuffle(inds) # shuffle in-place
inds_per_class = {c:[] for c in unique_classes}
done_for_class = {c:False for c in unique_classes}
for i in inds:
c = dset.targets[i]
if not done_for_class[c]: # len(inds_per_class[c]) < num_per_class:
inds_per_class[c].append(i)
if len(inds_per_class[c]) == num_per_class:
done_for_class[c] = True # done collecting dpts for this class
if np.alltrue(np.fromiter(done_for_class.values(), dtype=bool)):
break
print("Done collecting datapts for each class...")
if verify:
for c in np.unique(dset.targets):
inds = inds_per_class[c]
if len(inds) != num_per_class:
raise ValueError(f"Collected {len(inds)} indices for class {c}, expected {num_per_class}")
return inds_per_class
def sample(self,
dset: ImageDataset,
num_per_class: int,
shuffle: bool=True,
collate_fn: Optional[Callable]=None
) -> List[Tuple]:  # a list of (x, y) pairs, or a collated batch if collate_fn is given
"""Given the dataset of labelled images, return the collection/list
of datapoints from the dataset; the collection of datapoints (aka. sample)
contains equal number of datapoints per class (with best effort)
Args
----
dset : ImageDataset
source dataset to sample datapoints from
num_per_class : int
k in k-shot
shuffle : bool
if shuffle, shuffle the indices of the dataset before collecting
the datapoints
collate_fn : Callable
Similar to the collating function in torch's DataLoader argument;
It take a list of datapoints and apply it to turn the list into a
desired form of 'batch'
Returns:
(Batch or List[datapts]) : A collection of datapts sampled
"""
inds_per_class = self.get_sample_inds_per_class(dset, num_per_class, shuffle)
sample_inds = np.stack(
[np.fromiter(ilist, dtype=int) for ilist in inds_per_class.values()]
).flatten()
# we don't want to load imgs for one-class all in a row,
# and then next class's images in a row, etc
np.random.shuffle(sample_inds)
sample = [dset[i] for i in sample_inds] # apply current dataset's image transform if specified
if collate_fn is not None:
sample = collate_fn(sample)
return sample
def get_support_and_query(
self,
dset: ImageDataset,
num_per_class: int,
shuffle: bool=True,
collate_fn: Optional[Callable] = None
) -> Dict:
inds_per_class = self.get_sample_inds_per_class(dset, 2*num_per_class, shuffle)
n_way = len(np.unique(dset.targets))
support_inds = []
query_inds = []
for clabel, cinds in inds_per_class.items():
cids = np.fromiter(cinds, dtype=int)
support_inds.append(cids[:num_per_class])
query_inds.append(cids[num_per_class:])
# flatten so each entry is a single dataset index, as in sample()
support_inds = np.array(support_inds).flatten()
query_inds = np.array(query_inds).flatten()
# we don't want to load imgs for one-class all in a row,
# and then next class's images in a row, etc
np.random.shuffle(support_inds)
support_sample = [dset[i] for i in support_inds] # apply current dataset's image transform if specified
if collate_fn is not None:
support_sample = collate_fn(support_sample)
# Similarly for the query sample
np.random.shuffle(query_inds)
query_sample = [dset[i] for i in query_inds] # apply current dataset's image transform if specified
if collate_fn is not None:
query_sample = collate_fn(query_sample)
return {'support': support_sample,
'query': query_sample}
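# Example usage (illustrative; `dset` is a labelled ImageDataset):
#   sampler = KShotSampler()
#   episode = sampler.get_support_and_query(dset, num_per_class=5)
#   support, query = episode['support'], episode['query']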
| 39.048276 | 112 | 0.598905 | 733 | 5,662 | 4.472033 | 0.210096 | 0.075656 | 0.04759 | 0.019829 | 0.397804 | 0.350519 | 0.316657 | 0.316657 | 0.274558 | 0.274558 | 0 | 0.000261 | 0.32409 | 5,662 | 144 | 113 | 39.319444 | 0.856284 | 0.347051 | 0 | 0.202532 | 0 | 0 | 0.01575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0.012658 | 0.050633 | 0 | 0.151899 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da40b32e1d6bf126d545d746d9c0416f4eb38e0a | 7,117 | py | Python | phaseprep/workflows/preprocess_phase_wf.py | ostanley/phaseprep | 6e721ea43755f10eb8569b1f4d4461efa3d85a1a | [
"Apache-2.0"
] | 1 | 2019-10-11T17:04:25.000Z | 2019-10-11T17:04:25.000Z | phaseprep/workflows/preprocess_phase_wf.py | ostanley/phaseprep | 6e721ea43755f10eb8569b1f4d4461efa3d85a1a | [
"Apache-2.0"
] | 2 | 2019-10-16T13:13:52.000Z | 2019-12-10T19:38:39.000Z | phaseprep/workflows/preprocess_phase_wf.py | ostanley/phaseprep | 6e721ea43755f10eb8569b1f4d4461efa3d85a1a | [
"Apache-2.0"
] | 2 | 2019-11-18T19:21:44.000Z | 2021-10-19T18:01:03.000Z | import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.afni as afni
import phaseprep.interfaces as pp
import nipype.interfaces.utility as ul
def findscalingarg(in_file, bit_depth=12):
import nibabel as nb
import numpy as np
img = nb.load(in_file)
if img.dataobj.slope != 1.0:
print('Removing rescale before conversion')
mul = np.pi/(2**(bit_depth-1)*img.dataobj.slope)
sub = np.pi*((img.dataobj.slope+1)/(2**(bit_depth-1)*img.dataobj.slope))
return '-mul %s -sub %s' % (mul, sub)
def create_preprocess_phase_wf():
"""Create's phase preprocessing workflow with the following steps:
1) Convert data to float
2) Determine scaling required for radians
3) Apply radian scaling
4) Convert to real and imaginary
5) Apply magnitude motion correction parameters
6) Correct geometry changes (AFNI issue)
7) Convert back to phase
8) Unwrap and detrend data
9) Mask data using magnitude mask
10) Calculate noise from data
"""
preprocphase = pe.Workflow(name="preprocphase")
preprocphase.config['execution']['remove_unnecessary_outputs'] = False
# define inputs
inputspec = pe.Node(ul.IdentityInterface(fields=['input_phase', # raw phase data
'input_mag', # raw mag data
'motion_par', # afni transform concatenated from magnitude data
'mask_file', # bet mask from magnitude data
'rest', # volumes of rest in block design
'task', # volumes of task in block design
]),
name='inputspec')
# 1) Convert data to float
img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float', op_string='', suffix='_dtype'),
iterfield=['in_file'],
name='img2float')
# 2) Determine radian scaling required
findscaling = pe.MapNode(interface=ul.Function(input_names=['in_file'],
output_names=['scaling_arg'],
function=findscalingarg),
name='findscaling', iterfield=['in_file'])
# 3) Apply radian scaling
convert2rad = pe.MapNode(interface=fsl.maths.MathsCommand(),
name='convert2rad', iterfield=['in_file', 'args'])
# 4) Convert to real and imaginary (2 step process)
# modified from fslcomplex to fslmaths in Sep 2020, bonus also preserves geometry info
convert2real = pe.MapNode(interface=fsl.maths.MultiImageMaths(op_string=' -cos -mul %s'), name='convert2real', iterfield=['in_file','operand_files'])
convert2imag = pe.MapNode(interface=fsl.maths.MultiImageMaths(op_string=' -sin -mul %s'), name='convert2imag', iterfield=['in_file','operand_files'])
# 5) Apply magnitude motion correction parameters
mocoreal = pe.MapNode(interface=afni.Allineate(), name='mocoreal',
iterfield=['in_file', 'in_matrix'])
mocoreal.inputs.outputtype = 'NIFTI_GZ'
mocoreal.inputs.out_file = 'mocophase.nii.gz'
mocoreal.inputs.num_threads = 2
mocoimag = mocoreal.clone('mocoimag')
# 6) Correct geometry changes (AFNI issue)
cpgeommocoreal = pe.MapNode(interface=fsl.CopyGeom(), name='cpgeommoco', iterfield=['dest_file', 'in_file'])
cpgeommocoimag = cpgeommocoreal.clone('cpgeommocoimag')
# 7) Convert back to phase; the custom interface uses atan2 to avoid sign ambiguity
convert2phase = pe.MapNode(interface=pp.Convert2Phase(), name='convert2phase', iterfield=['real_image','imaginary_image'])
# 8) Remove first volume, unwrap and detrend phase data
prepphase = pe.MapNode(interface=pp.PreprocessPhase(), name='prepphase', iterfield=['phase'])
# 9) Mask data using magnitude mask
maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
op_string='-mas'),
iterfield=['in_file'],
name='maskfunc')
# 10) Calculate noise from data
calcSNR = pe.MapNode(interface=pp.RestAverage(), name='calcSNR', iterfield=['func', 'rest', 'task'])
# outputspec
outputspec = pe.Node(ul.IdentityInterface(fields=['proc_phase', 'uw_phase', 'delta_phase','std_phase']),
name='outputspec')
preprocphase.connect([(inputspec, img2float, [('input_phase', 'in_file')]), # 1
(inputspec, findscaling, [('input_phase', 'in_file')]), # 2
(findscaling, convert2rad, [('scaling_arg', 'args')]),
(img2float, convert2rad, [('out_file', 'in_file')]),
(convert2rad, convert2real, [('out_file', 'in_file')]),
(convert2rad, convert2imag, [('out_file', 'in_file')]),
(inputspec, convert2real, [('input_mag', 'operand_files')]),
(inputspec, convert2imag, [('input_mag', 'operand_files')]),
(inputspec, mocoreal, [('motion_par', 'in_matrix')]), # 5 real
(convert2real, mocoreal, [('out_file', 'in_file')]),
(mocoreal, cpgeommocoreal, [('out_file','dest_file')]), #6 real
(img2float, cpgeommocoreal, [('out_file', 'in_file')]),
(inputspec, mocoimag, [('motion_par', 'in_matrix')]), # 5 imag
(convert2imag, mocoimag, [('out_file', 'in_file')]),
(mocoimag, cpgeommocoimag, [('out_file','dest_file')]), # 6 imag
(img2float, cpgeommocoimag, [('out_file', 'in_file')]),
(cpgeommocoimag, convert2phase, [('out_file', 'imaginary_image')]), # 7
(cpgeommocoreal, convert2phase, [('out_file', 'real_image')]),
(convert2phase, prepphase, [('phase_image', 'phase')]), # 8
(prepphase, maskfunc, [('detrended_phase', 'in_file')]), # 9
(inputspec, maskfunc, [('mask_file', 'in_file2')]),
(maskfunc, outputspec, [('out_file', 'proc_phase')]),
(prepphase, outputspec, [('uw_phase', 'uw_phase')]),
(prepphase, outputspec, [('delta_phase', 'delta_phase')]),
(inputspec, calcSNR, [('rest', 'rest'), # 10
('task', 'task')]),
(prepphase, calcSNR, [('detrended_phase', 'func')]),
(calcSNR, outputspec, [('noise', 'std_phase')])
])
return preprocphase
if __name__ == "__main__":
workflow = create_preprocess_phase_wf()
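# To execute the workflow (illustrative): wire up the inputspec fields
# (input_phase, input_mag, motion_par, mask_file, rest, task) and then call
# workflow.run()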
| 53.511278 | 153 | 0.559646 | 701 | 7,117 | 5.53067 | 0.28388 | 0.032499 | 0.05107 | 0.023472 | 0.269796 | 0.128966 | 0.038174 | 0.025277 | 0 | 0 | 0 | 0.015925 | 0.311789 | 7,117 | 132 | 154 | 53.916667 | 0.775623 | 0.159618 | 0 | 0.023256 | 0 | 0 | 0.179292 | 0.004406 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.081395 | 0 | 0.127907 | 0.011628 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da426e6fadffb074142a9d08e0b642ab357e46fc | 2,514 | py | Python | zorg/buildbot/builders/AnnotatedBuilder.py | DalavanCloud/zorg | d55f03740e589d504dbfe2d5dc9fbc5d551f31fb | [
"Apache-2.0"
] | 1 | 2019-02-10T03:05:05.000Z | 2019-02-10T03:05:05.000Z | zorg/buildbot/builders/AnnotatedBuilder.py | DalavanCloud/llvm-zorg | 14d347a312d5a19bec421f553a3c1cbe1735b273 | [
"Apache-2.0"
] | null | null | null | zorg/buildbot/builders/AnnotatedBuilder.py | DalavanCloud/llvm-zorg | 14d347a312d5a19bec421f553a3c1cbe1735b273 | [
"Apache-2.0"
] | null | null | null | import os
import buildbot
from buildbot.process.properties import WithProperties
from buildbot.steps.shell import SetProperty, ShellCommand
from buildbot.steps.source import SVN
from zorg.buildbot.commands.AnnotatedCommand import AnnotatedCommand
from zorg.buildbot.process.factory import LLVMBuildFactory
def getAnnotatedBuildFactory(
script,
clean=False,
depends_on_projects=None,
env=None,
timeout=1200):
"""
Returns a new build factory that uses AnnotatedCommand, which
allows the build to be run by version-controlled scripts that do
not require a buildmaster restart to update.
"""
f = LLVMBuildFactory(
depends_on_projects=depends_on_projects,
llvm_srcdir='llvm.src')
if clean:
f.addStep(SetProperty(property='clean', command='echo 1'))
# We normally use the clean property to indicate that we want a
# clean build, but AnnotatedCommand uses the clobber property
# instead. Therefore, set clobber if clean is set to a truthy
# value. This will cause AnnotatedCommand to set
# BUILDBOT_CLOBBER=1 in the environment, which is how we
# communicate to the script that we need a clean build.
f.addStep(SetProperty(
property='clobber',
command='echo 1',
doStepIf=lambda step: step.build.getProperty('clean', False)))
merged_env = {
'TERM': 'dumb' # Be cautious and disable color output from all tools.
}
if env is not None:
# Overwrite pre-set items with the given ones, so user can set
# anything.
merged_env.update(env)
scripts_dir = "annotated"
f.addStep(SVN(name='update-annotate-scripts',
mode='update',
svnurl='http://llvm.org/svn/llvm-project/zorg/trunk/'
'zorg/buildbot/builders/annotated',
workdir=scripts_dir,
alwaysUseLatest=True))
# Explicitly use '/' as separator, because it works on *nix and Windows.
script_path = "../%s/%s" % (scripts_dir, script)
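    # %(jobs:-)s is WithProperties' default syntax: it renders the builder's
    # 'jobs' property, or an empty string when that property is unset.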
f.addStep(AnnotatedCommand(name="annotate",
description="annotate",
timeout=timeout,
haltOnFailure=True,
command=WithProperties(
"python %(script)s --jobs=%(jobs:-)s",
script=lambda _: script_path),
env=merged_env))
return f
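# Example usage (a sketch; the script name and project list are illustrative):
# f = getAnnotatedBuildFactory(script='clang-build.py', clean=True,
#                              depends_on_projects=['llvm', 'clang'],
#                              timeout=3600)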
| 36.970588 | 78 | 0.622514 | 285 | 2,514 | 5.431579 | 0.477193 | 0.020672 | 0.032946 | 0.034884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00395 | 0.295147 | 2,514 | 67 | 79 | 37.522388 | 0.869639 | 0.280827 | 0 | 0 | 0 | 0 | 0.122817 | 0.030986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.159091 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4804a69488cffc401e2be47069232bee7d172a | 4,828 | py | Python | train.py | yazar1993/TextBoxes-mxnet | 89fbf4151473ab4575a032871683e76978deec0a | [
"MIT"
] | 1 | 2019-02-04T19:03:27.000Z | 2019-02-04T19:03:27.000Z | train.py | yazar1993/TextBoxes-mxnet | 89fbf4151473ab4575a032871683e76978deec0a | [
"MIT"
] | null | null | null | train.py | yazar1993/TextBoxes-mxnet | 89fbf4151473ab4575a032871683e76978deec0a | [
"MIT"
] | null | null | null | import time
from matplotlib import pyplot as plt
import numpy as np
import mxnet as mx
from mxnet import autograd, gluon
import gluoncv as gcv
from gluoncv.utils import download, viz
from model import model_zoo
import argparse
def get_dataloader(net, train_dataset, data_shape, batch_size, num_workers):
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
width, height = data_shape, data_shape
with autograd.train_mode():
_, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
batchify_fn = Tuple(Stack(), Stack(), Stack()) # stack image, cls_targets, box_targets
train_loader = gluon.data.DataLoader(
train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
return train_loader
parser = argparse.ArgumentParser()
parser.add_argument('--images_root',type=str,help='root folder of images')
parser.add_argument('--LSTpath', type=str, help= 'path to LST file')
parser.add_argument('--batch_size', default = 16, type=int)
parser.add_argument('--num_epochs', default = 100, type=int)
parser.add_argument('--lr', type=float, default = 0.001, help='learning rate')
parser.add_argument('--wd', type=float, default = 0.0005)
parser.add_argument('--momentum',type=float,default = 0.9)
parser.add_argument('--netName', type=str, help='name of network to train')
parser.add_argument('--gpu_ind', type=str, help='comma separated gpu indices', default = '0')
parser.add_argument('--finetune_model',type=str, help='path to model to finetune from', default = '')
args = parser.parse_args()
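# Example invocation (paths and the network name are illustrative):
#   python train.py --images_root ./images --LSTpath ./train.lst \
#       --netName ssd_512_vgg16_atrous_custom --gpu_ind 0,1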
images_root = args.images_root
LSTpath = args.LSTpath
classes = ['text']
batch_size = args.batch_size
num_epochs = args.num_epochs
lr = args.lr
wd = args.wd
momentum = args.momentum
netName = args.netName
gpu_ind = args.gpu_ind
path_to_model = args.finetune_model
# load dataset from LST file
dataset = gcv.data.LstDetection(LSTpath, root=images_root)
print(dataset)
image = dataset[0][0]
label = dataset[0][1]
print('label:', label)
# plot the image with its bounding boxes and save the figure to disk
ax = viz.plot_bbox(image, bboxes=label[:, :4], labels=label[:, 4:5], class_names=classes)
plt.savefig('labeled_image.jpg')
# initialize model
net, input_size = model_zoo.get_model(netName, pretrained=False, classes=classes)
if path_to_model == '':
net.initialize()
net.reset_class(classes)
else:
net.load_parameters(path_to_model)
net.reset_class(classes)
print(net)
train_data = get_dataloader(net, dataset, input_size, batch_size, 0)
#############################################################################################
# Try use GPU for training
try:
gpu_ind = gpu_ind.split(',')
ctx = []
for cur_gpu in gpu_ind:
cur_gpu = int(cur_gpu)
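        # Allocate a tiny array on each GPU as a probe; any failure here
        # drops us into the CPU fallback below.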
a = mx.nd.zeros((1,), ctx=mx.gpu(cur_gpu))
ctx.append(mx.gpu(cur_gpu))
print('gpu mode is used')
except Exception:
print('cpu mode is used')
ctx = [mx.cpu()]
#############################################################################################
# Start training
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(
net.collect_params(), 'sgd',
{'learning_rate': lr, 'wd': wd, 'momentum': momentum})
mbox_loss = gcv.loss.SSDMultiBoxLoss()
ce_metric = mx.metric.Loss('CrossEntropy')
smoothl1_metric = mx.metric.Loss('SmoothL1')
for epoch in range(0, num_epochs):
ce_metric.reset()
smoothl1_metric.reset()
tic = time.time()
btic = time.time()
net.hybridize(static_alloc=True, static_shape=True)
for i, batch in enumerate(train_data):
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
cls_targets = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
with autograd.record():
cls_preds = []
box_preds = []
for x in data:
cls_pred, box_pred, _ = net(x)
cls_preds.append(cls_pred)
box_preds.append(box_pred)
sum_loss, cls_loss, box_loss = mbox_loss(
cls_preds, box_preds, cls_targets, box_targets)
autograd.backward(sum_loss)
trainer.step(1)
ce_metric.update(0, [l * batch_size for l in cls_loss])
smoothl1_metric.update(0, [l * batch_size for l in box_loss])
name1, loss1 = ce_metric.get()
name2, loss2 = smoothl1_metric.get()
if i % 20 == 0:
print('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}'.format(
epoch, i, batch_size/(time.time()-btic), name1, loss1, name2, loss2))
btic = time.time()
net.save_parameters(netName + '_icdar2013.params')
| 37.426357 | 101 | 0.665493 | 673 | 4,828 | 4.579495 | 0.283804 | 0.029202 | 0.055159 | 0.016548 | 0.095717 | 0.069111 | 0.040883 | 0.018819 | 0.018819 | 0 | 0 | 0.015178 | 0.167564 | 4,828 | 128 | 102 | 37.71875 | 0.75168 | 0.029619 | 0 | 0.037383 | 0 | 0 | 0.095948 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009346 | false | 0 | 0.102804 | 0 | 0.121495 | 0.056075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da48925dd8d85e25b1591c7ad7324c1b91372e21 | 484 | py | Python | aws/build_saint_features.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | [
"MIT"
] | null | null | null | aws/build_saint_features.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | [
"MIT"
] | null | null | null | aws/build_saint_features.py | fabien-vavrand/kaggle-riiid | 3302955980e0d4bb2dbc72bcd369000b0724f1e7 | [
"MIT"
] | null | null | null | from doppel import DoppelProject
from riiid.utils import configure_console_logging
from riiid.config import SRC_PATH
from riiid.aws.config import CONTEXT, PACKAGES
configure_console_logging()
project = DoppelProject(
name='riiid-saint-features',
path=SRC_PATH,
entry_point='-m riiid.aws.build_saint_features',
packages=PACKAGES,
python='3.7.6',
n_instances=1,
min_memory=128,
env_vars={'PYTHONHASHSEED': '1'},
context=CONTEXT
)
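# Launch the run: doppel presumably provisions the instance described above and
# executes the entry-point module remotely (inferred from the parameter names).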
project.start()
| 22 | 52 | 0.743802 | 64 | 484 | 5.4375 | 0.578125 | 0.077586 | 0.132184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019512 | 0.152893 | 484 | 21 | 53 | 23.047619 | 0.829268 | 0 | 0 | 0 | 0 | 0 | 0.150826 | 0.061983 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4a7e68c0832aca421b9ec0a6a9d00a1f584040 | 1,933 | py | Python | src/sentry/incidents/endpoints/organization_alert_rule_trigger_details.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/incidents/endpoints/organization_alert_rule_trigger_details.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/incidents/endpoints/organization_alert_rule_trigger_details.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from rest_framework import status
from rest_framework.response import Response
from sentry.api.serializers import serialize
from sentry.api.serializers.models.alert_rule_trigger import DetailedAlertRuleTriggerSerializer
from sentry.incidents.endpoints.bases import OrganizationAlertRuleTriggerEndpoint
from sentry.incidents.endpoints.serializers import AlertRuleTriggerSerializer
from sentry.incidents.logic import AlreadyDeletedError, delete_alert_rule_trigger
class OrganizationAlertRuleTriggerDetailsEndpoint(OrganizationAlertRuleTriggerEndpoint):
def get(self, request, organization, alert_rule, alert_rule_trigger):
"""
Fetch an alert rule trigger.
``````````````````
:auth: required
"""
data = serialize(alert_rule_trigger, request.user, DetailedAlertRuleTriggerSerializer())
return Response(data)
def put(self, request, organization, alert_rule, alert_rule_trigger):
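        """
        Update an alert rule trigger.
        ``````````````````
        :auth: required
        """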
serializer = AlertRuleTriggerSerializer(
context={
"organization": organization,
"alert_rule": alert_rule,
"access": request.access,
},
instance=alert_rule_trigger,
data=request.data,
partial=True,
)
if serializer.is_valid():
trigger = serializer.save()
return Response(serialize(trigger, request.user), status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, organization, alert_rule, alert_rule_trigger):
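        """
        Remove an alert rule trigger.
        ``````````````````
        :auth: required
        """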
try:
delete_alert_rule_trigger(alert_rule_trigger)
return Response(status=status.HTTP_204_NO_CONTENT)
except AlreadyDeletedError:
return Response(
"This trigger has already been deleted", status=status.HTTP_400_BAD_REQUEST
)
| 39.44898 | 96 | 0.702535 | 188 | 1,933 | 6.989362 | 0.351064 | 0.10274 | 0.121766 | 0.079148 | 0.17656 | 0.153729 | 0.109589 | 0.109589 | 0 | 0 | 0 | 0.008 | 0.224004 | 1,933 | 48 | 97 | 40.270833 | 0.868 | 0.032592 | 0 | 0 | 0 | 0 | 0.035519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.228571 | 0 | 0.485714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4c1af35b78bb185c69f2e2ce2c1d8ceee1a22d | 667 | py | Python | chapter03/knock25.py | m-star18/NLP100 | e199814f81943f7fb693fd5fe87d6df21da07f5b | [
"MIT"
] | 1 | 2020-07-15T17:21:13.000Z | 2020-07-15T17:21:13.000Z | chapter03/knock25.py | m-star18/NLP100 | e199814f81943f7fb693fd5fe87d6df21da07f5b | [
"MIT"
] | 1 | 2021-05-04T01:04:57.000Z | 2021-05-04T01:05:32.000Z | chapter03/knock25.py | m-star18/NLP100 | e199814f81943f7fb693fd5fe87d6df21da07f5b | [
"MIT"
] | null | null | null | import re
import pandas as pd
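# NLP 100 knock 25: extract the field names and values of the 基礎情報
# (basic information) template from the Japanese Wikipedia article on イギリス.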
df = pd.read_json('jawiki-country.json', lines=True)
text = df.query('title=="イギリス"')['text'].values[0].split('\n')
memo, flag = [], False
template = '基礎情報'
check = re.compile('\|(.+?)\s=\s(.+)')
check1 = re.compile('\{\{' + template)
check2 = re.compile('\}\}')
check3 = re.compile('\|')
check4 = re.compile('<ref(\s|>).+?(</ref>|$)')
for t in text:
if flag:
if check2.match(t):
break
if check3.match(t):
memo.append(check4.sub('', t.strip()))
if check1.match(t):
flag = True
ans = {}
for tmp in [check.match(m) for m in memo]:
ans[tmp.group(1)] = tmp.group(2)
print(ans)
| 23.821429 | 62 | 0.55922 | 98 | 667 | 3.795918 | 0.5 | 0.120968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020677 | 0.202399 | 667 | 27 | 63 | 24.703704 | 0.678571 | 0 | 0 | 0 | 0 | 0 | 0.136432 | 0.034483 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4c202f2a4d50150a0f027ce75d19d6e0f3d28d | 316 | py | Python | constants.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 19 | 2018-06-08T05:33:47.000Z | 2021-04-26T16:19:32.000Z | constants.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | null | null | null | constants.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 13 | 2018-09-24T21:52:06.000Z | 2021-02-26T10:40:25.000Z | # Datasets
TRAIN = 'trn'
VAL = 'val'
TEST = 'tst'
FULL = 'full'
# File extensions
JPG = '.jpg'
TIF = '.tif'
PNG = '.png'
GIF = '.gif'
BCOLZ = '.bc'
CSV = '.csv'
# PyTorch
MODEL_EXT = '.mdl'
WEIGHTS_EXT = '.th'
OPTIM_EXT = '.th'
# Data Aug
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225] | 14.363636 | 37 | 0.591772 | 51 | 316 | 3.568627 | 0.686275 | 0.054945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094118 | 0.193038 | 316 | 22 | 38 | 14.363636 | 0.619608 | 0.129747 | 0 | 0 | 0 | 0 | 0.169742 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4d25bd823544d3dde8ed32e826fbbb55bcbd80 | 1,226 | py | Python | a10sdk/core/maximum/maximum_paths.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 16 | 2015-05-20T07:26:30.000Z | 2021-01-23T11:56:57.000Z | a10sdk/core/maximum/maximum_paths.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 6 | 2015-03-24T22:07:11.000Z | 2017-03-28T21:31:18.000Z | a10sdk/core/maximum/maximum_paths.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 23 | 2015-03-29T15:43:01.000Z | 2021-06-02T17:12:01.000Z | from a10sdk.common.A10BaseClass import A10BaseClass
class MaximumPaths(A10BaseClass):
"""Class Description::
Set maximum number of route multipaths installed into FIB.
Class maximum-paths supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param path: {"description": "supported multipath numbers", "format": "number", "default": 4, "optional": true, "maximum": 64, "minimum": 1, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/maximum-paths`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
        self.required = []
self.b_key = "maximum-paths"
        self.a10_url = "/axapi/v3/maximum-paths"
self.DeviceProxy = ""
self.path = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| 32.263158 | 168 | 0.638662 | 145 | 1,226 | 5.344828 | 0.565517 | 0.061935 | 0.036129 | 0.049032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023037 | 0.221044 | 1,226 | 37 | 169 | 33.135135 | 0.788482 | 0.601958 | 0 | 0 | 0 | 0 | 0.084309 | 0.053864 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4d50c0cd6f0dd5e191b086879be35c23707ff8 | 331 | py | Python | ocun.py | jpcyrino/chunker_dm | 1afde2400b81d0fbc351dcb4658546ef018d2640 | [
"MIT"
] | 1 | 2022-02-23T12:33:01.000Z | 2022-02-23T12:33:01.000Z | ocun.py | jpcyrino/chunker_dm | 1afde2400b81d0fbc351dcb4658546ef018d2640 | [
"MIT"
] | null | null | null | ocun.py | jpcyrino/chunker_dm | 1afde2400b81d0fbc351dcb4658546ef018d2640 | [
"MIT"
] | null | null | null | import sys
filename = sys.argv[1]
fileout = sys.argv[2]
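# Keep every third line of the input (indices 0, 3, 6, ...) and write those
# lines to the output file.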
with open(filename, encoding="utf-8", mode="r") as file:
lines = file.read().split("\n")
data_lines = [lines[i] for i in range(0, len(lines), 3)]
print(data_lines)
with open(fileout, encoding="utf-8", mode="w") as file:
for line in data_lines:
file.write(line + '\n')
| 20.6875 | 56 | 0.667674 | 59 | 331 | 3.694915 | 0.542373 | 0.123853 | 0.110092 | 0.146789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021127 | 0.141994 | 331 | 15 | 57 | 22.066667 | 0.746479 | 0 | 0 | 0 | 0 | 0 | 0.048485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4f18d031bec1129d069479e75d9c035f860d1d | 2,412 | py | Python | galaxy/main/urls.py | changelox/galaxy | fc8e11b36de0b78e55c13c05ffc3a3fcaf8b39dc | [
"Apache-2.0"
] | null | null | null | galaxy/main/urls.py | changelox/galaxy | fc8e11b36de0b78e55c13c05ffc3a3fcaf8b39dc | [
"Apache-2.0"
] | null | null | null | galaxy/main/urls.py | changelox/galaxy | fc8e11b36de0b78e55c13c05ffc3a3fcaf8b39dc | [
"Apache-2.0"
] | null | null | null | # (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.conf.urls import url
from django.conf import settings
from django.views.decorators.cache import never_cache
from django.contrib.staticfiles.views import serve as serve_staticfiles
from django.views.static import serve as serve_static
from galaxy.main import views
urlpatterns = [
# Non-secure URLs
url(r'^$', views.home, name='home'),
url(r'^explore$', views.explore, name='explore'),
url(r'^intro$', views.intro, name='intro'),
url(r'^accounts/landing[/]?$', views.accounts_landing,
name='accounts-landing'),
url(r'^list$', views.list_category, name='list-category'),
url(r'^detail$', views.detail_category, name='detail-category'),
url(r'^roleadd$', views.role_add_view, name='role-add-category'),
url(r'^imports$', views.import_status_view, name='import-status'),
url(r'^stars$', views.stars_list_view, name='stars-list'),
# Logged in/secured URLs
url(r'^accounts/connect/$', views.accounts_connect),
url(r'^accounts/connect/success/$', views.accounts_connect_success,
name='accounts-connect-success'),
url(r'^accounts/profile/$', views.accounts_profile,
name='accounts-profile'),
url(r'^authors/$', views.NamespaceListView.as_view(),
name='namespace-list'),
url(r'^([\w\-._+]+)/$', views.RoleListView.as_view(), name='role-list'),
url(r'^([\w\-._+]+)/([\w\-._+]+)/$',
views.RoleDetailView.as_view(), name='role-detail'),
]
# FIX
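# Serve static assets from Django itself: via the staticfiles finders
# (uncached) in DEBUG, and from STATIC_ROOT otherwise. In production a web
# server would normally serve these instead.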
if settings.DEBUG:
urlpatterns += [
url(r'^static/(?P<path>.*)$',
never_cache(serve_staticfiles))
]
else:
urlpatterns += [
url(r'^static/(?P<path>.*)$', serve_static,
kwargs={'document_root': settings.STATIC_ROOT})
]
| 37.107692 | 76 | 0.681177 | 331 | 2,412 | 4.882175 | 0.380665 | 0.042079 | 0.029703 | 0.022277 | 0.032178 | 0.032178 | 0 | 0 | 0 | 0 | 0 | 0.004464 | 0.164179 | 2,412 | 64 | 77 | 37.6875 | 0.797123 | 0.285655 | 0 | 0.052632 | 0 | 0 | 0.250147 | 0.083969 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.184211 | 0 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da58277b5c2af60a518ecbd9a3ef1bdee746623d | 1,306 | py | Python | python3/ais_sdk/utils.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | python3/ais_sdk/utils.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | python3/ais_sdk/utils.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import os
import base64
import urllib.request
import ais_sdk.ais as ais
_ENDPOINT = {
'image': {
'cn-north-1':'image.cn-north-1.myhuaweicloud.com',
'ap-southeast-1':'image.ap-southeast-1.myhuaweicloud.com'
},
'moderation': {
'cn-north-1':'moderation.cn-north-1.myhuaweicloud.com',
'ap-southeast-1':'moderation.ap-southeast-1.myhuaweicloud.com'
}
}
def encode_to_base64(filename):
"""
encoding file to base64 encoded stream text
:param filename:
:return:
"""
imgstr = ""
with open(filename, 'rb') as file:
imgstr = base64.b64encode(file.read())
return imgstr
def download_url_base64(url):
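    """Download the resource at url and return its base64-encoded bytes."""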
return base64.b64encode(urllib.request.urlopen(url).read())
def decode_to_wave_file(base64_encoded_str, filename):
'''
decode base64 stream to wave file
:param base64_encoded_str:
    :param filename:
    :return:
'''
wave_data = base64.b64decode(base64_encoded_str)
    with open(filename, 'wb') as wf:
        wf.write(wave_data)
def get_endpoint(type):
region_name = get_region()
return _ENDPOINT[type].get(region_name)
def get_region():
return os.environ.get(ais.AisService.REGION_MSG)
def init_global_env(region):
os.environ[ais.AisService.REGION_MSG] = region
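# Example (sketch; the region must be one of the keys in _ENDPOINT):
# init_global_env('cn-north-1')
# endpoint = get_endpoint('image')  # -> 'image.cn-north-1.myhuaweicloud.com'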
| 24.641509 | 70 | 0.666156 | 171 | 1,306 | 4.929825 | 0.350877 | 0.033215 | 0.03796 | 0.030842 | 0.151839 | 0.085409 | 0.085409 | 0.085409 | 0 | 0 | 0 | 0.035373 | 0.199081 | 1,306 | 52 | 71 | 25.115385 | 0.770554 | 0.123277 | 0 | 0 | 0 | 0 | 0.201275 | 0.140255 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.121212 | 0.060606 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da5863a5ec445793ea40d771aa319962f8ec9010 | 609 | py | Python | GUI/dialogs/propulsion_dialogs/propulsion_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | 7 | 2021-03-17T01:23:28.000Z | 2021-05-06T20:41:21.000Z | GUI/dialogs/propulsion_dialogs/propulsion_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | null | null | null | GUI/dialogs/propulsion_dialogs/propulsion_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | null | null | null | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from GUI.tabs.propulsion_tab.propulsion_tab import propulsion_tab
class propulsion_dialog(QDialog):
def __init__(self):
super().__init__()
self.tab = propulsion_tab()
        self.layout = self.tab.create_widget()
self.buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
Qt.Horizontal, self)
self.layout.addWidget(self.buttons)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.setLayout(self.layout)
| 30.45 | 65 | 0.689655 | 68 | 609 | 5.970588 | 0.470588 | 0.128079 | 0.078818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004175 | 0.213465 | 609 | 19 | 66 | 32.052632 | 0.843424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da58d75367a4513d4ada4db3e0cf52dc127dc010 | 726 | py | Python | blind_75/06_removeNthFromEnd.py | NursultanBeken/leetcode_practice | 8aa8a033f95110aafa6acd9ebf842d716fd7552b | [
"MIT"
] | 1 | 2020-09-20T03:55:00.000Z | 2020-09-20T03:55:00.000Z | blind_75/06_removeNthFromEnd.py | NursultanBeken/leetcode_practice | 8aa8a033f95110aafa6acd9ebf842d716fd7552b | [
"MIT"
] | null | null | null | blind_75/06_removeNthFromEnd.py | NursultanBeken/leetcode_practice | 8aa8a033f95110aafa6acd9ebf842d716fd7552b | [
"MIT"
] | null | null | null | """
Dummy node, two pointers, swap nodes
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
dummy = ListNode(0, head)
left = dummy
right = head
        while n > 0 and right:
            right = right.next
            n -= 1
while right:
right = right.next
left = left.next
left.next = left.next.next
return dummy.next | 22 | 43 | 0.479339 | 77 | 726 | 4.467532 | 0.441558 | 0.116279 | 0.087209 | 0.110465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009547 | 0.422865 | 726 | 33 | 44 | 22 | 0.811456 | 0.326446 | 0 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
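# Example (sketch): for 1->2->3->4->5 and n = 2, `right` runs off the list
# when `left` reaches node 3, so node 4 is unlinked, giving 1->2->3->5.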
da5d5dda91394d5fcd0bc5d32616b3e16dc5d436 | 875 | py | Python | ethsential/__main__.py | 1140251/Ethsential | 1de423358f5a0ba8b84d80fa63bce09552bca9fd | [
"Apache-2.0"
] | 7 | 2021-10-11T12:07:08.000Z | 2022-01-10T01:19:36.000Z | ethsential/__main__.py | 1140251/Ethsential | 1de423358f5a0ba8b84d80fa63bce09552bca9fd | [
"Apache-2.0"
] | null | null | null | ethsential/__main__.py | 1140251/Ethsential | 1de423358f5a0ba8b84d80fa63bce09552bca9fd | [
"Apache-2.0"
] | null | null | null | import sys
from .src.applications.server import ETHSENTIAL
from .src.applications.cli import CLI
from .src.parser import create_parser
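# Command-line entry point: dispatches to the analysis-tool runner ('cli'),
# the tool installer ('install'), or the language-server transports
# ('tcp' and stdio).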
def main():
parser = create_parser()
args = parser.parse_args()
if args.action == 'cli':
try:
CLI.exec_cmd(args)
except Exception as e:
if hasattr(e, 'message'):
print(getattr(e, 'message', repr(e)))
else:
print(e)
sys.exit(0)
elif args.action == 'install':
try:
CLI.install()
except Exception as e:
if hasattr(e, 'message'):
print(getattr(e, 'message', repr(e)))
else:
print(e)
elif args.action == 'tcp':
ETHSENTIAL.start_tcp(args.host, args.port)
else:
ETHSENTIAL.start_io()
if __name__ == '__main__':
main()
| 24.305556 | 53 | 0.537143 | 101 | 875 | 4.514851 | 0.386139 | 0.070175 | 0.083333 | 0.078947 | 0.307018 | 0.307018 | 0.307018 | 0.307018 | 0.307018 | 0.307018 | 0 | 0.001742 | 0.344 | 875 | 35 | 54 | 25 | 0.792683 | 0 | 0 | 0.433333 | 0 | 0 | 0.056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.133333 | 0 | 0.166667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da5ea178b4528bc2e8ee17e0a8132d23a6388e83 | 2,322 | py | Python | scripts/msig_prods_update_tag.py | xenbo/eosforce | f77a73c2b49f40f8af5c11a13b0a7eb069e02b5f | [
"MIT"
] | 117 | 2018-06-22T08:49:36.000Z | 2022-01-30T17:08:29.000Z | scripts/msig_prods_update_tag.py | xenbo/eosforce | f77a73c2b49f40f8af5c11a13b0a7eb069e02b5f | [
"MIT"
] | 17 | 2018-07-05T04:06:47.000Z | 2020-09-07T06:19:25.000Z | scripts/msig_prods_update_tag.py | xenbo/eosforce | f77a73c2b49f40f8af5c11a13b0a7eb069e02b5f | [
"MIT"
] | 42 | 2018-06-22T08:57:42.000Z | 2022-03-28T13:08:02.000Z | #!/usr/bin/env python3
import argparse
import json
import os
import re
import subprocess
import sys
import time
enable_push = True # True to push on chain
cleos = '../build/programs/cleos/cleos --wallet-url http://127.0.0.1:6666 --url http://127.0.0.1:8001 '
wallet_password = ''
wallet_name = 'testc'
active_account = 'testc'
funcs_to_open = [
( 'f.cprod', 10000000 ),
( 'f.votagen', 10000010 )
]
tx_expire_hours = 120 # 5 days
def jsonArg(a):
return " '" + json.dumps(a) + "' "
def run(args):
print('', args)
if subprocess.call(args, shell=True):
print(' exiting because of error')
sys.exit(1)
def runone(args):
print('', args)
subprocess.call(args, shell=True)
def getOutput(args):
print('', args)
proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
return proc.communicate()[0].decode('utf-8')
def getJsonOutput(args):
return json.loads(getOutput(args))
def getbps():
bpsa = []
bpsj = getJsonOutput(cleos + " get schedule -j ")
for bp in bpsj["active"]["producers"]:
bpsa.append(bp["producer_name"])
return bpsa
def msigProposeUpdateTag(proposer, bps, func_name, open_block_num, expirehours):
requestedPermissions = []
    for bp in bps:
        requestedPermissions.append({'actor': bp, 'permission': 'active'})
trxPermissions = [{'actor': 'eosio', 'permission': 'active'}]
action_name = 'setconfig'
data = {
'typ': func_name,
'num': open_block_num,
'key': '',
'fee': '0.0000 EOS'
}
run(cleos + 'multisig propose '
+ func_name + jsonArg(requestedPermissions) + jsonArg(trxPermissions)
+ 'eosio ' + action_name + jsonArg(data) + ' '
+ proposer + ' ' + str(expirehours) + ' -p ' + proposer)
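# The propose call above expands to roughly (values illustrative):
#   cleos multisig propose f.cprod \
#     '[{"actor": "<bp1>", "permission": "active"}, ...]' \
#     '[{"actor": "eosio", "permission": "active"}]' \
#     eosio setconfig '{"typ": "f.cprod", "num": 10000000, "key": "", "fee": "0.0000 EOS"}' \
#     testc 120 -p testc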
# ---------------------------------------------------------------------------------------------------
# msig proposals to open chain features via the eosio setconfig action
# unlock wallet
unlockwallet_str = cleos + ' wallet unlock -n ' + wallet_name + ' --password ' + wallet_password
runone(unlockwallet_str)
# get schedule active bps
active_bps = getbps()
for ( func_name, func_block_num ) in funcs_to_open:
msigProposeUpdateTag(active_account, active_bps, func_name, func_block_num, tx_expire_hours)
time.sleep(3)
| 26.089888 | 103 | 0.615418 | 275 | 2,322 | 5.069091 | 0.443636 | 0.028694 | 0.027977 | 0.015782 | 0.086083 | 0.018651 | 0 | 0 | 0 | 0 | 0 | 0.027657 | 0.205857 | 2,322 | 88 | 104 | 26.386364 | 0.728308 | 0.093885 | 0 | 0.04918 | 0 | 0.016393 | 0.162136 | 0.013829 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0.032787 | 0.114754 | 0.032787 | 0.295082 | 0.065574 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da5fc436cce22928bf1e7b8ba50df3169ca33055 | 7,027 | py | Python | maskrcnn/preprocess/download_googlestaticmap.py | JBoshoff/Replicate-night-light | 5bdfbb99fe38f98f61f733f4e847be2bb6f559ef | [
"MIT"
] | 8 | 2020-08-26T21:05:32.000Z | 2021-08-18T06:55:24.000Z | maskrcnn/preprocess/download_googlestaticmap.py | JBoshoff/Replicate-night-light | 5bdfbb99fe38f98f61f733f4e847be2bb6f559ef | [
"MIT"
] | null | null | null | maskrcnn/preprocess/download_googlestaticmap.py | JBoshoff/Replicate-night-light | 5bdfbb99fe38f98f61f733f4e847be2bb6f559ef | [
"MIT"
] | 2 | 2021-10-20T12:43:00.000Z | 2022-01-04T19:40:16.000Z | """This downloader downloads satellite images from the Google Static Maps API.
Usage:
$ python download_googlestaticmap.py \
> --log LOG_FILE.csv \
> --initialize INIT_FILE.csv
$ nohup python download_googlestaticmap.py \
> --log LOG_FILE.csv \
> --num 3 \
> --download-dir DIR \
> > logs/download_googlestaticmap.log &
"""
import os
import pandas as pd
import requests
from argparse import ArgumentParser
from tqdm import tqdm
class Downloader(object):
"""This class keeps a log of the downloading process,
checks for duplicates and manages bad HTTP requests.
Args:
queue (pandas.DataFrame): Log of the downloaded objects.
"""
def __init__(self, queue=None):
# if downloading for the first time
if queue is None:
# create an empty queue
self.queue = pd.DataFrame(columns=['index', 'url', 'status'])
self.queue.set_index('index', inplace=True)
self.queue.index.name = 'index'
# if not, load previous log
else:
self.queue = queue
def request(self, indices, mapping):
"""This method requests objects to be downloaded and adds them to the queue.
Args:
indices (numpy.array): unique id for each object in the queue.
mapping (callable): takes in the indices and generates the urls.
"""
urls = [mapping(index) for index in indices]
subqueue = pd.DataFrame(
{'url': urls,
'status': False},
index=indices)
subqueue.index.name = 'index'
try:
self.queue = pd.concat([self.queue, subqueue],
verify_integrity=True)
print('{} new requests initiated.'.format(subqueue.shape[0]))
except ValueError:
raise Exception('Overlapping new requests with existing requests.')
def download(self, num, download_dir,
test_page='https://www.google.com',
suffix='.png', min_size=20000):
"""This method downloads objects.
Args:
num (int): number of downloads to perform.
download_dir (str): downloading directory.
test_page (str): url to try in order to check internet connection.
suffix (str): suffix for saved files.
min_size (int): minimum file size. Helps drop NA images.
"""
# check local directory
if not os.path.isdir(download_dir):
raise Exception('Download directory does not exist.')
# check internet connection
_ = requests.get(test_page, timeout=1)
# extract items already downloaded
mask = self.queue['status']
if not mask.all():
# number of files to be downloaded
update_num = min((~mask).sum(), num)
print('Preparing to download {} files.'.format(update_num))
idxs = self.queue[~mask].index.copy()
idxs = idxs[0:update_num]
# downloading starts
for idx in tqdm(idxs):
# fetch url
url = self.queue.loc[idx, 'url']
# construct file names
file_name = os.path.join(download_dir, ''.join([idx, suffix]))
# check if file exists already
if os.path.isfile(file_name):
# update status
self.queue.loc[idx, 'status'] = True
print('{} already exists.'.format(file_name))
else:
r = requests.get(url)
if int(r.headers['Content-Length']) > min_size:
with open(file_name, 'wb') as f:
_ = f.write(r.content)
# update status
self.queue.loc[idx, 'status'] = True
print('{} successfully downloaded.'.format(file_name))
else:
print('{} skipped - file too small: {} bytes.'.format(
file_name, int(r.headers['Content-Length'])))
print(url)
self.queue.drop(idx, inplace=True)
        # Re-check the live queue so the message fires once everything is done,
        # not only when nothing was pending at entry (mask predates this batch).
        if self.queue['status'].all():
            print('Downloading completed.')
def make_url(idx, df, GOOGLE_API_KEY):
"""Helper function to generate the urls for the Google Static Maps API.
Args:
        idx (str): Identifies an image.
df (pandas.DataFrame): Stores image info.
GOOGLE_API_KEY (str)
Returns:
url (str): The URL to the image.
"""
params = {
'center': ('{:.6f},{:.6f}'
.format(df.loc[idx, 'lat'], df.loc[idx, 'lon'])),
'zoom': '19',
'size': '640x640',
'scale': '2',
'maptype': 'satellite',
'key': GOOGLE_API_KEY}
params_str = '&'.join(['{}={}'.format(k, v) for k, v in params.items()])
return '?'.join(['https://maps.googleapis.com/maps/api/staticmap',
params_str])
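# For a given index, make_url produces something like (key elided):
#   https://maps.googleapis.com/maps/api/staticmap?center=<lat>,<lon>&zoom=19
#   &size=640x640&scale=2&maptype=satellite&key=...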
def run(args):
"""Runs the script.
Args:
args (argparse.Namespace): Command line arguments.
"""
assert args.log is not None, 'Input log file path!'
# parse and make url list
if args.initialize is not None:
downloader = Downloader()
# fetch authentication key
with open(args.api_key, 'r') as f:
GOOGLE_API_KEY = f.read()
# read coordinates and index
df = pd.read_csv(args.initialize, index_col='index')
df = df.filter(items=['lon', 'lat'])
downloader.request(indices=df.index.values,
mapping=lambda x: make_url(x, df, GOOGLE_API_KEY))
else:
queue = pd.read_csv(args.log, index_col='index')
downloader = Downloader(queue=queue)
# download
if args.num is not None:
assert args.download_dir is not None, 'Input download directory!'
downloader.download(num=args.num, download_dir=args.download_dir)
# save the log
downloader.queue.to_csv(args.log)
if __name__ == '__main__':
# parse arguments passed from the command line
parser = ArgumentParser(
description='Downloads satellite images from Google Statics Maps API.')
parser.add_argument('--log', default=None, type=str,
help='name of log file (.csv)')
# request
parser.add_argument('--initialize', default=None, type=str,
help='a new list of files to be downloaded')
parser.add_argument(
'--api-key', default='GOOGLE_API_KEY.txt',
help='file that stores the API key, defaults to GOOGLE_API_KEY.txt')
# download
parser.add_argument(
'--num', default=None, type=int,
help='number of downloads to perform, this flag turns on downloading')
parser.add_argument('--download-dir', default=None, type=str,
help='downloading directory')
# parse
args = parser.parse_args()
run(args)
| 36.035897 | 84 | 0.565675 | 819 | 7,027 | 4.763126 | 0.295482 | 0.029992 | 0.021533 | 0.011536 | 0.10869 | 0.044091 | 0.044091 | 0.044091 | 0.021533 | 0 | 0 | 0.004203 | 0.322897 | 7,027 | 194 | 85 | 36.221649 | 0.815679 | 0.259713 | 0 | 0.076923 | 0 | 0 | 0.174419 | 0 | 0 | 0 | 0 | 0 | 0.019231 | 1 | 0.048077 | false | 0 | 0.048077 | 0 | 0.115385 | 0.067308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da60447f22ba4eba74abcb47b3cadec2e06136d2 | 9,826 | py | Python | NeuroMechFly/experiments/kinematic_replay/kinematic_replay_no_support.py | NeLy-EPFL/NeuroMechFly | 69f9e2d86caac561a50e3e060d007dd50a20d481 | [
"Apache-2.0"
] | 12 | 2021-05-07T15:27:11.000Z | 2022-01-29T04:26:36.000Z | NeuroMechFly/experiments/kinematic_replay/kinematic_replay_no_support.py | NeLy-EPFL/NeuroMechFly | 69f9e2d86caac561a50e3e060d007dd50a20d481 | [
"Apache-2.0"
] | 15 | 2021-05-07T14:58:04.000Z | 2021-11-10T21:30:58.000Z | NeuroMechFly/experiments/kinematic_replay/kinematic_replay_no_support.py | NeLy-EPFL/NeuroMechFly | 69f9e2d86caac561a50e3e060d007dd50a20d481 | [
"Apache-2.0"
] | 1 | 2022-01-13T16:08:49.000Z | 2022-01-13T16:08:49.000Z | """ Drosophila simulation class for kinematic replay without body support. """
import numpy as np
import pandas as pd
import pybullet as p
from NeuroMechFly.sdf.units import SimulationUnitScaling
from NeuroMechFly.simulation.bullet_simulation import BulletSimulation
# Random number seed
np.random.seed(seed=321)
def add_perturbation(
size, initial_position, target_position, time, units
):
""" Shoot a ball to perturb the target system at a specified
velocity
Parameters
----------
size: <float>
Radius of the ball
initial_position: <array>
3D position of the ball
target_position: <array>
3D position of the target
time: <float>
Time before reaching the target position
Returns
-------
ball : <int>
Pybullet ID for the ball
"""
# Init
initial_position = np.asarray(initial_position) * units.meters
target_position = np.asarray(target_position) * units.meters
# Load ball
ball = p.loadURDF(
"../data/design/sdf/sphere_1cm.urdf", initial_position,
globalScaling=size * units.meters,
useMaximalCoordinates=True
)
# Change dynamics to remove damping and friction
p.changeDynamics(
ball, -1, linearDamping=0, angularDamping=0,
rollingFriction=0, spinningFriction=0
)
p.changeVisualShape(ball, -1, rgbaColor=[0.8, 0.8, 0.8, 1])
# Compute initial velocity
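    # From x(t) = x0 + v*t + 0.5*a*t**2 with a = (0, 0, -9.81 * g_scale),
    # solve for the launch velocity v that lands the ball on target at `time`.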
velocity = (
target_position - initial_position -
0.5 * np.asarray([0, 0, -9.81 * units.gravity]) * time**2
) / time
# Reset base velocity
p.resetBaseVelocity(ball, velocity)
return ball
class DrosophilaSimulation(BulletSimulation):
""" Drosophila Simulation Class for kinematic replay.
Parameters
----------
container: <Container>
Instance of the Container class.
sim_options: <dict>
Dictionary containing the simulation options.
kp: <float>
Proportional gain of the position controller.
kv: <float>
Derivative gain of the position controller.
    angles_path: <str>
        Path of the joint angles .pkl file.
velocity_path: <str>
Path of the joint velocity .pkl file.
add_perturbation: <bool>
Activate/deactivate the ball perturbation.
units: <obj>
Instance of SimulationUnitScaling object to scale up the units during calculations.
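    starting_time: <float>
        Experiment time (in seconds) at which the replay starts.
    fixed_positions: <dict>
        Optional mapping of joint names to fixed angles in degrees.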
"""
def __init__(
self, container, sim_options, kp, kv,
angles_path, velocity_path,
add_perturbation,
starting_time=0.0,
fixed_positions=None,
units=SimulationUnitScaling(meters=1000, kilograms=1000)
):
super().__init__(container, units, **sim_options)
self.last_draw = []
self.kp = kp
self.kv = kv
self.pose = [0] * self.num_joints
self.vel = [0] * self.num_joints
self.angles = self.load_data(angles_path, starting_time)
self.velocities = self.load_data(velocity_path, starting_time)
self.impulse_sign = 1
self.add_perturbation = add_perturbation
self.fixed_positions = fixed_positions
self.pball = None
def load_data(self, data_path, starting_time):
""" Function that loads the pickle format joint angle or velocity gile.
Parameters
----------
data_path : <str>
Path of the .pkl file.
starting_time : <float>
Experiment's time from which the simulation will start.
Returns
-------
dict
Returns the joint angles in a dictionary.
"""
names_equivalence = {
'ThC_pitch': 'Coxa',
'ThC_yaw': 'Coxa_yaw',
'ThC_roll': 'Coxa_roll',
'CTr_pitch': 'Femur',
'CTr_roll': 'Femur_roll',
'FTi_pitch': 'Tibia',
'TiTa_pitch': 'Tarsus1'
}
converted_dict = {}
try:
data = pd.read_pickle(data_path)
start = int(np.round(starting_time / self.time_step))
for leg, joints in data.items():
for joint_name, val in joints.items():
new_name = 'joint_' + leg[:2] + \
names_equivalence[joint_name]
converted_dict[new_name] = val[start:]
return converted_dict
        except FileNotFoundError:
            raise FileNotFoundError(f"File {data_path} not found!")
def controller_to_actuator(self, t):
"""
        Code that glues the controller to the actuator in the system.
        If there are muscles, the controller actuates the muscles;
        otherwise it actuates the joints directly.
Parameters
----------
t : int
Time running in the physics engine.
"""
# Throw mini balls at the fly during kinematic replay
if self.add_perturbation:
if ((t + 1) % (0.5 / self.time_step)) == 0:
print("Adding perturbation")
self.pball = add_perturbation(
size=5e-2,
initial_position=np.asarray(
[0, self.impulse_sign * 2e-3, 0.0]) + self.base_position,
target_position=self.base_position,
time=20e-3, units=self.units
)
self.impulse_sign *= -1
if ((t + 1) % (3.0 / self.time_step)
) == 0 and t < (3.012 / self.time_step):
radius = 20e-2
self.pball = add_perturbation(
size=radius,
initial_position=np.asarray(
[radius * 0.05, radius * 0.05, 1e-3]) + self.base_position,
target_position=[self.base_position[0], self.base_position[1], 0.0],
time=20e-3, units=self.units
)
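                # The third positional argument of changeDynamics sets the
                # ball's mass, so the big ball carries momentum on impact.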
p.changeDynamics(self.pball, -1, 0.3)
        # Setting the joint angular positions
# Setting the joint angular positions of the fixed joints
if not self.fixed_positions:
self.fixed_positions = {
'joint_LAntenna': 35,
'joint_RAntenna': -35,
}
for joint_name, joint_pos in self.fixed_positions.items():
self.pose[self.joint_id[joint_name]] = np.deg2rad(joint_pos)
# Setting the joint angular positions of leg DOFs based on pose estimation
for joint_name, joint_pos in self.angles.items():
self.pose[self.joint_id[joint_name]] = joint_pos[t]
# Setting the joint angular velocities of leg DOFs based on pose estimation
for joint_name, joint_vel in self.velocities.items():
self.vel[self.joint_id[joint_name]] = joint_vel[t]
# Control the joints through position controller
# Velocity can be discarded if not available and gains can be changed
for joint in range(self.num_joints):
p.setJointMotorControl2(
self.animal, joint,
controlMode=p.POSITION_CONTROL,
targetPosition=self.pose[joint],
targetVelocity=self.vel[joint],
positionGain=self.kp,
velocityGain=self.kv,
maxVelocity=1e8
)
p.changeDynamics(self.animal, joint, maxJointVelocity=1e8)
# Change the color of the colliding body segments
if self.draw_collisions:
draw = []
if self.behavior == 'walking':
links_contact = self.get_current_contacts()
link_names = list(self.link_id.keys())
link_ids = list(self.link_id.values())
for i in links_contact:
link1 = link_names[link_ids.index(i)]
if link1 not in draw:
draw.append(link1)
self.change_color(link1, self.color_collision)
for link in self.last_draw:
if link not in draw:
self.change_color(link, self.color_legs)
elif self.behavior == 'grooming':
# Don't consider the ground sensors
collision_forces = self.contact_normal_force[len(
self.ground_contacts):, :]
links_contact = np.where(
np.linalg.norm(collision_forces, axis=1) > 0
)[0]
for i in links_contact:
link1 = self.self_collisions[i][0]
link2 = self.self_collisions[i][1]
if link1 not in draw:
draw.append(link1)
self.change_color(link1, self.color_collision)
if link2 not in draw:
draw.append(link2)
self.change_color(link2, self.color_collision)
for link in self.last_draw:
if link not in draw:
if 'Antenna' in link:
self.change_color(link, self.color_body)
else:
self.change_color(link, self.color_legs)
self.last_draw = draw
def change_color(self, identity, color):
""" Change color of a given body segment. """
p.changeVisualShape(
self.animal,
self.link_id[identity],
rgbaColor=color)
def feedback_to_controller(self):
"""
        Code that glues the sensors/feedback to the controller in the system.
"""
def update_parameters(self, params):
""" Update parameters. """
def optimization_check(self):
""" Optimization check. """
| 36.258303 | 91 | 0.569408 | 1,096 | 9,826 | 4.958029 | 0.265511 | 0.010121 | 0.016562 | 0.016194 | 0.230217 | 0.179426 | 0.106551 | 0.088333 | 0.059257 | 0.059257 | 0 | 0.017509 | 0.343171 | 9,826 | 270 | 92 | 36.392593 | 0.82445 | 0.239161 | 0 | 0.15 | 0 | 0 | 0.0344 | 0.004793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.03125 | 0 | 0.1 | 0.00625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da6071c120cc4c6108f42d5833b8ae67a673f55d | 3,845 | py | Python | hw/ip/otbn/dv/otbnsim/sim/isa.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/sim/isa.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/sim/isa.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | null | null | null | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from enum import IntEnum
import sys
from typing import Dict
from riscvmodel.types import Immediate # type: ignore
from shared.insn_yaml import Insn, load_insns_yaml
from .model import OTBNModel
# Load the insns.yml file at module load time: we'll use its data while
# declaring the classes. The point is that an OTBNInsn below is an instance of
# a particular Insn object from shared.insn_yaml, so we want a class variable
# on the OTBNInsn that points at the corresponding Insn.
try:
_INSNS_FILE = load_insns_yaml()
except RuntimeError as err:
sys.stderr.write('{}\n'.format(err))
sys.exit(1)
class DummyInsn(Insn):
'''A dummy instruction that will never be decoded. Used for the insn class
variable in the OTBNInsn base class.
'''
def __init__(self) -> None:
fake_yml = {
'mnemonic': 'dummy-insn',
'operands': []
}
super().__init__(fake_yml, None)
def insn_for_mnemonic(mnemonic: str, num_operands: int) -> Insn:
'''Look up the named instruction in the loaded YAML data.
As a sanity check, make sure it has the expected number of operands. If we
fail to find the right instruction, print a message to stderr and exit
(rather than raising a RuntimeError: this happens on module load time, so
it's a lot clearer to the user what's going on this way).
'''
insn = _INSNS_FILE.mnemonic_to_insn.get(mnemonic)
if insn is None:
sys.stderr.write('Failed to find an instruction for mnemonic {!r} in '
'insns.yml.\n'
.format(mnemonic))
sys.exit(1)
if len(insn.operands) != num_operands:
sys.stderr.write('The instruction for mnemonic {!r} in insns.yml has '
'{} operands, but we expected {}.\n'
.format(mnemonic, len(insn.operands), num_operands))
sys.exit(1)
return insn
class OTBNInsn:
'''A decoded OTBN instruction.
'''
# A class variable that holds the Insn subclass corresponding to this
# instruction.
insn = DummyInsn() # type: Insn
def __init__(self, op_vals: Dict[str, int]):
self.op_vals = op_vals
def execute(self, model: OTBNModel) -> None:
raise NotImplementedError('OTBNInsn.execute')
def disassemble(self, pc: int) -> str:
'''Generate an assembly listing for this instruction'''
return self.insn.disassemble(self.op_vals, 12)
class RV32RegReg(OTBNInsn):
'''A general class for register-register insns from the RV32I ISA'''
def __init__(self, op_vals: Dict[str, int]):
super().__init__(op_vals)
self.grd = op_vals['grd']
self.grs1 = op_vals['grs1']
self.grs2 = op_vals['grs2']
class RV32RegImm(OTBNInsn):
'''A general class for register-immediate insns from the RV32I ISA'''
def __init__(self, op_vals: Dict[str, int]):
super().__init__(op_vals)
self.grd = op_vals['grd']
self.grs1 = op_vals['grs1']
self.imm = op_vals['imm']
class RV32ImmShift(OTBNInsn):
'''A general class for immediate shift insns from the RV32I ISA'''
def __init__(self, op_vals: Dict[str, int]):
super().__init__(op_vals)
self.grd = op_vals['grd']
self.grs1 = op_vals['grs1']
self.shamt = op_vals['shamt']
class ShiftType(IntEnum):
LSL = 0 # logical shift left
LSR = 1 # logical shift right
def ShiftReg(reg: int, shift_type: int, shift_bytes: Immediate) -> int:
assert 0 <= int(shift_bytes)
shift_bits = int(shift_bytes << 3)
return (reg << shift_bits
if shift_type == ShiftType.LSL
else reg >> shift_bits)
| 31.008065 | 78 | 0.649935 | 537 | 3,845 | 4.500931 | 0.325885 | 0.047166 | 0.024824 | 0.021514 | 0.223004 | 0.213074 | 0.162598 | 0.135292 | 0.124121 | 0.124121 | 0 | 0.011431 | 0.249155 | 3,845 | 123 | 79 | 31.260163 | 0.825771 | 0.33238 | 0 | 0.242424 | 0 | 0 | 0.091275 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da63d798dfe9c2ea59c6459800d52786ae4db56c | 2,668 | py | Python | tests/features/steps/environment_steps.py | candango/pyclicksign | d709122867cfa5c6fce4322b55a033bc82126e1c | [
"Apache-2.0"
] | null | null | null | tests/features/steps/environment_steps.py | candango/pyclicksign | d709122867cfa5c6fce4322b55a033bc82126e1c | [
"Apache-2.0"
] | 9 | 2022-01-15T19:43:46.000Z | 2022-03-24T06:04:25.000Z | tests/features/steps/environment_steps.py | candango/pyclicksign | d709122867cfa5c6fce4322b55a033bc82126e1c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021-2022 Flávio Gonçalves Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from behave import given, when, then, step
from cartola import fs
from tornado.escape import json_encode, json_decode
import os
def get_absolute_path(directory):
return os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "..", directory)
)
def create_file(path, content, binary=False):
real_path = get_absolute_path(path)
fs.write(real_path, content, binary)
os.chmod(real_path, 0o600)
return real_path
@then("Podemos converter {index} de dict para texto")
def step_arquivo_criado_com_sucesso(context, index):
data = getattr(context, index)
setattr(context, index, json_encode(data))
@then("Podemos converter {index} de texto para dict")
def step_arquivo_criado_com_sucesso(context, index):
data = getattr(context, index)
setattr(context, index, json_decode(data))
@then("Arquivo de {index} é criado com sucesso em {path}")
def step_arquivo_criado_com_sucesso(context, index, path):
data = getattr(context, index)
if isinstance(data, dict):
data = json_encode(data)
if isinstance(data, str):
data = data.encode()
real_path = create_file(path, data, True)
context.tester.assertTrue(os.path.exists(real_path))
context.tester.assertTrue(os.path.isfile(real_path))
@given("Arquivo de {index} existe em {path}")
def step_arquivo_existe(context, index, path):
real_path = get_absolute_path(path)
context.tester.assertTrue(os.path.exists(real_path))
context.tester.assertTrue(os.path.isfile(real_path))
setattr(context, index, real_path)
print(getattr(context, index))
@given("Ler dados de {index} sucedeu")
def step_arquivo_existe(context, index):
real_path = getattr(context, index)
setattr(context, index, fs.read(real_path))
@then("File at {path} removed")
def step_file_at_removed(context, path):
real_path = get_absolute_path(path)
context.tester.assertTrue(os.path.exists(real_path))
context.tester.assertTrue(os.path.isfile(real_path))
os.remove(real_path)
context.tester.assertFalse(os.path.exists(real_path))
| 33.35 | 74 | 0.737631 | 385 | 2,668 | 4.966234 | 0.34026 | 0.075314 | 0.072176 | 0.078452 | 0.393828 | 0.348849 | 0.281381 | 0.281381 | 0.259414 | 0.259414 | 0 | 0.007092 | 0.154423 | 2,668 | 79 | 75 | 33.772152 | 0.840426 | 0.211394 | 0 | 0.285714 | 0 | 0 | 0.108134 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.163265 | false | 0 | 0.081633 | 0.020408 | 0.285714 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da648cdce4097a5f15c459cc9d3dc08716cd7f4a | 2,967 | py | Python | photo_album_src/models_bk.py | chrisjen83/k3s-labs | b283c2500b272be0de1025ef541a46d7c4591cc1 | [
"MIT"
] | 1 | 2020-04-01T22:05:28.000Z | 2020-04-01T22:05:28.000Z | photo_album_src/models_bk.py | chrisjen83/k3s-labs | b283c2500b272be0de1025ef541a46d7c4591cc1 | [
"MIT"
] | null | null | null | photo_album_src/models_bk.py | chrisjen83/k3s-labs | b283c2500b272be0de1025ef541a46d7c4591cc1 | [
"MIT"
] | 5 | 2020-02-21T22:47:35.000Z | 2022-02-03T15:21:39.000Z | #!/usr/bin/env python3
# Import modules required for app
import os
import boto3
import json
from pymongo import MongoClient
from werkzeug.utils import secure_filename
from PIL import Image
from config import ecs_test_drive
#Get from K8s ConfigMap values for MongoDB Database
MONGO_SERVER = os.environ.get( "MONGO_SERVER", None)
DB_NAME = os.environ.get( "DB_NAME", None)
client = MongoClient( MONGO_SERVER, 27017)
# Get database connection with database name
db = client[DB_NAME]
# Remove any existing documents in photos collection
# db.photos.delete_many({}) # Comment this line if you don't want to remove documents each time you start the app
# Retrieve all photos records from database
def get_photos():
return db.photos.find({})
# Insert form fields into database
def insert_photo(request):
title = request.form['title']
comments = request.form['comments']
filename = secure_filename(request.files['photo'].filename)
thumbfile = filename.rsplit(".", 1)[0] + "-thumb.jpg"
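    # Public ECS Test Drive URLs have the form
    #   http://<namespace>.public.ecstestdrive.com/<bucket>/<object>
    # where <namespace> is the access key id up to the '@'.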
photo_url = "http://" + ecs_test_drive['ecs_access_key_id'].split(
'@')[0] + ".public.ecstestdrive.com/" + ecs_test_drive['ecs_bucket_name'] + "/" + filename
thumbnail_url = "http://" + ecs_test_drive['ecs_access_key_id'].split(
'@')[0] + ".public.ecstestdrive.com/" + ecs_test_drive['ecs_bucket_name'] + "/" + thumbfile
db.photos.insert_one({'title': title, 'comments': comments,
'photo': photo_url, 'thumb': thumbnail_url})
def upload_photo(file):
# Get ECS credentials from external config file
ecs_endpoint_url = ecs_test_drive['ecs_endpoint_url']
ecs_access_key_id = ecs_test_drive['ecs_access_key_id']
ecs_secret_key = ecs_test_drive['ecs_secret_key']
ecs_bucket_name = ecs_test_drive['ecs_bucket_name']
# Open a session with ECS using the S3 API
session = boto3.resource(service_name='s3', aws_access_key_id=ecs_access_key_id,
aws_secret_access_key=ecs_secret_key, endpoint_url=ecs_endpoint_url)
# Remove unsupported characters from filename
filename = secure_filename(file.filename)
# First save the file locally
file.save(os.path.join("uploads", filename))
# Create a thumbnail
size = 225, 225
with open("uploads/" + filename, 'rb') as f:
img = Image.open(f)
img.thumbnail(size)
thumbfile = filename.rsplit(".", 1)[0] + "-thumb.jpg"
img.save("uploads/" + thumbfile, "JPEG")
img.close()
# Empty the variables to prevent memory leaks
img = None
# Upload the original image to ECS
session.Object(ecs_bucket_name, filename).put(
Body=open("uploads/" + filename, 'rb'), ACL='public-read')
# Upload the thumbnail to ECS
session.Object(ecs_bucket_name, thumbfile).put(
Body=open("uploads/" + thumbfile, 'rb'), ACL='public-read')
# Delete the local files
os.remove("uploads/" + filename)
os.remove("uploads/" + thumbfile)
| 34.905882 | 115 | 0.691271 | 405 | 2,967 | 4.859259 | 0.34321 | 0.032012 | 0.054878 | 0.060976 | 0.185976 | 0.177337 | 0.164634 | 0.086382 | 0.086382 | 0.086382 | 0 | 0.009583 | 0.191102 | 2,967 | 84 | 116 | 35.321429 | 0.810417 | 0.232895 | 0 | 0.042553 | 0 | 0 | 0.161647 | 0.022143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.148936 | 0.021277 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da67084ae53c45931e9876b1394dc1aa92e625de | 12,051 | py | Python | pymothoa/llvm_backend/backend.py | sklam/pymothoa | 330bd70666ccf761f39c75f3acb70aa7e0a92ac6 | [
"BSD-2-Clause"
] | 2 | 2017-03-23T19:44:03.000Z | 2020-11-28T17:01:49.000Z | pymothoa/llvm_backend/backend.py | sklam/pymothoa | 330bd70666ccf761f39c75f3acb70aa7e0a92ac6 | [
"BSD-2-Clause"
] | null | null | null | pymothoa/llvm_backend/backend.py | sklam/pymothoa | 330bd70666ccf761f39c75f3acb70aa7e0a92ac6 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012, Siu Kwan Lam
# All rights reserved.
import logging
import ast
from contextlib import contextmanager
from pymothoa.util.descriptor import Descriptor, instanceof
from pymothoa import dialect
from pymothoa.compiler_errors import *
from pymothoa.backend import CodeGenerationBase
from types import *
from values import *
import llvm # binding
logger = logging.getLogger(__name__)
class LLVMCodeGenerator(CodeGenerationBase):
    retty = Descriptor(constant=True)
    argtys = Descriptor(constant=True)
    function = Descriptor(constant=True)
    entry_block = Descriptor(constant=True)

    def __init__(self, fnobj, retty, argtys, symbols):
        super(LLVMCodeGenerator, self).__init__(symbols)
        self.function = fnobj
        self.retty = retty
        self.argtys = argtys

    @contextmanager
    def generate_function(self, name):
        if not self.function.valid():
            raise FunctionDeclarationError(
                self.current_node,
                self.jit_engine.last_error()
            )
        self.symbols[name] = self.function
        # make basic block
        self.entry_block = self.function.append_basic_block("entry")
        self.__blockcounter = 0
        # make instruction builder
        self.builder = llvm.Builder()
        bb_body = self.function.append_basic_block("body")
        self.builder.insert_at(bb_body)
        yield  # wait until args & body are generated
        # link entry to body
        bb_last = self.builder.get_basic_block()  # remember last block
        self.builder.insert_at(self.entry_block)  # goto entry block
        self.builder.branch(bb_body)              # branch to body
        self.builder.insert_at(bb_last)           # return to last block
        # close function
        if not self.builder.is_block_closed():
            if isinstance(self.retty, types.Void):
                # no return
                self.builder.ret_void()
            else:
                raise MissingReturnError(self.current_node)

    def generate_function_arguments(self, arguments):
        with self.relocate_to_entry():
            fn_args = self.function.arguments()
            for i, name in enumerate(arguments):
                try:
                    var = LLVMVariable(name, self.argtys[i], self.builder)
                except IndexError:
                    raise FunctionDeclarationError(
                        self.current_node,
                        'Actual number of argument mismatch declaration.')
                self.builder.store(fn_args[i], var.pointer)
                self.symbols[name] = var

    def generate_call(self, fn, args):
        from function import LLVMFunction
        if isinstance(fn, LLVMFunction):  # another function
            retty = fn.retty
            argtys = fn.argtys
            fn = fn.code_llvm
        elif fn is self.function:  # recursion
            retty = self.retty
            argtys = self.argtys
        else:
            raise InvalidCall(self.current_node)
        return self._call_function(fn, args, retty, argtys)

    def generate_assign(self, from_value, to_target):
        casted = to_target.type.cast(from_value, self.builder)
        self.builder.store(casted, to_target.pointer)
        return casted

    def generate_compare(self, op_class, lhs, rhs):
        ty = lhs.type.coerce(rhs.type)
        lval = ty.cast(lhs, self.builder)
        rval = ty.cast(rhs, self.builder)
        fn = getattr(ty, 'op_%s' % op_class.__name__.lower())
        pred = fn(lval, rval, self.builder)
        return LLVMTempValue(pred, LLVMType(types.Bool))

    def generate_return(self, value=None):
        if value is None:  # no return value
            self.builder.ret_void()
            return
        if isinstance(self.retty, LLVMVoid):
            raise InvalidReturnError(
                self.current_node,
                'This function does not return any value.'
            )
        casted = self.retty.cast(value, self.builder)
        self.builder.ret(casted)

    def generate_binop(self, op_class, lhs, rhs):
        ty = lhs.type.coerce(rhs.type)
        lval = ty.cast(lhs, self.builder)
        rval = ty.cast(rhs, self.builder)
        try:
            fn = getattr(ty, 'op_%s' % op_class.__name__.lower())
        except AttributeError as e:
            raise OperatorError(self.current_node, 'Debug detail: %s' % str(e))
        else:
            return LLVMTempValue(fn(lval, rval, self.builder), ty)

    def generate_constant_int(self, value):
        return LLVMConstant(LLVMType(types.Int), value)

    def generate_constant_real(self, value):
        return LLVMConstant(LLVMType(types.Double), value)

    def generate_declare(self, name, ty):
        with self.relocate_to_entry():
            if issubclass(ty, types.GenericBoundedArray):  # array
                return LLVMArrayVariable(name, LLVMType(ty), ty.elemcount.value(self.builder), self.builder)
            else:  # other types
                realty = LLVMType(ty)
                return LLVMVariable(name, realty, self.builder)

    def _call_function(self, fn, args, retty, argtys):
        arg_values = map(lambda X: LLVMTempValue(X.value(self.builder), X.type), args)
        # cast types
        try:
            for i, argty in enumerate(argtys):
                arg_values[i] = argty.cast(arg_values[i], self.builder)
        except IndexError:
            raise InvalidCall(self.current_node, 'Number of argument mismatch')
        out = self.builder.call(fn, arg_values)
        return LLVMTempValue(out, retty)

    def new_basic_block(self, name='uname'):
        self.__blockcounter += 1
        return self.function.append_basic_block('%s_%d' % (name, self.__blockcounter))

    def generate_vector_load_elem(self, ptr, idx):
        elemval = self.builder.extract_element(
            ptr.value(self.builder),
            idx.value(self.builder),
        )
        return LLVMTempValue(elemval, ptr.type.elemtype)

    def generate_vector_store_elem(self, ptr, idx):
        zero = self.generate_constant_int(0)
        indices = map(lambda X: X.value(self.builder), [zero, idx])
        addr = self.builder.gep2(ptr.pointer, indices)
        return LLVMTempPointer(addr, ptr.type.elemtype)

    def generate_array_load_elem(self, ptr, idx):
        ptr_val = ptr.value(self.builder)
        idx_val = idx.value(self.builder)
        ptr_offset = self.builder.gep(ptr_val, idx_val)
        return LLVMTempValue(self.builder.load(ptr_offset), ptr.type.elemtype)

    def generate_array_store_elem(self, ptr, idx):
        ptr_val = ptr.value(self.builder)
        idx_val = idx.value(self.builder)
        ptr_offset = self.builder.gep(ptr_val, idx_val)
        return LLVMTempPointer(ptr_offset, ptr.type.elemtype)

    def generate_if(self, test, iftrue, orelse):
        bb_if = self.new_basic_block('if')
        bb_else = self.new_basic_block('else')
        bb_endif = self.new_basic_block('endif')
        is_endif_reachable = False
        boolean = self.ensure_boolean(test)
        self.builder.cond_branch(boolean, bb_if, bb_else)
        # true branch
        self.builder.insert_at(bb_if)
        for stmt in iftrue:
            self.visit(stmt)
        else:
            if not self.builder.is_block_closed():
                self.builder.branch(bb_endif)
                is_endif_reachable = True
        # false branch
        self.builder.insert_at(bb_else)
        for stmt in orelse:
            self.visit(stmt)
        else:
            if not self.builder.is_block_closed():
                self.builder.branch(bb_endif)
                is_endif_reachable = True
        # endif
        self.builder.insert_at(bb_endif)
        if not is_endif_reachable:
            self.builder.unreachable()

    def generate_while(self, test, body):
        bb_cond = self.new_basic_block('loopcond')
        bb_body = self.new_basic_block('loopbody')
        bb_exit = self.new_basic_block('loopexit')
        self.builder.branch(bb_cond)
        # condition
        self.builder.insert_at(bb_cond)
        cond = self.visit(test)
        self.builder.cond_branch(self.ensure_boolean(cond), bb_body, bb_exit)
        # body
        self.builder.insert_at(bb_body)
        for stmt in body:
            self.visit(stmt)
        else:
            self.builder.branch(bb_cond)
            # Not sure if it is necessary
            # if not self.builder.is_block_closed():
            #     self.builder.branch(bb_cond)
        # end loop
        self.builder.insert_at(bb_exit)

    def generate_for_range(self, counter_ptr, initcount, endcount, step, loopbody):
        self.builder.store(initcount.value(self.builder), counter_ptr.pointer)
        bb_cond = self.new_basic_block('loopcond')
        bb_body = self.new_basic_block('loopbody')
        bb_incr = self.new_basic_block('loopincr')
        bb_exit = self.new_basic_block('loopexit')
        self.builder.branch(bb_cond)
        # condition
        self.builder.insert_at(bb_cond)
        test = self.builder.icmp(llvm.ICMP_SLT, counter_ptr.value(self.builder), endcount.value(self.builder))
        self.builder.cond_branch(test, bb_body, bb_exit)
        # body
        self.builder.insert_at(bb_body)
        for stmt in loopbody:
            self.visit(stmt)
        else:
            self.builder.branch(bb_incr)
            # Not sure if it is necessary
            # if not self.builder.is_block_closed():
            #     self.builder.branch(bb_incr)
        # incr
        self.builder.insert_at(bb_incr)
        # counter_next = self.builder.add(counter_ptr.value(self.builder),
        #                                 step.value(self.builder))
        counter_next = counter_ptr.type.op_add(counter_ptr.value(self.builder),
                                               step.value(self.builder),
                                               self.builder)
        self.builder.store(counter_next, counter_ptr.pointer)
        self.builder.branch(bb_cond)
        # exit
        self.builder.insert_at(bb_exit)

    def generate_boolop(self, op_class, lhs, rhs):
        bb_left = self.builder.get_basic_block()
        boolty = LLVMType(types.Bool)
        left = boolty.cast(self.visit(lhs), self.builder)
        bb_right = self.new_basic_block('bool_right')
        bb_result = self.new_basic_block('bool_result')
        if isinstance(op_class, ast.And):
            self.builder.cond_branch(left, bb_right, bb_result)
        elif isinstance(op_class, ast.Or):
            self.builder.cond_branch(left, bb_result, bb_right)
        else:
            raise AssertionError('Unknown Boolean operator')
        self.builder.insert_at(bb_right)
        right = boolty.cast(self.visit(rhs), self.builder)
        self.builder.branch(bb_result)
        self.builder.insert_at(bb_result)
        pred = self.builder.phi(boolty.type(), [bb_left, bb_right], [left, right])
        return LLVMTempValue(pred, boolty)

    def generate_not(self, operand):
        boolty = LLVMType(types.Bool)
        boolval = boolty.cast(operand, self.builder)
        negated = boolty.op_not(boolval, self.builder)
        return LLVMTempValue(negated, boolty)

    def generate_array_slice(self, ptr, lower, upper=None, step=None):
        assert upper is None
        assert step is None
        ptr_val = ptr.value(self.builder)
        lower_val = lower.value(self.builder)
        offsetted = self.builder.gep(ptr_val, lower_val)
        return LLVMTempValue(offsetted, ptr.type)

    @contextmanager
    def relocate_to_entry(self):
        # goto entry block
        bb_last = self.builder.get_basic_block()
        self.builder.insert_at(self.entry_block)
        yield  # relocated
        # pickup at last block
        self.builder.insert_at(bb_last)

    def ensure_boolean(self, value):
        return LLVMType(types.Bool).cast(value, self.builder)
| 35.759644 | 110 | 0.617127 | 1,450 | 12,051 | 4.938621 | 0.170345 | 0.147465 | 0.049155 | 0.045105 | 0.404552 | 0.286133 | 0.232789 | 0.207234 | 0.176512 | 0.168133 | 0 | 0.000933 | 0.288275 | 12,051 | 336 | 111 | 35.866071 | 0.833975 | 0.069538 | 0 | 0.322314 | 0 | 0 | 0.024257 | 0 | 0 | 0 | 0 | 0 | 0.012397 | 1 | 0.103306 | false | 0 | 0.045455 | 0.012397 | 0.247934 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da687c22550da7202f3e33817124a03999dca63a | 542 | py | Python | cloud/single_stage_detector/pytorch/onnx_demo.py | mgoin/inference | ede5477a2aee72ceb435e9ecd599ffa052417c2a | [
"Apache-2.0"
] | 4 | 2019-07-26T03:00:39.000Z | 2021-01-29T16:12:21.000Z | cloud/single_stage_detector/pytorch/onnx_demo.py | mgoin/inference | ede5477a2aee72ceb435e9ecd599ffa052417c2a | [
"Apache-2.0"
] | null | null | null | cloud/single_stage_detector/pytorch/onnx_demo.py | mgoin/inference | ede5477a2aee72ceb435e9ecd599ffa052417c2a | [
"Apache-2.0"
] | 2 | 2019-11-12T15:57:29.000Z | 2022-03-02T21:26:58.000Z | import onnxruntime
import onnx
import os
from onnx import numpy_helper
onnx_model_dir = 'test_ssd_model'
onnx_data_dir = 'test_data_set_0'
sess = onnxruntime.InferenceSession(os.path.join(onnx_model_dir, 'model.onnx'))
img_tensor = onnx.TensorProto()
with open(os.path.join(onnx_model_dir, onnx_data_dir, 'input_0.pb'), 'rb') as f:
    img_tensor.ParseFromString(f.read())
test_img_data = numpy_helper.to_array(img_tensor)
out_onnx = sess.run(None, { sess.get_inputs()[0].name: test_img_data })
loc, label, prob = out_onnx
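# Hedged extension (editor addition): if the data set also ships a reference
# output protobuf -- conventionally named 'output_0.pb', which is an assumption
# about this particular model directory -- the run can be validated with numpy:
#
#   import numpy as np
#   ref_tensor = onnx.TensorProto()
#   with open(os.path.join(onnx_model_dir, onnx_data_dir, 'output_0.pb'), 'rb') as f:
#       ref_tensor.ParseFromString(f.read())
#   np.testing.assert_allclose(loc, numpy_helper.to_array(ref_tensor), rtol=1e-3, atol=1e-5)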
print(out_onnx) | 30.111111 | 80 | 0.778598 | 91 | 542 | 4.307692 | 0.461538 | 0.068878 | 0.091837 | 0.071429 | 0.112245 | 0.112245 | 0 | 0 | 0 | 0 | 0 | 0.006148 | 0.099631 | 542 | 18 | 81 | 30.111111 | 0.797131 | 0 | 0 | 0 | 0 | 0 | 0.093923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da6a8272e09ed6bcdd72fe1fe0ed6ca276090222 | 3,519 | py | Python | uat/test_uat_CLIParser.py | sorint-lab-us/aws-greengrass-gdk-cli | 7508c7f62dcee1638cfc895ea38f3842e0072f0e | [
"Apache-2.0"
] | null | null | null | uat/test_uat_CLIParser.py | sorint-lab-us/aws-greengrass-gdk-cli | 7508c7f62dcee1638cfc895ea38f3842e0072f0e | [
"Apache-2.0"
] | null | null | null | uat/test_uat_CLIParser.py | sorint-lab-us/aws-greengrass-gdk-cli | 7508c7f62dcee1638cfc895ea38f3842e0072f0e | [
"Apache-2.0"
] | null | null | null | import json
import os
import subprocess as sp
import tempfile
from pathlib import Path
import gdk.common.exceptions.error_messages as error_messages
def test_list_template():
    check_list_template = sp.run(["gdk", "component", "list", "--template"], check=True, stdout=sp.PIPE)
    assert "HelloWorld-python" in check_list_template.stdout.decode()
    assert "HelloWorld-java" in check_list_template.stdout.decode()

def test_list_repository():
    check_list_template = sp.run(["gdk", "component", "list", "--repository"], check=True, stdout=sp.PIPE)
    assert "aws-greengrass-labs-database-influxdb" in check_list_template.stdout.decode()

def test_init_template_non_empty_dir():
    check_init_template = sp.run(["gdk", "component", "init", "-t", "HelloWorld", "-l", "python"], stdout=sp.PIPE)
    assert check_init_template.returncode == 1
    assert "Try `gdk component init --help`" in check_init_template.stdout.decode()

def test_init_template():
    dirpath = tempfile.mkdtemp()
    os.chdir(dirpath)
    check_init_template = sp.run(["gdk", "component", "init", "-t", "HelloWorld", "-l", "python"], check=True, stdout=sp.PIPE)
    assert check_init_template.returncode == 0
    assert Path(dirpath).joinpath("recipe.yaml").resolve().exists()
    assert Path(dirpath).joinpath("gdk-config.json").resolve().exists()

def test_init_repository():
    dirpath = tempfile.mkdtemp()
    os.chdir(dirpath)
    check_init_repo = sp.run(
        ["gdk", "component", "init", "-r", "aws-greengrass-labs-database-influxdb"], check=True, stdout=sp.PIPE
    )
    assert check_init_repo.returncode == 0
    assert Path(dirpath).joinpath("recipe.yaml").exists()
    assert Path(dirpath).joinpath("gdk-config.json").exists()

def test_build_template_zip():
    dirpath = tempfile.mkdtemp()
    # Recipe contains HelloWorld.zip artifact. So, create HelloWorld directory inside temporary directory.
    path_HelloWorld = Path(dirpath).joinpath("HelloWorld")
    os.mkdir(path_HelloWorld)
    os.chdir(path_HelloWorld)
    # Check if init downloads templates with necessary files.
    check_init_template = sp.run(["gdk", "component", "init", "-t", "HelloWorld", "-l", "python"], check=True, stdout=sp.PIPE)
    assert check_init_template.returncode == 0
    assert Path(path_HelloWorld).joinpath("recipe.yaml").resolve().exists()
    config_file = Path(path_HelloWorld).joinpath("gdk-config.json").resolve()
    assert config_file.exists()
    # Update mandatory gdk-config fields such as the region.
    with open(str(config_file), "r") as f:
        config = json.loads(f.read())
    config["component"]["com.example.PythonHelloWorld"]["publish"]["region"] = "us-east-1"
    with open(str(config_file), "w") as f:
        f.write(json.dumps(config))
    # Check if build works as expected.
    check_build_template = sp.run(["gdk", "component", "build"], check=True, stdout=sp.PIPE)
    assert check_build_template.returncode == 0
    assert Path(path_HelloWorld).joinpath("zip-build").resolve().exists()
    assert Path(path_HelloWorld).joinpath("greengrass-build").resolve().exists()
    artifact_path = (
        Path(path_HelloWorld)
        .joinpath("greengrass-build")
        .joinpath("artifacts")
        .joinpath("com.example.PythonHelloWorld")
        .joinpath("NEXT_PATCH")
        .joinpath("HelloWorld.zip")
        .resolve()
    )
    recipes_path = Path(path_HelloWorld).joinpath("greengrass-build").joinpath("recipes").joinpath("recipe.yaml").resolve()
    assert artifact_path.exists()
    assert recipes_path.exists()  # previously computed but never checked
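# Hedged usage note (editor addition): these user-acceptance tests shell out to
# a real `gdk` CLI and reach the network, so they are typically run explicitly,
# e.g. with pytest:
#
#   pytest uat/test_uat_CLIParser.py -k "init_template" -v
#
# The tempfile.mkdtemp() directories created above are never removed; pytest's
# tmp_path fixture would be one way to get automatic cleanup.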
| 40.918605 | 126 | 0.700199 | 443 | 3,519 | 5.410835 | 0.227991 | 0.045056 | 0.023363 | 0.049645 | 0.577388 | 0.483521 | 0.430955 | 0.396329 | 0.124322 | 0.124322 | 0 | 0.002002 | 0.148338 | 3,519 | 85 | 127 | 41.4 | 0.797798 | 0.06877 | 0 | 0.142857 | 0 | 0 | 0.189181 | 0.039731 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da6b4c0eec1b1ed14670ffd508f05ac5d26c2b77 | 1,326 | py | Python | mysite/image/forms.py | HelloTecXin/ZXBlog | 60d1f95f541138aa56acbaf4dcfbfe208491d65b | [
"MIT"
] | 1 | 2020-03-17T08:28:48.000Z | 2020-03-17T08:28:48.000Z | mysite/image/forms.py | HelloTecXin/ZXBlog | 60d1f95f541138aa56acbaf4dcfbfe208491d65b | [
"MIT"
] | null | null | null | mysite/image/forms.py | HelloTecXin/ZXBlog | 60d1f95f541138aa56acbaf4dcfbfe208491d65b | [
"MIT"
] | null | null | null | from django import forms
from django.core.files.base import ContentFile
from slugify import slugify
from urllib import request

from .models import Image

class ImageForm(forms.ModelForm):
    class Meta:
        model = Image
        fields = ('title', 'url', 'description')

    def clean_url(self):
        url = self.cleaned_data['url']
        valid_extensions = ['jpg', 'jpeg', 'png']  # allowed image extensions
        extension = url.rsplit('.', 1)[1].lower()  # extract the extension from the submitted URL
        if extension not in valid_extensions:  # only URLs with an allowed extension are treated as images
            raise forms.ValidationError('The given url does not match valid image extension.')
        return url

    def save(self, force_insert=False, force_update=False, commit=True):
        # Override ModelForm.save() so the submitted form data is persisted to
        # the database only after the remote image has been fetched.
        image = super(ImageForm, self).save(commit=False)  # with commit=False the instance is built but not yet saved
        image_url = self.cleaned_data['url']
        image_name = '{0}.{1}'.format(slugify(image.title), image_url.rsplit('.', 1)[1].lower())
        response = request.urlopen(image_url)  # GET the image URL and read the response bytes
        image.image.save(image_name, ContentFile(response.read()), save=False)  # store the download locally under the generated name
        if commit:
            image.save()
        return image
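# Hedged usage sketch (editor addition): a minimal Django view driving
# ImageForm. The template name and redirect target are assumptions; the view
# parameter is named request_obj to avoid shadowing urllib's `request` import
# above.
#
#   from django.shortcuts import redirect, render
#
#   def image_create(request_obj):
#       if request_obj.method == 'POST':
#           form = ImageForm(data=request_obj.POST)
#           if form.is_valid():
#               new_image = form.save(commit=True)  # downloads the file, then saves
#               return redirect('/images/%d/' % new_image.pk)
#       else:
#           form = ImageForm()
#       return render(request_obj, 'image/create.html', {'form': form})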
| 41.4375 | 113 | 0.667421 | 150 | 1,326 | 5.82 | 0.5 | 0.024055 | 0.032073 | 0.041237 | 0.084765 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005803 | 0.220211 | 1,326 | 31 | 114 | 42.774194 | 0.838491 | 0.159879 | 0 | 0 | 0 | 0 | 0.088372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.2 | 0 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da741a60b0b7e242baf0c8917409303691b189e3 | 1,110 | py | Python | APP/__init__.py | jcyongqin/MerryChristmas2016 | f1bfc0f9df33dad474f28bbefa21f320e4ee48e9 | [
"MIT"
] | null | null | null | APP/__init__.py | jcyongqin/MerryChristmas2016 | f1bfc0f9df33dad474f28bbefa21f320e4ee48e9 | [
"MIT"
] | null | null | null | APP/__init__.py | jcyongqin/MerryChristmas2016 | f1bfc0f9df33dad474f28bbefa21f320e4ee48e9 | [
"MIT"
] | null | null | null | print('Merry Christmas!!!')
import sys

# Reference C implementation this module ports:
#
# int main(int argc, char* argv[]) {
#     int n = argc > 1 ? atoi(argv[1]) : 4;
#     for (int j = 1; j <= n; j++) {
#         int s = 1 << j, k = (1 << n) - s, x;
#         for (int y = s - j; y >= 0; y--, putchar('\n')) {
#             for (x = 0; x < y + k; x++) printf(" ");
#             for (x = 0; x + y < s; x++) printf("%c ", '!' ^ y & x);
#             for (x = 1; x + y < s; x++) printf("%c ", '!' ^ y & (s - y - x - 1));
#         }
#     }
# }

def main(*args):
    """Try to draw a slightly abstract Christmas tree in as little code as
    possible, which is why there is no trunk."""
    if len(args) > 1:
        n = int(args[1])
    else:
        n = 4
    for j in range(n):
        s = 1 << j
        k = (1 << n) - s
        x = 0
        for y in range(s - j)[::-1]:
            for x in range(y + k):
                print(" ", end="")
            # XOR-ing '!' (0x21) with the row/column bits (y & x) turns the
            # grid coordinates into a varying mix of printable "ornament"
            # characters, mirroring the C version's '!' ^ y & x trick.
            for x in range(s - y):
                print("%s " % chr(ord('!') ^ y & x), end="")
            for x in range(1, s - y + 1):
                print("%s " % chr(ord('!') ^ y & (s - y - x - 1)), end="")
            print("")

if __name__ == "__main__":
    # Unpack argv so args[1] is the optional height argument, matching the C
    # code's atoi(argv[1]); the original main(sys.argv) passed the whole list
    # as a single argument and the height was silently ignored.
    main(*sys.argv)
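# Hedged usage example (editor addition): run the file directly; the optional
# first argument sets the tree height.
#
#   $ python APP/__init__.py      # height 4 (default)
#   $ python APP/__init__.py 6    # height 6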
| 28.461538 | 83 | 0.351351 | 160 | 1,110 | 2.3625 | 0.23125 | 0.063492 | 0.047619 | 0.087302 | 0.301587 | 0.10582 | 0.10582 | 0.042328 | 0 | 0 | 0 | 0.032308 | 0.414414 | 1,110 | 38 | 84 | 29.210526 | 0.549231 | 0.430631 | 0 | 0 | 0 | 0 | 0.058252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.095238 | 0.238095 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da760162956fd30fc878df9712168c347b1cba4a | 1,013 | py | Python | pyday_night_funkin/enums.py | Square789/PydayNightFunkin | 8d43daec947202566419a2d5ce63cc191b7b8e3c | [
"Apache-2.0"
] | null | null | null | pyday_night_funkin/enums.py | Square789/PydayNightFunkin | 8d43daec947202566419a2d5ce63cc191b7b8e3c | [
"Apache-2.0"
] | 34 | 2021-09-10T01:08:14.000Z | 2022-03-25T18:10:08.000Z | pyday_night_funkin/enums.py | Square789/PydayNightFunkin | 8d43daec947202566419a2d5ce63cc191b7b8e3c | [
"Apache-2.0"
] | null | null | null | """
Enums that aren't really too coupled to anything else.
"""

from enum import IntEnum

class DIFFICULTY(IntEnum):
    EASY = 0
    NORMAL = 1
    HARD = 2

    def to_song_json_suffix(self) -> str:
        if self is self.EASY:
            return "-easy"
        elif self is self.NORMAL:
            return ""
        elif self is self.HARD:
            return "-hard"
        return ""

    def to_atlas_prefix(self) -> str:
        if self is self.EASY:
            return "EASY"
        elif self is self.NORMAL:
            return "NORMAL"
        elif self is self.HARD:
            return "HARD"
        return ""

# NOTE: That sucks, but is needed for menu selections etc.
DIFFICULTY_REVERSE_MAP = [DIFFICULTY.EASY, DIFFICULTY.NORMAL, DIFFICULTY.HARD]

class CONTROL(IntEnum):
    LEFT = 0
    DOWN = 1
    UP = 2
    RIGHT = 3
    ENTER = 4
    BACK = 5
    DEBUG_DESYNC = 100
    DEBUG_WIN = 101
    DEBUG_LOSE = 102

class GAME_STATE(IntEnum):
    LOADING = 0
    COUNTDOWN = 1
    PLAYING = 2
    ENDED = 3

class ANIMATION_TAG(IntEnum):
    IDLE = 0
    SING = 1
    MISS = 2
    SPECIAL = 3
    STORY_MENU = 4
    STATIC = 5
    PRESSED = 6
    CONFIRM = 7
    GAME_OVER = 8
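# Hedged usage sketch (editor addition): mapping a difficulty to its song-file
# suffix, and back from a menu index via the reverse map. The "bopeebo" song
# name is an assumption for illustration.
#
#   chart_name = "bopeebo" + DIFFICULTY.HARD.to_song_json_suffix() + ".json"
#   # -> "bopeebo-hard.json"
#   selected = DIFFICULTY_REVERSE_MAP[1]   # -> DIFFICULTY.NORMAL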
| 15.828125 | 78 | 0.672261 | 158 | 1,013 | 4.221519 | 0.512658 | 0.053973 | 0.089955 | 0.083958 | 0.278861 | 0.278861 | 0.278861 | 0.278861 | 0.176912 | 0.176912 | 0 | 0.039846 | 0.231984 | 1,013 | 63 | 79 | 16.079365 | 0.817481 | 0.110563 | 0 | 0.191489 | 0 | 0 | 0.026876 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.021277 | 0 | 0.851064 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da78a73d88c09570047fcaf5e2a501ef100b4dc0 | 28,780 | py | Python | tools/check_cluster.py | jmatuskey/jupyterhub-deploy | 6669bb0fa8e6da52f74d4ca015cea9dc96105a34 | [
"Unlicense"
] | 1 | 2021-06-02T18:35:05.000Z | 2021-06-02T18:35:05.000Z | tools/check_cluster.py | jmatuskey/jupyterhub-deploy | 6669bb0fa8e6da52f74d4ca015cea9dc96105a34 | [
"Unlicense"
] | 64 | 2020-05-11T12:35:26.000Z | 2022-03-28T16:03:37.000Z | tools/check_cluster.py | jmatuskey/jupyterhub-deploy | 6669bb0fa8e6da52f74d4ca015cea9dc96105a34 | [
"Unlicense"
] | 11 | 2020-04-07T13:32:07.000Z | 2022-02-07T19:16:24.000Z | #! /usr/bin/env python
"""Check properties of Terraformed resources and/or JupyterHub to verify good deployment.
ignore the hub since it may not be delpoyed on the cluster yet.
check creation date
check for global hammer
"""
import sys
import os
import subprocess
import argparse
import re
import json
from collections import defaultdict
import builtins
import functools
import traceback
import yaml
CLUSTER_CHECKS = """
Globals:
  environment:
    - DEPLOYMENT_NAME
    - ENVIRONMENT
    - JH_HOSTNAME
    - ADMIN_ARN
    - ACCOUNT_ID
  constants:
    V_K8S: "1.21"
    MAX_NODE_AGE: 10d
    MAX_EFS_FILE_SYSTEM_SIZE: 50000000000000
    CORE_NODES: 3
    NOTEBOOK_EC2_TYPE: r5.xlarge
    MAX_RESTARTS: 0
    LOG_REACH: 30m

Groups:
  - group: Kubernetes Pods
    command: kubectl get pods -A
    parser: named_columns
    assertions:
      - name: All pods
        all: STATUS=='Running' and int(RESTARTS)<=MAX_RESTARTS
      - name: EFS provisioner
        ok_rows==1: NAMESPACE=='support' and 'efs-provisioner' in NAME
      - name: Kube Proxy
        ok_rows>=4: NAMESPACE=='kube-system' and 'kube-proxy' in NAME
      - name: Autoscaler
        ok_rows==1: NAMESPACE=='kube-system' and 'cluster-autoscaler' in NAME
      - name: AWS Pods
        ok_rows>=4: NAMESPACE=='kube-system' and 'aws-node' in NAME
      - name: Core DNS
        ok_rows==2: NAMESPACE=='kube-system' and 'coredns' in NAME

  - group: JupyterHub Pods
    command: kubectl get pods -A
    parser: named_columns
    assertions:
      - name: Image puller
        ok_rows>=1: NAMESPACE=='default' and 'continuous-image-puller' in NAME
      - name: Hub
        ok_rows==1: NAMESPACE=='default' and 'hub' in NAME
      - name: Proxy
        ok_rows>=1: NAMESPACE=='default' and 'proxy' in NAME
      - name: User-scheduler
        ok_rows==2: NAMESPACE=='default' and 'user-scheduler' in NAME
      - name: User-placeholder
        ok_rows>=1: NAMESPACE=='default' and 'user-placeholder' in NAME

  - group: JupyterHub Nodes
    command: kubectl get nodes -A --show-labels=true
    parser: named_columns
    assertions:
      - name: At least 4 STATUS Ready new Hub AMI ID
        ok_rows>=4: STATUS=="Ready" # and HUB_AMI_ID in LABELS
      - name: All Nodes Ready Status
        all: STATUS=="Ready" or STATUS=="Ready,SchedulingDisabled"
      - name: Kubernetes Version
        all: V_K8S in VERSION
      - name: Node Age
        all: convert_age(AGE) < convert_age(MAX_NODE_AGE)
      - name: Core us-east-1a
        ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1a' in LABELS"
      - name: Core us-east-1b
        ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1b' in LABELS"
      - name: Core us-east-1c
        ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1c' in LABELS"
      - name: Notebook nodes
        ok_rows>=1: "DEPLOYMENT_NAME+'-notebook' in LABELS and NOTEBOOK_EC2_TYPE in LABELS and 'region=us-east-1' in LABELS"

  - group: EKS Services
    command: kubectl get services -A
    parser: named_columns
    assertions:
      - name: Datadog Cluster Agent Service
        ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog-cluster-agent' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='5005/TCP'
      - name: Datadog Kube State Metrics Service
        ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog-kube-state-metrics' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8080/TCP'
      - name: Hub Service
        ok_rows==1: NAMESPACE=='default' and NAME=='hub' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8081/TCP'
      - name: Kubernetes Service
        ok_rows==1: NAMESPACE=='default' and NAME=='kubernetes' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='443/TCP'
      - name: Proxy API Service
        ok_rows==1: NAMESPACE=='default' and NAME=='proxy-api' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8001/TCP'
      - name: Proxy Public Service
        ok_rows==1: NAMESPACE=='default' and NAME=='proxy-public' and TYPE=='LoadBalancer' and '.elb.amazonaws.com' in _['EXTERNAL-IP'] and '443:' in _['PORT(S)'] and '80:' in _['PORT(S)'] and 'TCP' in _['PORT(S)'] and 'UDP' not in _['PORT(S)']
      - name: Cluster Autoscaler Service
        ok_rows==1: NAMESPACE=='kube-system' and NAME=='cluster-autoscaler-aws-cluster-autoscaler' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8085/TCP'
      - name: Kube DNS Service
        ok_rows==1: NAMESPACE=='kube-system' and NAME=='kube-dns' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='53/UDP,53/TCP'

  - group: EKS Deployments
    command: kubectl get deployments -A
    parser: named_columns
    assertions:
      - name: Hub Deployment
        ok_rows==1: NAMESPACE=='default' and NAME=='hub' and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
      - name: Proxy Deployment
        ok_rows==1: NAMESPACE=='default' and NAME=='proxy' and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
      - name: User Scheduler Deployment
        ok_rows==1: NAMESPACE=='default' and NAME=='user-scheduler' and READY=='2/2' and _['UP-TO-DATE']=='2' and AVAILABLE=='2'
      - name: Cluster Autoscaler Deployment
        ok_rows==1: NAMESPACE=='kube-system' and 'cluster-autoscaler' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
      - name: Core DNS Deployment
        ok_rows==1: NAMESPACE=='kube-system' and 'coredns' in NAME and READY=='2/2' and _['UP-TO-DATE']=='2' and AVAILABLE=='2'
      - name: EFS Provisioner Deployment
        ok_rows==1: NAMESPACE=='support' and 'efs-provisioner' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
      - name: Datadog Cluster Agent Deployment
        ok_rows==1: NAMESPACE=='datadog' and 'datadog-cluster-agent' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
      - name: Datadog Kube Metrics Deployment
        ok_rows==1: NAMESPACE=='datadog' and 'datadog-kube-state-metrics' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'

  - group: Route-53 Host
    command: "host {JH_HOSTNAME}"
    parser: raw
    assertions:
      - name: DNS Mapping
        simple: "f'{JH_HOSTNAME} is an alias for' in _"

  - group: JupyterHub Index Page
    command: "wget --no-check-certificate -O- {JH_HOSTNAME}"
    parser: raw
    assertions:
      - name: Server Index Page
        simple: "'HTTP request sent, awaiting response... 200 OK' in _"

  - group: EFS File Systems
    command: awsudo {ADMIN_ARN} aws efs describe-file-systems --output yaml --query FileSystems
    parser: yaml
    assertions:
      - name: EFS Home Dirs
        ok_rows==1: Name==DEPLOYMENT_NAME+'-home-dirs' and LifeCycleState=='available' and Encrypted==True and NumberOfMountTargets==3 and OwnerId==ACCOUNT_ID and aws_kv_dict(Tags)['stsci-backup']=='dmd-2w-sat'
      - name: EFS Max Size
        all: int(SizeInBytes['Value']) < MAX_EFS_FILE_SYSTEM_SIZE

  - group: Daemonsets named rows
    command: kubectl get daemonsets -A
    parser: named_rows
    assertions:
      - name: datadog - proxy - aws-nodes READY
        simple: _['datadog']['READY'] == _['kube-proxy']['READY'] == _['aws-node']['READY']
      - name: datadog - proxy - aws-nodes DESIRED
        simple: _['datadog']['DESIRED'] == _['kube-proxy']['DESIRED'] == _['aws-node']['DESIRED']
      - name: datadog - proxy - aws-nodes CURRENT
        simple: _['datadog']['CURRENT'] == _['kube-proxy']['CURRENT'] == _['aws-node']['CURRENT']
      - name: datadog - proxy - aws-nodes UP-TO-DATE
        simple: _['datadog']['UP-TO-DATE'] == _['kube-proxy']['UP-TO-DATE'] == _['aws-node']['UP-TO-DATE']
      - name: datadog - proxy - aws-nodes AVAILABLE
        simple: _['datadog']['AVAILABLE'] == _['kube-proxy']['AVAILABLE'] == _['aws-node']['AVAILABLE']
      - name: continuous image puller notebook nodes only
        simple: int(_['continuous-image-puller']['READY']) == int(_['aws-node']['READY']) - CORE_NODES

  - group: Daemonsets named columns
    command: kubectl get daemonsets -A
    parser: named_columns
    assertions:
      - name: continuous-image-puller
        ok_rows==1: NAMESPACE=='default' and NAME=='continuous-image-puller'
      - name: datadog
        ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog'
      - name: kube-proxy
        ok_rows==1: NAMESPACE=='kube-system' and NAME=='kube-proxy'
      - name: aws-node
        ok_rows==1: NAMESPACE=='kube-system' and NAME=='aws-node'
      - name: matching daemonset states
        all: READY==DESIRED==CURRENT==AVAILABLE==_['UP-TO-DATE']

  - group: EKS AMI Rotation
    command: awsudo {ADMIN_ARN} aws eks list-nodegroups --cluster-name {DEPLOYMENT_NAME} --query nodegroups --output text
    parser: raw
    assertions:
      - name: Only rotated nodegroup names
        simple: "functools.reduce(lambda a, b: a and b, [x.count('-')!=1 for x in _.split()])"

  - group: Log Error Check
    function: pod_logs(LOG_REACH)
    parser: yaml
    assertions:
      - name: No errors in logs
        simple: ERRORS==0

  - group: Pod to Node Map
    command: kubectl get pods -A -o wide
    replace_output:
      input: NOMINATED NODE
      output: NOMINATED_NODE
    parser: node_map
    print_parsing: true
"""  # noqa: E501
def convert_age(age_str):
    """Convert k8s abbreviated-style datetime str e.g. 14d2h to an integer."""
    # age_str_org = age_str

    def age_subst(age_str, letter, factor):
        parts = age_str.split(letter)
        if len(parts) == 2:
            age_str = parts[0] + "*" + factor + "+" + parts[1]
        return age_str

    age_str = age_subst(age_str, "d", "60*60*24")
    age_str = age_subst(age_str, "h", "60*60")
    age_str = age_subst(age_str, "m", "60")
    age_str = age_subst(age_str, "s", "1")
    age_str = age_str[:-1]
    # print(
    #     f"convert_age({repr(age_str_org)}) --> {repr(age_str)} --> {eval(age_str)}"  # nosec
    # )  # nosec
    return eval(age_str)  # nosec
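# Worked example (editor addition): convert_age("14d2h") rewrites the string
# to "14*60*60*24+2*60*60" (the [:-1] slice strips the trailing "+") and then
# evaluates it:
#
#   assert convert_age("14d2h") == 14 * 86400 + 2 * 3600  # 1216800 seconds
#   assert convert_age("45m") == 2700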
def aws_kv_dict(key_value_dict_list):
    """Convert AWS dict representation [{ 'Key':k, 'Value':v}, ...] to a Python dict."""
    return {item["Key"]: item["Value"] for item in key_value_dict_list}
def run(cmd, cwd=".", timeout=10):
    """Run subprocess `cmd` in dir `cwd` failing if not completed within `timeout` seconds
    or if `cmd` returns a non-zero exit status.

    Returns both stdout+stderr from `cmd`. (untested, verify manually if in doubt)
    """
    print(cmd)
    result = subprocess.run(
        cmd.split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
        check=True,
        cwd=cwd,
        timeout=timeout,
    )  # maybe succeeds
    return result.stdout

def parse_node_map(output):
    namespaces = parse_named_columns(output)
    node_map = defaultdict(list)
    for namespace in namespaces:
        node_map[namespace["NODE"]].append(
            namespace["NAMESPACE"] + ":" + namespace["NAME"]
        )
    output = ["Mapping from Node to Pod", "-" * 80, yaml.dump(dict(node_map))]
    return "\n".join(output)

def parse_named_columns(output):
    """Return rows from a table string `output` as a sequence of dicts.

    The first row should contain whitespace delimited column names.
    Each subsequent row should contain whitespace delimited column values.

    Given tabular `output` as found in many k8s commands:

        col1_name      col2_name     ...
        col1_row1_val  col2_row1_val ...
        col1_row2_val  col2_row2_val ...
        ...

    Returns [ {col1_name: col1_row1_val, col2_name: col2_row1_val, ...},
              {col1_name: col1_row2_val, col2_name: col2_row2_val, ...},
              ... ]

    Each dict in the returned sequence is suitable as a namespace for eval()
    """
    lines = output.splitlines()
    columns = lines[0].split()
    rows = []
    for line in lines[1:]:
        d = dict(zip(columns, line.split()))
        d["_"] = d
        rows.append(d)
    return rows

def parse_named_rows(output, key="NAME"):
    return {"_": {row[key]: row for row in parse_named_columns(output)}}

def parse_raw(output):
    """Just return `output` as a single string assigned to dict key '_'
    for reference in assertion expressions.

    Returns {'_': output}
    """
    return dict(_=output)

def parse_yaml(output):
    """Return the YAML parsing of `output` string. aws commands can
    be filtered using the --query parameter to produce more manageable
    output before YAML parsing.
    """
    return yaml.safe_load(output)

def parse_json(output):
    """Return the JSON parsing of `output` string. aws commands can
    be filtered using the --query parameter to produce more manageable
    output before JSON parsing.
    """
    return json.loads(output)

def parse_none(output):
    """Return the input as the output, i.e. no changes."""
    return output

def test_function(parameters):
    return yaml.dump(parameters)
class Checker:
    """The Checker class runs a number of tests defined in a `test_spec` string.

    Commands
    --------
    Each Group includes a subprocess CLI command from which the output is captured,
    parsed, and checked against various assertions.

    Output Parsing
    --------------
    The command output is parsed using a parser which can be one of
    named_columns, named_rows, node_map, raw, yaml, or json.

    named_columns is ideal for parsing kubectl output in which each row
    defines a set of variables as a dict. named_columns requires that
    column names and values do not contain spaces; generally it is not
    a problem but not all kubectl output modes work.

    raw simply returns { "_": cmd_output } so _ is used as a variable
    in assertions to refer to the entire output string.

    yaml and json return parsed command output using their respective
    loaders. The --query parameter of the 'aws' commands can be
    useful for pre-filtering command output so that a simple direct
    parsing is usable in assertions.

    Test Assertions
    ---------------
    A series of assertions are evaluated on the parsed output from each group's command.

    Assertions take the form:

        simple: <python expression using parsed outputs to define variables, eval must pass.>
        ok_rows_expr: <python expression using parsed outputs to define row variables, ok_rows_expr must be True.>
        all: <python expression using parsed outputs to define row variables, each row must pass.>

    Examples of ok_rows expressions might be:

        ok_rows==1
        ok_rows>=3

    Pseudo code for 'all' is:

        ok_rows==len(total output rows)

    ok_rows is assigned the number of times the assertion evaluates to True when run
    against each of the row namespace dicts. Hence overall test success does not
    require every row to pass the assertion.

    The `test_spec` specifies a string of YAML which defines:

        Globals:
            environment:
                - env var1 needed in assertion expressions imported from os.environ
                ...
            constants:
                - VAR: VAL a VAR needed in assertion expressions with the spec'd VAL
                ...
        Groups:
            - group: <Command Group Name>
              command: <UNIX subprocess command string>
              parser: <named_columns|named_rows|node_map|raw|yaml|json>
              assertions:
                - name: <Name defining check>
                  <simple|all|ok_rows_expr>: <python expression>
                - name: <Name defining check>
                  <simple|all|ok_rows_expr>: <python expression>
                ...
            ...

    NOTE: In the spec, substitutions for output vars, env vars, constants,
    variables, and built-in functions occur in two basic ways:

    - Using Python's f-string {} formatting. (commands)
    - Treated as a variable name to be eval'ed. (assertions)

    This is because commands are "".format()'ed but assertions are eval'ed,
    each against similar namespaces with the caveat that the command formatting
    includes no variables derived from its own output.

    if `output_file` is specified, commands are run and outputs are
    stored at the spec'ed path, the checker exits w/o running tests.

    if `input_file` is specified, it is presumed to be the path to command
    output YAML stored by `output_file` and replaces running commands,
    checks are run using the stored outputs.

    input_file and output_file are mutually exclusive.

    if `verbose` is specified then additional assertion-by-assertion,
    row-by-row output is generated.

    if `groups_regex` is specified, only the group names which can be
    searched by the regex are checked. (case insensitive substrings
    of group names work).

    if `variables` is specified, it should be a comma separated string
    of VAR=VAL pairs, i.e. VAR1=VAL1,VAR2=VAL2,...
    These variables are added to the namespace used for running/eval'ing
    commands and assertions and override values already defined in Globals.
    """  # noqa: E501
    def __init__(
        self,
        test_spec=CLUSTER_CHECKS,
        output_file=None,
        input_file=None,
        verbose=False,
        groups_regex=".+",
        exclude_regex="^$",
        variables=None,
    ):
        self._output_file = output_file
        self._input_file = input_file
        self._verbose = verbose
        self._groups_regex = groups_regex
        self._exclude_regex = exclude_regex
        print("===> Loading test spec")
        self.loaded_spec = yaml.safe_load(test_spec)
        self.variables = (
            dict([var.split("=") for var in variables.split(",")]) if variables else []
        )
        self._outputs = {}
        self._errors = 0
        self._error_msgs = []

    @property
    def groups(self):
        return self.loaded_spec["Groups"]

    @property
    def spec_environment(self):
        return {
            var: os.environ[var]
            for var in self.loaded_spec.get("Globals", {}).get("environment", [])
        }

    @property
    def spec_constants(self):
        return self.loaded_spec.get("Globals", {}).get("constants", {})

    @property
    def builtins(self):
        result = {
            key: getattr(builtins, key) for key in dir(builtins)
        }  # Python builtins
        result.update(
            dict(
                convert_age=convert_age,
                aws_kv_dict=aws_kv_dict,
                test_function=test_function,
                functools=functools,
                pod_logs=self.pod_logs,
            )
        )
        return result

    @property
    def combined_environment(self):
        env = dict()
        env.update(self.builtins)
        env.update(self.spec_constants)
        env.update(self.spec_environment)
        env.update(self.variables)
        return env

    def main(self):
        self.setup_outputs()
        for check in self.groups:
            if re.search(
                self._groups_regex, check["group"], re.IGNORECASE
            ) and not re.search(self._exclude_regex, check["group"], re.IGNORECASE):
                self.run_check(check)
        if self._output_file:
            self.store_outputs()
        return self._errors

    def setup_outputs(self):
        """Fetch saved command outputs from file rather than running commands."""
        if self._input_file:
            with open(self._input_file) as file:
                self._outputs = yaml.safe_load(file)
        else:
            self._outputs = {}

    def store_outputs(self):
        """Store command outputs to file for running offline later."""
        print("=" * 80)
        print("Saving", repr(self._output_file))
        with open(self._output_file, "w+") as file:
            yaml.dump(self._outputs, file)

    def replace_output(self, check, output):
        if check.get("replace_output"):
            input_patt = check.get("replace_output").get("input")
            output_patt = check.get("replace_output").get("output")
            output = re.sub(input_patt, output_patt, output, flags=re.MULTILINE)
        return output

    def run_check(self, check):
        print("=" * 80)
        try:
            output = self.get_command_output(check)
        except Exception as exc:
            self.error(
                "Failed obtaining command output for group",
                repr(check.get("group")),
                ":",
                str(exc),
            )
            print("=" * 80)
            return
        if self._output_file:
            return
        if not output.startswith("FAILED"):
            print("-" * 80)
            print(output)
            print("=" * 80)
            self.process_output(check, output)

    def process_output(self, check, output):
        try:
            output = self.replace_output(check, output)
            parser = globals()[f"parse_{check['parser']}"]
            namespaces = parser(output)
        except Exception as exc:
            self.error("PARSER failed for", repr(check["group"]), ":", str(exc))
            return
        if check.get("print_parsing"):
            print(namespaces)
        for assertion in check.get("assertions", []):
            try:
                self.check_assertion(check["group"], assertion, namespaces)
            except Exception as exc:
                self.error(
                    "EXECUTION failed for",
                    repr(check["group"]),
                    ":",
                    repr(assertion["name"]),
                    ":",
                    str(exc),
                )

    def get_command_output(self, check):
        group = check["group"]
        if not self._input_file:
            self._outputs[group] = self.compute_outputs(group, check)
        return self._outputs[group]

    def compute_outputs(self, group, check):
        if check.get("command"):
            command = check.get("command").format(**self.combined_environment)
        elif check.get("function"):
            command = check.get("function").format(**self.combined_environment)
        else:
            raise RuntimeError(f"Group {group} doesn't define an input command.")
        print("===> Fetching", repr(group))
        print("=" * 80)
        try:
            if check.get("command"):
                outputs = run(command).strip()
            else:
                outputs = eval(  # nosec
                    command, self.combined_environment, self.combined_environment
                )
        except Exception as exc:
            traceback.print_exc()
            outputs = f"FAILED for '{group}': '{command}' : '{str(exc)}'"
            self.error(outputs)
        return outputs

    def check_assertion(self, group_name, assertion, namespaces):
        assertion = dict(assertion)
        assertion_name = assertion.pop("name")
        requirement, condition = list(assertion.items())[0]
        # condition = condition.format(**self.combined_environment)
        print(f"Checking assertion '{assertion_name}': {requirement} : {condition}")
        if requirement == "simple":
            self.verify_simple(group_name, assertion_name, namespaces, condition)
        elif requirement.startswith(("ok_rows", "all")):
            self.verify_rows(
                group_name, assertion_name, namespaces, requirement, condition
            )
        else:
            raise ValueError(
                f"Unhandled requirement: {requirement} for assertion: {assertion}"
            )
        print()

    def verify_rows(self, group_name, name, namespaces, requirement, condition):
        rows = []
        for i, namespace in enumerate(namespaces):
            self.verbose(f"Checking '{name}' #{i} : {condition} ... ", end="")
            if self.eval_condition(namespace, condition):
                rows.append(namespace)
                self.verbose("OK")
            else:
                self.verbose("FAILED on row:", namespace)
        if requirement == "all":
            requirement = f"ok_rows=={len(namespaces)}"
        if self.eval_condition(dict(ok_rows=len(rows)), requirement):  # nosec
            print(f"===> OK '{group_name}' : '{name}'")
        else:
            self.error(f"FAILED '{group_name}' : '{name}' : {condition}")

    def verify_simple(self, group_name, name, namespace, condition):
        if self.eval_condition(namespace, condition):
            print(f"===> OK '{group_name}' : '{name}'")
        else:
            self.error(f"FAILED '{group_name}' : '{name}' : {condition}")
            self.verbose("Namespace:", namespace)

    def eval_condition(self, namespace, condition):
        namespace = dict(namespace)  # local no-side-effects copy
        namespace.update(self.combined_environment)
        return eval(condition, {}, namespace)  # nosec

    def verbose(self, *args, **keys):
        if self._verbose:
            print(*args, **keys)

    def error(self, *args):
        self._errors += 1
        self._error_msgs.append(" ".join(str(arg) for arg in args))
        print("===> ERROR: ", *args)

    def show_error_status(self):
        print("=" * 80)
        print("Overall", self._errors, "errors occurred:")
        for msg in self._error_msgs:
            print(msg)

    def pod_logs(self, log_reach="30m"):
        loaded = yaml.safe_load(run("kubectl get pods -A --output yaml"))
        pods = [
            (pod["metadata"]["namespace"], pod["metadata"]["name"])
            for pod in loaded["items"]
        ]
        print("=" * 80)
        print("Fetching", len(loaded["items"]), "pod logs")
        pod_errors = dict()
        for i, (namespace, name) in enumerate(pods):
            pod = f"{namespace}:{name}"
            print()
            output = run(
                f"kubectl logs -n {namespace} {name} --since {log_reach} --all-containers --timestamps=True"
            )
            for line in output.splitlines():
                if "error" in line.lower() and "| INFO |" not in line:
                    self.error(f"FAILED Pod {pod} log:", line)
                    if pod not in pod_errors:
                        pod_errors[pod] = []
                    pod_errors[pod].append(line)
        print()
        print("-" * 80)
        return yaml.dump(
            {
                "ERRORS": len(pod_errors),
                "FAILING_PODS": sorted(list(pod_errors.keys())),
                "POD_ERRORS": pod_errors,
            }
        )

def parse_args():
    parser = argparse.ArgumentParser(
        description="Perform various cluster and hub checks to automatically detect basic anomalies."
    )
    parser.add_argument(
        "--test-spec",
        dest="test_spec",
        action="store",
        default=None,
        help="Custom test specification. Defaults to None meaning use built-in spec.",
    )
    parser.add_argument(
        "--output-file",
        dest="output_file",
        action="store",
        default=None,
        help="Filepath to store outputs of test commands.",
    )
    parser.add_argument(
        "--input-file",
        dest="input_file",
        action="store",
        default=None,
        help="Filepath to load previously stored test command results.",
    )
    parser.add_argument(
        "--verbose",
        dest="verbose",
        action="store_true",
        help="Include additional output.",
    )
    parser.add_argument(
        "--groups-regex",
        dest="groups_regex",
        action="store",
        default=".+",
        help="Select groups to execute based on the specified regex, defaulting to all groups."
        " Unique group substrings are valid, |-or patterns together. Case is irrelevant.",
    )
    parser.add_argument(
        "--exclude-regex",
        dest="exclude_regex",
        action="store",
        default="^$",
        help="Select groups to skip based on the specified regex, defaulting to no groups."
        " Unique group substrings are valid, |-or patterns together. Case is irrelevant.",
    )
    parser.add_argument(
        "--variables",
        dest="variables",
        action="store",
        default=None,
        help="Custom override variables which can be used in commands, assertions, etc."
        " --variables var1=val1,var2=val2,...",
    )
    return parser.parse_args()

def main():
    """Parse command line arguments and run the test spec.

    Return the number of failing tests or 0 if all tests pass.
    """
    args = parse_args()
    test_spec = (
        open(args.test_spec).read().strip() if args.test_spec else CLUSTER_CHECKS
    )
    checker = Checker(
        test_spec=test_spec,
        output_file=args.output_file,
        input_file=args.input_file,
        verbose=args.verbose,
        groups_regex=args.groups_regex,
        exclude_regex=args.exclude_regex,
        variables=args.variables,
    )
    errors = checker.main()
    checker.show_error_status()
    return errors

if __name__ == "__main__":
    sys.exit(main())
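# Hedged usage examples (editor addition), built only from the argparse flags
# defined above; "snapshot.yaml" is an illustrative filename:
#
#   python tools/check_cluster.py --groups-regex "pods|nodes" --verbose
#   python tools/check_cluster.py --output-file snapshot.yaml
#   python tools/check_cluster.py --input-file snapshot.yaml --exclude-regex "log error"
#
# The exit status is the number of failed checks, so a zero exit means the
# cluster passed every selected assertion.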
| 37.087629 | 243 | 0.618277 | 3,636 | 28,780 | 4.770627 | 0.162816 | 0.016603 | 0.012914 | 0.023982 | 0.2581 | 0.22276 | 0.18056 | 0.160613 | 0.127176 | 0.100254 | 0 | 0.011195 | 0.2582 | 28,780 | 775 | 244 | 37.135484 | 0.801302 | 0.202571 | 0 | 0.18705 | 0 | 0.053957 | 0.486507 | 0.083808 | 0 | 0 | 0 | 0 | 0.043165 | 1 | 0.064748 | false | 0 | 0.019784 | 0.008993 | 0.136691 | 0.052158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da78b0227ad76c6a1e8ba2489ed9c76d00da8725 | 791 | py | Python | tests/in/test_application.py | evereux/catia_python | 08948585899b12587b0415ce3c9191a408b34897 | [
"MIT"
] | 90 | 2019-02-21T10:05:28.000Z | 2022-03-19T01:53:41.000Z | tests/in/test_application.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | 99 | 2019-05-21T08:29:12.000Z | 2022-03-25T09:55:15.000Z | tests/in/test_application.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | 26 | 2019-04-04T06:31:36.000Z | 2022-03-30T07:24:47.000Z | #! /usr/bin/python3.6
from pycatia import catia

from tests.source_files import cat_part_measurable

def test_application():
    caa = catia()
    assert 'Application(name="CNEXT")' in caa.__repr__()

def test_refresh():
    caa = catia()
    documents = caa.documents
    documents.open(cat_part_measurable)
    document = caa.active_document
    caa.refresh_display = False
    assert caa.refresh_display is False
    caa.refresh_display = True
    assert caa.refresh_display is True
    document.close()

def test_visible():
    caa = catia()
    documents = caa.documents
    documents.open(cat_part_measurable)
    document = caa.active_document
    caa.visible = False
    assert caa.visible is False
    caa.visible = True
    assert caa.visible is True
    document.close()
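# Editor note (hedged): pycatia drives a live CATIA session over COM, so these
# tests assume a Windows host with CATIA running; they are not meaningful on a
# headless CI box.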
| 19.775 | 56 | 0.705436 | 102 | 791 | 5.27451 | 0.333333 | 0.081784 | 0.126394 | 0.074349 | 0.416357 | 0.32342 | 0.32342 | 0.32342 | 0.32342 | 0.32342 | 0 | 0.0032 | 0.209861 | 791 | 39 | 57 | 20.282051 | 0.8576 | 0.025284 | 0 | 0.44 | 0 | 0 | 0.032468 | 0.032468 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7992586d3c2316d0ce8cb23cf3e01e30ae505b | 4,632 | py | Python | test/util.py | CarysT/xar | f476c05dec373fcdcd0e884d5a0201501555edb9 | [
"BSD-2-Clause"
] | null | null | null | test/util.py | CarysT/xar | f476c05dec373fcdcd0e884d5a0201501555edb9 | [
"BSD-2-Clause"
] | null | null | null | test/util.py | CarysT/xar | f476c05dec373fcdcd0e884d5a0201501555edb9 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import contextlib
import hashlib
import os
import os.path
import shutil
import stat
import subprocess
import sys

import xattr

class TestCaseSkipError(Exception):
    pass

def skip_if_no_compression_support(type):
    """
    Raises TestCaseSkipError if the type is "lzma" and the test is running on
    darwin (OS X). In the future, we should add a hidden debugging flag to xar
    to determine valid compression types. This will skip incorrectly if a
    custom xar is used on OS X, or if a custom xar on another platform is
    built without bzip2 or lzma.
    """
    if sys.platform == "darwin" and type == "lzma":
        raise TestCaseSkipError("{t} support not compiled in".format(t=type))

@contextlib.contextmanager
def directory_created(directory_path):
    """
    Creates the named directory and provides the path to the directory to the
    calling code. Automatically removes the directory when finished.

    Usage:
    with directory_created("foobar") as path:
        do_stuff_with_path
    """
    os.mkdir(directory_path)
    try:
        yield os.path.realpath(directory_path)
    finally:
        if os.path.exists(directory_path):
            shutil.rmtree(directory_path)

@contextlib.contextmanager
def archive_created(archive_path, content_path, *extra_args, **extra_kwargs):
    """
    Creates a named xar archive of the specified content path, returning the
    path to the archive. Automatically removes the archive when finished.

    Usage:
    with archive_created("/bin", "bin.xar") as path:
        do_stuff_with(path)
    """
    cmd = ["xar", "-c", "-f", archive_path, content_path]
    if extra_args:
        cmd += list(extra_args)
    try:
        subprocess.check_call(cmd, **extra_kwargs)
        assert os.path.exists(archive_path), "failed to create archive \"{p}\" but xar did not report an error".format(p=archive_path)
        yield os.path.realpath(archive_path)
    finally:
        if os.path.exists(archive_path):
            os.unlink(archive_path)

HASH_CHUNK_SIZE = 32768

def _md5_path(path):
    # Open in binary mode: hashlib requires bytes, and text mode would also
    # corrupt the digest via newline translation.
    with open(path, "rb") as f:
        h = hashlib.md5()
        while True:
            last = f.read(HASH_CHUNK_SIZE)
            if not last:
                break
            h.update(last)
        return h.digest()

def assert_identical_directories(path1, path2):
    """
    Verifies two directories have identical contents. Checks file type (via
    the high byte of the mode), size, atime, and mtime, but does not check
    other attributes like uid and gid, since they can be expected to change.
    """
    seen = set([])
    for file1 in os.listdir(path1):
        seen.add(file1)
        entry1 = os.path.join(path1, file1)
        entry2 = os.path.join(path2, file1)
        assert os.path.exists(entry2), "\"{f1}\" exists in \"{p1}\" but not \"{p2}\"".format(f1=file1, p1=path1, p2=path2)
        # Extended attributes
        xattr1 = xattr.xattr(entry1)
        xattr2 = xattr.xattr(entry2)
        assert set(xattr1.list()) == set(xattr2.list()), "list of extended attributes on \"{f1}\" ({l1}) differs from \"{f2}\" ({l2})".format(f1=entry1, l1=xattr1.list(), f2=entry2, l2=xattr2.list())
        for attribute in xattr1.list():
            assert xattr1.get(attribute) == xattr2.get(attribute), "extended attribute \"{a1}\" on \"{f1}\" doesn't match value from \"{f2}\"".format(a1=attribute, f1=entry1, f2=entry2)
        # Why do it this way? We want to lstat() instead of stat(), so we can't use os.path.isdir() and friends
        stat1 = os.lstat(entry1)
        stat2 = os.lstat(entry2)
        # Modes
        mode1 = stat1.st_mode
        mode2 = stat2.st_mode
        if stat.S_ISREG(mode1):
            assert stat.S_ISREG(mode2)
        if stat.S_ISDIR(mode1):
            assert stat.S_ISDIR(mode2)
        if stat.S_ISLNK(mode1):
            assert stat.S_ISLNK(mode2)
        if stat.S_ISCHR(mode1):
            assert stat.S_ISCHR(mode2)
        if stat.S_ISBLK(mode1):
            assert stat.S_ISBLK(mode2)
        if stat.S_ISFIFO(mode1):
            assert stat.S_ISFIFO(mode2)
        if stat.S_ISSOCK(mode1):
            assert stat.S_ISSOCK(mode2)
        # Sizes and the like
        assert stat1.st_size == stat2.st_size, "size mismatch for \"{e1}\" ({s1}) and \"{e2}\" ({s2})".format(e1=entry1, s1=stat1.st_size, e2=entry2, s2=stat2.st_size)
        assert stat1.st_mtime == stat2.st_mtime, "mtime mismatch for \"{e1}\" and \"{e2}\"".format(e1=entry1, e2=entry2)
        assert _md5_path(entry1) == _md5_path(entry2), "md5 hash mismatch for \"{e1}\" and \"{e2}\"".format(e1=entry1, e2=entry2)
        if os.path.isdir(entry1):
            assert_identical_directories(entry1, entry2)
    for file2 in os.listdir(path2):
        assert file2 in seen, "\"{f2}\" exists in \"{p2}\" but not \"{p1}\"".format(f2=file2, p1=path1, p2=path2)

def touch(path):
    if not os.path.exists(path):
        with open(path, "w"):
            pass
    os.utime(path, None)

@contextlib.contextmanager
def chdir(*args, **kwargs):
    cwd = os.getcwd()
    os.chdir(*args, **kwargs)
    try:
        yield os.getcwd()
    finally:
        os.chdir(cwd)
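# Hedged usage sketch (editor addition): a round-trip test built from these
# helpers. The "xar -x -f" extraction flags are standard xar usage, but the
# exact test layout and mtime-preservation behavior are assumptions.
#
#   def test_roundtrip():
#       with directory_created("scratch") as src:
#           touch(os.path.join(src, "hello.txt"))
#           with archive_created("scratch.xar", "scratch") as archive:
#               with directory_created("extracted") as dst, chdir(dst):
#                   subprocess.check_call(["xar", "-x", "-f", archive])
#                   assert_identical_directories(src, "scratch")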
| 31.726027 | 193 | 0.708765 | 721 | 4,632 | 4.457698 | 0.305132 | 0.02178 | 0.015246 | 0.034848 | 0.064095 | 0.053516 | 0.024891 | 0.024891 | 0.024891 | 0.024891 | 0 | 0.032711 | 0.155225 | 4,632 | 145 | 194 | 31.944828 | 0.788653 | 0.247193 | 0 | 0.117021 | 0 | 0 | 0.1039 | 0 | 0 | 0 | 0 | 0 | 0.180851 | 1 | 0.074468 | false | 0.021277 | 0.095745 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da79b4fcd76b875d0455312cd540c29c3adde2c1 | 15,083 | py | Python | run_game_with_python_arcade.py | LiorAvrahami/fishy-game | e13d71ad04625edffc1ff32f56c918166f6b0bb9 | [
"MIT"
] | 5 | 2021-04-24T18:13:36.000Z | 2021-08-31T13:54:55.000Z | run_game_with_python_arcade.py | LiorAvrahami/fishy-game | e13d71ad04625edffc1ff32f56c918166f6b0bb9 | [
"MIT"
] | null | null | null | run_game_with_python_arcade.py | LiorAvrahami/fishy-game | e13d71ad04625edffc1ff32f56c918166f6b0bb9 | [
"MIT"
] | null | null | null | import arcade
import arcade.gui
from modifications_to_python_arcade.gui_manager import ModifiedUIManager
from modifications_to_python_arcade.resizeable_window import ResizeableWindow
from arcade.gui.ui_style import UIStyle
import fish
from controls import PlayerControlsObject
from fish_generator import RandomFishGenerator,WaveFishGenerator,FishGenerator
import time
import pickle
import os
from game_sprite_buttons import RestartGameButton,ContinueGameButton,YouWinPoster,ViewHighScoresButton,YouLosePoster
import resources
GL_NEAREST = 9728 # open_gl scaling filter key for nearest neighbor
from game_sprite_buttons import TextureButton
SCREEN_TITLE = "Eat or Be eaten"
from game_constents import min_computer_fish_size,max_computer_fish_size,min_computer_fish_speed,max_computer_fish_speed,player_win_size,player_start_size
all_deltatimes = []
num_of_high_scores = 5
screen_size:list
main_game_view:arcade.View
game:ResizeableWindow
class MainGameView(arcade.View):
"""
Main application class.
"""
fish_sprites: arcade.SpriteList
ui_manager : ModifiedUIManager
player_fish: fish.PlayerFish
paused:bool
# buttons def
restart_button_game_lost:RestartGameButton
continue_button_paused:ContinueGameButton
continue_button_game_lost:ContinueGameButton
you_win_poster: YouWinPoster
you_lose_poster: YouLosePoster
view_high_scores_button: ViewHighScoresButton
time_played:float
controls_handler: PlayerControlsObject
fish_generator: FishGenerator
b_did_win_already : bool
FLAG_open_high_scores_menue : int
@property
def height(self):
return screen_size[1]
@property
def width(self):
return screen_size[0]
def __init__(self):
super().__init__()
self.on_resize()
self.restart_game()
def restart_game(self):
""" Set up the game variables. Call to re-start the game. """
# Create your sprites and sprite lists here
# set up buttons
self.background_texture = resources.background_texture_map["idle"]
self.fish_sprites = arcade.SpriteList()
self.ui_manager = ModifiedUIManager(self.window)
self.player_fish = fish.PlayerFish(self)
self.fish_generator = RandomFishGenerator(1.1,self,min_fish_size=min_computer_fish_size,max_fish_size=max_computer_fish_size,min_fish_speed=min_computer_fish_speed,max_fish_speed=max_computer_fish_speed)
self.fish_sprites.append(self.player_fish)
self.paused = False
self.controls_handler = PlayerControlsObject(change_player_direction=self.player_fish.change_movement_direction,
reset_game=self.restart_game, pause_game=self.toggle_game_paused)
self.restart_button_game_lost = RestartGameButton(self,False)
self.restart_button_game_won = self.restart_button_game_lost
self.ui_manager.add_ui_element(self.restart_button_game_won)
self.continue_button_paused = ContinueGameButton(self,False)
self.ui_manager.add_ui_element(self.continue_button_paused)
self.you_win_poster = YouWinPoster(self,False)
self.you_win_poster.center_y += self.restart_button_game_won.height/2 + self.you_win_poster.height/2 + 10
self.ui_manager.add_ui_element(self.you_win_poster)
self.you_lose_poster = YouLosePoster(self,False)
self.you_lose_poster.center_y = self.restart_button_game_lost.top + self.you_win_poster.height / 2 + 10
self.ui_manager.add_ui_element(self.you_lose_poster)
self.continue_button_game_won = ContinueGameButton(self, False)
self.continue_button_game_won.center_y += -self.restart_button_game_won.height / 2 - self.continue_button_game_won.height / 2 - 10
self.ui_manager.add_ui_element(self.continue_button_game_won)
self.view_high_scores_button = ViewHighScoresButton(self,True)
self.view_high_scores_button.center_x = self.window.width - self.view_high_scores_button.width/2 - 20
self.view_high_scores_button.center_y = self.view_high_scores_button.height / 2 + 20
self.ui_manager.add_ui_element(self.view_high_scores_button)
self.time_played = 0
self.b_did_win_already = False
self.FLAG_open_high_scores_menue = -1
def on_draw(self):
"""
Render the screen.
"""
# This command should happen before we start drawing. It will clear
# the screen to the background color, and erase what we drew last frame.
arcade.start_render()
left, right, bottom, top = self.window.get_viewport()
arcade.draw_lrwh_rectangle_textured(0, 0,
right, top,
self.background_texture)
self.fish_sprites.draw(filter=GL_NEAREST)
self.ui_manager.on_draw()
# draw time
arcade.draw_text("time: {:.0f}".format(self.time_played),20,self.height - 40,color=(255,240,200,210),font_size=25,bold=True,anchor_y="bottom",font_name="ariblk")
# draw score (only relevant when the game is lost)
arcade.draw_text("score: {:.0f}%".format((self.player_fish.size - player_start_size)/(player_win_size-player_start_size)*100), 20, self.height - 40,
color=(255, 240, 200, 210), font_size=25, bold=True, anchor_y="top", font_name="ariblk")
last_time = None
def on_update(self, delta_time):
"""
All the logic to move, and the game logic goes here.
"""
# calculate delta_time
if self.last_time is not None:
delta_time = time.time() - self.last_time
self.last_time = time.time()
if not self.is_game_lost and not self.b_did_win_already and not self.paused:
self.time_played += delta_time
# update game
if not self.paused:
self.fish_sprites.on_update(delta_time)
self.fish_generator.update(delta_time)
all_deltatimes.append(delta_time)
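# FLAG_open_high_scores_menue appears to be a frame countdown: -1 means inactive,
# and the high-scores view is shown once it reaches 0 (delaying the switch so the
# win poster can render first).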
if self.FLAG_open_high_scores_menue == 0:
game.show_view(HighScoresView(self.time_played))
self.FLAG_open_high_scores_menue = -1
elif self.FLAG_open_high_scores_menue > 0:
self.FLAG_open_high_scores_menue -= 1
@property
def is_game_lost(self):
return self.player_fish not in self.fish_sprites
def unpause(self):
self.paused = False
self.continue_button_paused.is_visible = False
self.you_win_poster.is_visible = False
self.restart_button_game_won.is_visible = False
self.continue_button_game_won.is_visible = False
def toggle_game_paused(self):
if not self.is_game_lost:
if self.paused:
self.unpause()
else:
self.paused = True
self.continue_button_paused.is_visible = True
else:
self.restart_game()
def handle_game_lost(self):
self.restart_button_game_lost.is_visible = True
self.you_lose_poster.is_visible = True
def handle_game_won(self):
if not self.b_did_win_already:
self.you_win_poster.is_visible = True
self.continue_button_game_won.is_visible = True
self.restart_button_game_won.is_visible = True
self.b_did_win_already = True
high_scores = HighScoresView.load_high_scores()
if self.time_played < max([HighScoresView.try_parse(s[1]) for s in high_scores]):
self.FLAG_open_high_scores_menue = 1
def on_close(self):
self.window.on_close()
def switch_to_high_scores_view(self):
if not ( self.paused or self.b_did_win_already or self.is_game_lost ):
self.toggle_game_paused()
game.show_view(HighScoresView())
def on_show_view(self):
self.last_time = time.time()
self.controls_handler.reset_state()
def on_resize(self, width: float = 0, height: float = 0):
ratio = self.height/self.width
self.window.height = int(self.window.width*ratio)
return False
#UI
def on_key_press(self, key, key_modifiers):
"""
Called whenever a key on the keyboard is pressed.
"""
self.controls_handler.on_keyboard_press(key, key_modifiers)
def on_key_release(self, key, key_modifiers):
"""
Called whenever the user lets off a previously pressed key.
"""
self.controls_handler.on_keyboard_release(key, key_modifiers)
def on_mouse_motion(self, *args,**kwargs):
self.ui_manager.on_mouse_motion(*args,**kwargs)
def on_mouse_press(self, *args, **kwargs):
self.ui_manager.on_mouse_press(*args,**kwargs)
def on_mouse_release(self, *args, **kwargs):
self.ui_manager.on_mouse_release(*args,**kwargs)
class HighScoresView(arcade.View):
text_input_box : arcade.gui.UIInputBox
text_output_box : arcade.gui.UILabel
high_scores_text_boxes : list
ui_manager : arcade.gui.UIManager
rectangle_background : arcade.SpriteSolidColor
def __init__(self,new_high_score=None):
super().__init__()
arcade.set_background_color(arcade.color.AZURE)
self.ui_manager = arcade.gui.UIManager(self.window)
self.uistyle = UIStyle.default_style()
font_color = (30, 50, 50)
self.uistyle.set_class_attrs("label",font_color=font_color,font_color_hover=font_color,font_color_press=font_color)
title_texture = arcade.load_texture(r"resources\high scores.png")
self.title_poster = arcade.gui.UIImageButton(center_x=self.width / 2,center_y=self.height,normal_texture=title_texture,hover_texture=title_texture,press_texture=title_texture)
self.title_poster.center_y -= self.title_poster.height/2
self.ui_manager.add_ui_element(self.title_poster)
self.rectangle_background = arcade.SpriteSolidColor(self.width//2,self.height,(140,150,200))
self.rectangle_background.center_x = self.width / 2
self.rectangle_background.center_y = self.height/ 2
self.line_background = arcade.SpriteSolidColor(10,int(self.title_poster.bottom - 70),(20,30,60))
self.line_background.center_x = self.width / 2
self.line_background.center_y = self.title_poster.bottom - self.line_background.height/2 - 30
# back button:
back_button = arcade.gui.UIImageButton(center_x=0, center_y=0, normal_texture=resources.back_button_texture_map["mouse_out"], hover_texture=resources.back_button_texture_map["mouse_in"],
press_texture=resources.back_button_texture_map["mouse_pressed"])
back_button.center_x = self.width - back_button.width / 2 - 20
back_button.center_y = self.height - back_button.height / 2 - 20
self.ui_manager.add_ui_element(back_button)
@back_button.event("on_click")
def on_click():
self.ui_manager.remove_handlers()
self.ui_manager.purge_ui_elements()
game.show_view(main_game_view)
high_scores = self.load_high_scores()
if new_high_score is not None:
for index in range(len(high_scores)):
if new_high_score < self.try_parse(high_scores[index][1]):
high_scores.insert(index,(None,"{:.3g}".format(new_high_score)))
high_scores.pop()
break
self.draw_high_scores_table(high_scores)
@property
def height(self):
return screen_size[1]
@property
def width(self):
return screen_size[0]
@staticmethod
def try_parse(s):
try:
return float(s)
except (TypeError, ValueError):
return float("inf")
def draw_high_scores_table(self,high_scores:list):
self.names_boxes = [arcade.gui.UILabel(name,0,0, style=self.uistyle) if name is not None else
self.create_input_box() for name,score in high_scores]
self.scores_boxes = [arcade.gui.UILabel(score,0,0, style=self.uistyle) for name,score in high_scores]
for i in range(len(self.names_boxes)):
y = (self.names_boxes[i-1].center_y - self.names_boxes[i-1].height/2 if i > 0 else self.title_poster.bottom - 50)\
- self.names_boxes[i-1].height / 2 - 20
self.names_boxes[i].center_y = y
self.names_boxes[i].center_x = self.width/2 - self.names_boxes[i].width/2 - 30
self.scores_boxes[i].center_y = y
self.scores_boxes[i].center_x = self.width / 2 + self.scores_boxes[i].width / 2 + 30
self.ui_manager.add_ui_element(self.names_boxes[i])
self.ui_manager.add_ui_element(self.scores_boxes[i])
def create_input_box(self):
ret = arcade.gui.UIInputBox(0, 0, (self.line_background.left - self.rectangle_background.left)//1.2, style=self.uistyle)
@ret.event("on_enter")
def on_enter():
ret.text = ret.text.replace("\n", "\\n")  # str.replace returns a new string; assign it back
high_scores = [(self.names_boxes[i].text,self.scores_boxes[i].text) for i in range(len(self.names_boxes))]
self.save_high_scores(high_scores)
# replace text box with label
self.ui_manager._ui_elements.remove(ret)
new_label = arcade.gui.UILabel(ret.text, 0, 0, style=self.uistyle)
new_label.center_y = ret.center_y
new_label.center_x = self.width/2 - new_label.width/2 - 30
index = self.names_boxes.index(ret)
high_scores[index] = (new_label,high_scores[index][1])
self.ui_manager.add_ui_element(new_label)
self.ui_manager.focused_element = ret
return ret
def save_high_scores(self,high_scores):
with open("high_scores.pypickle", "wb+") as file:
pickle.dump(high_scores, file)
@staticmethod
def load_high_scores():
if os.path.exists("high_scores.pypickle"):
with open("high_scores.pypickle", "rb") as file:
high_scores = pickle.load(file)
else:
high_scores = []
while len(high_scores) < num_of_high_scores:
high_scores.append(("---","---"))
return high_scores[:num_of_high_scores]
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.rectangle_background.draw()
self.line_background.draw()
def on_resize(self, width: float = 0, height: float = 0):
ratio = self.height/self.width
self.window.height = int(self.window.width*ratio)
return False
def main():
""" Main method """
global game,main_game_view,screen_size
game = ResizeableWindow(1000, 500, "Fishy Game",resizable=True)
game.maximize()
game.dispatch_events()
screen_size = game.get_size()
game.stretch_game_with_window = True
# game.set_viewport(0, self.width, 0, self.height)
main_game_view = MainGameView()
game.show_view(main_game_view)
arcade.run()
if __name__ == "__main__":
main()
| 39.381201 | 211 | 0.679772 | 2,039 | 15,083 | 4.712604 | 0.14615 | 0.054116 | 0.028411 | 0.018316 | 0.399521 | 0.284733 | 0.197003 | 0.11791 | 0.095848 | 0.083047 | 0 | 0.014509 | 0.22774 | 15,083 | 382 | 212 | 39.484293 | 0.81044 | 0.047471 | 0 | 0.16 | 0 | 0 | 0.017241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.058182 | 0.018182 | 0.301818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7b3bc161256bb5501fd5bd641192702f9a7738 | 2,306 | py | Python | pebbles/views/sessions.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 4 | 2017-05-11T14:50:32.000Z | 2020-01-10T09:02:27.000Z | pebbles/views/sessions.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 145 | 2017-04-07T11:01:58.000Z | 2019-12-11T15:30:23.000Z | pebbles/views/sessions.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 3 | 2017-10-25T12:36:16.000Z | 2018-04-26T08:49:34.000Z | from flask_restful import fields, marshal
from flask import Blueprint as FlaskBlueprint
import logging
import json
from pebbles.models import User
from pebbles.forms import SessionCreateForm
from pebbles.server import app, restful
from pebbles.views.commons import is_group_manager, update_email # changed
sessions = FlaskBlueprint('sessions', __name__)
token_fields = {
'token': fields.String,
'user_id': fields.String,
'is_admin': fields.Boolean,
'is_group_owner': fields.Boolean,
'is_group_manager': fields.Boolean,
'icon_value': fields.String
}
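# Each position in the icon lists below maps to a fixed navigation slot in the UI;
# an empty string appears to hide that slot for the given role.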
admin_icons = ["Dashboard", "Users", "Groups", "Blueprints", "Configure", "Statistics", "Account"]
group_owner_icons = ["Dashboard", "", "Groups", "Blueprints", "", "", "Account"]
group_manager_icons = ["Dashboard", "", "", "Blueprints", "", "", "Account"]
user_icons = ["Dashboard", "", "", "", "", "", "Account"]
class SessionView(restful.Resource):
def post(self):
form = SessionCreateForm()
if not form.validate_on_submit():
logging.warn("validation error on user login")
return form.errors, 422
user = User.query.filter_by(eppn=form.eppn.data).first()
if user and not user.email_id:
# Email and eppn are the same because we invite users via their email id
user = update_email(eppn=user.eppn, email_id=user.eppn)
if user and user.check_password(form.password.data):
if user.is_admin:
icons = json.dumps(admin_icons)
elif user.is_group_owner:
icons = json.dumps(group_owner_icons)
elif is_group_manager(user):
icons = json.dumps(group_manager_icons)
else:
icons = json.dumps(user_icons)
return marshal({
'token': user.generate_auth_token(app.config['SECRET_KEY']),
'is_admin': user.is_admin,
'is_group_owner': user.is_group_owner,
'is_group_manager': is_group_manager(user),
'user_id': user.id,
'icon_value': icons
}, token_fields)
logging.warn("invalid login credentials for %s" % form.eppn.data)
return {
'message': 'Unauthorized',
'status': 401
}, 401
| 36.603175 | 98 | 0.624024 | 265 | 2,306 | 5.218868 | 0.358491 | 0.045553 | 0.050615 | 0.028923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005254 | 0.257155 | 2,306 | 62 | 99 | 37.193548 | 0.802102 | 0.030789 | 0 | 0 | 0 | 0 | 0.15905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0.019231 | 0.153846 | 0 | 0.25 | 0.096154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7bab95ad749f6149016b3bc152e246a371a757 | 6,138 | py | Python | train_rrca.py | deepeshhada/ReXPlug | f6ba1e1707e04f82451fba8ada19c731c8f7c46e | [
"Apache-2.0"
] | 6 | 2021-04-04T05:09:32.000Z | 2022-01-21T10:59:20.000Z | train_rrca.py | deepeshhada/ReXPlug | f6ba1e1707e04f82451fba8ada19c731c8f7c46e | [
"Apache-2.0"
] | null | null | null | train_rrca.py | deepeshhada/ReXPlug | f6ba1e1707e04f82451fba8ada19c731c8f7c46e | [
"Apache-2.0"
] | 1 | 2021-11-06T05:36:03.000Z | 2021-11-06T05:36:03.000Z | import argparse
import os
import pickle
from copy import deepcopy
import pandas as pd
import torch.optim as optim
from torch.utils.data import DataLoader
from collate import CollateTrain, CollateTest
from models.RRCA import *
from utils.rrca_utils import evaluate, train_one_epoch
def get_embeddings(dataset_path):
with open(os.path.join(dataset_path, 'true_sentence_embeddings.pkl'), 'rb') as f:
true_embeddings = pickle.load(f)
return true_embeddings
def create_reviews_lists(train_df, true_embeddings):
user_reviews_dict = {}
item_reviews_dict = {}
for idx, row in train_df.iterrows():
if int(row[0]) not in user_reviews_dict:
user_reviews_dict[int(row[0])] = []
if int(row[1]) not in item_reviews_dict:
item_reviews_dict[int(row[1])] = []
user_reviews_dict[int(row[0])].append(true_embeddings[idx])
item_reviews_dict[int(row[1])].append(true_embeddings[idx])
return user_reviews_dict, item_reviews_dict
def create_dataset(df, true_embeddings, mode="Test"):
user_item_ratings = {}
if mode == "Train":
for idx, row in df.iterrows():
user_item_ratings[idx] = [int(row[0]), int(row[1]), true_embeddings[idx], row[3]]
else:
for idx, row in df.iterrows():
user_item_ratings[idx] = [int(row[0]), int(row[1]), row[3]]
return user_item_ratings
def train_rrca(
dataset_path="./data",
model_save_path="./saved_models",
model="rrca",
batch_size_rrca=256,
learning_rate_rrca=0.002,
num_epochs_rrca=150,
dataset_name="AmazonDigitalMusic"
):
with open('./pickled_meta/dataset_meta.pkl', 'rb') as f:
dataset_meta = pickle.load(f)
num_users = dataset_meta[dataset_name]['num_users']
num_items = dataset_meta[dataset_name]['num_items']
num_factors = 64
num_layers = 3
sentence_embed_dim = 512
embed_dim = num_factors * (2 ** (num_layers - 1))
model_save_path = os.path.join(model_save_path, dataset_name, model + '.pt')
dataset_path = os.path.join(dataset_path, dataset_name)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Prepare data_loaders
train_df = pd.read_csv(os.path.join(dataset_path, 'train_df.csv'))
val_df = pd.read_csv(os.path.join(dataset_path, 'val_df.csv'))
test_df = pd.read_csv(os.path.join(dataset_path, 'test_df.csv'))
print(f"Train size: {len(train_df)} | Val size: {len(val_df)} | Test size: {len(test_df)}")
print("Creating data loaders...")
true_embeddings = get_embeddings(dataset_path)
user_reviews_dict, item_reviews_dict = create_reviews_lists(train_df, true_embeddings)
train_set = create_dataset(train_df, true_embeddings, mode="Train")
val_set = create_dataset(val_df, true_embeddings, mode="Val")
test_set = create_dataset(test_df, true_embeddings, mode="Test")
train_loader = DataLoader(
dataset=train_set,
batch_size=batch_size_rrca,
shuffle=True,
collate_fn=CollateTrain(user_reviews_dict, item_reviews_dict)
)
val_loader = DataLoader(
dataset=val_set,
batch_size=batch_size_rrca,
shuffle=False,
collate_fn=CollateTest(user_reviews_dict, item_reviews_dict)
)
test_loader = DataLoader(
dataset=test_set,
batch_size=batch_size_rrca,
shuffle=False,
collate_fn=CollateTest(user_reviews_dict, item_reviews_dict)
)
print("Creating RRCA modules...")
review_regularizer = ReviewRegularizer(num_factors=num_factors).to(device)
cross_attention_module = CrossAttention(embed_dim=embed_dim, sentence_embed_dim=sentence_embed_dim).to(device)
model = RatingPredictor(
review_regularizer=review_regularizer,
cross_attention=cross_attention_module,
embed_dim=embed_dim,
num_users=num_users,
num_items=num_items,
num_factors=num_factors,
num_layers=num_layers
).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate_rrca)
loss_function = nn.MSELoss()
losses_overall, losses_rating_pred, losses_att, losses_reg = [], [], [], []
val_mses, val_maes = [], []
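# simple early stopping: abort training after PATIENCE epochs without improvement in validation MSE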
PATIENCE = 15
patience = PATIENCE
best_val_mse, best_model = float("inf"), None  # start at infinity so the first epoch always saves a model
print("Training...")
print("=" * 80)
for epoch in range(1, num_epochs_rrca + 1):
if patience == 0:
break
epoch_loss_overall, epoch_loss_rating_pred, epoch_loss_att, epoch_loss_reg, val_mse, val_mae = train_one_epoch(
model=model,
train_loader=train_loader,
val_loader=val_loader,
loss_function=loss_function,
optimizer=optimizer,
epoch=epoch,
device=device
)
if val_mse < best_val_mse:
print("Saving model...")
patience = PATIENCE
best_val_mse = val_mse
best_model = deepcopy(model)
torch.save(best_model.state_dict(), model_save_path)
else:
patience -= 1
losses_overall.append(epoch_loss_overall)
losses_rating_pred.append(epoch_loss_rating_pred)
losses_att.append(epoch_loss_att)
losses_reg.append(epoch_loss_reg)
val_mses.append(val_mse)
val_maes.append(val_mae)
print("=" * 80)
print('RRCA trained. Evaluating on the test set.')
print("-" * 80)
test_mse, test_mae = evaluate(best_model, test_loader, device)
print(f"Test MSE: {test_mse:.4f} | Test MAE: {test_mae:.4f}")
print("=" * 80)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train ReXPlug.")
parser.add_argument("--dataset_path", type=str, default="./data", help="Root folder path of preprocessed dataset.")
parser.add_argument("--model_save_path", type=str, default="./saved_models", help="Root path to save RRCA's model.")
parser.add_argument("--model", type=str, default="rrca", help="Choose from 'rrca' or 'rr'.")
parser.add_argument("--batch_size_rrca", type=int, default=256, help="Batch size to train RRCA.")
parser.add_argument("--learning_rate_rrca", type=float, default=0.002, help="Learning rate for RRCA.")
parser.add_argument("--num_epochs_rrca", type=int, default=150, help="Number of epochs to train RRCA.")
parser.add_argument(
"--dataset_name",
type=str,
default="AmazonDigitalMusic",
choices=("AmazonDigitalMusic", "AmazonVideoGames", "AmazonClothing", "Yelp_1", "Yelp_2", "BeerAdvocate"),
help="Name of the dataset to use."
)
args = parser.parse_args()
root_path = os.path.join(args.model_save_path, args.dataset_name)
if not os.path.exists(root_path):
os.makedirs(root_path)
train_rrca(**(vars(args)))
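# Example invocation (paths/values are illustrative):
# python train_rrca.py --dataset_name AmazonDigitalMusic --batch_size_rrca 256 --num_epochs_rrca 150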
| 33.540984 | 117 | 0.750244 | 920 | 6,138 | 4.693478 | 0.197826 | 0.045855 | 0.031264 | 0.035665 | 0.229968 | 0.173692 | 0.112552 | 0.087077 | 0.087077 | 0.064845 | 0 | 0.011473 | 0.119583 | 6,138 | 182 | 118 | 33.725275 | 0.787565 | 0.003258 | 0 | 0.102564 | 0 | 0.00641 | 0.145029 | 0.009647 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.064103 | 0 | 0.115385 | 0.070513 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da8173e404603548727bb332a693728554dd4658 | 3,927 | py | Python | timekeeper/log.py | jmcph4/timekeeper | 1ab850739c7071ebd8a4d1a63795d014bfa9c41b | [
"MIT"
] | null | null | null | timekeeper/log.py | jmcph4/timekeeper | 1ab850739c7071ebd8a4d1a63795d014bfa9c41b | [
"MIT"
] | 5 | 2017-07-19T10:09:32.000Z | 2017-07-30T03:32:56.000Z | timekeeper/log.py | jmcph4/timekeeper | 1ab850739c7071ebd8a4d1a63795d014bfa9c41b | [
"MIT"
] | null | null | null | from datetime import datetime
import sqlite3
from . import slice
class Log(object):
"""
Represents a series of slices, forming a log of how time was spent
"""
DT_FMT = "%Y-%m-%d %H:%M"
_COL_WIDTH = 15
def __init__(self, slices):
self._slices = {}
for s in slices:
self._slices[s.start] = (s, False)
@property
def slices(self):
sl = {}
for k, v in self._slices.items():
sl[k] = v[0]
return sl
def get_slice(self, dt):
"""
Returns the slice starting at the specified time, or None if no slice starts then
"""
entry = self._slices.get(dt)
return entry[0] if entry is not None else None
def set_slice(self, s, saved=False):
"""
Adds s to the log, overwriting any slice previously at that location
"""
self._slices[s.start] = (s, saved)
def __repr__(self):
s = "Start | End | Category | Description \n"
s += "-----------------+------------------+-----------------+-------------------------------\n"
for k, v in self._slices.items():
start_str = v[0].start.strftime(self.DT_FMT)
end_str = v[0].end.strftime(self.DT_FMT)
if not v[1]:
saved_notice = "(!)"
else:
saved_notice = ""
s += saved_notice + start_str + " | " + end_str + " | " + v[0].category + " " * (self._COL_WIDTH - len(v[0].category)) + " | " + v[0].description + "\n"
return s
def save(self, db_path):
"""
Saves the log to the specified database file by inserting each slice
into the SQL table
"""
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS log (id INTEGER PRIMARY KEY AUTOINCREMENT, start DATETIME, end DATETIME, category VARCHAR, description TEXT)''')
for k, v in self._slices.items():
if not v[1]: # if not saved
start_str = v[0].start.strftime(self.DT_FMT)
end_str = v[0].end.strftime(self.DT_FMT)
data = (start_str, end_str, v[0].category, v[0].description)
c.execute('''INSERT INTO log (start, end, category, description) VALUES (?, ?, ?, ?)''', data)
conn.commit()
self._slices[k] = (v[0], True) # mark slice as saved in the dict (rebinding the loop variable alone would be lost)
conn.close()
def load(self, db_path):
"""
Loads a log from the specified database file by inserting each slice
into the log object from the SQL table
"""
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('''SELECT * FROM log''')
data = c.fetchall()
for d in data:
self.set_slice(slice.Slice(datetime.strptime(d[1], self.DT_FMT),
datetime.strptime(d[2], self.DT_FMT),
d[3], d[4]), True)
conn.close()
def __len__(self):
length = 0
for k, v in self._slices.items():
length += len(v[0])
return length
def category_aggregate(self):
"""
Returns a dictionary associating each category in the log with the total
number of minutes attributed to it
"""
categories = {}
for k, v in self._slices.items():
categories[v[0].category] = 0
for k, v in self._slices.items():
categories[v[0].category] += len(v[0])
return categories
def ranged_category_aggregate(self, start, end):
"""
Same as category_aggregate() but only applies to slices within the range
[start, end]
"""
new_slices = []
for k, v in self.slices.items():
if k > start and k < end:
new_slices.append(v)
tmp = Log(new_slices)
return tmp.category_aggregate()
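# Minimal usage sketch (category/description values are illustrative; assumes
# slice.Slice(start, end, category, description) as used in load() above):
#   from datetime import datetime
#   log = Log([slice.Slice(datetime(2015, 1, 1, 9, 0), datetime(2015, 1, 1, 10, 30), "work", "morning emails")])
#   log.save("log.db")
#   print(log.category_aggregate())  # e.g. {'work': 90}, if len(Slice) is its duration in minutes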
| 29.088889 | 164 | 0.507512 | 484 | 3,927 | 3.995868 | 0.272727 | 0.015512 | 0.018097 | 0.025336 | 0.333506 | 0.315926 | 0.297311 | 0.249741 | 0.212513 | 0.212513 | 0 | 0.011458 | 0.355488 | 3,927 | 134 | 165 | 29.30597 | 0.752667 | 0.152279 | 0 | 0.243243 | 0 | 0.013514 | 0.136219 | 0.028008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.040541 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da820a998854815eb9a984cf6f47297e37abd1fc | 709 | py | Python | tests/test_utils.py | leugimkm/codeseeker | f8a1f8668807a2b02cbaf5c596d26164ba75e366 | [
"MIT"
] | 1 | 2022-02-02T04:43:32.000Z | 2022-02-02T04:43:32.000Z | tests/test_utils.py | leugimkm/codeseeker | f8a1f8668807a2b02cbaf5c596d26164ba75e366 | [
"MIT"
] | 7 | 2022-02-02T05:25:40.000Z | 2022-03-23T17:16:19.000Z | tests/test_utils.py | leugimkm/codeseeker | f8a1f8668807a2b02cbaf5c596d26164ba75e366 | [
"MIT"
] | null | null | null | import io
import unittest
from unittest.mock import patch
from textwrap import dedent
from codeseeker.utils import show
class TestCodeSeekerUtils(unittest.TestCase):
def test_show(self):
data = [
{"path": "repository/path/to/file.py"},
{"path": "repository/path/to/file2.py"},
]
expected = dedent("""\
repository/path/to/file.py
repository/path/to/file2.py
2 file(s) found(s).\n"""
) # noqa: E124
with patch("sys.stdout", new_callable=io.StringIO) as mock_stdout:
show(data)
self.assertEqual(mock_stdout.getvalue(), expected)
if __name__ == '__main__':
unittest.main()
| 24.448276 | 74 | 0.603667 | 83 | 709 | 5.012048 | 0.518072 | 0.134615 | 0.153846 | 0.096154 | 0.216346 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011628 | 0.272214 | 709 | 28 | 75 | 25.321429 | 0.794574 | 0.014104 | 0 | 0 | 0 | 0 | 0.278336 | 0.15208 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da828fe3ebcfe4b60891da48c991e49aa603a4a1 | 3,267 | py | Python | cryptography/rail_fence_cipher/Python/rail_fence_cipher.py | avi-pal/al-go-rithms | 5167a20f1db7b366ff19f2962c1746a02e4f5067 | [
"CC0-1.0"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | cryptography/rail_fence_cipher/Python/rail_fence_cipher.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | cryptography/rail_fence_cipher/Python/rail_fence_cipher.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | # used for decryption, take the second element for sorting
def takeSecond(elem):
return elem[1]
def display_rail(lines):
depth = len(lines)
col = len(lines[0])
# depth is the number of rows of the grid
# lines is a tuple where line[i] is the i-th line to print
# col is the number of columns = number of characters of the initial string
for i in range(0,depth):
print( ( ("| %c "*col) + "|") % tuple(lines[i]) )
def encrypt(string,depth):
#make sure that string is a string!
string = str(string)
nChar = len(string)
# create a nested list with 'depth' number of items
# each item has a number of characters = length of the string to cypher
# initialize the list with all spaces:
lines = [ [' ',]*nChar for _ in range(depth)]
# encrStrings will be a list dynamically filled with the letters of 'string'
# each item of the list will represent a row of the rail.
# this list will then have 'depth' items
encrStrings = ['' for _ in range(depth)]
# Define the sequence in which the rows are filled
if depth == 2:
row_sequence = [0,1]
else:
row_sequence = [i for i in range(0,depth)]
row_sequence.extend(range(depth-2,0,-1) )
# length of the sequence
seqLen = len(row_sequence)
for i in range(0,nChar):
row = row_sequence[i%seqLen] #repeatedly go through the sequence
lines[row][i] = string[i]
encrStrings[row] = encrStrings[row] + string[i]
display_rail(lines)
encrString = ''.join(c for c in encrStrings)
return encrString
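# Worked example (computed by hand, depth 3): encrypt("rail fence", 3)
# visits rows in the repeating sequence [0, 1, 2, 1], giving the rail
#   row 0: r   _   c      -> "r c"
#   row 1:  a l f n e     -> "alfne"
#   row 2:   i   e        -> "ie"
# so the ciphertext read row by row is "r calfneie".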
def decrypt(encrString,depth):
# from depth and the length of the string we can determine the sequence
# of places in the rails as they were filled
nChar = len(encrString)
if depth == 2:
row_sequence = [0,1] # matches encrypt(); only the relative order matters for the sort below
else:
row_sequence = [i for i in range(0,depth)]
row_sequence.extend(range(depth-2,0,-1) )
# length of the sequence
seqLen = len(row_sequence)
sequence = []
# build a list with the indexes of rows and column according to the sequence
for i in range(0,nChar):
row = row_sequence[i%seqLen] #repeatedly go through the sequence
sequence.append([row,i])
# sort according to rows (so in the order the encrypted string is taken)
sequence.sort()
# now associate the encrypted string to the rail 'coordinates'
for i in range(nChar):
sequence[i].append(encrString[i])
# finally for decryption we rearrange the list items according to columns and read the result
sequence.sort(key=takeSecond)
string = ''.join(c[2] for c in sequence)
return string
# EXAMPLES
# check that len(string)>depth
print("encryptions with depth 2: ")
res = encrypt("rail fence",2)
print("rail fence: " + res)
res = decrypt(res,2)
print("decryption -> " + res)
res = encrypt("Github",2)
print("Github: " + res)
res = decrypt(res,2)
print("decryption -> " + res)
res = encrypt("I am a test!",2)
print("I am a test! -> " + res)
res = decrypt(res,2)
print("decryption -> " + res)
print("encryptions with depth 3: ")
res = encrypt("rail fence",3)
print("rail fence: " + res)
res = decrypt(res,3)
print("decryption -> " + res)
res = encrypt("Github",3)
print("Github: " + res)
res = decrypt(res,3)
print("decryption -> " + res)
res = encrypt("I am a test!",3)
print("I am a test! -> " + res)
res = decrypt(res,3)
print("decryption -> " + res)
| 27.923077 | 94 | 0.685644 | 520 | 3,267 | 4.280769 | 0.236538 | 0.049416 | 0.016173 | 0.02965 | 0.340072 | 0.323001 | 0.30009 | 0.287511 | 0.27044 | 0.237197 | 0 | 0.012864 | 0.191001 | 3,267 | 116 | 95 | 28.163793 | 0.829361 | 0.367309 | 0 | 0.457143 | 0 | 0 | 0.132713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0 | 0.014286 | 0.1 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da85a9841500fbd6e450901a3ca02828bbbeb03f | 1,443 | py | Python | selfdrive/can/tests/test_packer_chrysler.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 4 | 2019-02-12T03:06:31.000Z | 2020-07-17T03:54:46.000Z | selfdrive/can/tests/test_packer_chrysler.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 3 | 2020-09-08T07:21:59.000Z | 2020-09-08T07:22:07.000Z | selfdrive/can/tests/test_packer_chrysler.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 4 | 2019-05-21T19:02:46.000Z | 2020-03-24T14:27:45.000Z | import unittest
import random
from selfdrive.can.tests.packer_old import CANPacker as CANPackerOld
from selfdrive.can.packer import CANPacker
import selfdrive.car.chrysler.chryslercan as chryslercan
class TestPackerMethods(unittest.TestCase):
def setUp(self):
self.chrysler_cp_old = CANPackerOld("chrysler_pacifica_2017_hybrid")
self.chrysler_cp = CANPacker("chrysler_pacifica_2017_hybrid")
def test_correctness(self):
# Test all commands, randomize the params.
for _ in xrange(1000):
gear = ('drive', 'reverse', 'low')[random.randint(0, 3) % 3]
lkas_active = (random.randint(0, 2) % 2 == 0)
hud_alert = random.randint(0, 6)
hud_count = random.randint(0, 65536)
lkas_car_model = random.randint(0, 65536)
m_old = chryslercan.create_lkas_hud(self.chrysler_cp_old, gear, lkas_active, hud_alert, hud_count, lkas_car_model)
m = chryslercan.create_lkas_hud(self.chrysler_cp, gear, lkas_active, hud_alert, hud_count, lkas_car_model)
self.assertEqual(m_old, m)
apply_steer = (random.randint(0, 2) % 2 == 0)
moving_fast = (random.randint(0, 2) % 2 == 0)
frame = random.randint(0, 65536)
m_old = chryslercan.create_lkas_command(self.chrysler_cp_old, apply_steer, moving_fast, frame)
m = chryslercan.create_lkas_command(self.chrysler_cp, apply_steer, moving_fast, frame)
self.assertEqual(m_old, m)
if __name__ == "__main__":
unittest.main()
| 40.083333 | 120 | 0.726265 | 203 | 1,443 | 4.871921 | 0.315271 | 0.105157 | 0.113246 | 0.051567 | 0.435794 | 0.344793 | 0.293225 | 0.173913 | 0.173913 | 0.084934 | 0 | 0.039037 | 0.165627 | 1,443 | 35 | 121 | 41.228571 | 0.782392 | 0.02772 | 0 | 0.074074 | 0 | 0 | 0.057816 | 0.041399 | 0 | 0 | 0 | 0 | 0.074074 | 1 | 0.074074 | false | 0 | 0.185185 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da85c358f54be05780771410e2c91e3ce7581a8d | 9,156 | py | Python | kddg/api/layers.py | Kortemme-Lab/kddg | 9fc09172abbefd4fef49261687c60a9bd9b6b29b | [
"MIT"
] | 2 | 2016-06-14T00:32:02.000Z | 2020-05-04T03:29:46.000Z | kddg/api/layers.py | Kortemme-Lab/kddg | 9fc09172abbefd4fef49261687c60a9bd9b6b29b | [
"MIT"
] | null | null | null | kddg/api/layers.py | Kortemme-Lab/kddg | 9fc09172abbefd4fef49261687c60a9bd9b6b29b | [
"MIT"
] | null | null | null | #!/usr/bin/python2.4
# encoding: utf-8
"""
api_layers.py
The definition of the layers of the database API and the generic user interface class.
Created by Shane O'Connor 2015.
Copyright (c) 2015 __UCSF__. All rights reserved.
"""
import inspect
import functools
from klab import colortext
from kddg.api import settings
sys_settings = settings.load()
### API function decorators. These are used to group functions together when printing the help text.
functional_layer = {
0 : 'API warnings',
1 : 'Information layer',
2 : 'Prediction layer',
3 : 'Results layer',
4 : 'Analysis layer',
5 : 'Application layer',
6 : 'Consistency layer',
7 : 'Data entry layer',
None: 'Miscellaneous'
}
def alien(func):
func._helptype = 'Alien functions (these should be moved into another package)'
func._layer = 0
func._layer_order = 0
return func
def brokenfn(func):
func._helptype = 'Broken functions: this need to be fixed/updated'
func._layer = 0
func._layer_order = 1
return func
def deprecated(func):
func._helptype = 'Deprecated functions. These should be removed but exist for now to print errors upon use'
func._layer = 0
func._layer_order = 2
return func
def informational_misc(func):
func._helptype = 'Miscellaneous information API'
func._layer = 1
func._layer_order = 0
return func
def informational_file(func):
func._helptype = 'File information API'
func._layer = 1
func._layer_order = 1
return func
def informational_pdb(func):
func._helptype = 'Structure information API'
func._layer = 1
func._layer_order = 2
return func
def informational_complex(func):
func._helptype = 'Complex information API'
func._layer = 1
func._layer_order = 3
return func
def informational_job(func):
func._helptype = 'Prediction information API'
func._layer = 1
func._layer_order = 4
return func
def job_creator(func):
func._helptype = 'Job creation API'
func._layer = 2
func._layer_order = 0
return func
def job_input(func):
func._helptype = 'Input file generation API'
func._layer = 2
func._layer_order = 1
return func
def job_execution(func):
func._helptype = 'Job execution API'
func._layer = 2
func._layer_order = 2
return func
def job_completion(func):
func._helptype = 'Job completion API'
func._layer = 2
func._layer_order = 3
return func
def job_results(func):
func._helptype = 'Results API'
func._layer = 3
func._layer_order = 0
return func
def analysis_api(func):
func._helptype = 'Analysis API'
func._layer = 4
func._layer_order = 0
return func
def app_pymol(func):
func._helptype = 'PyMOL API'
func._layer = 5
func._layer_order = 0
return func
def sanity_check(func):
func._helptype = 'Data consistency /sanity checks'
func._layer = 6
func._layer_order = 0
return func
def general_data_entry(func):
func._helptype = 'Data entry'
func._layer = 7
func._layer_order = 0
return func
def ppi_data_entry(func):
func._helptype = 'PPI Data entry'
func._layer = 7
func._layer_order = 1
return func
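# Illustration (hypothetical method, not part of the package) of how the
# decorators above tag API methods so get_help() can group them:
#
# @informational_pdb
# def get_pdb_chains(self, pdb_id):
#     '''Return the chains of the given PDB structure.'''
#     ...
#
# get_help() would then list get_pdb_chains under "Layer 1: Information layer",
# section "Structure information API".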
class GenericUserInterface(object):
'''This is the class that should be used to interface with the database. It hides functions that should only be called
from within other API functions.
The class contains a private copy of the internal API and wraps the public functions of that API so that the
functions of GenericUserInterface contain only the public functions of the internal API. Private functions
are denoted as such by a leading underscore in the function name.
'''
@staticmethod
def generate(cls, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
return GenericUserInterface(cls, passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port, file_content_buffer_size = file_content_buffer_size)
@staticmethod
def bind_object_function(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs): return fn(*args, **kwargs)
return wrapper
def __init__(self, cls, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
self._ddg_interface = cls(passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port, file_content_buffer_size = file_content_buffer_size)
self._api_functions = []
self._api_function_args = {}
self.DDG_db = self._ddg_interface.DDG_db
self.DDG_db_utf = self._ddg_interface.DDG_db_utf
self.cls = cls
for m in inspect.getmembers(cls, predicate=inspect.ismethod):
if m[0][0] != '_':
fn_name = m[0]
fn_ref = getattr(self._ddg_interface, fn_name)
self._api_function_args[fn_name] = fn_ref.func_code.co_varnames[:fn_ref.func_code.co_argcount]
self._api_functions.append(fn_name)
self.__dict__[fn_name] = GenericUserInterface.bind_object_function(getattr(self._ddg_interface, fn_name))
def help(self, show_deprecated_functions = False):
print(self.get_help(show_deprecated_functions = show_deprecated_functions))
def get_help(self, show_deprecated_functions = False):
helpstr = []
title = ' %s API ' % self._ddg_interface.__class__.__name__
l = len(title)
helpstr.append(colortext.mcyan('\n' + ('*' * (l + 10)) + '\n' + ('*' * 5) + title + ('*' * 5) + '\n' + ('*' * (l + 10)) + '\n'))
doc_strings = {}
for fn_name in sorted(self._api_functions):
fn = self.__dict__[fn_name]
function_layer, function_layer_order, function_class = None, None, None
try:
function_layer = fn._layer
assert(function_layer in functional_layer)
function_layer_order = fn._layer_order
except:
function_layer = None
function_layer_order = 0
try:
function_class = fn._helptype
except:
function_class = 'Miscellaneous'
if function_class.startswith('Deprecated functions') and not show_deprecated_functions:
continue
doc_strings[function_layer] = doc_strings.get(function_layer, {})
doc_strings[function_layer][function_layer_order] = doc_strings[function_layer].get(function_layer_order, {})
doc_strings[function_layer][function_layer_order][function_class] = doc_strings[function_layer][function_layer_order].get(function_class, {})
doc_strings[function_layer][function_layer_order][function_class][fn_name] = self._get_fn_docstring(fn, fn_name)
for function_layer, function_layer_components in sorted(doc_strings.iteritems()):
function_layer_name = functional_layer[function_layer]
prefix = ''
if function_layer != None:
prefix = 'Layer %d: ' % function_layer
helpstr.append(colortext.mcyan('-------- %s%s --------\n' % (prefix, function_layer_name)))
for function_layer_order, function_classes in sorted(function_layer_components.iteritems()):
for function_class, fn_names in sorted(function_classes.iteritems()):
helpstr.append(colortext.mlightpurple(' %s\n' % function_class))
for fn_name, docstr in sorted(fn_names.iteritems()):
helpstr.append(colortext.mgreen(' %s(%s)' % (fn_name, ', '.join(self._api_function_args[fn_name]))))
if docstr:
helpstr.append(colortext.myellow(' %s' % ('\n '.join([s.strip() for s in docstr.split('\n') if s.strip()]))))
else:
helpstr.append(colortext.mred(' <not documented>'))
helpstr.append('')
return '\n'.join(helpstr)
def _get_fn_docstring(self, fn, fn_name, default_name = ''):
'''Returns the docstring for a function, winding up the inheritance tree until we find a non-empty docstring.
If no docstring is found, default_name is returned.'''
if fn.__doc__:
return fn.__doc__
# Wind up the hierarchy until we find the class where this function was last defined
for parent in self.cls.__mro__[1:]:
overridden = getattr(parent, fn_name, None)
if overridden and overridden.__doc__:
return overridden.__doc__
return default_name
| 34.292135 | 257 | 0.662844 | 1,152 | 9,156 | 4.978299 | 0.212674 | 0.056495 | 0.050218 | 0.020924 | 0.356582 | 0.33531 | 0.296774 | 0.207149 | 0.150654 | 0.122406 | 0 | 0.009556 | 0.245631 | 9,156 | 266 | 258 | 34.421053 | 0.820762 | 0.108126 | 0 | 0.302198 | 0 | 0 | 0.093132 | 0 | 0 | 0 | 0 | 0 | 0.005495 | 1 | 0.137363 | false | 0.021978 | 0.021978 | 0.010989 | 0.296703 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da8712cd2ff361045352f744da703fa2ec6f82df | 3,142 | py | Python | fb_api.py | wing3s/shop_bot | 4c6a34538ac8de9999edae190f6269bc6a63c2cf | [
"BSD-3-Clause"
] | 1 | 2016-04-11T01:18:53.000Z | 2016-04-11T01:18:53.000Z | fb_api.py | wing3s/shop_bot | 4c6a34538ac8de9999edae190f6269bc6a63c2cf | [
"BSD-3-Clause"
] | null | null | null | fb_api.py | wing3s/shop_bot | 4c6a34538ac8de9999edae190f6269bc6a63c2cf | [
"BSD-3-Clause"
] | null | null | null | import os
import requests
import time
import ConfigParser
import logging
import logging.config
from requests.exceptions import RequestException
from helper import get_logger, base_path
config = ConfigParser.ConfigParser()
config.read(os.path.join(base_path, 'config.ini'))
logger = get_logger('fb_api', __file__)
__author__ = "Wen-Hao Lee"
__email__ = "wing3s@gmail.com"
__copyright__ = "Copyright 2014, Numnum"
class FBBot(object):
graph_url = "https://graph.facebook.com"
cooldown = 120 # sec
search_radius = 500 # m
def search_restaurant(self, lat, lon):
restaurants = self._search_place('restaurant', lat, lon)
steakhouses = self._search_place('steakhouse', lat, lon)
bars = self._search_place('bar', lat, lon)
return restaurants + steakhouses + bars
def _search_place(self, query, lat, lon):
params = {
'q': query,
'type': 'place',
'center': '%s,%s' % (lat, lon),
'distance': self.search_radius,
'limit': 500,
'offset': 0
}
return self.search(params)
def search(self, params):
params['access_token'] = "{app_id}|{app_key}".format(
app_key=config.get('fbAPI', 'key'),
app_id=config.get('fbAPI', 'id'))
try:
r = requests.get(
"%s/%s" % (self.graph_url, 'search'),
params=params)
resp = r.json()
if r.status_code != 200:
resp_err = resp.get('error')
err_code = resp_err.get('code')
if err_code == 4:
logger.warning(
'Reach limit, cooldown %ds' % self.cooldown)
time.sleep(self.cooldown)
return self.search(params)
else:
logger.error(resp)
return None
return resp['data']
except RequestException as err:
logger.error(err)
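# Usage sketch (coordinates and result handling are illustrative):
# bot = FBBot()
# places = bot.search_restaurant(25.0330, 121.5654)
# page = bot.fetch(places[0]['id']) if places else None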
def fetch(self, fbid):
try:
r = requests.get("%s/%s" % (self.graph_url, fbid))
resp = r.json()
if r.status_code != 200:
resp_err = resp.get('error')
err_code = resp_err.get('code')
if err_code == 4:
logger.warning(
'Reach limit, cooldown %ds' % self.cooldown)
time.sleep(self.cooldown)
return self.fetch(fbid)
elif err_code == 21:
err_msg = resp_err.get('message')
new_fbid_pt = 'page ID'
new_fbid = err_msg[
err_msg.index(new_fbid_pt)+len(new_fbid_pt)+1:
err_msg.index('.')]
logger.warning(
'Get new fbid %s for %s' % (new_fbid, fbid))
return self.fetch(new_fbid)
else:
logger.error([resp, r.url])
return None
return resp
except RequestException as err:
logger.error(err) | 34.152174 | 70 | 0.510185 | 341 | 3,142 | 4.510264 | 0.316716 | 0.03186 | 0.029259 | 0.028609 | 0.287386 | 0.287386 | 0.287386 | 0.23407 | 0.23407 | 0.196359 | 0 | 0.013306 | 0.378103 | 3,142 | 92 | 71 | 34.152174 | 0.773797 | 0.001591 | 0 | 0.349398 | 0 | 0 | 0.103349 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.096386 | 0 | 0.301205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da89128d24114037df1325dfa4587c3b0ac3e279 | 6,409 | py | Python | examples/formula_library.py | bherbruck/plend | 55271d79c983cc3b3307661833c5a7dcc11efc32 | [
"MIT"
] | 5 | 2020-02-21T09:22:58.000Z | 2021-09-07T16:39:47.000Z | examples/formula_library.py | bherbruck/plend | 55271d79c983cc3b3307661833c5a7dcc11efc32 | [
"MIT"
] | null | null | null | examples/formula_library.py | bherbruck/plend | 55271d79c983cc3b3307661833c5a7dcc11efc32 | [
"MIT"
] | 1 | 2022-01-26T20:00:47.000Z | 2022-01-26T20:00:47.000Z | """
This example shows how to statically define formulas,
add them to a formula library, optimize them, and
output the results.
"Statically in this context means we are manually
setting the attribures (min and max) for each
ingredient and nutrient rather defining them
dynamically (which is where plend really shines)
TODO: make an example with dynamic formulas
"""
from plend import Nutrient, Ingredient, Formula, FormulaLibrary
from plend.presets.poultry import *
# initialize the starter formula
starter = Formula(name='Starter', code='B1', batch_size=100)
# add ingredients to starter from presets
starter.add_ingredient(corn)
starter.add_ingredient(soybean_meal)
starter.add_ingredient(oil, maximum=10)
# add nutrients to grower from presets
starter.add_ingredient(limestone)
starter.add_ingredient(meat_meal, maximum=10)
starter.add_nutrient(energy, minimum=3010)
starter.add_nutrient(protein, minimum=24)
starter.add_nutrient(fiber)
starter.add_nutrient(calcium, minimum=1)
# initialize the grower formula
grower = Formula(name='Grower', code='B2', batch_size=100)
# add ingredients to grower from presets
grower.add_ingredient(corn)
grower.add_ingredient(soybean_meal)
grower.add_ingredient(oil, maximum=10)
# add nutrients to grower from presets
grower.add_ingredient(limestone)
grower.add_ingredient(meat_meal, maximum=10)
grower.add_nutrient(energy, minimum=3175)
grower.add_nutrient(protein, minimum=22)
grower.add_nutrient(fiber)
grower.add_nutrient(calcium, minimum=0.9)
# initialize the finisher formula
finisher = Formula(name='Finisher', code='B3', batch_size=100)
# add ingredients to finisher from presets
finisher.add_ingredient(corn)
finisher.add_ingredient(soybean_meal)
finisher.add_ingredient(oil, maximum=10)
finisher.add_ingredient(limestone)
finisher.add_ingredient(meat_meal, maximum=10)
# add nutrients to finisher from presets
finisher.add_nutrient(energy, minimum=3225)
finisher.add_nutrient(protein, minimum=20)
finisher.add_nutrient(fiber)
finisher.add_nutrient(calcium, minimum=0.85)
formulas = FormulaLibrary(name='Broiler')
formulas.add_formulas(starter, grower, finisher)
formulas.optimize()
print(formulas.to_csv())
formulas.save_csv('examples/formulas.csv')
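# A minimal sketch of defining a formula "dynamically" from a spec dict, per the
# TODO in the module docstring (the requirement values here are illustrative
# assumptions; the CSV output quoted below comes from the static library above):
dynamic_specs = {'energy': 3010, 'protein': 24, 'calcium': 1.0}
dynamic = Formula(name='Starter dynamic', code='D1', batch_size=100)
for preset in (corn, soybean_meal, limestone):
dynamic.add_ingredient(preset)
dynamic.add_ingredient(oil, maximum=10)
dynamic.add_ingredient(meat_meal, maximum=10)
dynamic.add_nutrient(energy, minimum=dynamic_specs['energy'])
dynamic.add_nutrient(protein, minimum=dynamic_specs['protein'])
dynamic.add_nutrient(fiber)
dynamic.add_nutrient(calcium, minimum=dynamic_specs['calcium'])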
"""
this will have the output (this output has been aligned for readability):
library_name ,formula_name ,formula_code ,formula_cost ,formula_status ,item_type ,item_name ,item_code ,item_amount ,item_minimum ,item_maximum
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Corn , ,58.587658 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Soybean Meal , ,30.429012 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Oil , ,0.63258515 ,0 ,10
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Limestone , ,0.35074529 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Meat Meal , ,10.0 ,0 ,10
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Energy , ,3010.0000132 ,3010 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Protein , ,24.000000110000002 ,24 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Fiber , ,2.37756181 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Calcium , ,1.0 ,1 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Corn , ,61.16353 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Soybean Meal , ,25.859865 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Oil , ,2.8656471 ,0 ,10
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Limestone , ,0.11095768 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Meat Meal , ,10.0 ,0 ,10
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Energy , ,3174.9999923 ,3175 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Protein , ,21.999999950000003 ,22 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Fiber , ,2.3048842 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Calcium , ,0.9000000014 ,0.9 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Corn , ,66.023255 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Soybean Meal , ,20.933866 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Oil , ,3.038852 ,0 ,10
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Limestone , ,0.0040261626 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Meat Meal , ,10.0 ,0 ,10
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Energy , ,3224.9999740000003 ,3225 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Protein , ,19.999999805 ,20 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Fiber , ,2.278597355 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Calcium , ,0.849999999288 ,0.85 ,
""" | 60.462264 | 157 | 0.568575 | 656 | 6,409 | 5.478659 | 0.217988 | 0.054257 | 0.040067 | 0.045075 | 0.497496 | 0.458542 | 0.393434 | 0.217585 | 0.062883 | 0.031163 | 0 | 0.164482 | 0.345452 | 6,409 | 106 | 158 | 60.462264 | 0.692253 | 0.107037 | 0 | 0 | 0 | 0 | 0.036424 | 0.013907 | 0 | 0 | 0 | 0.009434 | 0 | 1 | 0 | false | 0 | 0.054054 | 0 | 0.054054 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da89cac67e3dd9455f993529126f6ea3e387def3 | 1,278 | py | Python | sstmap/scripts/dtr_to_netcdf.py | ssabrii/SSTMap | f4f3fb72ed632f00b9f519ae9eab4a41b6c69db9 | [
"MIT"
] | 23 | 2017-12-12T17:59:26.000Z | 2022-02-01T20:19:56.000Z | sstmap/scripts/dtr_to_netcdf.py | ssabrii/SSTMap | f4f3fb72ed632f00b9f519ae9eab4a41b6c69db9 | [
"MIT"
] | 45 | 2017-05-03T14:05:19.000Z | 2022-03-02T07:28:39.000Z | sstmap/scripts/dtr_to_netcdf.py | ssabrii/SSTMap | f4f3fb72ed632f00b9f519ae9eab4a41b6c69db9 | [
"MIT"
] | 24 | 2017-04-28T19:49:56.000Z | 2021-11-05T17:57:02.000Z | from argparse import ArgumentParser
import mdtraj as md
def parse_args():
"""Parse the command line arguments and perform some validation on the
arguments
Returns
-------
args : argparse.Namespace
The namespace containing the arguments
"""
parser = ArgumentParser(
description='''Convert a DTR trajectory to NETCDF and write a PDB of the first frame.''')
parser.add_argument('-i', '--input_parm', required=True, type=str,
help='''Input topology file.''')
parser.add_argument('-t', '--input_traj', required=True, type=str,
help='''Input trajectory file.''')
parser.add_argument('-o', '--output_prefix', required=True, type=str,
help='''Prefix for all the results files.''')
args = parser.parse_args()
return args
def main():
args = parse_args()
print("Reading in trajectory ...")
traj = md.load_dtr(args.input_traj, top=args.input_parm)
print(traj)
print("Outputting NETCDF ...")
traj.save_netcdf(args.output_prefix + "_converted.nc")
print("Outputting PDB file of frame 1 ...")
traj[0].save_pdb(args.output_prefix + "_converted.pdb")
print("Done")
def entry_point():
main()
if __name__ == '__main__':
entry_point()
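# Example invocation (file names are illustrative):
#   python dtr_to_netcdf.py -i system.prmtop -t trajectory.dtr -o run1
# -> writes run1_converted.nc and run1_converted.pdb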
| 29.045455 | 74 | 0.628326 | 152 | 1,278 | 5.098684 | 0.486842 | 0.034839 | 0.065806 | 0.051613 | 0.061935 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002041 | 0.233177 | 1,278 | 43 | 75 | 29.72093 | 0.788776 | 0.126761 | 0 | 0 | 0 | 0 | 0.258986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0 | 0.222222 | 0.185185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da8e614ab7b081bdccebe5c0752328dc5769b689 | 11,303 | py | Python | fpga/lib/pcie/tb/test_dma_client_axis_sink_512_64.py | totuwei/corundum | e983ad519fb4523d0ffca32f5e436195bcfc945c | [
"BSD-2-Clause-FreeBSD"
] | 544 | 2019-08-12T03:45:32.000Z | 2022-03-19T14:17:20.000Z | fpga/lib/pcie/tb/test_dma_client_axis_sink_512_64.py | akira2009999/corundum | cdc14769c33186c6d45fcd79b95c70889febff2b | [
"BSD-2-Clause-FreeBSD"
] | 78 | 2020-08-20T20:06:33.000Z | 2022-03-30T23:44:37.000Z | fpga/lib/pcie/tb/test_dma_client_axis_sink_512_64.py | akira2009999/corundum | cdc14769c33186c6d45fcd79b95c70889febff2b | [
"BSD-2-Clause-FreeBSD"
] | 142 | 2019-07-15T04:23:23.000Z | 2022-03-29T01:25:33.000Z | #!/usr/bin/env python
"""
Copyright (c) 2019 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import dma_ram
import axis_ep
module = 'dma_client_axis_sink'
testbench = 'test_%s_512_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
SEG_COUNT = 4
SEG_DATA_WIDTH = 128
SEG_ADDR_WIDTH = 12
SEG_BE_WIDTH = int(SEG_DATA_WIDTH/8)
RAM_ADDR_WIDTH = SEG_ADDR_WIDTH+(SEG_COUNT-1).bit_length()+(SEG_BE_WIDTH-1).bit_length()
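    # With these values: 12 + 2 (select one of 4 segments) + 4 (byte lane within a
    # 16-byte segment) = 18 address bits, i.e. a 256 KB byte-addressable RAM window.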
AXIS_DATA_WIDTH = 64
AXIS_KEEP_ENABLE = (AXIS_DATA_WIDTH>8)
    AXIS_KEEP_WIDTH = AXIS_DATA_WIDTH // 8  # integer division; '/' yields a float under Python 3
AXIS_LAST_ENABLE = 1
AXIS_ID_ENABLE = 0
AXIS_ID_WIDTH = 8
AXIS_DEST_ENABLE = 0
AXIS_DEST_WIDTH = 8
AXIS_USER_ENABLE = 1
AXIS_USER_WIDTH = 1
LEN_WIDTH = 20
TAG_WIDTH = 8
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_write_desc_ram_addr = Signal(intbv(0)[RAM_ADDR_WIDTH:])
s_axis_write_desc_len = Signal(intbv(0)[LEN_WIDTH:])
s_axis_write_desc_tag = Signal(intbv(0)[TAG_WIDTH:])
s_axis_write_desc_valid = Signal(bool(0))
s_axis_write_data_tdata = Signal(intbv(0)[AXIS_DATA_WIDTH:])
s_axis_write_data_tkeep = Signal(intbv(0)[AXIS_KEEP_WIDTH:])
s_axis_write_data_tvalid = Signal(bool(0))
s_axis_write_data_tlast = Signal(bool(0))
s_axis_write_data_tid = Signal(intbv(0)[AXIS_ID_WIDTH:])
s_axis_write_data_tdest = Signal(intbv(0)[AXIS_DEST_WIDTH:])
s_axis_write_data_tuser = Signal(intbv(0)[AXIS_USER_WIDTH:])
ram_wr_cmd_ready = Signal(intbv(0)[SEG_COUNT:])
enable = Signal(bool(0))
abort = Signal(bool(0))
# Outputs
s_axis_write_desc_ready = Signal(bool(0))
m_axis_write_desc_status_len = Signal(intbv(0)[LEN_WIDTH:])
m_axis_write_desc_status_tag = Signal(intbv(0)[TAG_WIDTH:])
m_axis_write_desc_status_id = Signal(intbv(0)[AXIS_ID_WIDTH:])
m_axis_write_desc_status_dest = Signal(intbv(0)[AXIS_DEST_WIDTH:])
m_axis_write_desc_status_user = Signal(intbv(0)[AXIS_USER_WIDTH:])
m_axis_write_desc_status_valid = Signal(bool(0))
s_axis_write_data_tready = Signal(bool(0))
ram_wr_cmd_be = Signal(intbv(0)[SEG_COUNT*SEG_BE_WIDTH:])
ram_wr_cmd_addr = Signal(intbv(0)[SEG_COUNT*SEG_ADDR_WIDTH:])
ram_wr_cmd_data = Signal(intbv(0)[SEG_COUNT*SEG_DATA_WIDTH:])
ram_wr_cmd_valid = Signal(intbv(0)[SEG_COUNT:])
# PCIe DMA RAM
dma_ram_inst = dma_ram.PSDPRam(2**16)
dma_ram_pause = Signal(bool(0))
dma_ram_port0 = dma_ram_inst.create_write_ports(
clk,
ram_wr_cmd_be=ram_wr_cmd_be,
ram_wr_cmd_addr=ram_wr_cmd_addr,
ram_wr_cmd_data=ram_wr_cmd_data,
ram_wr_cmd_valid=ram_wr_cmd_valid,
ram_wr_cmd_ready=ram_wr_cmd_ready,
pause=dma_ram_pause,
name='port0'
)
# sources and sinks
write_desc_source = axis_ep.AXIStreamSource()
write_desc_source_pause = Signal(bool(False))
write_desc_source_logic = write_desc_source.create_logic(
clk,
rst,
tdata=(s_axis_write_desc_ram_addr, s_axis_write_desc_len, s_axis_write_desc_tag),
tvalid=s_axis_write_desc_valid,
tready=s_axis_write_desc_ready,
pause=write_desc_source_pause,
name='write_desc_source'
)
write_desc_status_sink = axis_ep.AXIStreamSink()
write_desc_status_sink_logic = write_desc_status_sink.create_logic(
clk,
rst,
tdata=(m_axis_write_desc_status_len, m_axis_write_desc_status_tag, m_axis_write_desc_status_id, m_axis_write_desc_status_dest, m_axis_write_desc_status_user),
tvalid=m_axis_write_desc_status_valid,
name='write_desc_status_sink'
)
write_data_source = axis_ep.AXIStreamSource()
write_data_source_pause = Signal(bool(False))
write_data_source_logic = write_data_source.create_logic(
clk,
rst,
tdata=s_axis_write_data_tdata,
tkeep=s_axis_write_data_tkeep,
tvalid=s_axis_write_data_tvalid,
tready=s_axis_write_data_tready,
tlast=s_axis_write_data_tlast,
tid=s_axis_write_data_tid,
tdest=s_axis_write_data_tdest,
tuser=s_axis_write_data_tuser,
pause=write_data_source_pause,
name='write_data_source'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_axis_write_desc_ram_addr=s_axis_write_desc_ram_addr,
s_axis_write_desc_len=s_axis_write_desc_len,
s_axis_write_desc_tag=s_axis_write_desc_tag,
s_axis_write_desc_valid=s_axis_write_desc_valid,
s_axis_write_desc_ready=s_axis_write_desc_ready,
m_axis_write_desc_status_len=m_axis_write_desc_status_len,
m_axis_write_desc_status_tag=m_axis_write_desc_status_tag,
m_axis_write_desc_status_id=m_axis_write_desc_status_id,
m_axis_write_desc_status_dest=m_axis_write_desc_status_dest,
m_axis_write_desc_status_user=m_axis_write_desc_status_user,
m_axis_write_desc_status_valid=m_axis_write_desc_status_valid,
s_axis_write_data_tdata=s_axis_write_data_tdata,
s_axis_write_data_tkeep=s_axis_write_data_tkeep,
s_axis_write_data_tvalid=s_axis_write_data_tvalid,
s_axis_write_data_tready=s_axis_write_data_tready,
s_axis_write_data_tlast=s_axis_write_data_tlast,
s_axis_write_data_tid=s_axis_write_data_tid,
s_axis_write_data_tdest=s_axis_write_data_tdest,
s_axis_write_data_tuser=s_axis_write_data_tuser,
ram_wr_cmd_be=ram_wr_cmd_be,
ram_wr_cmd_addr=ram_wr_cmd_addr,
ram_wr_cmd_data=ram_wr_cmd_data,
ram_wr_cmd_valid=ram_wr_cmd_valid,
ram_wr_cmd_ready=ram_wr_cmd_ready,
enable=enable,
abort=abort
)
@always(delay(4))
def clkgen():
clk.next = not clk
def wait_normal():
while write_desc_status_sink.empty():
yield clk.posedge
def wait_pause_ram():
while write_desc_status_sink.empty():
dma_ram_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
dma_ram_pause.next = False
yield clk.posedge
def wait_pause_source():
while write_desc_status_sink.empty():
write_data_source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
write_data_source_pause.next = False
yield clk.posedge
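    # Each stimulus in the checks below is replayed under three backpressure
    # conditions: no stalls (wait_normal), RAM write-port stalls
    # (wait_pause_ram), and AXI-stream source stalls (wait_pause_source).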
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
# testbench stimulus
cur_tag = 1
enable.next = 1
yield clk.posedge
print("test 1: write")
current_test.next = 1
addr = 0x00000000
test_data = b'\x11\x22\x33\x44'
write_desc_source.send([(addr, len(test_data), cur_tag)])
write_data_source.send(axis_ep.AXIStreamFrame(test_data, id=cur_tag))
yield write_desc_status_sink.wait(2000)
status = write_desc_status_sink.recv()
print(status)
assert status.data[0][0] == len(test_data)
assert status.data[0][1] == cur_tag
assert status.data[0][2] == cur_tag
data = dma_ram_inst.read_mem(addr, 32)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
assert dma_ram_inst.read_mem(addr, len(test_data)) == test_data
cur_tag = (cur_tag + 1) % 256
yield delay(100)
yield clk.posedge
print("test 2: various writes")
current_test.next = 2
for length in list(range(1,66))+[128]:
for offset in list(range(8,65,8))+list(range(4096-64,4096,8)):
for diff in [-16, -2, -1, 0, 1, 2, 16]:
if length+diff < 1:
continue
for wait in wait_normal, wait_pause_ram, wait_pause_source:
print("length %d, offset %d, diff %d"% (length, offset, diff))
#addr = length * 0x100000000 + offset * 0x10000 + offset
addr = offset
test_data = bytearray([x%256 for x in range(length)])
test_data2 = bytearray([x%256 for x in range(length+diff)])
dma_ram_inst.write_mem(addr & 0xffff80, b'\xaa'*(len(test_data)+256))
write_desc_source.send([(addr, len(test_data), cur_tag)])
write_data_source.send(axis_ep.AXIStreamFrame(test_data2, id=cur_tag))
yield wait()
yield clk.posedge
yield clk.posedge
status = write_desc_status_sink.recv()
print(status)
assert status.data[0][0] == min(len(test_data), len(test_data2))
assert status.data[0][1] == cur_tag
assert status.data[0][2] == cur_tag
data = dma_ram_inst.read_mem(addr&0xfffff0, 64)
for i in range(0, len(data), 16):
print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))
if len(test_data) <= len(test_data2):
assert dma_ram_inst.read_mem(addr-8, len(test_data)+16) == b'\xaa'*8+test_data+b'\xaa'*8
else:
assert dma_ram_inst.read_mem(addr-8, len(test_data2)+16) == b'\xaa'*8+test_data2+b'\xaa'*8
cur_tag = (cur_tag + 1) % 256
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| 35.211838 | 166 | 0.657348 | 1,674 | 11,303 | 4.038829 | 0.158303 | 0.101168 | 0.076912 | 0.066262 | 0.56042 | 0.4841 | 0.327171 | 0.308978 | 0.291229 | 0.221269 | 0 | 0.02679 | 0.253649 | 11,303 | 320 | 167 | 35.321875 | 0.774656 | 0.107228 | 0 | 0.244635 | 0 | 0 | 0.031867 | 0.002184 | 0 | 0 | 0.002581 | 0 | 0.038627 | 1 | 0.030043 | false | 0 | 0.017167 | 0 | 0.051502 | 0.034335 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da8e6b5d27ab3ab699761ec5dcf3eadc2360c2c0 | 5,713 | py | Python | scrape_votes.py | purrcat259/reddit-vote-grapher | 0a0f1dccee7befc6e94e856d09eb61b546b34644 | [
"MIT"
] | 1 | 2016-05-18T06:30:26.000Z | 2016-05-18T06:30:26.000Z | scrape_votes.py | purrcat259/reddit-vote-grapher | 0a0f1dccee7befc6e94e856d09eb61b546b34644 | [
"MIT"
] | null | null | null | scrape_votes.py | purrcat259/reddit-vote-grapher | 0a0f1dccee7befc6e94e856d09eb61b546b34644 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import time
import os
import csv
import praw
import OAuth2Util
from pprint import pprint
class SubmissionCSV:
def __init__(self, file_name='', csv_directory='data'):
self.file_name = file_name + '.csv'
self.file_path = os.path.join(os.getcwd(), csv_directory, self.file_name)
def run(self, data_row=None):
self.create_csv()
if data_row is not None:
self.write_row(row=data_row)
def create_csv(self):
# create the CSV if it does not exist
if not os.path.isfile(self.file_path):
with open(self.file_path, mode='w', newline='') as csvfile:
csvfile.flush()
time.sleep(1)
def write_row(self, row=None):
if row is not None:
with open(self.file_path, mode='a', newline='') as csvfile:
writer = csv.writer(csvfile, quotechar='"')
writer.writerow(row)
csvfile.flush()
class VoteScraper:
def __init__(self, user_agent='vote-grapher-v1-by-Always_SFW', subreddit='EliteDangerous', verbose=True):
self.user_agent = user_agent
self.subreddit_name = subreddit
self.verbose = verbose
self.r = None
self.o = None
self.subreddit = None
self.submission_limit = 50
self.start_time = time.time()
# holds the objects for cached submissions.
self.cached_submissions = []
def run(self):
self.connect()
while True:
print('Retrieving/Removing submissions')
self.cache_new_submissions()
self.remove_old_submissions()
self.store_submissions_data()
self.show_time_elapsed()
self.print('Pausing for 120 seconds')
time.sleep(120)
def print(self, string=''):
if self.verbose:
print(string)
def connect(self):
# initialise a connection to reddit
self.print('Initialising connection to Reddit')
try:
self.r = praw.Reddit(self.user_agent)
self.o = OAuth2Util.OAuth2Util(self.r)
# force re-validating the access token
self.o.refresh(force=True)
self.print('Successfully connected to Reddit')
except Exception as e:
print('Unable to connect to Reddit: {}'.format(e))
quit()
self.subreddit = self.r.get_subreddit(subreddit_name=self.subreddit_name)
def get_latest_submissions(self):
# self.print('Getting latest submissions')
try:
new_submissions = self.subreddit.get_new(limit=self.submission_limit)
except Exception as e:
print(e)
return []
return new_submissions
def cache_new_submissions(self):
new_submissions = self.get_latest_submissions()
# self.print('Caching new submissions')
previous_count = len(self.cached_submissions)
for submission in new_submissions:
if submission not in self.cached_submissions:
self.cached_submissions.append(submission)
self.print('{} new submissions recorded'.format(len(self.cached_submissions) - previous_count))
def remove_old_submissions(self):
# self.print('Removing old submissions')
current_time = time.time()
to_remove = []
previous_count = len(self.cached_submissions)
for submission in self.cached_submissions:
if (current_time - submission.created_utc) > (12 * 60 * 60):
# self.print('Removing Submission with ID: {} as it is older than 12 hours'.format(submission.id))
to_remove.append(submission)
# remove the old submissions from the cached submissions list
self.cached_submissions = [sub for sub in self.cached_submissions if sub not in to_remove]
self.print('{} old submissions removed'.format(previous_count - len(self.cached_submissions)))
# append '_complete' to the old submission file names
for submission in to_remove:
file_name = str(submission.id) + '.csv'
new_file_name = str(submission.id) + '_complete.csv'
path = os.path.join(os.getcwd(), 'data', file_name)
# only perform this if the file actually exists
if os.path.isfile(path):
os.rename(src=path, dst=os.path.join(os.getcwd(), 'data', new_file_name))
def store_submissions_data(self):
for i, sub in enumerate(self.cached_submissions):
try:
sub.refresh()
ratio = self.r.get_submission(sub.permalink).upvote_ratio
except Exception as e:
print(e)
continue
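            # Recover ups/downs from score and upvote_ratio (assuming Reddit's
            # definitions score = ups - downs and ratio = ups / (ups + downs)):
            # substituting downs = ups - score gives ratio = ups / (2*ups - score),
            # hence ups = ratio*score / (2*ratio - 1); ratio == 0.5 is special-cased
            # to avoid division by zero.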
ups = int(round((ratio*sub.score)/(2*ratio - 1)) if ratio != 0.5 else round(sub.score/2))
downs = ups - sub.score
self.print('[{}] ID: {} S/U/D: {}/{}/{} Ratio: {} Age: {} hours Link: {}'.format(
i,
sub.id,
sub.score,
ups,
downs,
ratio,
abs(round((time.time() - sub.created_utc) / (60 * 60), 1)),
sub.short_link))
subcsv = SubmissionCSV(file_name=sub.id)
subcsv.run(data_row=[time.time(), sub.score, ups, downs, ratio])
time.sleep(2)
def show_time_elapsed(self):
# convert to hours
time_elapsed = (time.time() - self.start_time) / (60 * 60)
self.print('{} hours passed since start of script'.format(round(time_elapsed, 1)))
def main():
v = VoteScraper()
v.run()
if __name__ == '__main__':
main()
| 37.585526 | 114 | 0.593033 | 688 | 5,713 | 4.765988 | 0.252907 | 0.067399 | 0.070448 | 0.029277 | 0.165294 | 0.093626 | 0.031717 | 0.031717 | 0.031717 | 0 | 0 | 0.009512 | 0.300718 | 5,713 | 151 | 115 | 37.834437 | 0.811264 | 0.097322 | 0 | 0.10084 | 0 | 0.008403 | 0.075233 | 0.005638 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.008403 | 0.05042 | 0 | 0.201681 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da9203437dccc2b66c4d623067b94d8a0a97c3de | 3,488 | py | Python | DeleteBook.py | saurabhmaurya45/library-management-system | 2e489728068cca87ed58f493ac2524b6586f66cf | [
"Apache-2.0"
] | null | null | null | DeleteBook.py | saurabhmaurya45/library-management-system | 2e489728068cca87ed58f493ac2524b6586f66cf | [
"Apache-2.0"
] | null | null | null | DeleteBook.py | saurabhmaurya45/library-management-system | 2e489728068cca87ed58f493ac2524b6586f66cf | [
"Apache-2.0"
] | null | null | null | from tkinter import *
import pymysql as ms
from tkinter import messagebox
# Add your own database name and password here to reflect in the code
mypass = "saurabh"
mydatabase = "library"
con = ms.connect(host="localhost", user="root", password=mypass, database=mydatabase)
cur = con.cursor()
# Enter Table Names here
bookTable = "books" # Book Table
def deleteBook():
    bid = en1.get()
    try:
        book_id = int(bid)
    except ValueError:
        messagebox.showinfo('Error', 'Invalid Book ID, must be number')
        print(bid)
        return
    cur.execute('select Book_Id from books')
    existing_ids = [row[0] for row in cur]
    if book_id in existing_ids:
        # parameterised query avoids SQL injection from the entry widget
        cur.execute("delete from " + bookTable + " where Book_Id = %s", (book_id,))
        con.commit()
        lb6 = Label(labelFrame, text="Successfully deleted book ", bg='black', fg='white',
                    font=("times new roman", 18, "bold"))
        lb6.place(relx=0.3, rely=0.75)
    else:
        lb6 = Label(labelFrame, text="Book deletion failed ", bg='black', fg='white',
                    font=("times new roman", 18, "bold"))
        lb6.place(relx=0.3, rely=0.75)
def delete():
global en1, con, cur, bookTable, root, labelFrame
root = Tk()
root.title("Library")
root.minsize(width=400, height=400)
root.geometry("1350x700+0+0")
root.config(bg='#0099cc')
title = Label(root, text="Welcome to Sterling's Library", bd=15, relief=GROOVE,
font=("algerian", 40, "bold"), bg="red", fg="white")
title.pack(side=TOP, fill=X)
labelFrame = Frame(root, bg='#333945', bd=10, relief=GROOVE)
labelFrame.place(relx=0.1, rely=0.35, relwidth=0.8, relheight=0.35)
headingFrame1 = Frame(root, bg="blue", bd=10, relief=GROOVE)
headingFrame1.place(relx=0.25, rely=0.15, relwidth=0.60, relheight=0.13)
headingLabel = Label(headingFrame1, text="DELETE BOOK", bg='blue', fg='white',
font=("bookman old style", 34, "bold"))
headingLabel.place(relx=0.25, rely=0.15, relwidth=0.5, relheight=0.5)
# Book ID to Delete
lb2 = Label(labelFrame, text="Book ID : ", bg='black', fg='white', font=("bookman old style", 20, "bold"))
lb2.place(relx=0.1, rely=0.33)
en1 = Entry(labelFrame)
en1.place(relx=0.3, rely=0.35, relwidth=0.62, relheight=0.15)
# Submit Button
SubmitBtn = Button(root, text="SUBMIT", bg='#d1ccc0', fg='black', font=("times new roman", 18, "bold"),
relief=GROOVE, bd=10, command=deleteBook)
SubmitBtn.place(relx=0.28, rely=0.75, relwidth=0.18, relheight=0.08)
quitBtn = Button(root, text="Quit", bg='#f7f1e3', fg='black', font=("times new roman", 18, "bold"), relief=GROOVE,
bd=10, command=root.quit)
quitBtn.place(relx=0.53, rely=0.75, relwidth=0.18, relheight=0.08)
root.mainloop()
| 35.591837 | 119 | 0.556479 | 443 | 3,488 | 4.376975 | 0.363431 | 0.041774 | 0.046416 | 0.03507 | 0.240846 | 0.22589 | 0.174317 | 0.174317 | 0.174317 | 0.114492 | 0 | 0.062041 | 0.297592 | 3,488 | 97 | 120 | 35.958763 | 0.729388 | 0.076261 | 0 | 0.149254 | 0 | 0 | 0.15061 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0.029851 | 0.044776 | 0 | 0.074627 | 0.104478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da920dcd51ae362c5f26a3360b65c16937f31fe7 | 8,474 | py | Python | asdf/extension.py | larrybradley/asdf | b1e0fe6ab7aa319d5939ec2aa78d23822abf6bd4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | asdf/extension.py | larrybradley/asdf | b1e0fe6ab7aa319d5939ec2aa78d23822abf6bd4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | asdf/extension.py | larrybradley/asdf | b1e0fe6ab7aa319d5939ec2aa78d23822abf6bd4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import abc
import warnings
from pkg_resources import iter_entry_points
import six
import importlib
from . import types
from . import resolver
from .util import get_class_name
from .type_index import AsdfTypeIndex
from .version import version as asdf_version
from .exceptions import AsdfDeprecationWarning
__all__ = ['AsdfExtension', 'AsdfExtensionList']
ASDF_TEST_BUILD_ENV = 'ASDF_TEST_BUILD'
@six.add_metaclass(abc.ABCMeta)
class AsdfExtension:
"""
Abstract base class defining an extension to ASDF.
"""
@classmethod
def __subclasshook__(cls, C):
if cls is AsdfExtension:
return (hasattr(C, 'types') and
hasattr(C, 'tag_mapping') and
hasattr(C, 'url_mapping'))
return NotImplemented
@abc.abstractproperty
def types(self):
"""
A list of `asdf.CustomType` subclasses that describe how to store
custom objects to and from ASDF.
"""
pass
@abc.abstractproperty
def tag_mapping(self):
"""
A list of 2-tuples or callables mapping YAML tag prefixes to JSON Schema
URL prefixes.
For each entry:
- If a 2-tuple, the first part of the tuple is a YAML tag
          prefix to match. The second part is a string, in which the
          following are available as Python formatting tokens:
- ``{tag}``: the complete YAML tag.
- ``{tag_suffix}``: the part of the YAML tag after the
matched prefix.
- ``{tag_prefix}``: the matched YAML tag prefix.
        - If a callable, it is passed the entire YAML tag and must return
          the entire JSON schema URL if it matches; otherwise, return `None`.
Note that while JSON Schema URLs uniquely define a JSON
Schema, they do not have to actually exist on an HTTP server
and be fetchable (much like XML namespaces).
For example, to match all YAML tags with the
        ``tag:nowhere.org:custom`` prefix to the
``http://nowhere.org/schemas/custom/`` URL prefix::
return [('tag:nowhere.org:custom/',
'http://nowhere.org/schemas/custom/{tag_suffix}')]
"""
pass
@abc.abstractproperty
def url_mapping(self):
"""
A list of 2-tuples or callables mapping JSON Schema URLs to
other URLs. This is useful if the JSON Schemas are not
actually fetchable at their corresponding URLs but are on the
local filesystem, or, to save bandwidth, we have a copy of
fetchable schemas on the local filesystem. If neither is
desirable, it may simply be the empty list.
For each entry:
- If a 2-tuple, the first part is a URL prefix to match. The
second part is a string, where the following are available
as Python formatting tokens:
- ``{url}``: The entire JSON schema URL
- ``{url_prefix}``: The matched URL prefix
- ``{url_suffix}``: The part of the URL after the prefix.
- If a callable, it is passed the entire JSON Schema URL and
must return a resolvable URL pointing to the schema content.
If it doesn't match, should return `None`.
For example, to map a remote HTTP URL prefix to files installed
        as data alongside a Python module::
return [('http://nowhere.org/schemas/custom/1.0.0/',
asdf.util.filepath_to_url(
os.path.join(SCHEMA_PATH, 'stsci.edu')) +
'/{url_suffix}.yaml'
)]
"""
pass
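# A minimal concrete extension, for illustration only (CustomType and
# SCHEMA_PATH are assumed names, not part of this package):
#
#     class CustomExtension(AsdfExtension):
#         @property
#         def types(self):
#             return [CustomType]
#         @property
#         def tag_mapping(self):
#             return [('tag:nowhere.org:custom/',
#                      'http://nowhere.org/schemas/custom/{tag_suffix}')]
#         @property
#         def url_mapping(self):
#             return [('http://nowhere.org/schemas/custom/',
#                      asdf.util.filepath_to_url(SCHEMA_PATH) + '/{url_suffix}.yaml')]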
class AsdfExtensionList:
"""
Manage a set of extensions that are in effect.
"""
def __init__(self, extensions):
tag_mapping = []
url_mapping = []
validators = {}
self._type_index = AsdfTypeIndex()
for extension in extensions:
if not isinstance(extension, AsdfExtension):
raise TypeError(
"Extension must implement asdf.types.AsdfExtension "
"interface")
tag_mapping.extend(extension.tag_mapping)
url_mapping.extend(extension.url_mapping)
for typ in extension.types:
self._type_index.add_type(typ, extension)
validators.update(typ.validators)
for sibling in typ.versioned_siblings:
self._type_index.add_type(sibling, extension)
validators.update(sibling.validators)
self._tag_mapping = resolver.Resolver(tag_mapping, 'tag')
self._url_mapping = resolver.Resolver(url_mapping, 'url')
self._validators = validators
@property
def tag_to_schema_resolver(self):
"""Deprecated. Use `tag_mapping` instead"""
warnings.warn(
"The 'tag_to_schema_resolver' property is deprecated. Use "
"'tag_mapping' instead.",
AsdfDeprecationWarning)
return self._tag_mapping
@property
def tag_mapping(self):
return self._tag_mapping
@property
def url_mapping(self):
return self._url_mapping
@property
def type_index(self):
return self._type_index
@property
def validators(self):
return self._validators
class BuiltinExtension:
"""
This is the "extension" to ASDF that includes all the built-in
tags. Even though it's not really an extension and it's always
available, it's built in the same way as an extension.
"""
@property
def types(self):
return types._all_asdftypes
@property
def tag_mapping(self):
return resolver.DEFAULT_TAG_TO_URL_MAPPING
@property
def url_mapping(self):
return resolver.DEFAULT_URL_MAPPING
class _DefaultExtensions:
def __init__(self):
self._extensions = []
self._extension_list = None
self._package_metadata = {}
def _load_installed_extensions(self, group='asdf_extensions'):
for entry_point in iter_entry_points(group=group):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', category=AsdfDeprecationWarning)
ext = entry_point.load()
if not issubclass(ext, AsdfExtension):
warnings.warn("Found entry point {}, from {} but it is not a "
"subclass of AsdfExtension, as expected. It is "
"being ignored.".format(ext, entry_point.dist))
continue
dist = entry_point.dist
name = get_class_name(ext, instance=False)
self._package_metadata[name] = (dist.project_name, dist.version)
self._extensions.append(ext())
for warning in w:
warnings.warn('{} (from {})'.format(warning.message, name),
AsdfDeprecationWarning)
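    # Third-party packages register extensions under the 'asdf_extensions'
    # entry-point group, e.g. (illustrative setup.py snippet):
    #     entry_points={'asdf_extensions': [
    #         'custom = mypackage.extension:CustomExtension']}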
@property
def extensions(self):
# This helps avoid a circular dependency with external packages
if not self._extensions:
# If this environment variable is defined, load the default
# extension. This allows the package to be tested without being
# installed (e.g. for builds on Debian).
if os.environ.get(ASDF_TEST_BUILD_ENV):
# Fake the extension metadata
name = get_class_name(BuiltinExtension, instance=False)
self._package_metadata[name] = ('asdf', asdf_version)
self._extensions.append(BuiltinExtension())
self._load_installed_extensions()
return self._extensions
@property
def extension_list(self):
if self._extension_list is None:
self._extension_list = AsdfExtensionList(self.extensions)
return self._extension_list
@property
def package_metadata(self):
return self._package_metadata
def reset(self):
"""This will be used primarily for testing purposes."""
self._extensions = []
self._extension_list = None
self._package_metadata = {}
def resolver(self, uri):
tag_mapping = self.extension_list.tag_mapping
url_mapping = self.extension_list.url_mapping
return url_mapping(tag_mapping(uri))
default_extensions = _DefaultExtensions()
| 33.362205 | 80 | 0.623554 | 1,006 | 8,474 | 5.097416 | 0.269384 | 0.031201 | 0.023206 | 0.006435 | 0.205733 | 0.146451 | 0.115055 | 0.100234 | 0.081513 | 0.067473 | 0 | 0.001516 | 0.299504 | 8,474 | 253 | 81 | 33.494071 | 0.862365 | 0.345291 | 0 | 0.267176 | 0 | 0 | 0.070767 | 0.009462 | 0 | 0 | 0 | 0 | 0 | 1 | 0.152672 | false | 0.022901 | 0.091603 | 0.061069 | 0.381679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da923abab5b7e2cb6e8f37c23f2fa4cc9504aff5 | 2,153 | py | Python | source/setup.py | Sylvain-Barde/mic-toolbox | 10d9d930a1a359aaa831f2f917eff357a3d5282e | [
"BSD-3-Clause"
] | 4 | 2019-06-28T20:36:33.000Z | 2022-01-04T21:49:52.000Z | source/setup.py | Sylvain-Barde/mic-toolbox | 10d9d930a1a359aaa831f2f917eff357a3d5282e | [
"BSD-3-Clause"
] | 1 | 2019-06-27T14:52:52.000Z | 2019-07-04T14:14:14.000Z | source/setup.py | Sylvain-Barde/mic-toolbox | 10d9d930a1a359aaa831f2f917eff357a3d5282e | [
"BSD-3-Clause"
] | 1 | 2019-06-27T13:33:42.000Z | 2019-06-27T13:33:42.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 14:48:07 2018
@author: sb636
"""
import os
import sys
from setuptools import setup, Extension, find_packages
from distutils.errors import DistutilsModuleError
# Check for cython installation
try:
from Cython.Distutils import build_ext as _build_ext
HAVE_CYTHON = True
except ImportError:
# As a fallback import the standard setuptools build_ext, and raise
# error about Cython later
from setuptools.command.build_ext import build_ext as _build_ext
HAVE_CYTHON = False
def scandir(dir, files=None):
    # avoid a mutable default argument: a shared list would leak entries
    # between independent calls
    if files is None:
        files = []
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        if os.path.isfile(path) and path.endswith(".pyx"):
            files.append(path.replace(os.path.sep, ".")[:-4])
        elif os.path.isdir(path):
            scandir(path, files)
    return files
def makeExtension(extName):
extPath = extName.replace(".", os.path.sep)+".pyx"
return Extension(extName, [extPath])
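# e.g. a file "mic/foo.pyx" (illustrative) is scanned as the module name
# "mic.foo", which makeExtension maps back to the source path "mic/foo.pyx".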
class build_ext(_build_ext):
def initialize_options(self):
if not HAVE_CYTHON:
raise DistutilsModuleError(
'Cython is required to compile the package.\n'
'Cython can be obtained at www.cython.org or installed with '
'conda or pip.')
super(build_ext, self).initialize_options()
def finalize_options(self):
try:
import numpy
except ImportError:
            raise DistutilsModuleError('Building extension modules requires numpy')
for ext in self.distribution.ext_modules:
ext.include_dirs.extend([numpy.get_include(), '.'])
ext.cython_directives = {
"cdivision": True,
"cdivision_warnings": False
}
super(build_ext, self).finalize_options()
setup(
name="mic-toolbox",
version="0.1.0a1",
packages=find_packages(),
ext_modules=[makeExtension(name) for name in scandir('mic')],
cmdclass={'build_ext': build_ext},
options = {'build_ext': {'inplace': True, 'force': True}}
)
| 29.902778 | 85 | 0.625639 | 258 | 2,153 | 5.100775 | 0.472868 | 0.079027 | 0.021277 | 0.024316 | 0.051672 | 0.051672 | 0.051672 | 0.051672 | 0 | 0 | 0 | 0.013325 | 0.267998 | 2,153 | 71 | 86 | 30.323944 | 0.821701 | 0.099861 | 0 | 0.081633 | 0 | 0 | 0.132615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.183673 | 0 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da92ca41103ff60b1a50e24d1900c7aae0620a32 | 4,049 | py | Python | api-reconstruction/ipython_analysis.py | eurecom-s3/syscall2api | 2f2c72c759b0fd803fe1302c3b6717cda1906916 | [
"MIT"
] | 10 | 2019-09-24T13:36:15.000Z | 2021-11-01T02:40:10.000Z | api-reconstruction/ipython_analysis.py | eurecom-s3/syscall2api | 2f2c72c759b0fd803fe1302c3b6717cda1906916 | [
"MIT"
] | 2 | 2020-10-19T11:51:08.000Z | 2021-04-17T01:08:23.000Z | api-reconstruction/ipython_analysis.py | eurecom-s3/syscall2api | 2f2c72c759b0fd803fe1302c3b6717cda1906916 | [
"MIT"
] | null | null | null | #!/usr/local/bin/ipython3 -i
import sys
from analysis import *
import analysis.classes as classes
import nwalign as nw
kb = {}
apis = {}
syscalls = {}
regexes = {}
models = {}
models2 = {}
kb_file = 'kb_no_empties.pickle'
regex_file = 'new_regex.pickle'
models_file = 'models.pickle'
models2_file = 'models2.pickle'
symbols_file = 'symbols.pickle'
leaf_models = {}
def first_run():
global kb
global apis
global syscalls
global regexes
global models
global symbols_file
kb_file = "pruned_db.pickle"
if not Path(kb_file).is_file():
print("Error: No KB file found", file=sys.stderr)
sys.exit(1)
with open(kb_file, "rb") as pf:
d = pickle.load(pf)
syscalls = pickle.load(pf)
d = prune_kb_from_signals(d)
print("Finding leaf apis")
leaves = find_leaves(d)
print("Finding strong polymorph apis")
polymorph = find_polymorph(d)
print("Finding empty apis")
empties = find_empties(d)
print("Finding 0Sys apis")
no_sys = find_no_syscall_apis(d)
print("Finding 0IndSys apis")
no_ind_sys = find_no_indirect_sys(d)
apis = set(d.keys())
print("Finding no-leaf apis")
no_leaves = apis - leaves
print("Finding weak monomorph apis")
monomorph = apis - polymorph
print("Finding 1+Sys apis")
    sys_apis = apis - no_sys  # renamed: assigning to 'sys' would make it local for the whole function, breaking sys.exit above
print("Finding 1+IndSys apis")
ind_sys = apis - no_ind_sys
print("Finding weak polymorph")
weak_polymorph = find_weak_polymorph(d)
print("Finding strong monomorph apis")
strong_monomorph = apis - weak_polymorph
print("Building models for strong monomorph apis")
precise_models = build_precise_models(d, strong_monomorph)
print("Building models for implicit monomorph apis")
implicit_precise_models = find_implicit_monomorph_models(d, precise_models)
print("Finding empty/non-empty models")
empty_models = {api for api, model in implicit_precise_models.items()
if len(model) == 0}
non_empty_models = {api: model
for api, model in implicit_precise_models.items()
if api not in empty_models}
strong_monomorph |= set(implicit_precise_models.keys())
# checks that no_ind_sys is a subset of no_sys
check_0sys(no_sys, no_ind_sys)
check_polymorph(weak_polymorph, polymorph)
check_empties_have_precise_model(empties, precise_models)
check_implicit_precise_models(implicit_precise_models, precise_models)
check_empties_have_empty_model(empties, empty_models)
kb = prune_kb_from_empties(d, empty_models)
with open('kb_no_empties.pickle', 'wb') as pf:
pickle.dump(kb, pf)
pickle.dump(syscalls, pf)
with open(symbols_file, 'wb') as pf:
pickle.dump(set(kb.keys()), pf)
pickle.dump(syscalls, pf)
def load_kb_no_empties():
global kb
global syscalls
global apis
global regexes
global regexes_test
global test_results
global models
global kb_file
global regex_file
global models_file
global symbols_file
global leaf_models
global models2
print("Loading KB")
with open(kb_file, "rb") as pf:
sys.modules['classes'] = classes
        kb = pickle.load(pf)
syscalls = pickle.load(pf)
print("Loading symbols")
apis, syscalls = load_symbols(symbols_file)
kb = prune_kb_from_signals(kb)
# print("Loading regexes")
# f = open(regex_file, 'rb')
# regexes_test = pickle.load(f)
# f.close()
# regexes, test_results = regexes_split_test_results(regexes_test)
print("Loading generic models")
models = load_models(models_file)
print("Loading not-so-generic models")
models2 = load_models(models2_file)
symbols_generator(apis | syscalls.keys())
leaf_models = find_leaves_models(models, syscalls)
if __name__ == '__main__':
if (not Path(kb_file).is_file()
or not Path(models_file).is_file()
or not Path(symbols_file).is_file()):
first_run()
else:
load_kb_no_empties()
| 28.716312 | 79 | 0.678686 | 551 | 4,049 | 4.727768 | 0.185118 | 0.055278 | 0.029942 | 0.013052 | 0.127447 | 0.098273 | 0.087524 | 0.031478 | 0.031478 | 0 | 0 | 0.004456 | 0.224006 | 4,049 | 140 | 80 | 28.921429 | 0.824634 | 0.056557 | 0 | 0.160714 | 0 | 0 | 0.153987 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.035714 | 0 | 0.053571 | 0.169643 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f0b6155221bd21f39e5a25133a8324a5286c72 | 3,785 | py | Python | accessdata/api/extensions.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | 2 | 2021-12-10T10:20:08.000Z | 2022-01-06T11:15:43.000Z | accessdata/api/extensions.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | null | null | null | accessdata/api/extensions.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | null | null | null | ## /api/extensions.py
"""
Maintains the API endpoint URI extensions.
"""
## Declaring __all__
__all__ = (
"status_check_ext",
"site_server_status_check_ext",
"case_create_ext",
"case_list_ext",
"case_create_portable_ext",
"evidence_list_ext",
"evidence_processed_list_ext",
"evidence_process_ext",
"object_page_list_ext",
"label_create_ext"
"label_list_ext"
"label_objects_job_ext"
"label_objects_list_ext"
"label_objects_count_ext"
"label_objects_sync_ext"
"search_report_ext",
"export_natives_ext",
"agent_push_ext",
"agent_collection_ext",
"agent_disk_acquisition_ext",
"agent_memory_acquisition_ext",
"agent_remediation_ext",
"agent_software_inventory_ext",
"agent_volatile_analysis_ext",
"agent_volatile_import_ext",
"job_status_ext",
"attribute_list_ext",
"attribute_list_by_case_ext",
"child_file_categories_ext",
"processing_case_ext",
"server_setting_ext",
"yara_ioc_rule_import_ext",
)
## Predefined Constants
DELETE = "delete"
GET = "get"
PATCH = "patch"
POST = "post"
PUT = "put"
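# Each extension below is a (HTTP method, URI template) pair. A hypothetical
# consumer (the `session` and `base_url` names are illustrative assumptions):
#
#     method, uri = job_status_ext
#     url = base_url + "/" + uri.format(caseid=42, jobid=7)
#     response = getattr(session, method)(url)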
## Status Extensions
base_ext = "api/v2/enterpriseapi"
status_check_ext = GET, base_ext + "/statuscheck"
site_server_status_check_ext = GET, base_ext + "/agent/getsiteserverstatus"
## Case Management Extensions
case_create_ext = POST, base_ext + "/core/createcase"
case_list_ext = GET, base_ext + "/core/getcaselist"
case_create_portable_ext = POST, base_ext + "/core/{caseid}/createportablecase"
## Evidence Management Extensions
evidence_list_ext = GET, base_ext + "/core/{caseid}/getevidencelist"
evidence_processed_list_ext = GET, base_ext + "/core/{caseid}/getprocessedevidencelist"
evidence_process_ext = POST, base_ext + "/core/{caseid}/processdata"
## Object Management Extensions
object_page_list_ext = POST, base_ext + "/core/{caseid}/getobjectlist/{pagenumber}/{pagesize}"
## Label Management Extensions
label_create_ext = POST, base_ext + "/core/{caseid}/createlabel"
label_list_ext = GET, base_ext + "/core/{caseid}/getlabellist"
label_objects_job_ext = POST, base_ext + "/jobs/{caseid}/labelobjects"
label_objects_list_ext = GET, base_ext + "/core/cases/{caseid}/label/{labelid}/evidenceobjects"
label_objects_count_ext = GET, base_ext + "/core/cases/{caseid}/label/{labelid}/objectscount"
label_objects_sync_ext = POST, base_ext + "/{caseid}/labelobjectssync"
## Search Extensions
search_report_ext = POST, base_ext + "/jobs/{caseid}/createsearchcountreport"
## Export Extenstions
export_natives_ext = POST, base_ext + "/jobs/{caseid}/dumpnativeobjects"
## Agent Management Extensions
agent_push_ext = POST, base_ext + "/agent/{caseid}/runagentpush"
agent_collection_ext = POST, base_ext + "/agent/{caseid}/collectiononagent"
agent_disk_acquisition_ext = POST, base_ext + "/agent/{caseid}/diskacquistion"
agent_memory_acquisition_ext = POST, base_ext + "/agent/{caseid}/memoryacquistion"
agent_remediation_ext = POST, base_ext + "/agent/{caseid}/remediate"
agent_software_inventory_ext = POST, base_ext + "/agent/{caseid}/softwareinventory"
agent_volatile_analysis_ext = POST, base_ext + "/agent/{caseid}/volatile"
agent_volatile_import_ext = GET, base_ext + "/agent/{caseid}/importvolatile/{jobid}"
## Generic Job Extensions
job_status_ext = GET, base_ext + "/core/{caseid}/getjobstatus/{jobid}"
## Utility Extensions
attribute_list_ext = GET, base_ext + "/core/getallattributes"
attribute_list_by_case_ext = GET, base_ext + "/core/{caseid}/getallattributesbycaseid"
child_file_categories_ext = GET, base_ext + "/core/getchildrenfilecategories"
processing_case_ext = GET, base_ext + "/processingcaseid"
server_setting_ext = GET, base_ext + "/core/getserversetting/{setting}"
yara_ioc_rule_import_ext = POST, base_ext + "/agent/importiocandyara" | 33.495575 | 97 | 0.763804 | 476 | 3,785 | 5.638655 | 0.22479 | 0.086066 | 0.069672 | 0.088674 | 0.335693 | 0.259314 | 0.086811 | 0.029806 | 0.029806 | 0 | 0 | 0.000298 | 0.113606 | 3,785 | 113 | 98 | 33.495575 | 0.799702 | 0.090092 | 0 | 0 | 0 | 0 | 0.491501 | 0.382474 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f0f93ca79ae51931ef205e9c059a600e80445c | 1,982 | py | Python | evaluators/dialog/state/distinct.py | kaniblu/vhda | 35941097ef552568c29f66cc55d8ce1927f34978 | [
"MIT"
] | 3 | 2021-01-12T05:43:20.000Z | 2021-03-05T17:03:06.000Z | evaluators/dialog/state/distinct.py | kaniblu/vhda | 35941097ef552568c29f66cc55d8ce1927f34978 | [
"MIT"
] | null | null | null | evaluators/dialog/state/distinct.py | kaniblu/vhda | 35941097ef552568c29f66cc55d8ce1927f34978 | [
"MIT"
] | null | null | null | __all__ = ["DistinctStateEvaluator"]
from dataclasses import dataclass
from typing import Sequence, Optional
import torch
import utils
from utils import TensorMap
from datasets import VocabSet
from ...evaluator import DialogEvaluator
@dataclass
class DistinctStateEvaluator(DialogEvaluator):
vocabs: VocabSet
_values: dict = utils.private_field(default_factory=dict)
def reset(self):
self._values.clear()
@property
def speakers(self):
return set(spkr for spkr in self.vocabs.speaker.f2i if spkr != "<unk>")
@staticmethod
def compute_distinct(tokens):
if len(tokens) == 0:
return torch.tensor(0.0)
return torch.tensor(len(set(tokens)) / len(tokens))
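    # e.g. compute_distinct(["a", "b", "a"]) -> tensor(0.6667): 2 unique / 3 tokens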
    def compute(self, samples: Sequence, spkr=None):
        # The original body referenced an undefined `self.ngrams` and passed an
        # extra argument to compute_distinct; this is a minimal repair that
        # returns one distinct-token ratio per (optionally speaker-filtered) turn.
        return [self.compute_distinct(turn.text)
                for sample in samples for turn in sample.output.turns
                if spkr is None or turn.speaker == spkr]
def update(self, samples: Sequence) -> Optional[TensorMap]:
for sample in samples:
asvs = [asv for turn in sample.output if turn.speaker != "<unk>"
for asv in turn.state]
            # filter each speaker's own turns (the original compared against
            # "<unk>", which gave every speaker the identical list)
            spkr_asvs = {spkr: [asv for turn in sample.output
                                if turn.speaker == spkr
                                for asv in turn.state]
                         for spkr in self.speakers}
stats = {"dist-a": self.compute_distinct(asvs)}
stats.update({
f"dist-a-{spkr}": self.compute_distinct(spkr_asvs[spkr])
for spkr in self.speakers
})
for k, v in stats.items():
if k not in self._values:
self._values[k] = list()
self._values[k].append(v.item())
return
def get(self) -> Optional[TensorMap]:
return {k: torch.tensor(v).mean() for k, v in self._values.items()}
| 33.59322 | 79 | 0.584258 | 239 | 1,982 | 4.769874 | 0.309623 | 0.031579 | 0.023684 | 0.034211 | 0.173684 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0 | 0.002952 | 0.316347 | 1,982 | 58 | 80 | 34.172414 | 0.838376 | 0 | 0 | 0.042553 | 0 | 0 | 0.028254 | 0.0111 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.148936 | 0.06383 | 0.468085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f2f95568be402c343f83e95bd816466c4a6dd1 | 1,788 | py | Python | src/dataset/manually_labeled_bases.py | yullidias/AutomaticIronyDetection | 3297ddc4ecc97e840b00df4ba4f9e6b8e710fdb9 | [
"MIT"
] | null | null | null | src/dataset/manually_labeled_bases.py | yullidias/AutomaticIronyDetection | 3297ddc4ecc97e840b00df4ba4f9e6b8e710fdb9 | [
"MIT"
] | 1 | 2020-12-05T14:22:03.000Z | 2020-12-05T14:22:03.000Z | src/dataset/manually_labeled_bases.py | yullidias/AutomaticIronyDetection | 3297ddc4ecc97e840b00df4ba4f9e6b8e710fdb9 | [
"MIT"
] | null | null | null | import src.utils.constants as cns
from src.utils.files import write_list
import pandas as pd
import glob
import os
def read_sheets():
manually_labeled_df = pd.DataFrame()
for sheet in glob.glob(cns.PATH_LABELED + '*'):
manually_labeled_df = manually_labeled_df.append(
pd.read_excel(sheet, index_col=0), ignore_index=True)
return manually_labeled_df
def rename_columns(dataset):
return dataset.rename(columns={
"pathOriginal": "path_ask",
"tweet 'Pergunta'": "reply_response_tweet",
"pathTweet": "id",
"tweet a ser avaliado": "tweet",
"rotulo": "label"
})
def parser_label(label):
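    # Labels in the source spreadsheets are Portuguese:
    # "Irônico" = "Ironic", "Não irônico" = "Not ironic"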
if label == "Irônico":
return cns.IRONIC_LABEL
elif label == "Não irônico":
return cns.NOT_IRONIC_LABEL
else:
return cns.DONT_KNOW_LABLE
def update_label(df, col):
df[col] = df[col].apply(parser_label)
def path_to_id(df, col):
df[col] = df[col].apply(lambda x: os.path.basename(x)
.split('.json')[0])
def get_by_label(df, label):
return df[df["label"] == label]
def generate_manually_bases():
labled_df = read_sheets()
path_to_id(labled_df, "pathTweet")
labled_df = rename_columns(labled_df)
labled_df = labled_df[["id", "label"]]
update_label(labled_df, "label")
print("Generate base manually labeled as ironic ...")
write_list(cns.B_M_IRONIC,
get_by_label(labled_df, cns.IRONIC_LABEL)["id"].to_list())
print("Generate base manually labeled as not ironic ...")
write_list(cns.B_M_NOT_IRONIC,
get_by_label(labled_df, cns.NOT_IRONIC_LABEL)["id"].to_list())
return labled_df
if __name__ == "__main__":
generate_manually_bases()
| 26.294118 | 77 | 0.644295 | 243 | 1,788 | 4.440329 | 0.325103 | 0.074143 | 0.063021 | 0.037071 | 0.222428 | 0.18721 | 0.087118 | 0 | 0 | 0 | 0 | 0.001461 | 0.23434 | 1,788 | 67 | 78 | 26.686567 | 0.786706 | 0 | 0 | 0 | 0 | 0 | 0.143736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.104167 | 0.041667 | 0.395833 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f4d90e8a8de6335b4d40090aa8cb9b83b7e850 | 871 | py | Python | Larry/preprocess.py | NCBI-Hackathons/ClusterDuck | 1d5478500dffea973f96affd969783278193aa8a | [
"MIT"
] | 7 | 2019-02-19T15:10:24.000Z | 2020-05-31T00:41:13.000Z | Larry/preprocess.py | NCBI-Hackathons/ClusterDuck | 1d5478500dffea973f96affd969783278193aa8a | [
"MIT"
] | 11 | 2018-03-21T20:01:32.000Z | 2022-03-11T23:19:40.000Z | Larry/preprocess.py | NCBI-Hackathons/DiseaseClusters | 1d5478500dffea973f96affd969783278193aa8a | [
"MIT"
] | 3 | 2018-03-19T13:14:23.000Z | 2018-03-20T14:13:38.000Z | from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
STOPWORDS = set(stopwords.words('english'))
# Instantiate lemmatizer
WNL = WordNetLemmatizer()
def preprocess(abstract, keywords=None):
"""
Convert an abstract to word tokens. This is done by lowering the case
of the text, tokenizing the text, removing english stopwords and
punctuation,and finally lemmatizing the words.
Args:
abstract: (str)
Return:
str
"""
# Lowercase all words
abstract = abstract.lower()
# tokenize words, remove punctuation
tokenizer = RegexpTokenizer(r'\w[\w-]+')
tokens = tokenizer.tokenize(abstract)
# Remove stopwords and lemmatize tokens
words = [WNL.lemmatize(word) for word in tokens if word not in STOPWORDS]
return words
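# Illustrative example (actual output depends on the installed NLTK corpora):
#   preprocess("The mice were running quickly through the mazes")
#   -> ['mouse', 'running', 'quickly', 'maze']
# Note the tokenizer pattern r'\w[\w-]+' also drops single-character tokens.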
| 26.393939 | 77 | 0.6969 | 102 | 871 | 5.95098 | 0.539216 | 0.039539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.228473 | 871 | 32 | 78 | 27.21875 | 0.903274 | 0.394948 | 0 | 0 | 0 | 0 | 0.031513 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f520db46f1b8b8a53e17ff7a93ac14fed25f00 | 16,848 | py | Python | Codes/env.py | zongdaoming/Reinforcement-Learning | 426b646b1184e96d8a0f6c6341e53b13ef89ea12 | [
"Apache-2.0"
] | 1 | 2021-04-20T13:49:55.000Z | 2021-04-20T13:49:55.000Z | Codes/env.py | zongdaoming/Reinforcement-Learning | 426b646b1184e96d8a0f6c6341e53b13ef89ea12 | [
"Apache-2.0"
] | 1 | 2021-04-18T18:27:49.000Z | 2021-04-18T18:27:49.000Z | Codes/env.py | zongdaoming/Reinforcement-Learning | 426b646b1184e96d8a0f6c6341e53b13ef89ea12 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author : naive dormin
# @time : 2021/04/19 02:17:43
# @version : 1.0.0
import os
import time
import numpy as np
import random
from utils import *
import pickle
from ConvE import ConvE_double
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
byteTensor = torch.cuda.ByteTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
byteTensor = torch.ByteTensor
class Env(object):
"""knowledge graph environment definition"""
def __init__(self, dataPath, task=None, model="TransE"):
f1 = open(dataPath + 'entity2id.txt')
f2 = open(dataPath + 'relation2id.txt')
self.entity2id = f1.readlines()
self.relation2id = f2.readlines()
f1.close()
f2.close()
self.entity2id_ = {}
self.relation2id_ = {}
self.id2entity_ = {}
self.id2relation_ = {}
self.relations = []
for line in self.entity2id:
self.entity2id_[line.split()[0]] = int(line.split()[1])
self.id2entity_[int(line.split()[1])] = line.split()[0]
for line in self.relation2id:
self.relation2id_[line.split()[0]] = int(line.split()[1])
self.id2relation_[int(line.split()[1])] = line.split()[0]
self.relations.append(line.split()[0])
# Which model to compute pretrained embedding of entities and relations? (The definition of states)
if model == "TransH":
print("Uses TransH")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransH_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransH_relation_embedding.txt')
self.norm2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransH_norm_embedding.txt')
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
w_r = self.norm2vec[self.relation2id_[relation]]
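                # TransH hyperplane projection: e_perp = e - (w_r . e) * w_r,
                # assuming each w_r is unit-norm as in the TransH paper.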
new_entity2vec = self.entity2vec - \
np.sum(self.entity2vec * w_r, axis=1, keepdims=True) * w_r
self.entity2vec = new_entity2vec
elif model == "TransR":
print("Uses TransR")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransR_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransR_relation_embedding.txt')
self.projection2vec = np.loadtxt(
dataPath + "NELL-995_100_1.0_TransR_norm_embedding.txt")
dim = int(np.sqrt(self.projection2vec.shape[1]))
# By default, entities and relations share the same dimension
# This is not the main point of research
self.projection2vec = self.projection2vec.reshape([-1, dim, dim])
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
M_vec = self.projection2vec[self.relation2id_[relation], :, :]
new_entity2vec = np.matmul(M_vec, self.entity2vec.T).T
self.entity2vec = new_entity2vec
elif model == "TransD":
print("Uses TransD")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransD_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransD_relation_embedding.txt')
self.ent_norm2vec = np.loadtxt(
dataPath + "NELL-995_100_1.0_TransD_ent_norm_embedding.txt")
self.rel_norm2vec = np.loadtxt(
dataPath + "NELL-995_100_1.0_TransD_rel_norm_embedding.txt")
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
rel_proj = self.rel_norm2vec[self.relation2id_[relation]]
new_entity2vec = self.entity2vec + \
np.sum(self.entity2vec * self.ent_norm2vec,
axis=1, keepdims=True) * rel_proj
self.entity2vec = new_entity2vec
elif model == "ProjE":
print("Uses ProjE")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_ProjE_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_ProjE_relation_embedding.txt')
self.simple_hr_combination_weights = np.loadtxt(
dataPath + "NELL-995_100_ProjE_simple_hr_combination_weights.txt")
self.simple_tr_combination_weights = np.loadtxt(
dataPath + "NELL-995_100_ProjE_simple_tr_combination_weights.txt")
self.combination_bias_hr = np.loadtxt(
dataPath + "NELL-995_100_ProjE_combination_bias_hr.txt")
self.combination_bias_tr = np.loadtxt(
dataPath + "NELL-995_100_ProjE_combination_bias_tr.txt")
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
dim = self.entity2vec.shape[1]
r = self.relation2vec[[self.relation2id_[relation]]]
# ent_mat = np.transpose(self.entity2vec)
hr = self.entity2vec * \
self.simple_hr_combination_weights[:dim] + \
r * self.simple_hr_combination_weights[dim:]
new_entity2vec = np.tanh(hr + self.combination_bias_hr)
self.entity2vec = new_entity2vec
elif model == "ConvE":
print("Uses ConvE")
start_time = time.time()
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_ConvE_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_ConvE_relation_embedding.txt')
self.TransE_to_ConvE_id_entity = {}
with open(dataPath + "TransE_to_ConvE_entity_id.txt") as fr:
for line in fr:
line_list = line.strip().split()
self.TransE_to_ConvE_id_entity[int(
line_list[0])] = int(line_list[1])
self.TransE_to_ConvE_id_relation = {}
with open(dataPath + "TransE_to_ConvE_relation_id.txt") as fr:
for line in fr:
line_list = line.strip().split()
self.TransE_to_ConvE_id_relation[int(
line_list[0])] = int(line_list[1])
homepath = os.path.expanduser('~')
token2idx_ent, idx2token_ent, label2idx_ent, idx2label_ent = pickle.load(
open(homepath + "/.data/NELL-995/vocab_e1", 'rb'))
token2idx_rel, idx2token_rel, label2idx_rel, idx2label_rel = pickle.load(
open(homepath + "/.data/NELL-995/vocab_rel", 'rb'))
self.ConvE_model = ConvE_double(
len(token2idx_ent), len(token2idx_rel))
model_params = torch.load(
dataPath + "NELL-995_ConvE_0.2_0.3_100.model")
self.ConvE_model.load_state_dict(model_params)
for parameter in self.ConvE_model.parameters():
parameter.requires_grad = False
if USE_CUDA:
self.ConvE_model.cuda()
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
rel_id = token2idx_rel[relation]
ConvE_ent_id_list = [self.TransE_to_ConvE_id_entity[i]
for i in range(len(self.TransE_to_ConvE_id_entity))]
new_entity2vec_list = []
bs = self.ConvE_model.batch_size
batch_count = len(ConvE_ent_id_list) // bs
for i in range(batch_count):
x_middle, output = self.ConvE_model(longTensor(
ConvE_ent_id_list[i * bs: (i + 1) * bs]), longTensor([rel_id] * bs))
new_entity2vec_list.append(x_middle.cpu())
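                # Pad the final partial batch with entity id 0 up to batch_size,
                # then trim the padded rows from the output below.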
if len(ConvE_ent_id_list) % bs != 0:
input_ent_list = ConvE_ent_id_list[batch_count * bs:] + [
0] * (bs - len(ConvE_ent_id_list) % bs)
x_middle, output = self.ConvE_model(longTensor(
input_ent_list), longTensor([rel_id] * bs))
new_entity2vec_list.append(
x_middle[: len(ConvE_ent_id_list) % bs].cpu())
self.entity2vec = torch.cat(new_entity2vec_list).numpy()
torch.cuda.empty_cache()
"""
else:
if USE_CUDA:
self.ConvE_model.cuda()
"""
end_time = time.time()
print("Embedding calculation time: ", end_time - start_time)
else:
print("Default. Uses TransE")
self.entity2vec = np.loadtxt(dataPath + 'entity2vec.bern')
self.relation2vec = np.loadtxt(dataPath + 'relation2vec.bern')
if task is None:
self.embedding_precomputed_flag = False
else:
self.embedding_precomputed_flag = True
self.model = model
self.path = []
self.path_relations = []
# Knowledge Graph for path finding
f = open(dataPath + 'kb_env_rl.txt')
kb_all = f.readlines()
f.close()
self.kb = []
if task != None:
relation = task.split()[2] # Remove query relation and its inverse
for line in kb_all:
rel = line.split()[2]
if rel != relation and rel != relation + '_inv':
self.kb.append(line)
else:
for line in kb_all:
self.kb.append(line)
self.entity2link = {}
# Build the dictionary. Attention: they are all represented with numbers!
for line in self.kb:
line_list = line.strip().split()
head = self.entity2id_[line_list[0]]
tail = self.entity2id_[line_list[1]]
rel = self.relation2id_[line_list[2]]
if head not in self.entity2link:
self.entity2link[head] = {rel: [tail]}
elif rel not in self.entity2link[head]:
self.entity2link[head][rel] = [tail]
else:
self.entity2link[head][rel].append(tail)
        self.die = 0  # records how many consecutive invalid actions the agent has chosen
self.banned_action_list = []
def interact(self, state, action):
# state and action are all represented with numbers
# print("Die: ", self.die)
'''
This function process the interact from the agent
state: is [current_position, target_position, die]
action: an integer
return: (reward, [new_position, target_position, die], done)
'''
done = 0 # Whether the episode has finished
curr_pos = state[0]
target_pos = state[1]
if action in self.banned_action_list:
# print("Type 1")
choices = []
elif curr_pos not in self.entity2link:
# print("Type 2", curr_pos)
choices = []
elif action not in self.entity2link[curr_pos]:
# print("Type 3")
choices = []
else:
# print("Type 4")
choices = self.entity2link[curr_pos][action]
"""
chosed_relation = self.relations[action]
choices = []
for line in self.kb:
triple = line.rsplit()
e1_idx = self.entity2id_[triple[0]]
if curr_pos == e1_idx and triple[2] == chosed_relation and triple[1] in self.entity2id_:
choices.append(triple)
"""
if len(choices) == 0: # doesn't find a successful path
# print("No proper path! ")
reward = -1
self.die += 1
next_state = state # stay in the initial state
next_state[-1] = self.die # Total failure times
# print(next_state)
return (reward, next_state, done)
else: # find a valid step
# print("Proper path exists! ")
# Randomly choose one from multiple choices
chose_entity = random.choice(choices)
# path[2]: relation;path[1]: tail entity(the next entity)
self.path.append(self.id2relation_[
action] + ' -> ' + self.id2entity_[chose_entity])
self.path_relations.append(self.id2relation_[action]) # Relation
# print 'Find a valid step', path
# print 'Action index', action
self.die = 0
new_pos = chose_entity # Using the next entity as the new position
reward = 0 # Reward is zero means the action is valid
new_state = [new_pos, target_pos, self.die]
if new_pos == target_pos:
print('Find a path:', self.path)
done = 1 # episode finished
reward = 0 # reward is 0 means the episode is successful
new_state = None
# print(new_state)
return (reward, new_state, done)
def idx_state(self, idx_list, relation=None): # Calculate state vector
if idx_list != None:
curr = self.entity2vec[idx_list[0], :]
targ = self.entity2vec[idx_list[1], :]
if self.embedding_precomputed_flag == True or relation is None:
pass
else:
if self.model == "TransH":
w_r = self.norm2vec[relation]
curr = curr - np.sum(curr * w_r) * w_r
targ = targ - np.sum(targ * w_r) * w_r
elif self.model == "TransR":
M_vec = self.projection2vec[relation, :, :]
curr = np.matmul(M_vec, curr.T).T
targ = np.matmul(M_vec, targ.T).T
elif self.model == "TransD":
rel_proj = self.rel_norm2vec[relation]
curr = curr + \
np.sum(
curr * self.ent_norm2vec[idx_list[0]]) * rel_proj
targ = targ + \
np.sum(
targ * self.ent_norm2vec[idx_list[1]]) * rel_proj
elif self.model == "ProjE":
dim = self.entity2vec.shape[1]
r = self.relation2vec[relation]
curr = curr * \
self.simple_hr_combination_weights[:dim] + \
r * self.simple_hr_combination_weights[dim:]
curr = np.tanh(curr + self.combination_bias_hr)
targ = targ * \
self.simple_hr_combination_weights[:dim] + \
r * self.simple_hr_combination_weights[dim:]
targ = np.tanh(targ + self.combination_bias_hr)
elif self.model == "ConvE":
curr_id = self.TransE_to_ConvE_id_entity[idx_list[0]]
targ_id = self.TransE_to_ConvE_id_entity[idx_list[1]]
rel_id = self.TransE_to_ConvE_id_relation[relation]
bs = self.ConvE_model.batch_size
curr = [curr_id] + [0] * (bs - 1)
curr, output = self.ConvE_model(
longTensor(curr), longTensor([rel_id] * bs))
curr = curr[0].cpu().numpy()
targ = [targ_id] + [0] * (bs - 1)
targ, output = self.ConvE_model(
longTensor(targ), longTensor([rel_id] * bs))
targ = targ[0].cpu().numpy()
else: # Default, TransE
pass
return np.expand_dims(np.concatenate((curr, targ - curr)), axis=0)
else:
return None
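# Note (added for clarity): when idx_list is given, the returned state
# vector has shape [1, 2 * embedding_dim]: the current entity embedding
# concatenated with the difference vector (target - current), both
# optionally projected into the relation-specific space selected above.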
def get_valid_actions(self, entityID): # Get the valid action
actions = set()
for line in self.kb:
triple = line.split()
e1_idx = self.entity2id_[triple[0]]
if e1_idx == entityID:
actions.add(self.relation2id_[triple[2]])
return np.array(list(actions))
# A path's embedding is calculated by summing all of its relation vectors
def path_embedding(self, path):
embeddings = [self.relation2vec[self.relation2id_[relation], :]
for relation in path]
embeddings = np.reshape(embeddings, (-1, embedding_dim))
path_encoding = np.sum(embeddings, axis=0)
return np.reshape(path_encoding, (-1, embedding_dim))
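# Hedged example (hypothetical relation names, assuming they exist in
# relation2id_): a two-hop path embeds as the sum of its relation
# vectors, returned with shape [1, embedding_dim]:
# emb = env.path_embedding(['concept:worksfor', 'concept:locatedin'])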
| 42.014963 | 107 | 0.553063 | 1,927 | 16,848 | 4.623249 | 0.149974 | 0.034572 | 0.038164 | 0.042429 | 0.393086 | 0.328656 | 0.275564 | 0.234706 | 0.196543 | 0.165563 | 0 | 0.032135 | 0.344314 | 16,848 | 400 | 108 | 42.12 | 0.774328 | 0.094611 | 0 | 0.237624 | 0 | 0 | 0.081437 | 0.063137 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016502 | false | 0.006601 | 0.039604 | 0 | 0.079208 | 0.026403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f718d511a624ff6bafcf060c184c7b35cb49f0 | 2,608 | py | Python | txrtpengine/NGCPProxy.py | braams/txrtpengine | 5511cf79d7fc338b28d927c19e5ff3b88e66a5be | [
"MIT"
] | null | null | null | txrtpengine/NGCPProxy.py | braams/txrtpengine | 5511cf79d7fc338b28d927c19e5ff3b88e66a5be | [
"MIT"
] | null | null | null | txrtpengine/NGCPProxy.py | braams/txrtpengine | 5511cf79d7fc338b28d927c19e5ff3b88e66a5be | [
"MIT"
] | null | null | null | import json
from twisted.internet import reactor
from twisted.python import log
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.web.server import Site
from txrtpengine.NGCP import NGCPClient
class NGCPProxy(Resource):
def __init__(self, addr):
self.c = NGCPClient(addr)
self.isLeaf = True
Resource.__init__(self)
def _onResponse(self, response, request):
request.write(json.dumps(response).encode('utf-8'))
request.finish()
def _onError(self, error, request):
request.write(json.dumps({'error': str(error)}).encode('utf-8'))
request.finish()
def render_POST(self, request):
request.setHeader('Content-Type', 'application/json; charset=utf-8')
# copy-paste from https://stackoverflow.com/a/33571117
def _byteify(data, ignore_dicts=False):
# Python 2 helper: if this is a unicode string, return its UTF-8 encoded bytes
if isinstance(data, unicode):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
# if it's anything else, return it in its original form
return data
try:
content = request.content.read().decode("utf-8")
cmd = json.loads(content, object_hook=_byteify)
d = self.c.command(cmd)
d.addCallback(self._onResponse, request)
d.addErrback(self._onError, request)
return NOT_DONE_YET
except Exception as e:
return json.dumps({'error': str(e)}, ensure_ascii=False, indent=1).encode('utf-8')
if __name__ == '__main__':
import sys
from twisted.web.client import getPage
log.startLogging(sys.stdout)
def test():
reactor.listenTCP(1222, Site(NGCPProxy(('127.0.0.1', 16222))))
def onResponse(data):
log.msg("response: %s" % data)
getPage('http://localhost:1222/', method='POST', postdata='{"command":"ping"}').addBoth(onResponse)
reactor.callWhenRunning(test)
reactor.run()
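# Hedged usage sketch (added, mirrors the test() helper above): with the
# proxy listening on port 1222, a command can also be sent from a shell:
# curl -X POST -d '{"command": "ping"}' http://localhost:1222/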
| 33.435897 | 107 | 0.625383 | 321 | 2,608 | 4.965732 | 0.417445 | 0.041405 | 0.035132 | 0.016939 | 0.100376 | 0.032622 | 0 | 0 | 0 | 0 | 0 | 0.017792 | 0.267255 | 2,608 | 77 | 108 | 33.87013 | 0.816327 | 0.132285 | 0 | 0.039216 | 0 | 0 | 0.066933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137255 | false | 0 | 0.176471 | 0 | 0.45098 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f73a2d16ee1a1b4874c0d6207d250cd9f1609d | 6,980 | py | Python | ise_session_gui.py | ComtecSystem-dev/ise_session | 299bf47b7584094c7722a27a5cbec704e8acc084 | [
"Apache-2.0"
] | null | null | null | ise_session_gui.py | ComtecSystem-dev/ise_session | 299bf47b7584094c7722a27a5cbec704e8acc084 | [
"Apache-2.0"
] | null | null | null | ise_session_gui.py | ComtecSystem-dev/ise_session | 299bf47b7584094c7722a27a5cbec704e8acc084 | [
"Apache-2.0"
] | null | null | null | import sys
import requests
import xmltodict
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from PyQt5 import uic
#Link : Qt5 UI File
# - condition : The UI file should be located in the same directory as this file
form_class = uic.loadUiType("./ise_session.ui")[0]
# Class definitions
class ISE_Session():
def __init__(self, ip, id, pwd):
self.ip = ip
self.id = id
self.pwd = pwd
def getActiveSession(self):
url = "https://%s/admin/API/mnt/Session/ActiveList" % self.ip
ret_state, ret_val = self.request_action("get", url, self.id, self.pwd)
return ret_state, ret_val
def deleteSessionByMAC(self, MAC):
url = "https://%s/admin/API/mnt/Session/Delete/MACAddress/%s" % (self.ip, MAC)
ret_state, ret_val = self.request_action("delete", url, self.id, self.pwd)
return ret_state, ret_val
def request_action(self, request_type, url, id, pwd):
print("\t Request URL : %s %s" % (request_type, url))
print("\t Request ID/PWD : [%s][%s]" % (id, pwd))
session = requests.Session()
session.auth = (id, pwd)
if request_type == "get":
response = session.get(url, verify=False)
elif request_type == "delete":
response = session.delete(url, verify=False)
else:
return 0, "unknown error"
ret_val = None
if response.status_code == 401:
ret_val = "Auth failed"
elif response.status_code != 200:
ret_val = "Error code %s " % (response.status_code)
else:
ret_val = xmltodict.parse(response.text)
return response.status_code, ret_val
class MyWindow(QMainWindow, form_class) :
def __init__(self) :
super().__init__()
self.setupUi(self)
self.lineEdit_IP.setText("10.200.150.212")
self.lineEdit_ID.setText("admin")
self.lineEdit_PWD.setText("Comtec123")
# Linking functions to buttons
self.pushButton.clicked.connect(self.button1Function)
def button1Function(self):
ISE_IP = self.lineEdit_IP.text()
ISE_ID = self.lineEdit_ID.text()
ISE_PWD = self.lineEdit_PWD.text()
print("[MyWindow] button1Function() - [%s][%s][%s]" % (ISE_IP, ISE_ID, ISE_PWD))
ise_session = ISE_Session(ISE_IP, ISE_ID, ISE_PWD)
ret_state, ret_val = ise_session.getActiveSession()
if ret_state != 200:
QMessageBox.about(self, "Error", "%s" % (ret_val))
else:
print("[MyWindow] button1Function() - %s" % (ret_state))
session_count = 0
session_list = []
if ret_val is not None and "activeList" in ret_val:
session_count = ret_val['activeList']['@noOfActiveSession']
ret_sessions = ret_val['activeList']['activeSession']
# the API returns a dict for a single session and a list for multiple
if session_count == "1":
ret_sessions = [ret_sessions]
for ret in ret_sessions:
session = {}
session['user_name'] = ret['user_name'] if 'user_name' in ret else '!!!'
session['mac'] = ret['calling_station_id'] if 'calling_station_id' in ret else '!!!'
session['ip'] = ret['framed_ip_address'] if 'framed_ip_address' in ret else '!!!'
session['sw_ip'] = ret['nas_ip_address'] if 'nas_ip_address' in ret else '!!!'
session_list.append(session)
print("\t%s" % (session))
self.Set_Table(["user_name", "mac", "ip", "sw_ip"], session_list)
def click_btn(self, btnClass, MAC):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setText("Are you sure you want to delete the session for MAC (%s)?" % (MAC))
msgBox.setWindowTitle("Warning")
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
ISE_IP = self.lineEdit_IP.text()
ISE_ID = self.lineEdit_ID.text()
ISE_PWD = self.lineEdit_PWD.text()
ise_session = ISE_Session(ISE_IP, ISE_ID, ISE_PWD)
ret_state, ret_val = ise_session.deleteSessionByMAC(MAC)
if ret_state == 200:
if ret_val is not None and "mnt-rest-result" in ret_val:
if "status" in ret_val["mnt-rest-result"]:
btnClass.setEnabled(False)
return
QMessageBox.about(self, "Error[%s]" % ret_state, "%s" % (ret_val) )
pass
def Set_Table(self, head_list, data_list):
self.tableWidget.setRowCount(len(data_list))
self.tableWidget.setColumnCount(len(head_list)+1)
self.tableWidget.setHorizontalHeaderLabels([" "]+head_list)
self.tableWidget.setColumnWidth(0, 50)
self.tableWidget.setColumnWidth(1, 130)
self.tableWidget.setColumnWidth(2, 150)
self.tableWidget.setColumnWidth(3, 130)
self.tableWidget.setColumnWidth(4, 130)
col_count = 0
row_count = 0
for table_data in data_list:
col_count = 0
btnDelete = QPushButton("Delete")
btnDelete.MAC = table_data['mac']
btnDelete.clicked.connect(partial(self.click_btn, btnDelete, table_data['mac']))
#btnDelete.clicked.connect(self.click_btn)
self.tableWidget.setCellWidget(row_count, col_count, btnDelete)
col_count = 1
for column_name in head_list:
column_val = table_data[column_name] if column_name in table_data else '!!!'
tableitem = QTableWidgetItem(column_val)
tableitem.setFlags(Qt.ItemIsEnabled)
self.tableWidget.setItem(row_count, col_count, tableitem)
col_count = col_count + 1
row_count = row_count + 1
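# Illustrative sketch (hypothetical row values; column names taken from
# button1Function above):
# self.Set_Table(["user_name", "mac", "ip", "sw_ip"],
# [{"user_name": "alice", "mac": "00:11:22:33:44:55",
# "ip": "10.0.0.5", "sw_ip": "10.0.0.1"}])
# renders one row: a Delete button in column 0, then the four fields.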
if __name__ == "__main__":
#QApplication : run the service
app = QApplication(sys.argv)
#create an instance of the window class
myWindow = MyWindow()
#show UI
myWindow.show()
#Run Program
app.exec_() | 41.058824 | 109 | 0.570487 | 807 | 6,980 | 4.72119 | 0.22057 | 0.033071 | 0.018898 | 0.033596 | 0.298163 | 0.298163 | 0.275591 | 0.234646 | 0.234646 | 0.234646 | 0 | 0.013834 | 0.316476 | 6,980 | 170 | 110 | 41.058824 | 0.784741 | 0.039398 | 0 | 0.227273 | 0 | 0 | 0.13174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.007576 | 0.05303 | 0 | 0.166667 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16fa7014b2509e362e1a19500f13adaa6c41db09 | 1,109 | py | Python | caption_generation/sub_json.py | Collapsar-G/clevr-dataset-gen | a09b0559b53891bf4f4771190e4ad361406c67fe | [
"BSD-3-Clause"
] | 1 | 2021-05-23T13:48:59.000Z | 2021-05-23T13:48:59.000Z | caption_generation/sub_json.py | Collapsar-G/clevr-dataset-gen | a09b0559b53891bf4f4771190e4ad361406c67fe | [
"BSD-3-Clause"
] | null | null | null | caption_generation/sub_json.py | Collapsar-G/clevr-dataset-gen | a09b0559b53891bf4f4771190e4ad361406c67fe | [
"BSD-3-Clause"
] | null | null | null | import argparse
import json
import os
import ijson
parser = argparse.ArgumentParser()
# /questions/CLEVR_test_questions.json
# Inputs
parser.add_argument('--all_scene_paths', default='../data/CLEVR_v1.0/scenes',
help="JSON file containing questions information for all images " +
"from generate_questions.py")
parser.add_argument('--output_scene_file', default='../data/CLEVR_v1.0/CLEVR_train_scenes.json',
help="Directory containing JSON templates for captions")
if __name__ == "__main__":
all_scenes = []
args = parser.parse_args()
paths = os.listdir(args.all_scene_paths)
for scene_path in paths:
# print(scene_path)
with open(args.all_scene_paths + "/" + scene_path, 'r') as f:
all_scenes.append(json.load(f))
output = {
'info':
{"split": "train", "license": "Creative Commons Attribution (CC BY 4.0)", "version": "1.0",
"date": "2/14/2017"},
'scenes': all_scenes
}
with open(args.output_scene_file, 'w') as f:
json.dump(output, f)
| 34.65625 | 103 | 0.627592 | 140 | 1,109 | 4.728571 | 0.485714 | 0.036254 | 0.058912 | 0.054381 | 0.057402 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017794 | 0.239856 | 1,109 | 31 | 104 | 35.774194 | 0.767497 | 0.055005 | 0 | 0 | 0 | 0 | 0.321839 | 0.084291 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50054bcfcc58e68ad2fe236a5c12539ae5190f0 | 39,700 | py | Python | det3d/models/bbox_heads/clear_mg_ohs_head.py | Lelin-HUNUST/VISTA | 7bf34132d719cb0e5e803b92cd15451df58a9a5d | [
"MIT"
] | 47 | 2022-03-21T02:41:39.000Z | 2022-03-30T17:25:29.000Z | det3d/models/bbox_heads/clear_mg_ohs_head.py | Lelin-HUNUST/VISTA | 7bf34132d719cb0e5e803b92cd15451df58a9a5d | [
"MIT"
] | 1 | 2022-03-28T15:11:26.000Z | 2022-03-28T16:27:40.000Z | det3d/models/bbox_heads/clear_mg_ohs_head.py | Lelin-HUNUST/VISTA | 7bf34132d719cb0e5e803b92cd15451df58a9a5d | [
"MIT"
] | 2 | 2022-03-23T12:56:14.000Z | 2022-03-27T14:25:50.000Z | # Copyright (c) Gorilla-Lab. All rights reserved.
import logging
from functools import partial
from collections import defaultdict
from typing import Dict, List, Optional, Sequence
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..losses.ohs_loss_clear import OHSLossClear
from ..losses.attention_constrain_loss import AttentionConstrainedLoss
from ..registry import HEADS
from ..builder import build_loss
from ...core.bbox import box_torch_ops
from ...core.bbox.geometry import points_in_convex_polygon_torch
from ...core.bbox.box_coders import BoxCoder, GroundBox3dCoderAF
from ipdb import set_trace
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
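# Illustrative note (added): multi_apply maps func over zipped per-item
# arguments and transposes the per-item result tuples into tuples of
# lists, e.g. if func returns (x, y):
# multi_apply(func, [a1, a2], [b1, b2]) == ([x_a1b1, x_a2b2], [y_a1b1, y_a2b2])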
def _get_pos_neg_loss(cls_loss, labels, label_weights):
# cls_loss: [N, num_anchors, num_class]
# labels: [N, num_anchors]
batch_size = cls_loss.shape[0]
if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:
cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(batch_size, -1)
cls_neg_loss = ((labels == 0) & (label_weights > 0)).type_as(
cls_loss) * cls_loss.view(batch_size, -1)
cls_pos_loss = cls_pos_loss.sum() / batch_size
cls_neg_loss = cls_neg_loss.sum() / batch_size
else:
cls_pos_loss = cls_loss[..., 1:].sum() / batch_size
cls_neg_loss = cls_loss[..., 0].sum() / batch_size
return cls_pos_loss, cls_neg_loss
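# Note (added for clarity): for single-channel (sigmoid) outputs the
# pos/neg split uses the label and weight maps directly; for softmax
# outputs, channel 0 is treated as background (negative) and
# channels 1: as the positive classes.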
@HEADS.register_module
class OHSHeadClear(nn.Module):
def __init__(self,
box_coder: GroundBox3dCoderAF,
num_input: int,
num_pred: int,
num_cls: int,
header: bool = True,
name: str = "",
**kwargs,):
super().__init__()
self.box_coder = box_coder
self.conv_cls = nn.Conv2d(num_input, num_cls, 1)
self.mode = kwargs.get("mode", "bev")
if self.box_coder.center == "direct":
self.conv_xy = nn.Conv2d(num_input, 2, 1)
elif self.box_coder.center == "soft_argmin":
self.conv_xy = nn.Conv2d(num_input, 2 * self.box_coder.kwargs["xy_bin_num"], 1)
self.loc_bins_x = torch.linspace(self.box_coder.kwargs["x_range"][0], self.box_coder.kwargs["x_range"][1],
self.box_coder.kwargs["xy_bin_num"]).reshape(1, 1, -1, 1, 1)
self.loc_bins_y = torch.linspace(self.box_coder.kwargs["y_range"][0], self.box_coder.kwargs["y_range"][1],
self.box_coder.kwargs["xy_bin_num"]).reshape(1, 1, -1, 1, 1)
self.loc_bins = torch.cat([self.loc_bins_x, self.loc_bins_y], 1)
else:
raise NotImplementedError
if "direct" in self.box_coder.height:
self.conv_z = nn.Conv2d(num_input, 1, 1)
elif "soft_argmin" in self.box_coder.height:
self.conv_z = nn.Conv2d(num_input, self.box_coder.kwargs["z_bin_num"], 1)
self.z_loc_bins = torch.linspace(self.box_coder.kwargs["z_range"][0], self.box_coder.kwargs["z_range"][1],
self.box_coder.kwargs["z_bin_num"]).reshape(1, self.box_coder.kwargs["z_bin_num"], 1, 1)
else:
raise NotImplementedError
if "soft_argmin" in self.box_coder.dim:
self.conv_dim = nn.Conv2d(num_input, 3 * self.box_coder.kwargs["dim_bin_num"], 1)
self.dim_loc_bins = torch.linspace(self.box_coder.kwargs["dim_range"][0], self.box_coder.kwargs["dim_range"][1],
self.box_coder.kwargs["dim_bin_num"]).reshape(1, self.box_coder.kwargs[
"dim_bin_num"], 1, 1)
self.dim_bins = torch.cat([self.dim_loc_bins, self.dim_loc_bins, self.dim_loc_bins], 1)
else:
self.conv_dim = nn.Conv2d(num_input, 3, 1)
if self.box_coder.velocity:
self.conv_velo = nn.Conv2d(num_input, 2, 1)
if self.box_coder.rotation == "vector":
self.conv_r = nn.Conv2d(num_input, 2, 1)
elif self.box_coder.rotation == "soft_argmin":
self.conv_r = nn.Conv2d(num_input, self.box_coder.kwargs["r_bin_num"], 1)
self.r_loc_bins = torch.linspace(-np.pi, np.pi, self.box_coder.kwargs["r_bin_num"]).reshape(
1, self.box_coder.kwargs["r_bin_num"], 1, 1)
else:
self.conv_r = nn.Conv2d(num_input, 1, 1)
def forward(self, x, return_loss):
x_bev = x
ret_dict = {}
cls_preds = self.conv_cls(x_bev).permute(0, 2, 3, 1).contiguous()
# predict bounding box
xy = self.conv_xy(x_bev)
z = self.conv_z(x_bev)
dim = self.conv_dim(x_bev)
# encode as bounding box
if self.box_coder.center == "soft_argmin":
xy = xy.view(
(xy.shape[0], 2, self.box_coder.kwargs["xy_bin_num"], xy.shape[2], xy.shape[3]))
xy = F.softmax(xy, dim=2)
xy = xy * self.loc_bins.to(xy.device)
xy = torch.sum(xy, dim=2, keepdim=False)
if "soft_argmin" in self.box_coder.height:
z = F.softmax(z, dim=1)
z = z * self.z_loc_bins.to(z.device)
z = torch.sum(z, dim=1, keepdim=True)
if "soft_argmin" in self.box_coder.dim:
dim = dim.view(
(dim.shape[0], 3, self.box_coder.kwargs["dim_bin_num"], dim.shape[2], dim.shape[3]))
dim = F.softmax(dim, dim=2)
dim = dim * self.dim_loc_bins.to(dim.device)
dim = torch.sum(dim, dim=2, keepdim=False)
xy = xy.permute(0, 2, 3, 1).contiguous()
z = z.permute(0, 2, 3, 1).contiguous()
dim = dim.permute(0, 2, 3, 1).contiguous()
if self.box_coder.dim == "direct":
dim = F.relu(dim)
if self.box_coder.velocity:
velo = self.conv_velo(x_bev).permute(0, 2, 3, 1).contiguous()
r_preds = self.conv_r(x_bev)
if self.box_coder.rotation == "vector":
#import pdb; pdb.set_trace()
r_preds = F.normalize(r_preds, p=2, dim=1)
elif self.box_coder.rotation == "soft_argmin":
r_preds = F.softmax(r_preds, dim=1)
r_preds = r_preds * self.r_loc_bins.to(r_preds.device)
r_preds = torch.sum(r_preds, dim=1, keepdim=True)
r_preds = r_preds.permute(0, 2, 3, 1).contiguous()
if self.box_coder.velocity:
box_preds = torch.cat([xy, z, dim, velo, r_preds], -1)
else:
box_preds = torch.cat([xy, z, dim, r_preds], -1)
ret_dict.update({"box_preds": box_preds, "cls_preds": cls_preds})
return ret_dict
@HEADS.register_module
class MultiGroupOHSHeadClear(nn.Module):
def __init__(self,
mode: str = "3d",
in_channels: List[int] = [128, ],
norm_cfg=None,
tasks: List[Dict] = [],
weights=[],
box_coder: BoxCoder = None,
with_cls: bool = True,
with_reg: bool = True,
encode_background_as_zeros: bool = True,
use_sigmoid_score: bool = True,
loss_norm: Dict = dict(type="NormByNumPositives",
pos_class_weight=1.0,
neg_class_weight=1.0,),
loss_cls: Dict = dict(type="CrossEntropyLoss",
use_sigmoid=False,
loss_weight=1.0,),
loss_bbox: Dict = dict(type="SmoothL1Loss",
beta=1.0,
loss_weight=1.0,),
atten_res: Sequence[int] = None,
assign_cfg: Optional[dict] = dict(),
name="rpn",):
super().__init__()
assert with_cls or with_reg
# read tasks and analysis the classes for tasks
num_classes = [len(t["class_names"]) for t in tasks]
self.class_names = [t["class_names"] for t in tasks]
self.num_anchor_per_locs = [1] * len(num_classes)
self.targets = tasks
# define the essential paramters
self.box_coder = box_coder
self.with_cls = with_cls
self.with_reg = with_reg
self.in_channels = in_channels
self.num_classes = num_classes
self.encode_background_as_zeros = encode_background_as_zeros
self.use_sigmoid_score = use_sigmoid_score
self.box_n_dim = self.box_coder.n_dim
self.mode = mode
self.assign_cfg = assign_cfg
self.pc_range = np.asarray(self.box_coder.pc_range) # [6]
self.dims = self.pc_range[3:] - self.pc_range[:3] # [3]
# initialize loss
self.loss_norm = loss_norm
self.loss_cls = build_loss(loss_cls)
self.loss_reg = build_loss(loss_bbox)
self.atten_res = atten_res
# initialize logger
logger = logging.getLogger("MultiGroupHead")
self.logger = logger
# check box_coder
assert isinstance(
box_coder, GroundBox3dCoderAF), "OHSLoss must come with an anchor-free box coder"
assert box_coder.code_size == len(
loss_bbox.code_weights), "code weights does not match code size"
# set multi-tasks heads
# split each head
num_clss = []
num_preds = []
box_code_sizes = [self.box_coder.n_dim] * len(self.num_classes)
for num_c, num_a, box_cs in zip(
self.num_classes, self.num_anchor_per_locs, box_code_sizes
):
if self.encode_background_as_zeros:
num_cls = num_a * num_c
else:
num_cls = num_a * (num_c + 1)
num_clss.append(num_cls)
num_pred = num_a * box_cs
num_preds.append(num_pred)
self.logger.info(
f"num_classes: {self.num_classes}, num_preds: {num_preds}"
)
# construct each task head
self.tasks = nn.ModuleList()
for task_id, (num_pred, num_cls) in enumerate(zip(num_preds, num_clss)):
self.tasks.append(
OHSHeadClear(
self.box_coder,
self.in_channels,
num_pred,
num_cls,
header=False,
mode=self.mode,
)
)
def set_train_cfg(self, cfg):
self.ohs_loss = []
self.atten_loss = []
for task_id, target in enumerate(self.targets):
self.ohs_loss.append(
OHSLossClear(self.box_coder,
target.num_class,
self.loss_cls,
self.loss_reg,
self.encode_background_as_zeros,
cfg,
self.loss_norm,
task_id,
self.mode))
self.atten_loss.append(
AttentionConstrainedLoss(
self.box_coder, target.num_class, task_id, self.atten_res)
)
self.logger.info("Finish Attention Constrained Loss Initialization")
self.logger.info("Finish MultiGroupOHSHeadClear Initialization")
def forward(self, x, return_loss=False):
ret_dicts = []
for task in self.tasks:
ret_dicts.append(task(x, return_loss))
return ret_dicts
def loss(self, example, preds_dicts, **kwargs):
annos = example["annos"]
batch_size_device = example["num_voxels"].shape[0]
batch_labels = [anno["gt_classes"] for anno in annos]
batch_boxes = [anno["gt_boxes"] for anno in annos]
batch_atten_map = kwargs.get('atten_map', None)
rets = []
for task_id, preds_dict in enumerate(preds_dicts):
box_preds = preds_dict["box_preds"]
cls_preds = preds_dict["cls_preds"]
bs_per_gpu = len(cls_preds)
batch_task_boxes = [batch_box[task_id] for batch_box in batch_boxes]
batch_task_labels = [batch_label[task_id] for batch_label in batch_labels]
attention_loss = defaultdict(list)
for index, bam in enumerate(batch_atten_map):
temp_attention_loss = self.atten_loss[task_id](
bam, batch_task_boxes, batch_task_labels)
for ke, va in temp_attention_loss.items():
attention_loss[ke].append(va)
targets = self.assign_hotspots(cls_preds,
batch_task_boxes,
batch_task_labels)
labels, label_weights, bbox_targets, bbox_locs, num_total_pos, num_total_neg = targets
# process assigned targets
labels = torch.stack(labels, 0).view(bs_per_gpu, -1) # [B, H*W]
label_weights = torch.stack(label_weights, 0).view(bs_per_gpu, -1) # [B, H*W]
kwargs = {}
# calculate ohs loss for each task
loc_loss, cls_loss = self.ohs_loss[task_id](
box_preds,
cls_preds,
labels,
label_weights,
bbox_targets,
bbox_locs,
**kwargs
)
if self.loss_norm["type"] == "NormByNumExamples":
normalizer = num_total_pos + num_total_neg
elif self.loss_norm["type"] == "NormByNumPositives":
normalizer = max(num_total_pos, 1.0)
elif self.loss_norm["type"] == "NormByNumPosNeg":
normalizer = self.loss_norm["pos_cls_weight"] * num_total_pos + \
self.loss_norm["neg_cls_weight"] * num_total_neg
elif self.loss_norm["type"] == "dont_norm": # support ghm loss
normalizer = batch_size_device
else:
raise ValueError(f"unknown loss norm type")
loc_loss_reduced = loc_loss.sum() / normalizer
loc_loss_reduced *= self.loss_reg._loss_weight
cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels, label_weights)
cls_pos_loss /= self.loss_norm["pos_cls_weight"]
cls_neg_loss /= self.loss_norm["neg_cls_weight"]
cls_loss_reduced = cls_loss.sum() / normalizer
cls_loss_reduced *= self.loss_cls._loss_weight
loss = loc_loss_reduced + cls_loss_reduced
atten_loss = 0.0
for value in attention_loss.values():
if type(value) == list:
temp_loss = 0.0
norm_fac = len(value)
for temp_atten_loss in value:
temp_loss = temp_loss + temp_atten_loss
value = temp_loss * 1.0 / norm_fac
atten_loss = atten_loss + value
loss = loss + atten_loss
loc_loss_elem = [
loc_loss[:, :, i].sum() / num_total_pos
for i in range(loc_loss.shape[-1])
]
ret = {
"loss": loss,
"cls_pos_loss": cls_pos_loss.detach().cpu(),
"cls_neg_loss": cls_neg_loss.detach().cpu(),
"cls_loss_reduced": cls_loss_reduced.detach().cpu().mean(),
"loc_loss_reduced": loc_loss_reduced.detach().cpu().mean(),
"loc_loss_elem": [elem.detach().cpu() for elem in loc_loss_elem],
"num_pos": torch.tensor([num_total_pos]),
"num_neg": torch.tensor([num_total_neg]),
}
for key, value in attention_loss.items():
if type(value) == list:
temp_loss = 0.0
norm_fac = len(value)
for temp_atten_loss in value:
temp_loss = temp_loss + temp_atten_loss
value = temp_loss * 1.0 / norm_fac
ret.update({key: value.detach().cpu()})
rets.append(ret)
rets_merged = defaultdict(list)
for ret in rets:
for k, v in ret.items():
rets_merged[k].append(v)
return rets_merged
def assign_hotspots(self,
cls_scores: torch.Tensor,
gt_bboxes: List[np.ndarray],
gt_labels: List[np.ndarray]):
"""
assign hotspots(generate targets)
Args:
cls_scores (torch.Tensor, [B, H, W, C]): classification prediction score map
gt_bboxes (List[np.ndarray], [[M, ndim], [K, ndim], ...]): ground truth bounding box for each batch
gt_labels (List[np.ndarray], [[M], [K], ...]): ground truth bounding box id for each batch
cls_scores (torch.Tensor, [B, H, D, C], optional): classification prediction score map for RV.
Default to None.
"""
bs_per_gpu = len(gt_bboxes) # Get the batch size
device = cls_scores.device # Get the current device
gt_bboxes = [torch.tensor(box, device=device).float()
for box in gt_bboxes] # [M, 9], all gt_boxes
# [M] all gt_classes, starting from 1 (0 means background)
gt_labels = [torch.tensor(label, device=device).long() for label in gt_labels]
labels_list, label_weights_list, bbox_targets_list, bbox_locs_list, num_pos_list, num_neg_list = \
multi_apply(self.assign_hotspots_bev_single, cls_scores, gt_bboxes, gt_labels)
for i in range(bs_per_gpu):
bbox_locs_list[i][:, 0] = i
num_total_pos = sum([max(num, 1) for num in num_pos_list])
num_total_neg = sum([max(num, 1) for num in num_neg_list])
targets = (labels_list, label_weights_list, bbox_targets_list,
bbox_locs_list, num_total_pos, num_total_neg)
return targets
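# Note (added for clarity): per batch element, `targets` carries the dense
# per-cell class labels and weights plus the sparse positive regression
# targets (bbox_targets) and their grid locations (bbox_locs, rows of
# [batch_idx, h_idx, w_idx]), along with the positive/negative counts
# used for loss normalization.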
def assign_hotspots_bev_single(self,
cls_scores: torch.Tensor,
gt_bboxes: torch.Tensor,
gt_labels: torch.Tensor):
r"""
assign hotspots(generate targets) of BEV for a single batch.
Args:
cls_scores (torch.Tensor, [H, W, C]): classification prediction score map
gt_bboxes (torch.Tensor, [M, ndim]): ground truth bounding box
gt_labels (torch.Tensor, [M]): ground truth class ids
"""
h, w = cls_scores.size()[:2] # Get the size of the feature map of bev view (262,64)
# initialize relate labels
labels = torch.zeros_like(cls_scores[:, :, 0], dtype=torch.long) # Set up the bev labels
label_weights = torch.ones_like(
cls_scores[:, :, 0], dtype=torch.float) * self.loss_norm["neg_cls_weight"] # Initialize all weights to neg weights
# initialized to record the positive bbx's location in grid map
bbox_locs = cls_scores.new_zeros((0, 3), dtype=torch.long)
# initialized to record the positive bbx's regression targets
bbox_targets = cls_scores.new_zeros((0, self.box_coder.code_size), dtype=torch.float)
# scan gt_bboxes
self.effective_ratio = self.assign_cfg.get("effective_ratio", [1.0, 6.0])
if len(gt_bboxes) > 0:
effective_boxes = gt_bboxes[:, [0, 1, 3, 4]].clone().detach() # [M, 4]
effective_ratio_l = (self.dims[0] / w) / effective_boxes[:, 2] # [M]
effective_ratio_w = (self.dims[1] / h) / effective_boxes[:, 3] # [M]
effective_ratio_l = effective_ratio_l.clamp(min=self.effective_ratio[0], # [M]
max=self.effective_ratio[1]) # [M]
effective_ratio_w = effective_ratio_w.clamp(min=self.effective_ratio[0], # [M]
max=self.effective_ratio[1]) # [M]
# expand the box area to cover at least one grid cell if the box is too
# small, so that the box label can match the center of a corresponding
# grid cell; the expanded boxes are called `effective_boxes`
effective_boxes[:, 2] *= effective_ratio_l
effective_boxes[:, 3] *= effective_ratio_w
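# Worked example (illustrative numbers): with a BEV cell of 0.2 m along x
# and a box only 0.1 m long, effective_ratio_l = 0.2 / 0.1 = 2.0 (within
# the clamp range [1.0, 6.0]), so the box length is doubled to 0.2 m and
# is guaranteed to cover at least one grid center.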
# get the corners
angles = gt_bboxes[:, -1] # [num_box]
effective_boxes = box_torch_ops.center_to_corner_box2d(
effective_boxes[:, :2], effective_boxes[:, 2:4], angles)
ignore_boxes_out = effective_boxes
# transfer the hybrid coordinate system to Cartesian coordinate system
self.box_coder.layout(w, h)
# read necessary parameters from box_coder
# Cartesian coordinates of the grid centers, and grid indices in the hybrid coordinate system
# grid_real_centers - [W * H, 2]
# w_indices - [W * H]
# h_indices - [W * H]
grid_real_centers = self.box_coder.grids_sensor
w_indices = self.box_coder.ww_l
h_indices = self.box_coder.hh_l
# scan bounding boxes
for i in range(len(gt_bboxes)):
# get the points(hotspots) cover by the bounding box
pos_mask = points_in_convex_polygon_torch(
grid_real_centers, effective_boxes[i].unsqueeze(0)) # [num_points, 8]
# get the raw hotspots
pos_ind = pos_mask.nonzero()[:, 0]
# NOTE: fixes a target-assignment corner case in BEV: with hybrid coordinates,
# the `effective_boxes` may not expand enough to cover any grid center,
# so we fall back to the nearest grid center as the hotspot in that case
gt_center = gt_bboxes[i: i + 1, :2] # [1, 2]
dist_to_grid_center = torch.norm(grid_real_centers - gt_center, dim=1) # [W * H]
min_ind = torch.argmin(dist_to_grid_center)
if min_ind not in pos_ind:
pos_ind = torch.cat([pos_ind.reshape(-1, 1), min_ind.reshape(-1, 1)],
dim=0).reshape(-1)
num_hotspots = self.assign_cfg.get("num_hotspots", 28)
if self.assign_cfg.get("select_hotspots", True):
# filter out the verbose hotspots
if len(pos_ind) > num_hotspots:
# if the hotspots are too many for the instance
# select the num_hotspots-th nearest as valid hotspots
points = grid_real_centers[pos_ind, :]
diff = gt_bboxes[i, :2] - points
diff = torch.norm(diff, dim=1)
sorted_ind = torch.argsort(diff)[:num_hotspots]
pos_ind = pos_ind[sorted_ind]
# get the indices of hotspots
pos_h_indices = h_indices[pos_ind] # [num_pos]
pos_w_indices = w_indices[pos_ind] # [num_pos]
# scan the positive hotspots
if len(pos_h_indices):
if not (labels[pos_h_indices, pos_w_indices] == 0).all():
unique_pos_h_indices = pos_h_indices.new_zeros((0,))
unique_pos_w_indices = pos_w_indices.new_zeros((0,))
unique_pos_ind = pos_ind.new_zeros((0,))
# NOTE: ensure each grid cell's gradient is affected by only one label;
# if a grid cell is already covered by another label, zero out its weight
for ph, pw, pi in zip(pos_h_indices, pos_w_indices, pos_ind):
if labels[ph, pw] == 0:
unique_pos_h_indices = torch.cat(
(unique_pos_h_indices, ph.view((1))))
unique_pos_w_indices = torch.cat(
(unique_pos_w_indices, pw.view((1))))
unique_pos_ind = torch.cat((unique_pos_ind, pi.view((1))))
else:
label_weights[ph, pw] = 0
pos_h_indices = unique_pos_h_indices
pos_w_indices = unique_pos_w_indices
pos_ind = unique_pos_ind
# fullfill `labels` and `label_weights`
labels[pos_h_indices, pos_w_indices] = gt_labels[i]
label_weights[pos_h_indices, pos_w_indices] = self.loss_norm["pos_cls_weight"]
# get the overlap hotspots and set the `label_weights` as 0
ig_mask = points_in_convex_polygon_torch(
grid_real_centers, ignore_boxes_out[i].unsqueeze(0))
ig_mask = (ig_mask & (~pos_mask)).reshape(-1) # Get the overlapped grid
ig_h = h_indices[ig_mask]
ig_w = w_indices[ig_mask]
# weight 1 for hotspot cells inside the gt box, 0 for non-hotspot cells inside it
label_weights[ig_h, ig_w] = 0
centers = grid_real_centers[pos_ind]
shifts = torch.zeros((len(centers), self.box_coder.code_size),
device=cls_scores.device,
dtype=torch.float)
# Get the encoded bbox regression target for each positive grid cell
shifts = self.box_coder._encode(centers, shifts, gt_bboxes[i])
zeros = torch.zeros_like(pos_w_indices)
locs = torch.stack((zeros, pos_h_indices, pos_w_indices), dim=-1)
# get the corresponding bounding boxes
bbox_locs = torch.cat((bbox_locs, locs), dim=0)
bbox_targets = torch.cat((bbox_targets, shifts), dim=0)
# get the number of positive and negative examples
num_pos = bbox_targets.size(0)
num_neg = label_weights.nonzero().size(0) - num_pos
return (labels, label_weights, bbox_targets, bbox_locs, num_pos, num_neg)
def predict(self, example, preds_dicts, test_cfg, **kwargs):
rets = []
double_flip = test_cfg.get('double_flip', False)
for task_id, preds_dict in enumerate(preds_dicts):
batch_size_device = example['num_voxels'].shape[0]
if "metadata" not in example or len(example["metadata"]) == 0:
meta_list = [None] * batch_size_device
else:
meta_list = example["metadata"]
if double_flip:
assert batch_size_device % 4 == 0, f"batch_size_device: {batch_size_device}"
batch_size_device = int(batch_size_device / 4)
meta_list = meta_list[:4 * int(batch_size_device):4]
batch_box_preds_all = preds_dict["box_preds"]
batch_cls_preds_all = preds_dict["cls_preds"]
_, H, W, C = batch_box_preds_all.shape
batch_box_preds_all = batch_box_preds_all.reshape(
int(batch_size_device), 4, H, W, C)
batch_box_preds_sincos_all = batch_box_preds_all[:, :, :, :, 8:10].clone()
_, H, W, C = batch_cls_preds_all.shape
batch_cls_preds_all = batch_cls_preds_all.reshape(
int(batch_size_device), 4, H, W, C)
batch_cls_preds_all[:, 1] = torch.flip(batch_cls_preds_all[:, 1], dims=[1])
batch_cls_preds_all[:, 2] = torch.flip(batch_cls_preds_all[:, 2], dims=[2])
batch_cls_preds_all[:, 3] = torch.flip(batch_cls_preds_all[:, 3], dims=[1, 2])
batch_cls_preds_all = batch_cls_preds_all.sigmoid()
batch_cls_preds = batch_cls_preds_all.mean(dim=1)
batch_box_preds_sincos_all[:, 1] = torch.flip(
batch_box_preds_sincos_all[:, 1], dims=[1])
batch_box_preds_sincos_all[:, 2] = torch.flip(
batch_box_preds_sincos_all[:, 2], dims=[2])
batch_box_preds_sincos_all[:, 3] = torch.flip(
batch_box_preds_sincos_all[:, 3], dims=[1, 2])
num_class_with_bg = self.num_classes[task_id]
if not self.encode_background_as_zeros:
num_class_with_bg = self.num_classes[task_id] + 1
batch_cls_preds = batch_cls_preds.contiguous()
batch_cls_preds = batch_cls_preds.view(
batch_size_device, -1, num_class_with_bg)
batch_reg_preds = torch.zeros(
(int(batch_size_device), 4, H * W, 9), dtype=batch_box_preds_all.dtype, device=batch_box_preds_all.device)
for i in range(4):
batch_box_preds = batch_box_preds_all[:, i, :, :, :]
box_ndim = self.box_n_dim
h, w = batch_box_preds.size()[1:3]
batch_box_preds = batch_box_preds.contiguous()
batch_box_preds = batch_box_preds.view(batch_size_device, -1, box_ndim)
if i == 1: # theta = pi-theta
batch_box_preds[:, :, -2] = -batch_box_preds[:, :, -2]
batch_box_preds_sincos_all[:, i, :, :, 0] = - \
batch_box_preds_sincos_all[:, i, :, :, 0]
elif i == 2: # x=-x, theta = 2pi-theta, vx = -vx
batch_box_preds[:, :, -1] = -batch_box_preds[:, :, -1]
batch_box_preds_sincos_all[:, i, :, :, 1] = - \
batch_box_preds_sincos_all[:, i, :, :, 1]
elif i == 3: # x=-x,y=-y, theta=theta-pi, vx=-vx, vy=-vy
batch_box_preds[:, :, -1] = -batch_box_preds[:, :, -1]
batch_box_preds[:, :, -2] = -batch_box_preds[:, :, -2]
batch_box_preds_sincos_all[:, i, :, :, 0] = - \
batch_box_preds_sincos_all[:, i, :, :, 0]
batch_box_preds_sincos_all[:, i, :, :, 1] = - \
batch_box_preds_sincos_all[:, i, :, :, 1]
#import pdb; pdb.set_trace()
# -pi/2
#batch_box_preds[:, :, -2], batch_box_preds[:, :, -1] = batch_box_preds[:, :, -1], -batch_box_preds[:, :, -2]
# # +pi/2
#batch_box_preds[:, :, -2], batch_box_preds[:, :, -1] = -batch_box_preds[:, :, -1], batch_box_preds[:, :, -2]
batch_reg_preds_temp = self.box_coder._decode(
batch_box_preds[:, :, :self.box_coder.code_size], w, h
)
if i == 1: # y=-y, vy = -vy
batch_reg_preds_temp[:, :, 1] = -batch_reg_preds_temp[:, :, 1]
batch_reg_preds_temp[:, :, 7] = -batch_reg_preds_temp[:, :, 7]
elif i == 2: # x=-x, vx = -vx
batch_reg_preds_temp[:, :, 0] = -batch_reg_preds_temp[:, :, 0]
batch_reg_preds_temp[:, :, 6] = -batch_reg_preds_temp[:, :, 6]
elif i == 3: # x=-x,y=-y, vx=-vx, vy=-vy
batch_reg_preds_temp[:, :, 1] = -batch_reg_preds_temp[:, :, 1]
batch_reg_preds_temp[:, :, 0] = -batch_reg_preds_temp[:, :, 0]
batch_reg_preds_temp[:, :, 7] = -batch_reg_preds_temp[:, :, 7]
batch_reg_preds_temp[:, :, 6] = -batch_reg_preds_temp[:, :, 6]
batch_reg_preds[:, i, :, :] = batch_reg_preds_temp
batch_box_preds_sincos_all = batch_box_preds_sincos_all.mean(dim=1)
batch_box_preds_sincos_all = batch_box_preds_sincos_all.reshape(
batch_size_device, -1, 2)
batch_box_preds_rads = torch.atan2(
batch_box_preds_sincos_all[:, :, 1], batch_box_preds_sincos_all[:, :, 0])
batch_reg_preds = batch_reg_preds.reshape(batch_size_device, 4, H, W, 9)
batch_reg_preds[:, 1] = torch.flip(batch_reg_preds[:, 1], dims=[1])
batch_reg_preds[:, 2] = torch.flip(batch_reg_preds[:, 2], dims=[2])
batch_reg_preds[:, 3] = torch.flip(batch_reg_preds[:, 3], dims=[1, 2])
batch_reg_preds = batch_reg_preds.mean(dim=1)
batch_reg_preds = batch_reg_preds.reshape(batch_size_device, -1, 9)
batch_reg_preds[:, :, -1] = batch_box_preds_rads
else:
batch_box_preds = preds_dict["box_preds"]
batch_cls_preds = preds_dict["cls_preds"].sigmoid()
box_ndim = self.box_n_dim
h, w = batch_box_preds.size()[1:3]
batch_box_preds = batch_box_preds.view(batch_size_device, -1, box_ndim)
num_class_with_bg = self.num_classes[task_id]
if not self.encode_background_as_zeros:
num_class_with_bg = self.num_classes[task_id] + 1
batch_cls_preds = batch_cls_preds.contiguous()
batch_cls_preds = batch_cls_preds.view(batch_size_device, -1, num_class_with_bg)
batch_reg_preds = self.box_coder._decode(
batch_box_preds[:, :, :self.box_coder.code_size], w, h
)
batch_dir_preds = [None] * batch_size_device
rets.append(
self.get_task_detections(
task_id,
num_class_with_bg,
test_cfg,
batch_cls_preds,
batch_reg_preds,
batch_dir_preds,
meta_list,
)
)
num_samples = len(rets[0])
ret_list = []
for i in range(num_samples):
ret = {}
for k in rets[0][i].keys():
if k in ["box3d_lidar", "scores"]:
ret[k] = torch.cat([ret[i][k] for ret in rets])
elif k in ["label_preds"]:
flag = 0
for j, num_class in enumerate(self.num_classes):
rets[j][i][k] += flag
flag += num_class
ret[k] = torch.cat([ret[i][k] for ret in rets])
elif k == "metadata":
# metadata
ret[k] = rets[0][i][k]
ret_list.append(ret)
return ret_list
def get_task_detections(
self,
task_id,
num_class_with_bg,
test_cfg,
batch_cls_preds,
batch_reg_preds,
batch_dir_preds=None,
meta_list=None,
):
predictions_dicts = []
post_center_range = test_cfg.post_center_limit_range
if len(post_center_range) > 0:
post_center_range = torch.tensor(
post_center_range,
dtype=batch_reg_preds.dtype,
device=batch_reg_preds.device,
)
for box_preds, cls_preds, dir_preds, meta in zip(
batch_reg_preds,
batch_cls_preds,
batch_dir_preds,
meta_list,
):
box_preds = box_preds.float()
cls_preds = cls_preds.float()
if self.encode_background_as_zeros:
# this doesn't support softmax
assert self.use_sigmoid_score is True
total_scores = cls_preds
else:
# encode background as first element in one-hot vector
if self.use_sigmoid_score:
total_scores = cls_preds[..., 1:]
else:
total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]
feature_map_size_prod = (
batch_reg_preds.shape[1] // self.num_anchor_per_locs[task_id]
)
# get the highest score per prediction, then apply nms
# to remove overlapping boxes.
if num_class_with_bg == 1:
top_scores = total_scores.squeeze(-1)
top_labels = torch.zeros(
total_scores.shape[0],
device=total_scores.device,
dtype=torch.long,
)
else:
top_scores, top_labels = torch.max(total_scores, dim=-1)
if test_cfg.score_threshold > 0.0:
thresh = torch.tensor(
[test_cfg.score_threshold], device=total_scores.device
).type_as(total_scores)
top_scores_keep = top_scores >= thresh
top_scores = top_scores.masked_select(top_scores_keep)
if top_scores.shape[0] != 0:
if test_cfg.score_threshold > 0.0:
box_preds = box_preds[top_scores_keep]
assert (box_preds[:, 3:6] > 0).cpu().numpy().any()
top_labels = top_labels[top_scores_keep]
boxes_for_nms = box_torch_ops.boxes3d_to_bevboxes_lidar_torch(box_preds)
selected = box_torch_ops.rotate_nms_pcdet(boxes_for_nms, top_scores,
thresh=test_cfg.nms.nms_iou_threshold,
pre_maxsize=test_cfg.nms.nms_pre_max_size,
post_max_size=test_cfg.nms.nms_post_max_size)
else:
selected = []
# if selected is not None:
selected_boxes = box_preds[selected]
selected_labels = top_labels[selected]
selected_scores = top_scores[selected]
# finally generate predictions.
if selected_boxes.shape[0] != 0:
box_preds = selected_boxes
scores = selected_scores
label_preds = selected_labels
final_box_preds = box_preds
final_scores = scores
final_labels = label_preds
if post_center_range is not None:
mask = (final_box_preds[:, :3] >= post_center_range[:3]).all(1)
mask &= (final_box_preds[:, :3] <= post_center_range[3:]).all(1)
predictions_dict = {
"box3d_lidar": final_box_preds[mask],
"scores": final_scores[mask],
"label_preds": label_preds[mask],
"metadata": meta,
}
else:
predictions_dict = {
"box3d_lidar": final_box_preds,
"scores": final_scores,
"label_preds": final_labels,
"metadata": meta,
}
else:
dtype = batch_reg_preds.dtype
device = batch_reg_preds.device
predictions_dict = {
"box3d_lidar": torch.zeros([0, box_preds.shape[1]], dtype=dtype, device=device),
"scores": torch.zeros([0], dtype=dtype, device=device),
"label_preds": torch.zeros(
[0], dtype=top_labels.dtype, device=device
),
"metadata": meta,
}
predictions_dicts.append(predictions_dict)
return predictions_dicts
| 46.541618 | 133 | 0.540227 | 4,925 | 39,700 | 4.033503 | 0.096041 | 0.033828 | 0.038611 | 0.019935 | 0.420992 | 0.349711 | 0.26821 | 0.225925 | 0.169444 | 0.152882 | 0 | 0.015431 | 0.358463 | 39,700 | 852 | 134 | 46.596244 | 0.764537 | 0.093325 | 0 | 0.204511 | 0 | 0 | 0.036144 | 0.000615 | 0 | 0 | 0 | 0 | 0.009023 | 1 | 0.018045 | false | 0 | 0.02406 | 0 | 0.058647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e501d8d323b13656dcc8aa99310855c82f2554fb | 4,267 | py | Python | run_full_dataset.py | TimoK93/ApLift | 732070175ab6bf76db5b0c793cdb4a1fb5d235d7 | [
"MIT"
] | 4 | 2021-09-23T17:44:01.000Z | 2022-01-10T07:14:25.000Z | run_full_dataset.py | TimoK93/ApLift | 732070175ab6bf76db5b0c793cdb4a1fb5d235d7 | [
"MIT"
] | 1 | 2021-10-18T07:41:31.000Z | 2021-10-18T07:41:31.000Z | run_full_dataset.py | TimoK93/ApLift | 732070175ab6bf76db5b0c793cdb4a1fb5d235d7 | [
"MIT"
] | null | null | null | """
A script to run the main script with all sequences of a dataset.
To use the script a config.yaml needs to be specified.
Example usage:
python3 main.py config/example_config.yaml
if "pretrained_model_path" is passed as an argument in the config, training is skipped and pretrained models are used
for the inference.
"""
import os
import shutil
from copy import copy
os.environ["CUDA_VISIBLE_DEVICES"] = "" # GPUs are not necessary!
from main import run_pipeline
from src.utilities.config_reader import main_function
def copyanything(src, dst):
for root, dirs, files in os.walk(src):
for name in files:
dir = root.replace(src, dst)
dst_file = os.path.join(dir, name)
if os.path.exists(dst_file):
print("Model", dst_file, "is already existing!")
os.makedirs(dir, exist_ok=True)
shutil.copy(os.path.join(root, name), os.path.join(dir, name))
@main_function
def main(working_dir, dataset: str, pretrained_models_path=None, **kwargs):
""" Runs the main pipeline on all sequences of a dataset """
''' Create directory an copy pretrained models '''
os.makedirs(working_dir, exist_ok=True)
if pretrained_models_path is not None:
copyanything(os.path.join(pretrained_models_path), working_dir)
''' Creates a list of jobs to be executed'''
jobs = list()
if dataset == "MOT17":
detectors = ["FRCNN", "DPM", "SDP"]
train_sequences = [2, 4, 5, 9, 10, 11, 13]
test_sequences = [1, 3, 6, 7, 8, 12, 14]
for d in detectors:
for t in train_sequences:
train = ["MOT17-%s-%s" % (str(_).rjust(2, "0"), d) for _ in train_sequences if _ != t]
val = ["MOT17-%s-%s" % (str(t).rjust(2, "0"), d)]
jobs.append(dict(train=train, val=val, detector=d))
for t in test_sequences:
train = ["MOT17-%s-%s" % (str(_).rjust(2, "0"), d) for _ in train_sequences]
val = ["MOT17-%s-%s" % (str(t).rjust(2, "0"), d)]
jobs.append(dict(train=train, val=val, detector=d))
elif dataset == "MOT20":
test_sequences = [4, 6, 7, 8]
train_sequences = [1, 2, 3, 5]
for t in train_sequences:
train = ["MOT20-%s" % str(_).rjust(2, "0") for _ in train_sequences if _ != t]
val = ["MOT20-%s" % str(t).rjust(2, "0")]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
for t in test_sequences:
train = ["MOT20-%s" % str(_).rjust(2, "0") for _ in train_sequences]
val = ["MOT20-%s" % str(t).rjust(2, "0")]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
elif dataset == "MOT15":
test_sequences = [
'Venice-1', 'TUD-Crossing', 'PETS09-S2L2', 'KITTI-19', 'KITTI-16', 'ETH-Jelmoli', 'ETH-Linthescher',
'ETH-Crossing', 'AVG-TownCentre', 'ADL-Rundle-3', 'ADL-Rundle-1'
]
train_sequences = [
'Venice-2', 'KITTI-17', 'KITTI-13', 'ETH-Sunnyday', 'ETH-Pedcross2', 'ETH-Bahnhof', 'ADL-Rundle-8',
'TUD-Stadtmitte', 'TUD-Campus', 'ADL-Rundle-6', 'PETS09-S2L1'
]
for t in train_sequences:
train = [_ for _ in train_sequences if _ != t]
val = [t]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
for t in test_sequences:
train = [_ for _ in train_sequences if _ != t]
val = [t]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
''' Runs the jobs sequentially '''
features = copy(kwargs["data_config"]["edge_features"])
for job in jobs:
print("Run Job:", job)
if os.path.exists(os.path.join(working_dir, job["val"][0], job["val"][0] + ".txt")):
print("... Result file already existing!")
continue
kwargs["data_config"]["edge_features"] = copy(features)
kwargs["data_config"]["dataset"]["detector"] = job["detector"]
kwargs["training_config"]["sequences_for_training"] = job["train"]
kwargs["training_config"]["sequences_for_inference"] = job["val"]
run_pipeline(working_dir=working_dir, **kwargs)
if __name__ == "__main__":
main()
| 40.638095 | 117 | 0.588704 | 578 | 4,267 | 4.207612 | 0.261246 | 0.069079 | 0.059211 | 0.046875 | 0.362253 | 0.280839 | 0.263158 | 0.260691 | 0.260691 | 0.260691 | 0 | 0.028779 | 0.258964 | 4,267 | 104 | 118 | 41.028846 | 0.740354 | 0.093743 | 0 | 0.266667 | 0 | 0 | 0.172127 | 0.012084 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.066667 | 0 | 0.093333 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5033a61e857baa0aed9b97b41edbcf668557962 | 298 | py | Python | _Training_/RegEx - HackerRank/2. Character Class/Excluding Specific Characters.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | _Training_/RegEx - HackerRank/2. Character Class/Excluding Specific Characters.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | _Training_/RegEx - HackerRank/2. Character Class/Excluding Specific Characters.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/excluding-specific-characters/problem
import re
# Inputs
standard_input = """think?"""
Regex_Pattern = (
r"^[^\d][^aeiou][^bcDF][^\r\n\t\f\s][^AEIOU][^.,]$"
) # Do not delete 'r'.
print(str(bool(re.search(Regex_Pattern, input()))).lower())
# true
| 18.625 | 77 | 0.64094 | 41 | 298 | 4.585366 | 0.829268 | 0.12766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114094 | 298 | 15 | 78 | 19.866667 | 0.712121 | 0.355705 | 0 | 0 | 0 | 0 | 0.28877 | 0.256684 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e503d96de70079ddac429727f7983b8a0fcdef59 | 354 | py | Python | toroid/toroid/pairs.py | LeoTindall/corewar32 | c29891ca67c01dd65d01d120364a010eb12eb255 | [
"Apache-2.0"
] | null | null | null | toroid/toroid/pairs.py | LeoTindall/corewar32 | c29891ca67c01dd65d01d120364a010eb12eb255 | [
"Apache-2.0"
] | 1 | 2016-08-06T23:20:56.000Z | 2016-08-06T23:20:56.000Z | toroid/toroid/pairs.py | SilverWingedSeraph/corewar32 | c29891ca67c01dd65d01d120364a010eb12eb255 | [
"Apache-2.0"
] | null | null | null | def make_pairings(warriors):
if len(warriors) == 0:
return False, False
pairings = []
for (warrior1, warrior2) in zip(warriors[0::2], warriors[1::2]):
pairings.append((warrior1, warrior2))
if len(warriors) % 2 == 0:
odd_one_out = False
else:
odd_one_out = warriors[-1]
return pairings, odd_one_out
| 29.5 | 68 | 0.610169 | 47 | 354 | 4.446809 | 0.446809 | 0.086124 | 0.129187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045802 | 0.259887 | 354 | 11 | 69 | 32.181818 | 0.751908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e504a4528ab13dc7037b0bc87eb63a2bf6c3d4cb | 2,550 | py | Python | tests/test_deny_mime_type_validator.py | fastmonkeys/pontus | 6542190aae896cd79c55f7f43e98a6bf3cbc613b | [
"MIT"
] | 4 | 2017-04-24T10:17:28.000Z | 2020-05-28T06:25:03.000Z | tests/test_deny_mime_type_validator.py | fastmonkeys/pontus | 6542190aae896cd79c55f7f43e98a6bf3cbc613b | [
"MIT"
] | 9 | 2015-02-23T14:27:37.000Z | 2021-02-24T13:23:41.000Z | tests/test_deny_mime_type_validator.py | fastmonkeys/pontus | 6542190aae896cd79c55f7f43e98a6bf3cbc613b | [
"MIT"
] | 1 | 2017-08-14T16:40:44.000Z | 2017-08-14T16:40:44.000Z | # -*- coding: utf-8 -*-
import os
import pytest
import boto3
from pontus.exceptions import ValidationError
from pontus.validators import DenyMimeType
class TestDenyMimeTypeValidator(object):
@pytest.fixture
def jpeg_key(self, bucket):
with open(os.path.join(
os.path.dirname(__file__),
'data',
'example.jpg'
), 'rb') as image:
key_name = 'example.jpg'
obj = boto3.resource('s3').Object(bucket.name, key_name)
obj.put(
Body=image
)
return obj
def test_raises_validation_error_if_invalid_mime_type(
self,
jpeg_key
):
validator = DenyMimeType(mime_type='image/jpeg')
with pytest.raises(ValidationError) as e:
validator(jpeg_key)
assert str(e.value) == (
"Invalid file: File MIME type image/jpeg is in denied list "
"image/jpeg."
)
def test_does_not_raise_validation_error_if_valid_mime_type(
self,
jpeg_key
):
validator = DenyMimeType(mime_type='image/png')
validator(jpeg_key)
def test_repr(self):
assert repr(DenyMimeType(mime_type='image/png')) == (
u"<DenyMimeType mime_types='image/png'>"
)
def test_raises_validation_error_if_mime_type_not_in_valid_mime_types(
self,
jpeg_key
):
validator = DenyMimeType(mime_types=['image/jpeg', 'application/csv'])
with pytest.raises(ValidationError) as e:
validator(jpeg_key)
assert str(e.value) == (
"Invalid file: File MIME type image/jpeg is in denied list "
"['image/jpeg', 'application/csv']."
)
def test_doesnt_raise_validation_error_if_mime_type_in_valid_mime_types(
self,
jpeg_key
):
validator = DenyMimeType(mime_types=['image/png', 'application/csv'])
validator(jpeg_key)
def test_raises_validation_error_if_mime_type_doesnt_match_regex(
self,
jpeg_key
):
validator = DenyMimeType(regex=r'image\/.*')
with pytest.raises(ValidationError) as e:
validator(jpeg_key)
assert str(e.value) == (
"Invalid file: File MIME type image/jpeg matches denied regex "
"r'image\/.*'."
)
def test_doesnt_raise_validation_error_if_mime_type_matches_regex(
self,
jpeg_key
):
validator = DenyMimeType(regex=r'application\/.*')
validator(jpeg_key)
| 29.310345 | 78 | 0.608627 | 292 | 2,550 | 5.034247 | 0.25 | 0.061905 | 0.069388 | 0.081633 | 0.644218 | 0.57483 | 0.554422 | 0.554422 | 0.444218 | 0.385714 | 0 | 0.002221 | 0.293725 | 2,550 | 86 | 79 | 29.651163 | 0.813992 | 0.008235 | 0 | 0.432432 | 0 | 0 | 0.159478 | 0.009102 | 0 | 0 | 0 | 0 | 0.054054 | 1 | 0.108108 | false | 0 | 0.067568 | 0 | 0.202703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50744ae0a91afd75f5121562b3f88c9fadcfea8 | 1,966 | py | Python | MyModel/signLanguageTranslator.py | rahulmishra11/Sign-Language-Translator | 83b6907f722324d01142ab25e9e9cf806c51b0d3 | [
"Apache-2.0"
] | null | null | null | MyModel/signLanguageTranslator.py | rahulmishra11/Sign-Language-Translator | 83b6907f722324d01142ab25e9e9cf806c51b0d3 | [
"Apache-2.0"
] | null | null | null | MyModel/signLanguageTranslator.py | rahulmishra11/Sign-Language-Translator | 83b6907f722324d01142ab25e9e9cf806c51b0d3 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import pandas as pd
import numpy as np
import tensorflow.keras as keras
train = pd.read_csv("./sign_mnist_train/sign_mnist_train.csv")
test = pd.read_csv("./sign_mnist_test/sign_mnist_test.csv")
# put labels into y_train variable
Y_train = train["label"]
# Drop 'label' column
X_train = train.drop(labels = ["label"],axis = 1)
# put labels into y_test variable
Y_test = test["label"]
# Drop 'label' column
X_test = test.drop(labels = ["label"],axis = 1)
# Normalize the data
X_train = X_train / 255.0
X_test = X_test / 255.0
print("x_train shape: ",X_train.shape)
print("x_test shape: ",X_test.shape)
#Reshape
X_train = X_train.values.reshape(-1,28,28,1)
X_test = X_test.values.reshape(-1,28,28,1)
print("x_train shape: ",X_train.shape)
print("x_test shape: ",X_test.shape)
model = keras.models.Sequential([
keras.layers.Conv2D(filters=64, kernel_size=3, input_shape=[28, 28, 1]),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(units=128, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=64, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=25, activation='softmax'),
])
model.summary()
model.compile(
loss="sparse_categorical_crossentropy",
optimizer = 'adam',
metrics=['accuracy']
)
history = model.fit(X_train,Y_train,
epochs=10,)
pd.DataFrame(history.history).plot()
model.save("sign_mnist_train.h5")
print(model.evaluate(X_test,Y_test))
| 29.787879 | 87 | 0.715158 | 301 | 1,966 | 4.508306 | 0.269103 | 0.113486 | 0.062638 | 0.08843 | 0.537214 | 0.450258 | 0.422255 | 0.422255 | 0.422255 | 0.422255 | 0 | 0.041351 | 0.126653 | 1,966 | 65 | 88 | 30.246154 | 0.748981 | 0.066124 | 0 | 0.288889 | 0 | 0 | 0.143794 | 0.058502 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088889 | 0 | 0.088889 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e507533d51e8ae8cde7f283f5c299ebd345bec98 | 5,817 | py | Python | gammapy/detect/tests/test_kernel.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T12:21:14.000Z | 2019-02-10T19:58:07.000Z | gammapy/detect/tests/test_kernel.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | gammapy/detect/tests/test_kernel.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.io import fits
from astropy.units import Quantity
from astropy.coordinates.angles import Angle
from ...utils.testing import requires_dependency, requires_data
from ...image import SkyImage
from ...stats import significance
from ...datasets import FermiGalacticCenter
from ..kernel import KernelBackgroundEstimatorData, KernelBackgroundEstimator
@requires_dependency('scipy')
def test_KernelBackgroundEstimatorData():
"""Tests compute correlated maps in KernelBackgroundEstimatorData.
This is the only method in KernelBackgroundEstimatorData that actually calculates anything.
"""
# Set up test counts and background
counts_hdu = SkyImage.empty(nxpix=10, nypix=10, binsz=1, fill=42).to_image_hdu()
counts_hdu.data[4][4] = 1000
counts = counts_hdu.data
background_data = 42 * np.ones_like(counts, dtype=float)
# Single unit pixel kernel so should actually be no change.
background_kernel = np.ones((1, 1))
images = KernelBackgroundEstimatorData(counts, background_data)
images.compute_correlated_maps(background_kernel)
# Test significance image against Li & Ma significance value
expected = significance(counts, background_data)
actual = images.significance
assert_allclose(actual, expected)
@requires_dependency('scipy')
@requires_data('gammapy-extra')
class TestKernelBackgroundEstimator(object):
def setup_class(self):
"""Prepares appropriate input and defines inputs for test cases.
"""
from scipy.ndimage import convolve
# Load/create example model images
counts_hdu = SkyImage.empty(nxpix=10, nypix=10, binsz=1, fill=42).to_image_hdu()
counts_hdu.data[4][4] = 1000
counts = counts_hdu.data
# Initial counts required by one of the tests.
self.counts = counts
psf = FermiGalacticCenter.psf()
psf = psf.table_psf_in_energy_band(Quantity([10, 500], 'GeV'))
kernel_array = psf.kernel(pixel_size=Angle(1, 'deg'),
offset_max=Angle(3, 'deg'), normalize=True)
counts_blob = convolve(counts, kernel_array, mode='constant')
self.counts_blob = counts_blob
# Start with flat background estimate
# Background must be provided as an ImageHDU
images = KernelBackgroundEstimatorData(counts=counts, header=counts_hdu.header)
images_blob = KernelBackgroundEstimatorData(counts=counts_blob, header=counts_hdu.header)
source_kernel = np.ones((1, 3))
background_kernel = np.ones((5, 3))
significance_threshold = 4
mask_dilation_radius = 1
# Loads prepared inputs into estimator
self.kbe = KernelBackgroundEstimator(
images,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
self.kbe2 = KernelBackgroundEstimator(
images,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
self.kbe_blob = KernelBackgroundEstimator(
images_blob,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
def test_run_iteration_point(self):
"""Asserts that mask and background are as expected according to input."""
# Call the run_iteration code as this is what is explicitly being tested
self.kbe.run_iteration()
# Should be run twice to update the mask
self.kbe.run_iteration()
mask = self.kbe.mask_image_hdu.data
background = self.kbe.background_image_hdu.data
# Check mask matches expectations
expected_mask = np.ones_like(self.counts)
expected_mask[4][3] = 0
expected_mask[4][4] = 0
expected_mask[4][5] = 0
assert_allclose(mask.astype(int), expected_mask)
# Check background, should be 42 uniformly
assert_allclose(background.astype(float), 42 * np.ones((10, 10)))
def test_run_iteration_blob(self):
"""Asserts that mask and background are as expected according to input."""
# Call the run_iteration code as this is what is explicitly being tested
self.kbe_blob.run_iteration()
# Should be run twice to update the mask
self.kbe_blob.run_iteration()
background = self.kbe_blob.background_image_hdu.data
# Check background, should be 42 uniformly within 10%
assert_allclose(background, 42 * np.ones((10, 10)), rtol=0.15)
def test_run(self):
"""Tests run script."""
mask, background = self.kbe2.run()
assert_allclose(mask.sum(), 97)
assert_allclose(background, 42 * np.ones((10, 10)))
def test_save_files(self, tmpdir):
"""Tests that files are saves, and checks values within them."""
# Create temporary file to write output into
self.kbe.run_iteration(1)
self.kbe.save_files(base_dir=str(tmpdir), index=0)
filename = tmpdir / '00_mask.fits'
mask = fits.open(str(filename))[1].data
filename = tmpdir / '00_significance.fits'
significance = fits.open(str(filename))[1].data
filename = tmpdir / '00_background.fits'
background = fits.open(str(filename))[1].data
# Checks values in files against known results for one iteration.
assert_allclose(mask.sum(), 97)
assert_allclose(significance.sum(), 157.316195729298)
assert_allclose(background.sum(), 4200)
| 36.130435 | 97 | 0.677153 | 698 | 5,817 | 5.47851 | 0.302292 | 0.020136 | 0.013598 | 0.021967 | 0.317469 | 0.299425 | 0.275366 | 0.252877 | 0.234048 | 0.195607 | 0 | 0.026203 | 0.238955 | 5,817 | 160 | 98 | 36.35625 | 0.837588 | 0.223655 | 0 | 0.28866 | 0 | 0 | 0.020184 | 0 | 0 | 0 | 0 | 0 | 0.103093 | 1 | 0.061856 | false | 0 | 0.123711 | 0 | 0.195876 | 0.010309 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e508b0cb043508fe01e3e1d06e6baa67a2130ba3 | 4,772 | py | Python | morsesmale.py | scotthellman/discrete-topology | 6182fe607868d88c462c185be8629a35ad2d7c37 | [
"MIT"
] | null | null | null | morsesmale.py | scotthellman/discrete-topology | 6182fe607868d88c462c185be8629a35ad2d7c37 | [
"MIT"
] | null | null | null | morsesmale.py | scotthellman/discrete-topology | 6182fe607868d88c462c185be8629a35ad2d7c37 | [
"MIT"
] | null | null | null | import networkx as nx
import numpy as np
import scipy
import graph
import itertools
from collections import defaultdict
def calculate_persistence(crystal, other, minimum_value, G, function_vals):
minimums = []
min_vertices = []
other = set(other)
for vertex in crystal:
neighbors = set(G.neighbors(vertex)) & other
if len(neighbors) == 0:
continue
value = function_vals[vertex]
worst_case = minimum_value - value
minimum_dist = None
minimum_node = None
for n in neighbors:
diff = minimum_value - function_vals[n]
            if minimum_dist is None or (diff > worst_case and diff < minimum_dist):
minimum_dist = diff
minimum_node = n
if minimum_dist < worst_case:
minimum_dist = worst_case
minimum_node = vertex
minimums.append(minimum_dist)
min_vertices.append(minimum_node)
try:
chosen_index = np.argmin(minimums)
return minimums[chosen_index], min_vertices[chosen_index]
except ValueError:
return float("inf"), None
def find_filtrations(G, function_vals, msc):
    # TODO: throw an exception when two function values are identical
    # Persistence of a crystal pair (see calculate_persistence above):
    #   min_k P(X) = min_{e in (p_a, p_k)} max_{x_i in e} (a_min - x_ik)
crystals = defaultdict(list)
for i,label in enumerate(msc):
crystals[label].append(i)
filtration = [crystals]
while len(crystals) > 1:
        # find the crystal with the smallest persistence
best_pair = None
best_persistence = None
for crystal in crystals:
minimum_val = function_vals[crystal[0]]
for other in crystals:
if other != crystal:
persistence = calculate_persistence(crystals[crystal], crystals[other],
minimum_val, G, function_vals)[0]
if best_persistence is None or persistence < best_persistence:
best_pair = (crystal, other)
best_persistence = persistence
new_crystals = defaultdict(list)
for crystal,values in crystals.items():
if crystal != best_pair[0]:
new_crystals[crystal].extend(values)
else:
new_crystals[best_pair[1]].extend(values)
filtration.append(new_crystals)
crystals = new_crystals
return filtration
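# Illustration (made-up shapes): `filtration` is a list of dicts, coarsening
# from the full Morse-Smale segmentation down to a single crystal, e.g.
# [{(0, 4): [0, 1], (2, 4): [2, 3]}, {(2, 4): [0, 1, 2, 3]}]
# where keys are (min, max) label pairs and values are member vertex ids.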
def generate_morse_smale(G, pdist, function_vals):
maxima, minima, ascent, descent = find_extrema(G, pdist, function_vals)
max_labels = assign_extrema(G, maxima, ascent)
min_labels = assign_extrema(G, minima, descent)
return list(zip(min_labels, max_labels))
def assign_extrema(G, extrema, path):
assignments = [0] * len(G.nodes())
for node in G:
traverser = node
while traverser not in extrema:
traverser = path[traverser]
assignments[node] = traverser
return assignments
def find_extrema(G, pdist, function_vals):
ascent = {}
descent = {}
maxima = []
minima = []
for i,value in enumerate(function_vals):
        neighbors = np.array(list(G.neighbors(i)))  # list() needed: neighbors() returns an iterator in networkx >= 2
distances = np.array([d for n,d in enumerate(pdist[i]) if n in neighbors])
differences = np.array([function_vals[n] - value for n in neighbors])
normalized = differences / distances
ordered = np.argsort(normalized)
if np.all(differences < 0):
maxima.append(i)
ascent[i] = i
descent[i] = neighbors[ordered[0]]
elif np.all(differences > 0):
minima.append(i)
ascent[i] = neighbors[ordered[-1]]
descent[i] = i
else:
ascent[i] = neighbors[ordered[-1]]
descent[i] = neighbors[ordered[0]]
return maxima, minima, ascent, descent
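# Worked example (illustrative): with function_vals = [0, 2, 1] on the path
# graph 0-1-2 and unit distances, vertex 1 sees differences [-2, -1], so it
# is a maximum; vertices 0 and 2 each have a single positive difference, so
# they are minima whose ascent paths point at vertex 1.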
def get_filtrations(pdist, function_vals, k=2):
if k is None:
G = graph.generate_gabriel_graph(pdist)
else:
G = graph.generate_knn_graph(pdist, k)
msc = generate_morse_smale(G, pdist, function_vals)
filtrations = find_filtrations(G, function_vals, msc)
return filtrations
if __name__ == "__main__":
import scipy.spatial
values = np.array(range(20)).reshape(20,1)
pairs = scipy.spatial.distance.pdist(values)
pdist = scipy.spatial.distance.squareform(pairs)
G = graph.generate_knn_graph(pdist, 2)
func_vals = values % 5
maxs, mins, ascent, descent = find_extrema(G, pdist, func_vals)
msc = generate_morse_smale(G, pdist, func_vals)
print(msc)
filtration = find_filtrations(G, func_vals, msc)
print("-"*20)
for f in filtration:
print(f)
get_filtrations(pdist, func_vals)
| 34.832117 | 91 | 0.615884 | 567 | 4,772 | 5.017637 | 0.241623 | 0.059051 | 0.029877 | 0.025308 | 0.165905 | 0.134622 | 0.047803 | 0 | 0 | 0 | 0 | 0.007136 | 0.295264 | 4,772 | 137 | 92 | 34.832117 | 0.838537 | 0.028709 | 0 | 0.059322 | 0 | 0 | 0.00259 | 0 | 0 | 0 | 0 | 0.007299 | 0 | 1 | 0.050847 | false | 0 | 0.059322 | 0 | 0.169492 | 0.025424 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50969a938fad964949586d116b12c2990a0ae87 | 2,054 | py | Python | pyxll_jupyter/widget.py | TanKingsley/pyxll-jupyter | 4f7b3eb361079b74683d89340dfff9576fb2ff41 | [
"MIT"
] | 1 | 2020-12-28T10:40:38.000Z | 2020-12-28T10:40:38.000Z | pyxll_jupyter/widget.py | TanKingsley/pyxll-jupyter | 4f7b3eb361079b74683d89340dfff9576fb2ff41 | [
"MIT"
] | null | null | null | pyxll_jupyter/widget.py | TanKingsley/pyxll-jupyter | 4f7b3eb361079b74683d89340dfff9576fb2ff41 | [
"MIT"
] | null | null | null | """
JupyterQtWidget is the widget that gets embedded in Excel and hosts
a tabbed browser widget containing the Jupyter notebook.
"""
from .kernel import start_kernel, launch_jupyter
from .browser import Browser
from .qtimports import QWidget, QVBoxLayout
import subprocess
import ctypes
class JupyterQtWidget(QWidget):
def __init__(self, parent=None, scale=None, initial_path=None):
super().__init__(parent)
        # proc gets set to the subprocess once Jupyter is started
self.proc = None
# Get the scale from the window DPI
if scale is None:
LOGPIXELSX = 88
hwnd = self.winId()
if isinstance(hwnd, str):
hwnd = int(hwnd, 16 if hwnd.startswith("0x") else 10)
hwnd = ctypes.c_size_t(hwnd)
screen = ctypes.windll.user32.GetDC(hwnd)
try:
scale = ctypes.windll.gdi32.GetDeviceCaps(screen, LOGPIXELSX) / 96.0
finally:
ctypes.windll.user32.ReleaseDC(hwnd, screen)
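            # e.g. a display reporting 120 DPI yields scale = 120 / 96.0 = 1.25,
            # so the embedded browser is zoomed to match the host window's DPI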
# Create the browser widget
self.browser = Browser(self, scale=scale)
self.browser.closed.connect(self.close)
# Add the browser to the widgets layout
layout = QVBoxLayout()
layout.addWidget(self.browser)
self.setLayout(layout)
# Start the kernel and open Jupyter in a new tab
app = start_kernel()
self.proc, url = launch_jupyter(app.connection_file, cwd=initial_path)
self.browser.create_tab(url)
def closeEvent(self, event):
        # Kill the Jupyter subprocess using taskkill (just killing the process with Popen.kill
        # doesn't terminate any child processes)
if self.proc is not None:
while self.proc.poll() is None:
si = subprocess.STARTUPINFO(wShowWindow=subprocess.SW_HIDE)
subprocess.check_call(['taskkill', '/F', '/T', '/PID', str(self.proc.pid)],
startupinfo=si,
shell=True)
| 36.678571 | 95 | 0.619279 | 246 | 2,054 | 5.089431 | 0.47561 | 0.031949 | 0.028754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011096 | 0.297955 | 2,054 | 55 | 96 | 37.345455 | 0.857143 | 0.221519 | 0 | 0 | 0 | 0 | 0.011356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.142857 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50c65e44676b2b7cbe06fd4c5deb5f102a8415d | 621 | py | Python | py/A Rule Of Divisibility By 13.py | aadithpm/code-a-day | 18d7c1847e14d32d33d09d29f8847b6252c6e9e6 | [
"Unlicense"
] | 3 | 2018-03-16T14:52:40.000Z | 2020-12-04T10:12:07.000Z | py/A Rule Of Divisibility By 13.py | aadithpm/code-a-day | 18d7c1847e14d32d33d09d29f8847b6252c6e9e6 | [
"Unlicense"
] | null | null | null | py/A Rule Of Divisibility By 13.py | aadithpm/code-a-day | 18d7c1847e14d32d33d09d29f8847b6252c6e9e6 | [
"Unlicense"
] | 5 | 2017-06-30T05:35:00.000Z | 2019-07-13T08:05:30.000Z | """
https://www.codewars.com/kata/564057bc348c7200bd0000ff/train/python
"""
from itertools import cycle


def thirt(n):
    # remainders of 10**k modulo 13 repeat with period 6
    seq = [1, 10, 9, 12, 3, 4]
    while True:
        # multiply each digit (least significant first) by the cycling
        # remainder sequence and sum; repeat until the value is stationary
        total = sum(int(d) * m for d, m in zip(reversed(str(n)), cycle(seq)))
        if total == n:
            return total
        n = total
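# Usage example (not in the original): the kata's sample value
if __name__ == "__main__":
    print(thirt(1234567))  # 1234567 -> 178 -> 87 -> 87, so prints 87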
| 34.5 | 67 | 0.602254 | 106 | 621 | 3.528302 | 0.339623 | 0.074866 | 0.053476 | 0.074866 | 0.42246 | 0.42246 | 0.42246 | 0.42246 | 0.315508 | 0.139037 | 0 | 0.082278 | 0.236715 | 621 | 17 | 68 | 36.529412 | 0.706751 | 0.107891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50e8e3032d7d7837365ea7b6780cf4d9b0c82b7 | 5,722 | py | Python | entityfactssheetsharvester/entityfactssheetsharvester.py | zazi/entityfactssheetsharvester | 150e702a763d73356adba112c0e1c1141df4884c | [
"Apache-2.0"
] | 1 | 2019-08-13T07:44:32.000Z | 2019-08-13T07:44:32.000Z | entityfactssheetsharvester/entityfactssheetsharvester.py | zazi/entityfactssheetsharvester | 150e702a763d73356adba112c0e1c1141df4884c | [
"Apache-2.0"
] | null | null | null | entityfactssheetsharvester/entityfactssheetsharvester.py | zazi/entityfactssheetsharvester | 150e702a763d73356adba112c0e1c1141df4884c | [
"Apache-2.0"
] | 1 | 2019-08-13T07:44:32.000Z | 2019-08-13T07:44:32.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
import socket
import sys
import requests
from threading import current_thread
from rx import create, of
from rx import operators as op
from rx.scheduler import ThreadPoolScheduler
USER_AGENT_HTTP_HEADER_KEY = 'user-agent'
USER_AGENT_PATTERN = "entityfactssheetsharvester-bot-from-{0}/0.0.1 (https://github.com/slub/entityfactssheetsharvester; zazi@smiy.org) entityfactssheetsharvester/0.0.1"
HOSTNAME = socket.getfqdn()
USER_AGENT = USER_AGENT_PATTERN.format(HOSTNAME)
HTTP_HEADERS = {USER_AGENT_HTTP_HEADER_KEY: USER_AGENT}
ENTITYFACTS_BASE_URI = "http://hub.culturegraph.org/entityfacts/"
UTF8_CHARSET_ID = 'utf-8'
LINEBREAK = "\n"
THREAD_POOL_SCHEDULER = ThreadPoolScheduler(10)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_gnd_identifier(line):
    # strip the trailing line break; os.linesep may be more than one
    # character (e.g. '\r\n' on Windows), so rstrip is safer than comparing
    # the last character
    gnd_identifier = line.rstrip('\r\n')
eprint("GND identifier '{0}' (thread = '{1}')".format(gnd_identifier, current_thread().name))
return gnd_identifier
def entityfacts_request(request_uri, gnd_identifier):
eprint("try to retrieve EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
response = requests.get(request_uri, headers=HTTP_HEADERS, timeout=60)
if response.status_code != 200:
eprint("couldn't fetch EntityFacts sheet for GND identifier '{0}', got a '{1}' (thread = '{2}')".format(
gnd_identifier, response.status_code, current_thread().name))
return None
response_body = response.content.decode(UTF8_CHARSET_ID)
eprint("retrieved EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
return response_body
def retrieve_entityfacts_sheet_obs(gnd_identifier):
return of(gnd_identifier).pipe(op.map(lambda gndid: retrieve_entityfacts_sheet(gnd_identifier)),
op.filter(lambda value: value is not None))
def retrieve_entityfacts_sheet(gnd_identifier):
entityfacts_sheets_uri = ENTITYFACTS_BASE_URI + gnd_identifier
    response_body = entityfacts_request(entityfacts_sheets_uri, gnd_identifier)
    if response_body is None:
        return None
    entityfacts_sheet_tuple = (response_body, gnd_identifier)
return entityfacts_sheet_tuple
def format_entityfacts_sheet_obs(entityfacts_sheet_tuple_obs):
return entityfacts_sheet_tuple_obs.pipe(op.map(lambda ef_sheet_tuple: format_entityfacts_sheet(ef_sheet_tuple)))
def format_entityfacts_sheet(entityfacts_sheet_tuple):
gnd_identifier = entityfacts_sheet_tuple[1]
eprint("format EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
entityfacts_sheet_json = json.loads(entityfacts_sheet_tuple[0])
flat_entityfacts_sheet_json = json.dumps(entityfacts_sheet_json, indent=None)
return flat_entityfacts_sheet_json, gnd_identifier
def write_entityfacts_sheet_obs(flat_entityfacts_sheet_json_tuple_obs):
return flat_entityfacts_sheet_json_tuple_obs.pipe(op.map(lambda flat_ef_sheet_json_tuple: write_entityfacts_sheet(
flat_ef_sheet_json_tuple)))
def write_entityfacts_sheet(flat_entityfacts_sheet_json_tuple):
gnd_identifier = flat_entityfacts_sheet_json_tuple[1]
eprint("write EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
sys.stdout.write(flat_entityfacts_sheet_json_tuple[0] + LINEBREAK)
return gnd_identifier
def push_input(observer, scheduler):
for line in sys.stdin:
observer.on_next(line)
return observer.on_completed()
def run():
parser = argparse.ArgumentParser(prog='entityfactssheetsharvester',
description='Retrieves EntityFacts sheets from a given CSV with GND identifiers and returns them as line-delimited JSON records.',
epilog='example: entityfactssheetsharvester < [INPUT CSV FILE WITH GND IDENTIFIERS] > [PATH TO THE OUTPUT LINE-DELIMITED JSON RECORDS FILE]',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()
if hasattr(args, 'help') and args.help:
parser.print_usage(sys.stderr)
exit(-1)
source = create(push_input)
all_in_one = source.pipe(op.map(lambda line: get_gnd_identifier(line)),
op.map(lambda gnd_identifier: retrieve_entityfacts_sheet_obs(gnd_identifier)),
op.map(lambda ef_sheet_tuple_obs: format_entityfacts_sheet_obs(ef_sheet_tuple_obs)),
op.map(lambda flat_ef_sheet_json_tuple_obs: write_entityfacts_sheet_obs(
flat_ef_sheet_json_tuple_obs)),
op.flat_map(lambda x: x))
all_in_one.subscribe(
on_next=lambda gnd_identifier: eprint(
"PROCESSED GND identifier '{0}': {1}".format(gnd_identifier, current_thread().name)),
on_error=lambda e: eprint(e),
on_completed=lambda: eprint("PROCESS done!"),
scheduler=THREAD_POOL_SCHEDULER)
if __name__ == "__main__":
run()
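# Usage sketch (not part of the original): feed GND identifiers on stdin,
# one per line, and collect line-delimited JSON on stdout, e.g.
#
#   echo "118540238" | python entityfactssheetsharvester.py > sheets.ldj
#
# (the identifier above is illustrative)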
| 41.463768 | 178 | 0.67791 | 677 | 5,722 | 5.420975 | 0.243722 | 0.120436 | 0.049046 | 0.045777 | 0.320163 | 0.236785 | 0.145504 | 0.118529 | 0.101635 | 0.101635 | 0 | 0.009574 | 0.23331 | 5,722 | 137 | 179 | 41.766423 | 0.826989 | 0.009962 | 0 | 0.082474 | 0 | 0.030928 | 0.164959 | 0.022783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113402 | false | 0 | 0.103093 | 0.030928 | 0.329897 | 0.123711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e51032e8f05343cce31308455d21b22aca3ea53e | 5,086 | py | Python | pyner/util/optimizer.py | chantera/pyner | 6de19713871e923c997495c07e2ec249bded8671 | [
"MIT"
] | 1 | 2019-06-16T00:52:26.000Z | 2019-06-16T00:52:26.000Z | pyner/util/optimizer.py | chantera/pyner | 6de19713871e923c997495c07e2ec249bded8671 | [
"MIT"
] | null | null | null | pyner/util/optimizer.py | chantera/pyner | 6de19713871e923c997495c07e2ec249bded8671 | [
"MIT"
] | null | null | null | from chainer import optimizer_hooks
from chainer import optimizers
from chainer import training
import numpy
import logging
logger = logging.getLogger(__name__)
def create_optimizer(configs):
"""
:param optimizer_config: dict, 学習のパラメータを含む辞書
"""
if 'optimizer' not in configs:
raise Exception('Optimizer configurations are not found')
optimizer_configs = configs['optimizer']
optimizer_ = optimizer_configs['name']
optimizer_ = optimizer_.lower()
if optimizer_ == 'sgd':
optimizer = optimizers.SGD(lr=optimizer_configs['learning_rate'])
elif optimizer_ == 'momentumsgd':
optimizer = optimizers.MomentumSGD(
lr=optimizer_configs['learning_rate'])
elif optimizer_ == 'adadelta':
optimizer = optimizers.AdaDelta()
elif optimizer_ == 'adam':
optimizer = optimizers.Adam(alpha=optimizer_configs['alpha'],
beta1=optimizer_configs['beta1'],
beta2=optimizer_configs['beta2'])
elif optimizer_ == 'adabound':
optimizer = optimizers.Adam(alpha=optimizer_configs['alpha'],
beta1=optimizer_configs['beta1'],
beta2=optimizer_configs['beta2'],
adabound=True,
final_lr=optimizer_configs['final_lr']) # NOQA
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_))
return optimizer
def add_hooks(optimizer, configs):
"""
:param optimizer: chainer.Optimizer, chainerのオプティマイザ
:param configs: pyner.util.config.ConfigParser
"""
if 'optimizer' not in configs:
raise Exception('Optimizer configurations are not found')
optimizer_configs = configs['optimizer']
if optimizer_configs.get('weight_decay'):
logger.debug('\x1b[31mSet weight decay\x1b[0m')
optimizer.add_hook(optimizer_hooks.WeightDecay(
optimizer_configs['weight_decay']))
if 'gradient_clipping' in optimizer_configs:
clipping_threshold = optimizer_configs['gradient_clipping']
msg = 'Enable gradient clipping:'
msg += f' threshold \x1b[31m{clipping_threshold}\x1b[0m'
logger.debug(msg)
optimizer.add_hook(
optimizer_hooks.GradientClipping(clipping_threshold)
)
return optimizer
class LearningRateDecay(training.extension.Extension):
"""Exception to decay learning rate as in Ma+
(http://www.aclweb.org/anthology/P16-1101)
Learning rate would be updated to
``rate * / (1 + (1 + iteration)) * decay``
This extension is also called before the training loop starts by default.
    Args:
        attr (str): Name of the attribute to decay (e.g. ``'lr'``).
        rate (float): Initial value of the attribute.
        decay (float): Decay coefficient applied per invocation.
        target (float): Target value of the attribute. If the attribute
            reaches this value, the decay stops.
        optimizer (~chainer.Optimizer): Target optimizer to adjust the
            attribute. If it is ``None``, the main optimizer of the updater
            is used.
    """
invoke_before_training = True
def __init__(self, attr, rate, decay, target=None,
optimizer=None):
self._attr = attr
self._rate = rate
self._decay = decay
self._target = target
self._optimizer = optimizer
self._t = 0
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
if self._last_value is not None: # resuming from a snapshot
self._update_value(optimizer, self._last_value)
else:
self._update_value(optimizer, self._rate)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
value = self._rate / (1 + (self._decay * self._t))
if self._target is not None:
if self._rate > 0:
# almost same as value = min(value, self._target), but this
# line supports negative values, too
if self._target / value > 1:
value = self._target
else:
# ditto
if self._target / value < 1:
value = self._target
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
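# Usage sketch (not part of the module): attach the extension to a trainer
# so the learning rate follows rate / (1 + decay * t); the concrete numbers
# below are illustrative.
#
#   trainer = training.Trainer(updater, (50, 'epoch'))
#   trainer.extend(LearningRateDecay('lr', rate=0.015, decay=0.05),
#                  trigger=(1, 'iteration'))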
| 33.682119 | 83 | 0.621707 | 557 | 5,086 | 5.468582 | 0.278276 | 0.09455 | 0.038411 | 0.023638 | 0.282994 | 0.200263 | 0.200263 | 0.155614 | 0.133946 | 0.133946 | 0 | 0.008845 | 0.288635 | 5,086 | 150 | 84 | 33.906667 | 0.833057 | 0.220016 | 0 | 0.215909 | 0 | 0 | 0.101453 | 0.009081 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.056818 | 0.011364 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e510c7426f5e3c38449cb80f147daf9524ba1a19 | 4,553 | py | Python | 5th_pipeline.py | Jose-Oton/airflow_project | 1b65a83975be63ad15cab95ad2947f6526400368 | [
"Apache-2.0"
] | 1 | 2021-07-08T12:29:34.000Z | 2021-07-08T12:29:34.000Z | 5th_pipeline.py | Jose-Oton/airflow_project | 1b65a83975be63ad15cab95ad2947f6526400368 | [
"Apache-2.0"
] | null | null | null | 5th_pipeline.py | Jose-Oton/airflow_project | 1b65a83975be63ad15cab95ad2947f6526400368 | [
"Apache-2.0"
] | null | null | null | #1. Documentación de un DAG
"""
## PYSPARK DAG
Este pipeline toma data de Covid compartida de forma pública por Google y calcula unos KPIs.
"""
from airflow import DAG
from datetime import timedelta, datetime
from airflow.utils.dates import days_ago
from airflow.models import Variable
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocCreateClusterOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocDeleteClusterOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocSubmitPySparkJobOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocSubmitJobOperator
from airflow.utils import trigger_rule
# DataprocSubmitPySparkJobOperator(
# task_id="store_stock",
# main="gs://your_bucket/datapipelines/pyspark/pyspark_transformation_joseOton.py",
# cluster_name="spark-cluster-{{ ds_nodash }}",
# dataproc_jars=["gs://spark-lib/bigquery/spark-bigquery-latest.jar"], # JAR so that Spark can read from BigQuery
# region='us-central1',
# gcp_conn_id='google_cloud_default'
# ).generate_job()
#2. Using Variables
PROJECT_ID = Variable.get("project")
STORAGE_BUCKET = Variable.get("storage_bucket")
default_dag_args = {
"start_date": days_ago(1),
"owner": "José Otón"
}
def is_weekend(execution_date=None):
date = datetime.strptime(execution_date, "%Y-%m-%d")
if date.isoweekday() < 6:
return "store_stock"
return "weekend"
# DEFINE THE DAG
with DAG(
dag_id='5th_exercise',
description='Running a PySpark Job on GCP',
schedule_interval='@daily',
default_args=default_dag_args,
max_active_runs=1,
    user_defined_macros={"project": PROJECT_ID},  # 5. Macros in Airflow
) as dag:
    dag.doc_md = __doc__  # to document a DAG
create_dataproc = DataprocCreateClusterOperator(
task_id="create_dataproc",
project_id='{{ project }}',
cluster_name="spark-cluster-{{ ds_nodash }}",
num_workers=2,
storage_bucket=STORAGE_BUCKET,
region="us-central1"
)
create_dataproc.doc_md = """## Crear cluster de Dataproc
Crea un cluster de Dataproc en el proyecto de GCP
"""
    # 3. Add branching logic to run one pipeline or the other
do_analytics = BranchPythonOperator(
task_id="do_analytics",
python_callable=is_weekend,
op_kwargs={"execution_date": "{{ ds }}"}, # 4. Jinja Templating
)
do_analytics.doc_md = """## Evalua que dia de la semana es
Crea un cluster de Dataproc en el proyecto de GCP.
"""
store_stock = DataprocSubmitJobOperator(
task_id="store_stock",
project_id=PROJECT_ID,
location='us-central1',
job={
'reference': {'project_id': '{{ project }}',
                          'job_id': '{{task.task_id}}_{{ds_nodash}}_2446afcc_joseOton'},  # unique per run, in case it changes
'placement': {'cluster_name': 'spark-cluster-{{ ds_nodash }}'},
'labels': {'airflow-version': 'v2-1-0'},
'pyspark_job': {
'jar_file_uris': ['gs://spark-lib/bigquery/spark-bigquery-latest_2.12.jar'],
'main_python_file_uri': 'gs://your_bucket/datapipelines/pyspark/pyspark_transformation_joseOton.py'
}
},
gcp_conn_id='google_cloud_default'
)
store_stock.doc_md = """## Spark Transformation
Ejecuta las transformaciones con Spark.
"""
weekend = BashOperator(
task_id="weekend",
bash_command='echo "\'$TODAY\' is weekend so the pipeline hasnt been executed."',
env={'TODAY': '2021-06-20'},
)
weekend.doc_md = """## Imprime el día de la semana
Se ejecuta en caso sea fin de semana.
"""
delete_cluster = DataprocDeleteClusterOperator(
task_id="delete_cluster",
project_id=PROJECT_ID,
cluster_name="spark-cluster-{{ ds_nodash }}",
trigger_rule="all_done",
region='us-central1'
#zone='us-central1-a'
)
delete_cluster.doc_md = """## Borrar Cluster de Dataproc
Elimina el cluster de Dataproc.
"""
    # SET THE DAG DEPENDENCIES
(create_dataproc >>
do_analytics >> [
store_stock,
weekend,
] >> delete_cluster)
| 34.755725 | 123 | 0.656929 | 524 | 4,553 | 5.494275 | 0.385496 | 0.038208 | 0.029524 | 0.036124 | 0.234109 | 0.234109 | 0.172282 | 0.146579 | 0.071553 | 0.027787 | 0 | 0.009461 | 0.233912 | 4,553 | 130 | 124 | 35.023077 | 0.81594 | 0.175928 | 0 | 0.097826 | 0 | 0 | 0.317385 | 0.048679 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01087 | false | 0 | 0.119565 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5137ca9a23bdb8d2e99e6fe8d556b4318b8b2ca | 9,531 | py | Python | components/fatfs/fatfsgen_utils/fs_object.py | iPlon-org/esp-idf | a5227db2a75102ca1a17860188c3c352a529a01b | [
"Apache-2.0"
] | 5 | 2021-11-22T06:47:54.000Z | 2022-01-04T06:58:43.000Z | components/fatfs/fatfsgen_utils/fs_object.py | iPlon-org/esp-idf | a5227db2a75102ca1a17860188c3c352a529a01b | [
"Apache-2.0"
] | null | null | null | components/fatfs/fatfsgen_utils/fs_object.py | iPlon-org/esp-idf | a5227db2a75102ca1a17860188c3c352a529a01b | [
"Apache-2.0"
] | 2 | 2022-01-05T05:09:13.000Z | 2022-02-09T22:32:54.000Z | # SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import os
from typing import List, Optional, Tuple
from .entry import Entry
from .exceptions import FatalError, WriteDirectoryException
from .fat import FAT, Cluster
from .fatfs_state import FATFSState
from .utils import required_clusters_count, split_content_into_sectors, split_to_name_and_extension
class File:
"""
The class File provides API to write into the files. It represents file in the FS.
"""
ATTR_ARCHIVE = 0x20
ENTITY_TYPE = ATTR_ARCHIVE
def __init__(self, name: str, fat: FAT, fatfs_state: FATFSState, entry: Entry, extension: str = '') -> None:
self.name = name
self.extension = extension
self.fatfs_state = fatfs_state
self.fat = fat
self.size = 0
self._first_cluster = None
self._entry = entry
@property
def entry(self) -> Entry:
return self._entry
@property
def first_cluster(self) -> Optional[Cluster]:
return self._first_cluster
@first_cluster.setter
def first_cluster(self, value: Cluster) -> None:
self._first_cluster = value
def name_equals(self, name: str, extension: str) -> bool:
return self.name == name and self.extension == extension
def write(self, content: str) -> None:
self.entry.update_content_size(len(content))
# we assume that the correct amount of clusters is allocated
current_cluster = self._first_cluster
for content_part in split_content_into_sectors(content, self.fatfs_state.sector_size):
content_as_list = content_part.encode()
if current_cluster is None:
raise FatalError('No free space left!')
address = current_cluster.cluster_data_address
self.fatfs_state.binary_image[address: address + len(content_part)] = content_as_list
current_cluster = current_cluster.next_cluster
class Directory:
"""
The Directory class provides API to add files and directories into the directory
and to find the file according to path and write it.
"""
ATTR_DIRECTORY = 0x10
ATTR_ARCHIVE = 0x20
ENTITY_TYPE = ATTR_DIRECTORY
def __init__(self,
name,
fat,
fatfs_state,
entry=None,
cluster=None,
size=None,
extension='',
parent=None):
# type: (str, FAT, FATFSState, Optional[Entry], Cluster, Optional[int], str, Directory) -> None
self.name = name
self.fatfs_state = fatfs_state
self.extension = extension
self.fat = fat
self.size = size or self.fatfs_state.sector_size
# if directory is root its parent is itself
self.parent: Directory = parent or self
self._first_cluster = cluster
# entries will be initialized after the cluster allocation
self.entries: List[Entry] = []
self.entities = [] # type: ignore
self._entry = entry # currently not in use (will use later for e.g. modification time, etc.)
@property
def is_root(self) -> bool:
return self.parent is self
@property
def first_cluster(self) -> Cluster:
return self._first_cluster
@first_cluster.setter
def first_cluster(self, value: Cluster) -> None:
self._first_cluster = value
def name_equals(self, name: str, extension: str) -> bool:
return self.name == name and self.extension == extension
def create_entries(self, cluster: Cluster) -> list:
return [Entry(entry_id=i,
parent_dir_entries_address=cluster.cluster_data_address,
fatfs_state=self.fatfs_state)
for i in range(self.size // self.fatfs_state.entry_size)]
def init_directory(self) -> None:
self.entries = self.create_entries(self._first_cluster)
if not self.is_root:
            # the root directory doesn't contain links to itself or to its parent
free_entry1 = self.find_free_entry() or self.chain_directory()
free_entry1.allocate_entry(first_cluster_id=self.first_cluster.id,
entity_name='.',
entity_extension='',
entity_type=self.ENTITY_TYPE)
self.first_cluster = self._first_cluster
free_entry2 = self.find_free_entry() or self.chain_directory()
free_entry2.allocate_entry(first_cluster_id=self.parent.first_cluster.id,
entity_name='..',
entity_extension='',
entity_type=self.parent.ENTITY_TYPE)
self.parent.first_cluster = self.parent.first_cluster
def lookup_entity(self, object_name: str, extension: str): # type: ignore
for entity in self.entities:
if entity.name == object_name and entity.extension == extension:
return entity
return None
def recursive_search(self, path_as_list, current_dir): # type: ignore
name, extension = split_to_name_and_extension(path_as_list[0])
next_obj = current_dir.lookup_entity(name, extension)
if next_obj is None:
raise FileNotFoundError('No such file or directory!')
if len(path_as_list) == 1 and next_obj.name_equals(name, extension):
return next_obj
return self.recursive_search(path_as_list[1:], next_obj)
def find_free_entry(self) -> Optional[Entry]:
for entry in self.entries:
if entry.is_empty:
return entry
return None
def _extend_directory(self) -> None:
current = self.first_cluster
while current.next_cluster is not None:
current = current.next_cluster
new_cluster = self.fat.find_free_cluster()
current.set_in_fat(new_cluster.id)
current.next_cluster = new_cluster
self.entries += self.create_entries(new_cluster)
def chain_directory(self) -> Entry:
self._extend_directory()
free_entry = self.find_free_entry()
if free_entry is None:
raise FatalError('No more space left!')
return free_entry
def allocate_object(self,
name,
entity_type,
path_from_root=None,
extension=''):
# type: (str, int, Optional[List[str]], str) -> Tuple[Cluster, Entry, Directory]
"""
Method finds the target directory in the path
and allocates cluster (both the record in FAT and cluster in the data region)
and entry in the specified directory
"""
free_cluster = self.fat.find_free_cluster()
target_dir = self if not path_from_root else self.recursive_search(path_from_root, self)
free_entry = target_dir.find_free_entry() or target_dir.chain_directory()
free_entry.allocate_entry(first_cluster_id=free_cluster.id,
entity_name=name,
entity_extension=extension,
entity_type=entity_type)
return free_cluster, free_entry, target_dir
def new_file(self, name: str, extension: str, path_from_root: Optional[List[str]]) -> None:
free_cluster, free_entry, target_dir = self.allocate_object(name=name,
extension=extension,
entity_type=Directory.ATTR_ARCHIVE,
path_from_root=path_from_root)
file = File(name, fat=self.fat, extension=extension, fatfs_state=self.fatfs_state, entry=free_entry)
file.first_cluster = free_cluster
target_dir.entities.append(file)
def new_directory(self, name, parent, path_from_root):
# type: (str, Directory, Optional[List[str]]) -> None
free_cluster, free_entry, target_dir = self.allocate_object(name=name,
entity_type=Directory.ATTR_DIRECTORY,
path_from_root=path_from_root)
directory = Directory(name=name, fat=self.fat, parent=parent, fatfs_state=self.fatfs_state, entry=free_entry)
directory.first_cluster = free_cluster
directory.init_directory()
target_dir.entities.append(directory)
def write_to_file(self, path: List[str], content: str) -> None:
"""
Writes to file existing in the directory structure.
:param path: path split into the list
:param content: content as a string to be written into a file
:returns: None
:raises WriteDirectoryException: raised is the target object for writing is a directory
"""
entity_to_write = self.recursive_search(path, self)
if isinstance(entity_to_write, File):
clusters_cnt = required_clusters_count(cluster_size=self.fatfs_state.sector_size, content=content)
self.fat.allocate_chain(entity_to_write.first_cluster, clusters_cnt)
entity_to_write.write(content)
else:
raise WriteDirectoryException(f'`{os.path.join(*path)}` is a directory!')
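# Usage sketch (not part of the module; names are illustrative): given an
# initialized root Directory `root`, a typical flow would be
#
#   root.new_directory('SUB', parent=root, path_from_root=None)
#   root.new_file('HELLO', 'TXT', path_from_root=['SUB'])
#   root.write_to_file(['SUB', 'HELLO.TXT'], 'Hello world!')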
| 42.172566 | 117 | 0.618193 | 1,129 | 9,531 | 4.977857 | 0.159433 | 0.057651 | 0.034164 | 0.013523 | 0.281139 | 0.21637 | 0.140569 | 0.140569 | 0.127046 | 0.112456 | 0 | 0.003474 | 0.305319 | 9,531 | 225 | 118 | 42.36 | 0.845341 | 0.135663 | 0 | 0.234568 | 0 | 0 | 0.013091 | 0.002841 | 0 | 0 | 0.001482 | 0 | 0 | 1 | 0.135802 | false | 0 | 0.04321 | 0.04321 | 0.314815 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e514935f4c1b7392f48ec35e4356650b174def56 | 6,625 | py | Python | addons14/calendar_base_booking/models/bookable_mixin.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-06-10T14:59:13.000Z | 2021-06-10T14:59:13.000Z | addons14/calendar_base_booking/models/bookable_mixin.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | null | null | null | addons14/calendar_base_booking/models/bookable_mixin.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | [
"MIT"
] | 1 | 2021-04-09T09:44:44.000Z | 2021-04-09T09:44:44.000Z | # Copyright 2020 Akretion (http://www.akretion.com).
# @author Sébastien BEAU <sebastien.beau@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from odoo import _, fields, models
from odoo.exceptions import UserError
from odoo.osv import expression
# Concepts
# open_slot is the range of time during which the resource can be booked
# available_slot is the range of time during which the resource is still
# available for booking
# booked_slot is a slot that has already been booked
# bookable_slot is a slot (sized by slot_duration) that fits into
# an available slot
class BookableMixin(models.AbstractModel):
_name = "bookable.mixin"
_description = "Bookable Mixin"
slot_duration = fields.Float()
slot_capacity = fields.Integer()
def _get_slot_duration(self):
return self.slot_duration
def _get_slot_capacity(self):
return self.slot_capacity
def _get_booked_slot(self, start, stop):
domain = self._get_domain(start, stop)
return self.env["calendar.event"].search(
expression.AND([domain, [("booking_type", "=", "booked")]])
)
def _build_timeline_load(self, start, stop):
timeline = defaultdict(int)
timeline.update({start: 0, stop: 0})
for booked_slot in self._get_booked_slot(start, stop):
if booked_slot.start < start:
timeline[start] += 1
else:
timeline[booked_slot.start] += 1
if booked_slot.stop < stop:
timeline[booked_slot.stop] -= 1
timeline = list(timeline.items())
timeline.sort()
return timeline
def _get_available_slot(self, start, stop):
load_timeline = self._build_timeline_load(start, stop)
load = 0
slots = []
slot = None
capacity = self._get_slot_capacity()
for dt, load_delta in load_timeline:
load += load_delta
if not slot and load < capacity:
slot = [dt, None]
slots.append(slot)
            # `elif slot` guards the edge case where the resource is already
            # at capacity at the first timeline point (slot is still None)
            elif slot:
                slot[1] = dt
                if load >= capacity:
                    slot = None
return slots
def _prepare_bookable_slot(self, open_slot, start, stop):
        # If needed, extra information from the open_slot can be injected here
return {"start": start, "stop": stop}
def _build_bookable_slot(self, open_slot, start, stop):
bookable_slots = []
# now we have to care about datetime vs string
delta = self._get_slot_duration()
while True:
slot_stop = start + relativedelta(minutes=delta)
if slot_stop > stop:
break
bookable_slots.append(
self._prepare_bookable_slot(open_slot, start, slot_stop)
)
start += relativedelta(minutes=delta)
return bookable_slots
def get_open_slot(self, start, stop):
domain = self._get_domain(start, stop)
domain = expression.AND([domain, [("booking_type", "=", "bookable")]])
return self.env["calendar.event"].search(domain, order="start_date")
def get_bookable_slot(self, start, stop):
start = fields.Datetime.to_datetime(start)
stop = fields.Datetime.to_datetime(stop)
slots = []
for open_slot in self.get_open_slot(start, stop):
for slot_start, slot_stop in self._get_available_slot(
max(open_slot.start, start), min(open_slot.stop, stop)
):
slots += self._build_bookable_slot(open_slot, slot_start, slot_stop)
return slots
def _get_domain_for_current_object(self):
return [
("res_model", "=", self._name),
("res_id", "=", self.id),
]
def _get_domain(self, start, stop):
        # be careful: we need to search for every slot (bookable and booked)
        # that exists in the range start/stop
        # This means that we need the slots
        # - started before and finishing in the range
        # - started and finished in the range
        # - started in the range and finishing after
        # Put differently, it's
        # - all slots that start in the range
        # - all slots that finish in the range
domain = self._get_domain_for_current_object()
return expression.AND(
[
domain,
[
"|",
"&",
("start", ">=", start),
("start", "<", stop),
"&",
("stop", ">", start),
("stop", "<=", stop),
],
]
)
def _check_load(self, start, stop):
load_timeline = self._build_timeline_load(start, stop)
capacity = self._get_slot_capacity()
load = 0
for _dt, load_delta in load_timeline:
load += load_delta
if load > capacity:
raise UserError(_("The slot is not available anymore"))
def _prepare_booked_slot(self, vals):
vals.update(
{
"res_model_id": self.env["ir.model"]
.search([("model", "=", self._name)])
.id,
"res_id": self.id,
"booking_type": "booked",
"start": fields.Datetime.to_datetime(vals["start"]),
"stop": fields.Datetime.to_datetime(vals["stop"]),
}
)
return vals
def _check_duration(self, start, stop):
duration = (stop - start).total_seconds() / 60.0
if duration != self._get_slot_duration():
raise UserError(_("The slot duration is not valid"))
def _check_on_open_slot(self, start, stop):
domain = self._get_domain_for_current_object()
domain = expression.AND(
[
domain,
[
("start", "<=", start),
("stop", ">=", stop),
],
]
)
open_slot = self.env["calendar.event"].search(domain)
if not open_slot:
raise UserError(_("The slot is not on a bookable zone"))
def book_slot(self, vals):
self.ensure_one()
vals = self._prepare_booked_slot(vals)
self._check_on_open_slot(vals["start"], vals["stop"])
self._check_duration(vals["start"], vals["stop"])
slot = self.env["calendar.event"].create(vals)
self._check_load(vals["start"], vals["stop"])
return slot
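# --- Illustration (not part of the module) --------------------------------
# A self-contained sketch of the capacity sweep implemented by
# _build_timeline_load()/_get_available_slot() above, using plain integers
# as timestamps; all values are made up, and bookings are assumed to
# overlap the [start, stop) range.
if __name__ == "__main__":
    from collections import defaultdict

    def available(start, stop, bookings, capacity):
        # bookings: list of (start, stop) pairs that are already booked
        timeline = defaultdict(int)
        timeline.update({start: 0, stop: 0})
        for b_start, b_stop in bookings:
            timeline[max(b_start, start)] += 1
            if b_stop < stop:
                timeline[b_stop] -= 1
        load, slot, slots = 0, None, []
        for dt, delta in sorted(timeline.items()):
            load += delta
            if not slot and load < capacity:
                slot = [dt, None]
                slots.append(slot)
            elif slot:
                slot[1] = dt
                if load >= capacity:
                    slot = None
        return slots

    # one booking from 10 to 12 on a resource with capacity 1
    print(available(8, 18, [(10, 12)], 1))  # -> [[8, 10], [12, 18]]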
| 34.505208 | 84 | 0.570717 | 764 | 6,625 | 4.730366 | 0.205497 | 0.059768 | 0.032374 | 0.02352 | 0.306586 | 0.226065 | 0.144162 | 0.111787 | 0.111787 | 0.079137 | 0 | 0.003801 | 0.324981 | 6,625 | 191 | 85 | 34.685864 | 0.804338 | 0.135849 | 0 | 0.180556 | 0 | 0 | 0.068724 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.034722 | 0.027778 | 0.270833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |