hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d0960ad1b06bca915068cdce59adb7d2b15f5f3 | 4,848 | py | Python | dotfiles/spectrwm/purp/.config/sublime-text-3/Packages/LaTeXTools/biblatex_crossref_completions.py | jturne19/jordans_things | 9d7abc850d009898ec69daf199a78f33795af4a1 | [
"MIT"
] | null | null | null | dotfiles/spectrwm/purp/.config/sublime-text-3/Packages/LaTeXTools/biblatex_crossref_completions.py | jturne19/jordans_things | 9d7abc850d009898ec69daf199a78f33795af4a1 | [
"MIT"
] | null | null | null | dotfiles/spectrwm/purp/.config/sublime-text-3/Packages/LaTeXTools/biblatex_crossref_completions.py | jturne19/jordans_things | 9d7abc850d009898ec69daf199a78f33795af4a1 | [
"MIT"
] | null | null | null | from __future__ import print_function
import sublime
import sublime_plugin
import re
import sys
try:
from latextools_utils import is_bib_buffer, is_biblatex_buffer
except ImportError:
from .latextools_utils import is_bib_buffer, is_biblatex_buffer
if sys.version_info > (3, 0):
strbase = str
else:
strbase = basestring
# Regexes to detect the various types of crossref fields
# Expected field in the format:
# <field> = {<value>,<value>}
# Should support partials approaching this format
#
# I've tried to improve the comprehensibility of the backwards regexes used by
# constructing them here
#
# VALUE_REGEX is a common suffix to handle the `= {<value>,<value>}` part
VALUE_REGEX = r'(?!.*\})\s*(?P<ENTRIES>(?:,[^,]*)+\b)?\s*(?P<OPEN>\{)?(?P<EQUALS>\s*=\s*)?'
CROSSREF_REGEX = re.compile(
VALUE_REGEX + r'crossref'[::-1] + r'\b',
re.IGNORECASE | re.UNICODE
)
BIBLATEX_REGEX = re.compile(
VALUE_REGEX + r'(?:' + r'|'.join((s[::-1] for s in ('xref', 'related'))) + r')' + r'\b',
re.IGNORECASE | re.UNICODE
)
ENTRY_SET_REGEX = re.compile(
VALUE_REGEX + r'entryset'[::-1] + r'\b',
re.IGNORECASE | re.UNICODE
)
XDATA_REGEX = re.compile(
VALUE_REGEX + r'xdata'[::-1] + r'\b',
re.IGNORECASE | re.UNICODE
)
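# Illustrative sanity check (hypothetical, not part of the plugin): completions
# are matched against the *reversed* text to the left of the cursor, so a
# partially typed line such as "crossref = {" is tested as its mirror image:
#     assert CROSSREF_REGEX.match('crossref = {'[::-1]) is not None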
# set indicating entries that have their own special handling...
SPECIAL_ENTRIES = set(['@xdata', '@set'])
def _get_keys_by_type(view, valid_types):
if not valid_types:
return []
if callable(valid_types):
validator = valid_types
    elif isinstance(valid_types, strbase):
def validator(s):
return s == valid_types
else:
def validator(s):
return s in valid_types
keys = []
contents = view.substr(sublime.Region(0, view.size()))
for entry_type, key in re.findall(
r'(@(?!preamble|comment|string)[a-zA-Z]+)\s*\{\s*([^,]+)\b',
contents,
re.IGNORECASE | re.UNICODE
):
if validator(entry_type):
keys.append(key)
return keys
# BibLaTeX supports custom user-defined keys specified in the `id` field
def _get_keys_from_id_field(view):
keys = []
contents = view.substr(sublime.Region(0, view.size()))
# TODO: Should probably figure out how to work out the entry-type
for ids in re.findall(
r'\bids\s*=\s*\{([^}]+)\}',
contents,
re.IGNORECASE | re.UNICODE | re.DOTALL
):
for key in re.findall(
r'\b([^,]+)\b',
ids,
re.IGNORECASE | re.UNICODE
):
keys.append(key)
return keys
def _get_cite_keys_validator(s):
return s not in SPECIAL_ENTRIES
def get_cite_keys(view):
return _get_keys_by_type(view, _get_cite_keys_validator) + \
_get_keys_from_id_field(view)
def get_xdata_keys(view):
return _get_keys_by_type(view, '@xdata')
def get_entryset_keys(view):
return _get_keys_by_type(view, '@set')
def get_text_to_cursor(view):
cursor = view.sel()[0].b
current_region = sublime.Region(0, cursor)
return view.substr(current_region)
# builds the replacement string depending on the current context of the line
def _get_replacement(matcher, key):
if not matcher.group('ENTRIES'):
return u'{0}{1}{2}{3}'.format(
u'' if matcher.group('EQUALS') else u'= ',
u'' if matcher.group('OPEN') else u'{',
key,
u'' if matcher.group('OPEN') else u'}'
)
return '{0}{1}'.format(
u',' if matcher.group('ENTRIES')[0] != u',' else u'',
key
)
def get_completions_if_matches(regex, line, get_key_list_func, view):
matcher = regex.match(line)
if matcher:
return ([(key, _get_replacement(matcher, key))
for key in sorted(set(get_key_list_func(view)))],
sublime.INHIBIT_WORD_COMPLETIONS |
sublime.INHIBIT_EXPLICIT_COMPLETIONS)
else:
return []
class BiblatexCrossrefCompletions(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
if not is_bib_buffer(view):
return []
current_line = get_text_to_cursor(view)[::-1]
if current_line.startswith(prefix[::-1]):
current_line = current_line[len(prefix):]
result = get_completions_if_matches(
CROSSREF_REGEX, current_line, get_cite_keys, view)
if result:
return result
if not is_biblatex_buffer(view):
return []
return get_completions_if_matches(
BIBLATEX_REGEX, current_line, get_cite_keys, view) or \
get_completions_if_matches(
XDATA_REGEX, current_line, get_xdata_keys, view) or \
get_completions_if_matches(
ENTRY_SET_REGEX, current_line, get_entryset_keys, view) or \
[] | 29.204819 | 92 | 0.626238 | 643 | 4,848 | 4.497667 | 0.251944 | 0.018672 | 0.033887 | 0.05083 | 0.345436 | 0.239281 | 0.181535 | 0.098548 | 0.06639 | 0.035961 | 0 | 0.005204 | 0.246906 | 4,848 | 166 | 93 | 29.204819 | 0.786908 | 0.124587 | 0 | 0.247934 | 0 | 0.008264 | 0.066415 | 0.036162 | 0 | 0 | 0 | 0.006024 | 0 | 1 | 0.099174 | false | 0 | 0.066116 | 0.049587 | 0.322314 | 0.008264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d09bc63d9c7eb8423e9b0e5da7103e3ef8feb8b | 1,018 | py | Python | problems/TLane_solutions/problem_005.py | tshralper/tabula-rasa_project-euler | 3eb924ae4a38d877098f6b8f8e1118f8ae3514e2 | [
"MIT"
] | null | null | null | problems/TLane_solutions/problem_005.py | tshralper/tabula-rasa_project-euler | 3eb924ae4a38d877098f6b8f8e1118f8ae3514e2 | [
"MIT"
] | null | null | null | problems/TLane_solutions/problem_005.py | tshralper/tabula-rasa_project-euler | 3eb924ae4a38d877098f6b8f8e1118f8ae3514e2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tues Aug 21 11:42:00 2018
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
Problem: What is the smallest positive number that is evenly divisible by all
of the numbers from 1 to 20?
Answer: 232792560
Program completes in less than 10 seconds, but could be sped up by multiplying the factors of the numbers in the range (i.e. computing their least common multiple).
@author: tlane
"""
#Ask for input of number range
raw = input('Range from 1 to _')
if raw == '': raw = 10
end = int(raw)
#Make a list of numbers in the range
rng = list(range(1,(end + 1)))
largest = rng[len(rng) - 1]
#Check numbers incrementally and find out which one is divisible by all numbers in the range
num = 0 + largest
lcd = None
gnr = list(reversed(rng))
while lcd is None:
goal = 0
for n in gnr:
if num % n != 0: break
goal = goal + 1
if goal == len(gnr):
lcd = num
num = num + largest
print(lcd)
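#A faster alternative hinted at in the docstring above: accumulate the least
#common multiple of the numbers in the range instead of scanning multiples.
#Sketch only (assumes Python 3.5+ for math.gcd); uncomment to try it:
#from math import gcd
#lcm = 1
#for k in range(2, end + 1):
#    lcm = lcm * k // gcd(lcm, k)
#print(lcm)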
| 24.829268 | 116 | 0.639489 | 169 | 1,018 | 3.846154 | 0.514793 | 0.023077 | 0.032308 | 0.078462 | 0.116923 | 0.058462 | 0 | 0 | 0 | 0 | 0 | 0.059701 | 0.276031 | 1,018 | 40 | 117 | 25.45 | 0.822252 | 0.570727 | 0 | 0 | 0 | 0 | 0.044041 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d0cd0cb05f33dc84ee05240b7d4e69a6891ad7c | 3,710 | py | Python | utils/coco_eval.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | [
"Apache-2.0"
] | 30 | 2021-01-29T13:57:47.000Z | 2022-02-09T13:17:57.000Z | utils/coco_eval.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | [
"Apache-2.0"
] | 13 | 2021-04-16T06:30:27.000Z | 2022-03-16T18:42:23.000Z | utils/coco_eval.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | [
"Apache-2.0"
] | 16 | 2021-04-28T06:51:58.000Z | 2022-03-23T23:47:52.000Z |
import numpy as np
from utils import coco_tools
class CocoEvalidation():
def __init__(self, groundtruth_boxes,groundtruth_classes,groundtruth_valids,class_names):
self.groundtruth_boxes = groundtruth_boxes
self.groundtruth_classes = groundtruth_classes
self.groundtruth_valids = groundtruth_valids
self.class_names = class_names
self.groundtruth_dict = self.convert_gt_to_coco(groundtruth_boxes,groundtruth_classes,groundtruth_valids,class_names)
# print(self.groundtruth_dict)
self.groundtruth = coco_tools.COCOWrapper(self.groundtruth_dict)
def convert_gt_to_coco(self, groundtruth_boxes,groundtruth_classes,groundtruth_valids,class_names):
categories=[{'id': id,'name': name} for id, name in enumerate(class_names)]
annotation_id = 1
num_imgs = groundtruth_classes.shape[0]
if num_imgs == 0:
raise ValueError('the number of groundtruth_boxes must be greater than zero.')
coco_groundtruth = []
image_export_list = []
for image_index in range(num_imgs):
num_boxes = groundtruth_valids[image_index]
for box_index in range(num_boxes):
box_wh = groundtruth_boxes[image_index][box_index][2:4]-groundtruth_boxes[image_index][box_index][0:2]
box_area = box_wh[0]*box_wh[1]
export_dict = {
'id':
annotation_id + box_index,
'image_id':
image_index,
'category_id':
int(groundtruth_classes[image_index][box_index]),
'bbox':
list(np.concatenate([groundtruth_boxes[image_index][box_index][0:2],box_wh],axis=-1)),
'area': box_area,
'iscrowd': 0
}
coco_groundtruth.append(export_dict)
image_export_list.append({'id': image_index})
annotation_id += num_boxes
groundtruth_dict = {
'annotations': coco_groundtruth,
'images': image_export_list,
'categories': categories
}
return groundtruth_dict
def convert_detection_to_coco(self, detection_boxes,detection_scores,detection_classes,detection_valids):
num_images = detection_classes.shape[0]
if detection_boxes.shape[0] == 0:
raise ValueError('the number of detection_boxes must be greater than zero.')
coco_groundtruth = []
for img_index in range(num_images):
num_boxes = detection_valids[img_index]
for box_index in range(num_boxes):
export_dict = {
'image_id':
img_index,
'category_id':
int(detection_classes[img_index,box_index]),
'bbox':
list(np.concatenate([detection_boxes[img_index, box_index][0:2], detection_boxes[img_index, box_index][2:4]-detection_boxes[img_index, box_index][0:2]], axis=-1)),
'score':
float(detection_scores[img_index,box_index]),
}
coco_groundtruth.append(export_dict)
return coco_groundtruth
def get_coco_mAP(self,detection_boxes,detection_scores,detection_classes,detection_valids):
detections_list = self.convert_detection_to_coco(detection_boxes, detection_scores, detection_classes, detection_valids)
# print(detections_list)
detections = self.groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(self.groundtruth, detections)
summary_metrics, _ = evaluator.ComputeMetrics()
return summary_metrics
#
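# Hypothetical usage sketch (the argument shapes are assumptions, not part of
# this module): boxes are (num_images, max_boxes, 4) arrays, classes/scores are
# (num_images, max_boxes), and valids gives the per-image box count.
#     evaluator = CocoEvalidation(gt_boxes, gt_classes, gt_valids, class_names)
#     metrics = evaluator.get_coco_mAP(det_boxes, det_scores, det_classes, det_valids)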
| 42.643678 | 185 | 0.643666 | 416 | 3,710 | 5.384615 | 0.197115 | 0.042857 | 0.052232 | 0.035714 | 0.409375 | 0.379911 | 0.327232 | 0.296875 | 0.15 | 0 | 0 | 0.008506 | 0.271159 | 3,710 | 86 | 186 | 43.139535 | 0.819896 | 0.013747 | 0 | 0.197183 | 0 | 0 | 0.058308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0.014085 | 0.028169 | 0 | 0.140845 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d0e8db9852fae9b33e4425c27943f5b3b25e471 | 847 | py | Python | JsonDB/Models.py | Ajay1290/JsonDB | 21213bc2cc826cec8f483eafeab00f9401492a0a | [
"MIT"
] | 1 | 2021-01-03T17:58:54.000Z | 2021-01-03T17:58:54.000Z | JsonDB/Models.py | Ajay1290/JsonDB | 21213bc2cc826cec8f483eafeab00f9401492a0a | [
"MIT"
] | null | null | null | JsonDB/Models.py | Ajay1290/JsonDB | 21213bc2cc826cec8f483eafeab00f9401492a0a | [
"MIT"
] | null | null | null | import inspect
class Map:
map = {}
def __init_subclass__(cls, **kwargs):
Map.map = cls.map
@staticmethod
def harvest_map():
return Map.map
class Model:
nodes = []
def __init_subclass__(cls, **kwargs):
Model.nodes.append(Model.harvest_attr(cls))
@classmethod
def harvest_attr(cls, c):
attributes = inspect.getmembers(c, lambda a:not(inspect.isroutine(a)))
attr = []
for a in attributes:
if not(a[0].startswith('__') and a[0].endswith('__')):
if a[0] != 'nodes':
attr.append(a)
try:
d = { c.__tablename__ : attr }
except Exception:
d = { c.__name__ : attr }
return d
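    # Hypothetical usage: defining a subclass registers its non-dunder class
    # attributes under __tablename__ (or the class name) in Model.nodes, e.g.
    #     class User(Model):
    #         __tablename__ = 'users'
    #         name = 'text'
    #     Model.harvest_nodes()  # -> [{'users': [('name', 'text')]}]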
@staticmethod
def harvest_nodes():
return Model.nodes | 22.891892 | 78 | 0.524203 | 93 | 847 | 4.494624 | 0.397849 | 0.043062 | 0.07177 | 0.086124 | 0.114833 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005525 | 0.358914 | 847 | 37 | 79 | 22.891892 | 0.764273 | 0 | 0 | 0.142857 | 0 | 0 | 0.010613 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.035714 | 0.071429 | 0.464286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d14dedaefe2f00be87040a1e016a739fed81d62 | 3,400 | py | Python | vmware_nsxlib/tests/unit/v3/test_trust_management.py | salv-orlando/vmware-nsxlib | 283eff2881b99c57b3908d03fb1c91da7dbdf46e | [
"Apache-2.0"
] | null | null | null | vmware_nsxlib/tests/unit/v3/test_trust_management.py | salv-orlando/vmware-nsxlib | 283eff2881b99c57b3908d03fb1c91da7dbdf46e | [
"Apache-2.0"
] | null | null | null | vmware_nsxlib/tests/unit/v3/test_trust_management.py | salv-orlando/vmware-nsxlib | 283eff2881b99c57b3908d03fb1c91da7dbdf46e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
from vmware_nsxlib.tests.unit.v3 import test_constants as consts
class TestNsxLibTrustManagement(nsxlib_testcase.NsxClientTestCase):
def test_create_cert_list(self):
fake_cert_list = consts.FAKE_CERT_LIST
fake_pem = (fake_cert_list[0]['pem_encoded'] +
fake_cert_list[1]['pem_encoded'])
fake_private_key = 'fake_key'
cert_api = self.nsxlib.trust_management
body = {
'pem_encoded': fake_pem,
'private_key': fake_private_key,
'tags': consts.FAKE_TAGS
}
with mock.patch.object(self.nsxlib.client, 'create') as create:
cert_api.create_cert_list(
cert_pem=fake_pem,
private_key=fake_private_key,
tags=consts.FAKE_TAGS)
create.assert_called_with(
'trust-management/certificates?action=import',
body)
def test_find_cert_with_pem_empty(self):
pem = 'abc'
with mock.patch.object(self.nsxlib.client, 'get',
return_value={'results': []}):
results = self.nsxlib.trust_management.find_cert_with_pem(pem)
self.assertEqual(0, len(results))
def test_find_cert_with_pem_found(self):
pem = consts.FAKE_CERT_PEM
with mock.patch.object(
self.nsxlib.client, 'get',
return_value={'results': consts.FAKE_CERT_LIST}):
results = self.nsxlib.trust_management.find_cert_with_pem(pem)
self.assertEqual(1, len(results))
def test_find_cert_with_pem_rn_found(self):
pem = consts.FAKE_CERT_PEM.replace('\n', '\r\n')
with mock.patch.object(
self.nsxlib.client, 'get',
return_value={'results': consts.FAKE_CERT_LIST}):
results = self.nsxlib.trust_management.find_cert_with_pem(pem)
self.assertEqual(1, len(results))
def test_create_identity_with_cert(self):
fake_pem = consts.FAKE_CERT_PEM
name = "test-identity"
cert_api = self.nsxlib.trust_management
body = {
'name': name,
'certificate_pem': fake_pem,
'node_id': 'test_node_id',
'role': 'enterprise_admin',
'is_protected': True
}
with mock.patch.object(self.nsxlib.client, 'create') as create:
cert_api.create_identity_with_cert(
name=name,
cert_pem=fake_pem,
node_id='test_node_id',
role='enterprise_admin')
create.assert_called_with(
'trust-management/principal-identities/with-certificate',
body)
| 38.636364 | 78 | 0.630294 | 420 | 3,400 | 4.845238 | 0.309524 | 0.04914 | 0.035381 | 0.044226 | 0.543489 | 0.533661 | 0.486486 | 0.390172 | 0.367076 | 0.367076 | 0 | 0.006107 | 0.277647 | 3,400 | 87 | 79 | 39.08046 | 0.822476 | 0.175588 | 0 | 0.365079 | 0 | 0 | 0.113025 | 0.034804 | 0 | 0 | 0 | 0 | 0.079365 | 1 | 0.079365 | false | 0 | 0.063492 | 0 | 0.15873 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d166b2da9d8e5e3174f2b578f39c39f0835046d | 745 | py | Python | app/pods/pods.py | veryWrong/kube | f3716e962c7db0594d230a701fb862059f0c9578 | [
"Apache-2.0"
] | null | null | null | app/pods/pods.py | veryWrong/kube | f3716e962c7db0594d230a701fb862059f0c9578 | [
"Apache-2.0"
] | null | null | null | app/pods/pods.py | veryWrong/kube | f3716e962c7db0594d230a701fb862059f0c9578 | [
"Apache-2.0"
] | null | null | null | from flask import jsonify
from flask_login import login_required
from .podClass import Pod
from . import pod
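# GET / counts pods whose last container status is ready and running ("online")
# versus the rest ("offline"); POST /exec opens an exec session on a pod.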
@pod.route('/', methods=['GET', ])
@login_required
def pod_count():
ret = Pod().all_list()
online, offline = 0, 0
for i in ret.items:
item = i.status.container_statuses[-1]
if item.ready is True and item.state.running is not None:
online += 1
else:
offline += 1
    return jsonify({'code': 200, 'msg': 'successfully fetched all pods', 'data': {
'count': len(ret.items),
'online': online,
'offline': offline,
}})
@pod.route('/exec', methods=['POST', ])
@login_required
def tty():
res = Pod().exec()
print(res)
return jsonify({'code': 200, 'msg': 'ok'})
| 24.032258 | 65 | 0.585235 | 97 | 745 | 4.42268 | 0.525773 | 0.090909 | 0.074592 | 0.09324 | 0.107226 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01982 | 0.255034 | 745 | 30 | 66 | 24.833333 | 0.753153 | 0 | 0 | 0.076923 | 0 | 0 | 0.080537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d17ef8038b1cc6628d9eda55dcc9584a7f4d287 | 1,058 | py | Python | text-preprocessing/language-model-sentence-extraction.py | azagsam/cross-lingual-summarization | 402871dcf7a385cda90914574de24aad7133acf9 | [
"Unlicense"
] | null | null | null | text-preprocessing/language-model-sentence-extraction.py | azagsam/cross-lingual-summarization | 402871dcf7a385cda90914574de24aad7133acf9 | [
"Unlicense"
] | null | null | null | text-preprocessing/language-model-sentence-extraction.py | azagsam/cross-lingual-summarization | 402871dcf7a385cda90914574de24aad7133acf9 | [
"Unlicense"
] | null | null | null | import xml.etree.ElementTree as ET
import os
ns = "{http://www.tei-c.org/ns/1.0}"
n = 0
for file in os.listdir('tei'):
# traverse files
if file.startswith('GF'):
path = os.path.join('tei', file)
# open file
for doc in os.listdir(path):
n += 1
print(n) # should be approx. 38k when finished
full_file = os.path.join(path, doc)
tree = ET.parse(full_file)
# extract all sentences from a file
for sent in tree.iter(ns + 's'):
full_sent = []
for words in sent.iter():
# full_sent += words.text
if words.tag == ns + 'w':
full_sent.append(words.text)
elif words.tag == ns + 'pc':
full_sent.append(words.text)
# write sentence to disk
with open("language-model-tokenized.txt", "a") as myfile:
myfile.write(" ".join(full_sent))
myfile.write('\n') | 34.129032 | 73 | 0.482987 | 132 | 1,058 | 3.818182 | 0.484848 | 0.079365 | 0.043651 | 0.075397 | 0.09127 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009419 | 0.397921 | 1,058 | 31 | 74 | 34.129032 | 0.78179 | 0.13327 | 0 | 0.090909 | 0 | 0 | 0.080132 | 0.030735 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d1d401c75cecb335ed3a64c63f335987af9024a | 2,351 | py | Python | python/examples/session_trees/session_trees.py | pushtechnology/diffusion-examples | 06248aea8c632e935e3c648dc1732c7cb9ac9042 | [
"Apache-2.0"
] | 11 | 2016-01-24T00:33:27.000Z | 2021-08-23T06:21:06.000Z | python/examples/session_trees/session_trees.py | pushtechnology/diffusion-examples | 06248aea8c632e935e3c648dc1732c7cb9ac9042 | [
"Apache-2.0"
] | 5 | 2015-07-21T21:05:56.000Z | 2020-09-02T13:03:01.000Z | python/examples/session_trees/session_trees.py | pushtechnology/diffusion-examples | 06248aea8c632e935e3c648dc1732c7cb9ac9042 | [
"Apache-2.0"
] | 18 | 2016-03-20T19:29:10.000Z | 2022-03-10T16:58:46.000Z | import asyncio
import diffusion.datatypes
from diffusion.features.control.session_trees.branch_mapping_table import (
BranchMappingTable,
)
server_url = "ws://localhost:8080"
principal = "control"
credentials = diffusion.Credentials("password")
path = "foo/bar"
topic_type = diffusion.datatypes.STRING
value = "bla bla"
# Because Python SDK for Diffusion is async, all the code needs to be
# wrapped inside a coroutine function, and executed using asyncio.run.
async def main():
# creating the session
    async with diffusion.Session(
        url=server_url, principal=principal, credentials=credentials
    ) as session:
        # creating a branch mapping table and registering it with the server
try:
table = (
BranchMappingTable.Builder()
.add_branch_mapping("$Principal is 'control'", "target/1")
.add_branch_mapping("all", "target/2")
.create("source/path")
)
await session.session_trees.put_branch_mapping_table(table)
print(f"""\
Branch mapping table created for session tree branch '{table.session_tree_branch}'."""
)
except Exception as ex:
print(f"Failed to create branch mapping table : {ex}.")
return
try:
print("Retrieving session tree branches.")
list_session_tree_branches = (
await session.session_trees.get_session_tree_branches_with_mappings()
)
except Exception as ex:
print(f"Failed to retrieve session tree branches : {ex}.")
return
try:
print("Retrieving branch mapping table:")
for session_tree_branch in list_session_tree_branches:
branch_mapping_table = await session.session_trees.get_branch_mapping_table(
session_tree_branch
)
for branch_mapping in branch_mapping_table.branch_mappings:
print(
f"""\
Session tree branch: '{session_tree_branch}',
Session filter: '{branch_mapping.session_filter}',
Topic tree branch: '{branch_mapping.topic_tree_branch}'"""
)
except Exception as ex:
print(f"Failed to retrieve a branch mapping : {ex}.")
if __name__ == "__main__":
asyncio.run(main())
| 33.112676 | 92 | 0.623139 | 257 | 2,351 | 5.478599 | 0.338521 | 0.129261 | 0.102273 | 0.051136 | 0.171165 | 0.095881 | 0.095881 | 0.095881 | 0.095881 | 0.06108 | 0 | 0.003599 | 0.29094 | 2,351 | 70 | 93 | 33.585714 | 0.841032 | 0.081242 | 0 | 0.148148 | 0 | 0 | 0.258005 | 0.057541 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.018519 | 0.055556 | 0 | 0.092593 | 0.12963 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d2008f8ed89f74206d38081357c68a4d36a753c | 1,757 | py | Python | src/chat.py | DanCh11/virtual-assistant | b6601f20bd851864f4a76dd4c73c8c5266a0014f | [
"MIT"
] | null | null | null | src/chat.py | DanCh11/virtual-assistant | b6601f20bd851864f4a76dd4c73c8c5266a0014f | [
"MIT"
] | null | null | null | src/chat.py | DanCh11/virtual-assistant | b6601f20bd851864f4a76dd4c73c8c5266a0014f | [
"MIT"
] | null | null | null | import random
import json
import torch
import speech_recognition as sr
r = sr.Recognizer()
from .model import NeuralNetwork
from .nltk_utils import bag_of_words, tokenize
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with open('./src/data/data.json', 'r') as f:
intents = json.load(f)
FILE = "./src/data/data.pth"
data = torch.load(FILE)
input_size = data['input_size']
hidden_size = data['hidden_size']
output_size = data['output_size']
all_words = data['all_words']
tags = data['tags']
model_state = data['model_state']
model = NeuralNetwork(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()
bot_name = "Daycu"
while True:
with sr.Microphone() as source:
audio = r.listen(source)
voice_data = ''
try:
voice_data = r.recognize_google(audio)
voice_data = tokenize(voice_data)
x = bag_of_words(voice_data, all_words)
x = x.reshape(1, x.shape[0])
x = torch.from_numpy(x)
output = model(x)
        _, predicted = torch.max(output, dim=1)
tag = tags[predicted.item()]
probs = torch.softmax(output, dim=1)
prob = probs[0][predicted.item()]
if prob.item() > 0.75:
for intent in intents["intents"]:
if tag == intent["tag"]:
print(f"{bot_name}: {random.choice(intent['responses'])}")
else:
print(f"{bot_name}: I do not understand...")
except sr.UnknownValueError:
pass
except sr.RequestError:
print('Sorry, my speech service is down')
| 24.068493 | 82 | 0.599886 | 228 | 1,757 | 4.469298 | 0.403509 | 0.044161 | 0.019627 | 0.037291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006294 | 0.276608 | 1,757 | 72 | 83 | 24.402778 | 0.795437 | 0 | 0 | 0 | 0 | 0 | 0.132194 | 0.020513 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.020833 | 0.145833 | 0 | 0.145833 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d211cab8a1e4dc92883081ebdefc1ba4f0b85a9 | 407 | py | Python | aoj/alds/alds1_11_c.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | aoj/alds/alds1_11_c.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | aoj/alds/alds1_11_c.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | from collections import deque
V = int(input())
edge = [[] for _ in range(V)]
for _ in range(V):
u, _, *v = map(lambda x: int(x)-1, input().split())
edge[u] = v
dist = [-1] * V
dist[0] = 0
que = deque([0])
while len(que):
v = que.popleft()
for c in edge[v]:
if dist[c] == -1:
dist[c] = dist[v] + 1
que.append(c)
for i, d in enumerate(dist):
print(i+1, d)
| 21.421053 | 55 | 0.511057 | 71 | 407 | 2.887324 | 0.422535 | 0.04878 | 0.097561 | 0.107317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 0.292383 | 407 | 18 | 56 | 22.611111 | 0.684028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d21d1ee1ed65ea490436b216844a040ad4eba70 | 1,350 | py | Python | lib/config.py | NHGmaniac/voctorec | 96c088b6775214b9eeff312201a29f82ba0e4bb0 | [
"MIT"
] | 1 | 2019-04-14T12:05:49.000Z | 2019-04-14T12:05:49.000Z | lib/config.py | zo-edv/voctorec | 96c088b6775214b9eeff312201a29f82ba0e4bb0 | [
"MIT"
] | null | null | null | lib/config.py | zo-edv/voctorec | 96c088b6775214b9eeff312201a29f82ba0e4bb0 | [
"MIT"
] | null | null | null | import os.path
import logging
from configparser import ConfigParser
from lib.args import Args
import lib.connection as Connection
__all__ = ['Config']
def getlist(self, section, option):
return [x.strip() for x in self.get(section, option).split(',')]
def fetchServerConfig(self):
log = logging.getLogger('Config')
log.info("reading server-config")
server_config = Connection.fetchServerConfig()
log.info("merging server-config %s", server_config)
self.read_dict(server_config)
ConfigParser.getlist = getlist
ConfigParser.fetchServerConfig = fetchServerConfig
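# Hypothetical example of the getlist() helper patched in above: a key such as
# "sources = cam1, cam2" would be read as
#     Config.getlist('mix', 'sources')  # -> ['cam1', 'cam2']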
files = [
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../default-config.ini'),
os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../config.ini'),
'/etc/voctomix/voctorec.ini',
os.path.expanduser('~/.config/voctomix/voctorec.ini'),
os.path.expanduser('~/.voctorec.ini'),
]
if Args.ini_file is not None:
files.append(Args.ini_file)
Config = ConfigParser()
readfiles = Config.read(files)
log = logging.getLogger('ConfigParser')
log.debug('considered config-files: \n%s',
"\n".join(["\t\t" + os.path.normpath(file) for file in files]))
log.debug('successfully parsed config-files: \n%s',
"\n".join(["\t\t" + os.path.normpath(file) for file in readfiles]))
| 28.723404 | 77 | 0.687407 | 175 | 1,350 | 5.2 | 0.331429 | 0.072527 | 0.02967 | 0.026374 | 0.27033 | 0.27033 | 0.193407 | 0.193407 | 0.193407 | 0.193407 | 0 | 0 | 0.157037 | 1,350 | 46 | 78 | 29.347826 | 0.799649 | 0 | 0 | 0.058824 | 0 | 0 | 0.188889 | 0.057778 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.147059 | 0.029412 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d25115d04e08b90aa66b7cfb606030c77fa82e9 | 1,674 | py | Python | common_python/tests/util/test_dataframe.py | ScienceStacks/common_python | 2732f928e13592f2089269731c8e2b04f856a77d | [
"MIT"
] | 1 | 2019-05-01T00:22:32.000Z | 2019-05-01T00:22:32.000Z | common_python/tests/util/test_dataframe.py | ScienceStacks/PythonCommon | 2732f928e13592f2089269731c8e2b04f856a77d | [
"MIT"
] | 1 | 2019-05-31T21:59:30.000Z | 2019-05-31T21:59:30.000Z | common_python/tests/util/test_dataframe.py | ScienceStacks/PythonCommon | 2732f928e13592f2089269731c8e2b04f856a77d | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import unittest
import common_python.util.dataframe as dataframe
IGNORE_TEST = False
IS_PLOT = False
COL_A = 'a'
COL_B = 'b'
COL_C = 'c'
DF = pd.DataFrame({COL_A: range(3)})
DF[COL_B] = 10*DF[COL_A]
SIZE = 3
DFS = [DF for _ in range(SIZE)]
DF1 = pd.DataFrame({COL_A: range(SIZE), COL_B: range(SIZE)})
DF2 = pd.DataFrame({COL_A: range(SIZE), COL_C: range(SIZE)})
DF1.index = [10, 20, 30]
DF2.index = [10, 30, 40]
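# DF1 and DF2 share column COL_A and the index values 10 and 30; the
# intersection tests below subset DF1 by DF2's columns (axis=1) or index
# (axis=0) and check the resulting shape.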
class TestFunctions(unittest.TestCase):
def testisLessEqual(self):
if IGNORE_TEST:
return
df2 = DF.applymap(lambda v: v - 1)
self.assertTrue(dataframe.isLessEqual(df2, DF))
self.assertFalse(dataframe.isLessEqual(DF, df2))
self.assertTrue(dataframe.isLessEqual(DF, DF))
def testMean(self):
if IGNORE_TEST:
return
df_mean = dataframe.mean(DFS)
df_mean = df_mean.applymap(lambda v: int(v))
self.assertTrue(df_mean.equals(DF))
def testStd(self):
if IGNORE_TEST:
return
df_std = dataframe.std(DFS)
df_falses = df_std.applymap(lambda v: not np.isclose(v, 0))
self.assertEqual(df_falses.sum().sum(), 0)
def testIntersection(self):
if IGNORE_TEST:
return
def test(axis, predicate):
df = DF1.copy()
if axis == 1:
items = DF2.columns
else:
items = DF2.index
df = dataframe.subset(df, items, axis=axis)
self.assertTrue(predicate(df))
#
predicate = lambda df: (len(df.columns) == 1) and (len(df) == SIZE)
test(1, predicate)
predicate = lambda df: (len(df.columns) == 2) and (len(df) == SIZE - 1)
test(0, predicate)
if __name__ == '__main__':
unittest.main()
| 24.985075 | 75 | 0.646953 | 249 | 1,674 | 4.212851 | 0.297189 | 0.047664 | 0.045758 | 0.06101 | 0.213537 | 0.152526 | 0.051478 | 0 | 0 | 0 | 0 | 0.026596 | 0.213859 | 1,674 | 66 | 76 | 25.363636 | 0.770517 | 0 | 0 | 0.148148 | 0 | 0 | 0.006575 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.092593 | false | 0 | 0.074074 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d2646ddaa7ba21f35f69ed1044171f259cfecec | 1,327 | py | Python | Walker_1.1.py | ey3lock3r/The-Nature-of-Code | cca3a0359a46570b1cf0b02315be8cee1728a01a | [
"MIT"
] | null | null | null | Walker_1.1.py | ey3lock3r/The-Nature-of-Code | cca3a0359a46570b1cf0b02315be8cee1728a01a | [
"MIT"
] | null | null | null | Walker_1.1.py | ey3lock3r/The-Nature-of-Code | cca3a0359a46570b1cf0b02315be8cee1728a01a | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import noise
map = lambda n, start1, stop1, start2, stop2: ((n-start1)/(stop1-start1))*(stop2-start2)+start2
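# Processing-style map(): linearly rescale n from [start1, stop1] into
# [start2, stop2], e.g. map(0.5, 0, 1, 0, 320) == 160.0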
class PVector():
def __init__(self, _x, _y):
self.x = _x
self.y = _y
def add(self, v):
self.x += v.x
self.y += v.y
class Walker():
def __init__(self):
self.fig, self.ax = plt.subplots(figsize=(8, 5), subplot_kw=dict(aspect="equal", adjustable='datalim', anchor='C'))
self.fig.set_dpi(100)
self.w = 320
self.h = 180
self.ax.set_xlim((-self.w,self.w))
self.ax.set_ylim((-self.h,self.h))
self.n = PVector(0, 10000)
def step(self, data):
v_noice = PVector(noise.pnoise1(self.n.x), noise.pnoise1(self.n.y))
location = PVector(map(v_noice.x, 0, 1, 0, self.w), map(v_noice.y, 0, 1, 0, self.h))
self.point.center = (location.x, location.y)
self.n.add(PVector(0.01, 0.01))
return [self.point]
def display(self):
        self.point = plt.Circle((0, 0), 10, color='red', alpha=1)  # numeric start centre; step() moves it each frame
self.ax.add_patch(self.point)
ani = animation.FuncAnimation(self.fig, self.step, frames=500, interval=40, blit=True)
plt.show()
agent = Walker()
agent.display() | 31.595238 | 123 | 0.600603 | 204 | 1,327 | 3.808824 | 0.392157 | 0.030888 | 0.034749 | 0.043758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048467 | 0.238131 | 1,327 | 42 | 124 | 31.595238 | 0.720079 | 0 | 0 | 0 | 0 | 0 | 0.012048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.117647 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d2a58865f6970d3204e6af6aaac5002cdd6877f | 962 | py | Python | make/photon/prepare/commands/gencerts.py | n-marton/harbor | 2859cd8b6981d329d2ef6720b90bbb074d370708 | [
"Apache-2.0"
] | 1 | 2019-06-06T02:39:40.000Z | 2019-06-06T02:39:40.000Z | make/photon/prepare/commands/gencerts.py | koulq/harbor | fdb82ae4fa1d5e8987caa076feb7a61f5baae902 | [
"Apache-2.0"
] | null | null | null | make/photon/prepare/commands/gencerts.py | koulq/harbor | fdb82ae4fa1d5e8987caa076feb7a61f5baae902 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import click
import pathlib
from subprocess import check_call, PIPE, STDOUT
from utils.cert import openssl_installed
from utils.misc import get_realpath
gen_tls_script = pathlib.Path(__file__).parent.parent.joinpath('scripts/gencert.sh').absolute()
@click.command()
@click.option('-p', '--path', default='/etc/harbor/tls/internal')
@click.option('-d', '--days', default='365')
def gencert(path, days):
path = get_realpath(path)
click.echo('Check openssl ...')
if not openssl_installed():
        raise Exception('openssl not installed')
click.echo("start generate internal tls certs")
if not os.path.exists(path):
click.echo('path {} not exist, create it...'.format(path))
os.makedirs(path, exist_ok=True)
shell_stat = check_call([gen_tls_script, days], stdout=PIPE, stderr=STDOUT, cwd=path)
if shell_stat != 0:
click.echo('Can not generate internal tls certs')
sys.exit(-1)
| 32.066667 | 95 | 0.697505 | 135 | 962 | 4.844444 | 0.474074 | 0.055046 | 0.036697 | 0.073395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006188 | 0.160083 | 962 | 29 | 96 | 33.172414 | 0.803218 | 0 | 0 | 0 | 0 | 0 | 0.205821 | 0.024948 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.291667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d2b6f960487325e486555e1a8e76de0fbf8d2ff | 11,572 | py | Python | discord/threads.py | lewistham9x/discord.py | 9abe8eacef8ea318f464184bac47f1c37860b73b | [
"MIT"
] | null | null | null | discord/threads.py | lewistham9x/discord.py | 9abe8eacef8ea318f464184bac47f1c37860b73b | [
"MIT"
] | null | null | null | discord/threads.py | lewistham9x/discord.py | 9abe8eacef8ea318f464184bac47f1c37860b73b | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from .mixins import Hashable
from .abc import Messageable
from .enums import ChannelType, try_enum
from . import utils
__all__ = (
'Thread',
'ThreadMember',
)
if TYPE_CHECKING:
from .types.threads import (
Thread as ThreadPayload,
ThreadMember as ThreadMemberPayload,
ThreadMetadata,
ThreadArchiveDuration,
)
from .guild import Guild
from .channel import TextChannel
from .member import Member
from .message import Message
from .abc import Snowflake
class Thread(Messageable, Hashable):
"""Represents a Discord thread.
.. container:: operations
.. describe:: x == y
Checks if two threads are equal.
.. describe:: x != y
Checks if two threads are not equal.
.. describe:: hash(x)
Returns the thread's hash.
.. describe:: str(x)
Returns the thread's name.
.. versionadded:: 2.0
Attributes
-----------
name: :class:`str`
The thread name.
guild: :class:`Guild`
The guild the thread belongs to.
id: :class:`int`
The thread ID.
parent_id: :class:`int`
The parent :class:`TextChannel` ID this thread belongs to.
owner_id: :class:`int`
The user's ID that created this thread.
last_message_id: Optional[:class:`int`]
The last message ID of the message sent to this thread. It may
*not* point to an existing or valid message.
message_count: :class:`int`
An approximate number of messages in this thread. This caps at 50.
member_count: :class:`int`
An approximate number of members in this thread. This caps at 50.
me: Optional[:class:`ThreadMember`]
A thread member representing yourself, if you've joined the thread.
This could not be available.
archived: :class:`bool`
Whether the thread is archived.
archiver_id: Optional[:class:`int`]
The user's ID that archived this thread.
auto_archive_duration: :class:`int`
The duration in minutes until the thread is automatically archived due to inactivity.
Usually a value of 60, 1440, 4320 and 10080.
archive_timestamp: :class:`datetime.datetime`
An aware timestamp of when the thread's archived status was last updated in UTC.
"""
__slots__ = (
'name',
'id',
        'guild',
        'parent_id',
'_type',
'_state',
'owner_id',
'last_message_id',
'message_count',
'member_count',
'me',
'archived',
'archiver_id',
'auto_archive_duration',
'archive_timestamp',
)
def __init__(self, *, guild: Guild, data: ThreadPayload):
self._state = guild._state
self.guild = guild
self._from_data(data)
async def _get_channel(self):
return self
def _from_data(self, data: ThreadPayload):
self.id = int(data['id'])
self.parent_id = int(data['parent_id'])
self.owner_id = int(data['owner_id'])
self.name = data['name']
        self._type = try_enum(ChannelType, data['type'])
        self.last_message_id = utils._get_as_snowflake(data, 'last_message_id')
        # approximate counts supplied in the thread payload (capped at 50)
        self.message_count = data['message_count']
        self.member_count = data['member_count']
self._unroll_metadata(data['thread_metadata'])
try:
member = data['member']
except KeyError:
self.me = None
else:
self.me = ThreadMember(member, self._state)
def _unroll_metadata(self, data: ThreadMetadata):
self.archived = data['archived']
self.archiver_id = utils._get_as_snowflake(data, 'archiver_id')
self.auto_archive_duration = data['auto_archive_duration']
self.archive_timestamp = utils.parse_time(data['archive_timestamp'])
def _update(self, data):
try:
self.name = data['name']
except KeyError:
pass
try:
self._unroll_metadata(data['thread_metadata'])
except KeyError:
            pass
    @property
    def type(self) -> ChannelType:
        """:class:`ChannelType`: The type of channel this thread represents."""
        return self._type
@property
def parent(self) -> Optional[TextChannel]:
"""Optional[:class:`TextChannel`]: The parent channel this thread belongs to."""
return self.guild.get_channel(self.parent_id)
@property
def owner(self) -> Optional[Member]:
"""Optional[:class:`Member`]: The member this thread belongs to."""
return self.guild.get_member(self.owner_id)
@property
def last_message(self) -> Optional[Message]:
"""Fetches the last message from this channel in cache.
The message might not be valid or point to an existing message.
.. admonition:: Reliable Fetching
:class: helpful
For a slightly more reliable method of fetching the
last message, consider using either :meth:`history`
or :meth:`fetch_message` with the :attr:`last_message_id`
attribute.
Returns
---------
Optional[:class:`Message`]
The last message in this channel or ``None`` if not found.
"""
return self._state._get_message(self.last_message_id) if self.last_message_id else None
def is_private(self) -> bool:
""":class:`bool`: Whether the thread is a private thread."""
return self.type is ChannelType.private_thread
async def edit(
self,
*,
name: str = ...,
archived: bool = ...,
auto_archive_duration: ThreadArchiveDuration = ...,
):
"""|coro|
Edits the thread.
To unarchive a thread :attr:`~.Permissions.send_messages` is required. Otherwise,
:attr:`~.Permissions.manage_messages` is required to edit the thread.
Parameters
------------
name: :class:`str`
The new name of the thread.
archived: :class:`bool`
Whether to archive the thread or not.
auto_archive_duration: :class:`int`
The new duration to auto archive threads for inactivity.
Raises
-------
Forbidden
You do not have permissions to edit the thread.
HTTPException
Editing the thread failed.
"""
payload = {}
if name is not ...:
payload['name'] = str(name)
if archived is not ...:
payload['archived'] = archived
if auto_archive_duration is not ...:
payload['auto_archive_duration'] = auto_archive_duration
await self._state.http.edit_channel(self.id, **payload)
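        # Hypothetical usage (assumes an existing ``thread`` instance):
        #     await thread.edit(name='renamed-thread', auto_archive_duration=1440)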
async def join(self):
"""|coro|
Joins this thread.
You must have :attr:`~Permissions.send_messages` and :attr:`~Permissions.use_threads`
to join a public thread. If the thread is private then :attr:`~Permissions.send_messages`
and either :attr:`~Permissions.use_private_threads` or :attr:`~Permissions.manage_messages`
is required to join the thread.
Raises
-------
Forbidden
You do not have permissions to join the thread.
HTTPException
Joining the thread failed.
"""
await self._state.http.join_thread(self.id)
async def leave(self):
"""|coro|
Leaves this thread.
Raises
-------
HTTPException
Leaving the thread failed.
"""
await self._state.http.leave_thread(self.id)
async def add_user(self, user: Snowflake):
"""|coro|
Adds a user to this thread.
You must have :attr:`~Permissions.send_messages` and :attr:`~Permissions.use_threads`
to add a user to a public thread. If the thread is private then :attr:`~Permissions.send_messages`
and either :attr:`~Permissions.use_private_threads` or :attr:`~Permissions.manage_messages`
is required to add a user to the thread.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to add to the thread.
Raises
-------
Forbidden
You do not have permissions to add the user to the thread.
HTTPException
Adding the user to the thread failed.
"""
await self._state.http.add_user_to_thread(self.id, user.id)
async def remove_user(self, user: Snowflake):
"""|coro|
Removes a user from this thread.
You must have :attr:`~Permissions.manage_messages` or be the creator of the thread to remove a user.
Parameters
-----------
user: :class:`abc.Snowflake`
The user to add to the thread.
Raises
-------
Forbidden
You do not have permissions to remove the user from the thread.
HTTPException
Removing the user from the thread failed.
"""
await self._state.http.remove_user_from_thread(self.id, user.id)
async def delete(self):
"""|coro|
Deletes this thread.
You must have :attr:`~Permissions.manage_channels` to delete threads.
Raises
-------
Forbidden
You do not have permissions to delete this thread.
HTTPException
Deleting the thread failed.
"""
await self._state.http.delete_channel(self.id)
class ThreadMember(Hashable):
"""Represents a Discord thread member.
.. container:: operations
.. describe:: x == y
Checks if two thread members are equal.
.. describe:: x != y
Checks if two thread members are not equal.
.. describe:: hash(x)
Returns the thread member's hash.
.. describe:: str(x)
Returns the thread member's name.
.. versionadded:: 2.0
Attributes
-----------
id: :class:`int`
The thread member's ID.
thread_id: :class:`int`
The thread's ID.
joined_at: :class:`datetime.datetime`
The time the member joined the thread in UTC.
"""
__slots__ = (
'id',
'thread_id',
'joined_at',
'flags',
'_state',
)
def __init__(self, data: ThreadMemberPayload, state):
self._state = state
self._from_data(data)
def _from_data(self, data: ThreadMemberPayload):
self.id = int(data['user_id'])
self.thread_id = int(data['id'])
self.joined_at = utils.parse_time(data['join_timestamp'])
self.flags = data['flags']
| 30.293194 | 108 | 0.616488 | 1,406 | 11,572 | 4.955903 | 0.202703 | 0.046498 | 0.014208 | 0.015499 | 0.339122 | 0.283439 | 0.239524 | 0.179535 | 0.120551 | 0.098881 | 0 | 0.003279 | 0.288541 | 11,572 | 381 | 109 | 30.372703 | 0.843071 | 0.338922 | 0 | 0.184 | 0 | 0 | 0.086697 | 0.01445 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0.016 | 0.096 | 0 | 0.248 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d2c0268ef322fad9aa747f4d00280b173987f3e | 3,222 | py | Python | hardware/testbenches/common/drivers/axi4stream/init.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | 6 | 2021-06-28T05:52:15.000Z | 2022-03-27T20:45:28.000Z | hardware/testbenches/common/drivers/axi4stream/init.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | null | null | null | hardware/testbenches/common/drivers/axi4stream/init.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Peter Birch, mailto:peter@lightlogic.co.uk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cocotb_bus.drivers import Driver
from cocotb.triggers import RisingEdge
from .common import AXI4StreamTransaction
class AXI4StreamInitiator(Driver):
""" Testbench driver acting as an initiator of an AXI4-Stream interface """
def __init__(self, entity, clock, reset, intf):
""" Initialise the AXI4StreamInitiator instance.
Args:
entity : Pointer to the testbench/DUT
clock : Clock signal for the interface
reset : Reset signal for the interface
intf : Interface
"""
self.entity = entity
self.clock = clock
self.reset = reset
self.intf = intf
self.busy = False
super().__init__()
async def _driver_send(self, transaction, sync=True, **kwargs):
""" Send queued transactions onto the interface.
Args:
transaction: Transaction to send
sync : Align to the rising clock edge before sending
**kwargs : Any other arguments
"""
# Lock
self.busy = True
# Check for the correct transaction type
assert isinstance(transaction, AXI4StreamTransaction), \
"Bad AXI4-Stream transaction object"
# Synchronise to the rising edge
if sync: await RisingEdge(self.clock)
# Wait for reset to clear
while self.reset == 1: await RisingEdge(self.clock)
# Drive the transaction interface
all_bytes = transaction.data[:]
data_width = self.intf.width("tdata")
num_bytes = data_width // 8
        for chunk, strobe in transaction.pack(num_bytes):
            # Consume the bytes covered by this beat so that TLAST asserts on
            # the final (possibly partial) beat, when all_bytes runs empty
            all_bytes = all_bytes[num_bytes:]
            # Setup compulsory fields
            self.intf.tdata <= chunk
            self.intf.tvalid <= 1
            self.intf.tlast <= 0 if all_bytes else 1
# Setup optional fields
self.intf.set("tstrb" , strobe)
self.intf.set("tkeep" , strobe)
self.intf.set("tid" , transaction.id)
self.intf.set("tdest" , transaction.dest)
self.intf.set("tuser" , transaction.user)
self.intf.set("twakeup", transaction.wakeup)
# Wait for transaction to be accepted
while True:
await RisingEdge(self.clock)
if self.intf.tready == 1: break
# Clear the valid
self.intf.tvalid <= 0
# Release
self.busy = False
async def idle(self):
await RisingEdge(self.clock)
if not self._sendQ and not self.busy: return
while self._sendQ or self.busy: await RisingEdge(self.clock)
| 37.905882 | 79 | 0.625078 | 389 | 3,222 | 5.128535 | 0.437018 | 0.05213 | 0.033083 | 0.06015 | 0.026065 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009267 | 0.29671 | 3,222 | 84 | 80 | 38.357143 | 0.871139 | 0.337678 | 0 | 0.102564 | 0 | 0 | 0.038037 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 1 | 0.025641 | false | 0 | 0.076923 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d2ca382df7cba262c4e6017e9421c3abf0eb32c | 8,871 | py | Python | src/bsc/mace.py | bryant1410/arxiv2018-bayesian-ensembles | d97cf64270d34b2301903678e6fbfe170c4c2105 | [
"Apache-2.0"
] | null | null | null | src/bsc/mace.py | bryant1410/arxiv2018-bayesian-ensembles | d97cf64270d34b2301903678e6fbfe170c4c2105 | [
"Apache-2.0"
] | null | null | null | src/bsc/mace.py | bryant1410/arxiv2018-bayesian-ensembles | d97cf64270d34b2301903678e6fbfe170c4c2105 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from scipy.special import logsumexp
from scipy.special.basic import psi
class MACEWorker():
# Worker model: MACE-like spammer model --------------------------------------------------------------------------------
# alpha[0,:] and alpha[1,:] are parameters for the spamming probability
# alpha[2:2+nscores,:] are parameters for the spamming pattern
# similarly for lnPi:
# lnPi[1, :] = ln p(correct answer)
# lnPi[0, :] = ln p(incorrect/spam answer)
# lnPi[2:2+nscores, :] = ln p(label given worker is spamming/incorrect)
def _init_alpha0(alpha0_diags, alpha0_factor, L):
alpha0 = alpha0_factor * np.ones((2 + L))
alpha0[1] = alpha0_diags # diags are bias toward correct answer
alpha0_data = np.copy(alpha0)
alpha0_data[:] = alpha0_factor
alpha0_data[1] = alpha0_diags
return alpha0, alpha0_data
def _init_lnPi(alpha0):
# Returns the initial values for alpha and lnPi
psi_alpha_sum = np.zeros_like(alpha0)
psi_alpha_sum[0, :] = psi(alpha0[0,:] + alpha0[1, :])
psi_alpha_sum[1, :] = psi_alpha_sum[0, :]
psi_alpha_sum[2:, :] = psi(np.sum(alpha0[2:, :], 0))[None, :]
lnPi = psi(alpha0) - psi_alpha_sum
# init to prior
alpha = np.copy(alpha0)
return alpha, lnPi
def _calc_q_pi(alpha):
'''
Update the annotator models.
'''
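        # Blockwise E[ln pi] under the variational posterior: rows 0-1 form a
        # Beta over spamming vs. knowing, rows 2: a Dirichlet over the spamming
        # strategy, so each block subtracts psi() of its own parameter sum.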
psi_alpha_sum = np.zeros_like(alpha)
psi_alpha_sum[0, :] = psi(alpha[0,:] + alpha[1, :])
psi_alpha_sum[1, :] = psi_alpha_sum[0, :]
psi_alpha_sum[2:, :] = psi(np.sum(alpha[2:, :], 0))[None, :]
ElnPi = psi(alpha) - psi_alpha_sum
# ElnPi[0, :] = np.log(0.5)
# ElnPi[1, :] = np.log(0.5)
# ElnPi[2:, :] = np.log(1.0 / float(alpha.shape[1] - 2))
return ElnPi
def _post_alpha(E_t, C, alpha0, alpha, doc_start, nscores, before_doc_idx=-1): # Posterior Hyperparameters
'''
Update alpha.
'''
        # Reusing some equations from the Java MACE implementation.
# strategyMarginal[i,k] = <scalar per worker> p(k knows vs k is spamming for item i | pi, C, E_t)? = ...
# ... = \sum_j{ E_t[i, j] / (pi[0,k]*pi[2+C[i,k],k] + pi[1,k]*[C[i,k]==j] } * pi[0,k] * pi[2+C[i,k],k]
# instanceMarginal = \sum_j p(t_i = j) = term used for normalisation
# spamming = competence = accuracy = pi[1]
# a = annotator
# d = item number
# ai = index of annotator a's annotation for item d
# goldlabelmarginals[d] = p(C, t_i = j) = prior(t_i=j) * \prod_k (pi[0,k] * pi[2+C[i,k],k] + pi[1,k] * [C[i,k]==j])
# [labels[d][ai]] = C[i, :]
# thetas = pi[2:,:] = strategy params
# strategyExpectedCounts[a][labels[d][ai]] = pseudo-count for each spamming action = alpha[2+C[i,k], k] += ...
# ... += strategyMarginal[i,k] / instanceMarginal
# knowingExpectedCounts[a][0]+=strategyMarginal/instanceMarginal ->alpha[0,k]+=strategyMarginal/instanceMarginal
# knowingExpectedCounts[a][1] += (goldLabelMarginals[d][labels[d][ai]] * spamming[a][1] / (spamming[a][0] *
# ...thetas[a][labels[d][ai]] + spamming[a][1])) / instanceMarginal;
# ... -> alpha[1,k] += E_t[i, C[i,k]] * pi[1,k] / (pi[0,k]*pi[2+C[i,k],k] + pi[1,k]) / instanceMarginal
# ... everything is normalised by instanceMarginal because goldlabelMarginals is not normalised and is actually
# a joint probability
# start by determining the probability of not spamming at each data point using current estimates of pi
pknowing = 0
pspamming = 0
Pi = np.zeros_like(alpha)
Pi[0, :] = alpha[0, :] / (alpha[0, :] + alpha[1, :])
Pi[1, :] = alpha[1, :] / (alpha[0, :] + alpha[1, :])
Pi[2:, :] = alpha[2:, :] / np.sum(alpha[2:, :], 0)[None, :]
pspamming_j_unnormed = Pi[0, :][None, :] * Pi[C + 1, np.arange(C.shape[1])[None, :]]
for j in range(E_t.shape[1]):
Tj = E_t[:, j:j+1]
pknowing_j_unnormed = (Pi[1,:][None, :] * (C == j + 1))
pknowing_j = pknowing_j_unnormed / (pknowing_j_unnormed + pspamming_j_unnormed)
pspamming_j = pspamming_j_unnormed / (pknowing_j_unnormed + pspamming_j_unnormed)
# The cases where worker has not given a label are not really spam!
pspamming_j[C==0] = 0
pknowing += pknowing_j * Tj
pspamming += pspamming_j * Tj
correct_count = np.sum(pknowing, 0)
incorrect_count = np.sum(pspamming, 0)
alpha[1, :] = alpha0[1, :] + correct_count
alpha[0, :] = alpha0[0, :] + incorrect_count
for l in range(nscores):
strategy_count_l = np.sum((C == l + 1) * pspamming, 0)
alpha[l+2, :] = alpha0[l+2, :] + strategy_count_l
return alpha
def _post_alpha_data(E_t, C, alpha0, alpha, doc_start, nscores, before_doc_idx=-1): # Posterior Hyperparameters
'''
Update alpha when C is the votes for one annotator, and each column contains a probability of a vote.
'''
# start by determining the probability of not spamming at each data point using current estimates of pi
pknowing = 0
pspamming = 0
Pi = np.zeros_like(alpha)
Pi[0, :] = alpha[0, :] / (alpha[0, :] + alpha[1, :])
Pi[1, :] = alpha[1, :] / (alpha[0, :] + alpha[1, :])
Pi[2:, :] = alpha[2:, :] / np.sum(alpha[2:, :], 0)[None, :]
pspamming_j_unnormed = 0
for j in range(C.shape[1]):
            pspamming_j_unnormed += Pi[0, :] * Pi[j + 2, :] * C[:, j:j+1]  # strategy rows start at index 2, matching _post_alpha
for j in range(E_t.shape[1]):
Tj = E_t[:, j:j+1]
pknowing_j_unnormed = (Pi[1,:][None, :] * (C[:, j:j+1]))
pknowing_j = pknowing_j_unnormed / (pknowing_j_unnormed + pspamming_j_unnormed)
pspamming_j = pspamming_j_unnormed / (pknowing_j_unnormed + pspamming_j_unnormed)
pknowing += pknowing_j * Tj
pspamming += pspamming_j * Tj
correct_count = np.sum(pknowing, 0)
incorrect_count = np.sum(pspamming, 0)
alpha[1, :] = alpha0[1, :] + correct_count
alpha[0, :] = alpha0[0, :] + incorrect_count
for l in range(nscores):
strategy_count_l = np.sum((C[:, l:l+1]) * pspamming, 0)
alpha[l+2, :] = alpha0[l+2, :] + strategy_count_l
return alpha
def _read_lnPi(lnPi, l, C, Cprev, Krange, nscores):
ll_incorrect = lnPi[0, Krange] + lnPi[C+2, Krange]
if np.isscalar(C):
N = 1
if C == -1:
ll_incorrect = 0
else:
N = C.shape[0]
ll_incorrect[C == -1] = 0
if np.isscalar(Krange):
K = 1
else:
K = Krange.shape[-1]
if l is None:
ll_correct = np.zeros((nscores, N, K))
for m in range(nscores):
if np.isscalar(C) and C == m:
ll_correct[m] = lnPi[1, Krange]
elif np.isscalar(C) and C != m:
ll_correct[m] = - np.inf
else:
idx = (C == m).astype(int)
ll_correct[m] = lnPi[1, Krange] * idx
ll_correct[m, idx==0] = -np.inf
ll_incorrect = np.tile(ll_incorrect, (nscores, 1, 1))
else:
if np.isscalar(C) and C == l:
ll_correct = lnPi[1, Krange]
elif np.isscalar(C) and C != l:
ll_correct = - np.inf
else:
idx = (C == l).astype(int)
ll_correct = lnPi[1, Krange] * idx
ll_correct[idx == 0] = - np.inf
return logsumexp([ll_correct, ll_incorrect], axis=0)
def _expand_alpha0(alpha0, alpha0_data, K, nscores, uniform_priors):
'''
Take the alpha0 for one worker and expand.
:return:
'''
        # (removed an unused "L = alpha0.shape[0]" here: alpha0 may still be None at this point)
# set priors
if alpha0 is None:
            # dims: true label t[i], current annotation c[i], previous annotation c[i-1], annotator k
alpha0 = np.ones((nscores + 2, K))
alpha0[1, :] += 1.0
else:
alpha0 = alpha0[:, None]
alpha0 = np.tile(alpha0, (1, K))
alpha0[:, uniform_priors] = alpha0[0, uniform_priors]
if alpha0_data is None:
alpha0_data = np.ones((nscores + 2, 1))
alpha0_data[1, :] += 1.0
elif alpha0_data.ndim == 1:
alpha0_data = alpha0_data[:, None]
return alpha0, alpha0_data
def _calc_EPi(alpha):
pi = np.zeros_like(alpha)
pi[0] = alpha[0] / (alpha[0] + alpha[1])
pi[1] = alpha[1] / (alpha[0] + alpha[1])
pi[2:] = alpha[2:] / np.sum(alpha[2:], axis=0)[None, :]
        return pi
 | 36.356557 | 125 | 0.535791 | 1,210 | 8,871 | 3.790083 | 0.147934 | 0.022242 | 0.028783 | 0.018317 | 0.469472 | 0.424335 | 0.386611 | 0.386611 | 0.37898 | 0.357828 | 0 | 0.036464 | 0.307519 | 8,871 | 244 | 126 | 36.356557 | 0.710077 | 0.284748 | 0 | 0.366412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061069 | false | 0 | 0.022901 | 0 | 0.152672 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
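# Editor's sketch (not part of the original module): a tiny smoke test of the
# prior/expectation helpers above for K=3 workers and L=2 label values.
if __name__ == '__main__':
    toy_alpha0 = np.ones((2 + 2, 3))   # rows: [spam, know, strategy_0, strategy_1]
    toy_alpha0[1, :] += 1.0            # mild prior bias toward knowing the answer
    toy_alpha, toy_lnPi = MACEWorker._init_lnPi(toy_alpha0)
    print('E[p(correct)] per worker:', MACEWorker._calc_EPi(toy_alpha)[1, :])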
0d2dd29aaf64d74ebb6ddf68f9ba97a38795078e | 2,069 | py | Python | worker intelligence/segment_path.py | hotpoor/XdHacks_201910_1920_automove | 47ecfd3470d1586b07dc1c44422eb5253f3a6659 | [
"MIT"
] | null | null | null | worker intelligence/segment_path.py | hotpoor/XdHacks_201910_1920_automove | 47ecfd3470d1586b07dc1c44422eb5253f3a6659 | [
"MIT"
] | null | null | null | worker intelligence/segment_path.py | hotpoor/XdHacks_201910_1920_automove | 47ecfd3470d1586b07dc1c44422eb5253f3a6659 | [
"MIT"
] | 2 | 2019-11-13T06:11:25.000Z | 2020-03-13T06:19:00.000Z |
import numpy as np
import argparse
import imutils
import time
import cv2
from mss import mss
COLORS = open("RoadSeg/seg-colors.txt").read().strip().split("\n")
COLORS = [np.array(c.split(",")).astype("int") for c in COLORS]
COLORS = np.array(COLORS, dtype="uint8")
net = cv2.dnn.readNet("RoadSeg/seg-model.net")
# Read the screen-capture region from the config file
screen_config = []
f = open("screen_config.txt", "r")
for line in f:
screen_config.append(line)
f.close()
while True:
    # Grab a screenshot of the configured region
sct = mss()
monitor = {'left': int(screen_config[0]), 'top': int(screen_config[1]), 'width': int(screen_config[2]), 'height': int(screen_config[3])}
imgRaw = sct.grab(monitor)
img = np.array(imgRaw)
image2 = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
blob = cv2.dnn.blobFromImage(image2, 1 / 255.0, (256, 256), 0,
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
output = net.forward()
end = time.time()
(numClasses, height, width) = output.shape[1:4]
classMap = np.argmax(output[0], axis=0)
mask = COLORS[classMap]
mask = cv2.resize(mask, (image2.shape[1], image2.shape[0]),
interpolation=cv2.INTER_NEAREST)
#---------------------------------------
gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (15, 15),15)
ret, binary = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY_INV)
zoneSegNum = 10
zoneHeight = int(int(screen_config[3])/10)
zoneBin = []
    for i in range(zoneSegNum):
        # binary[:] is only a view for numpy arrays, so use copy() to avoid
        # clobbering the original mask; the duplicated assignment was dropped
        # and the slice now uses zoneHeight, which the loop apparently intended.
        temp = binary.copy()
        temp[i * zoneHeight:, :] = 255
        cv2.imshow("bin3", temp)
        key = cv2.waitKey(1) & 0xFF
    bin3 = binary.copy()
    bin3[100:300, :] = 255
    bin3[350:, :] = 255
cv2.imshow("bin3", bin3)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) # for opencv4.1
if (len(contours) > 1 ):
mom = cv2.moments(contours[1])
pt = (int(mom['m10'] / mom['m00']), int(mom['m01'] / mom['m00']))
cv2.circle(image2, pt, 2, (0, 255, 255), 2)
cv2.imshow("binaray", binary)
# ---------------------------------------
    # Render the segmentation overlay
output = ((image2) + (0.5 * mask)).astype("uint8")
cv2.imshow("Frame", output)
    key = cv2.waitKey(1) & 0xFF
 | 24.341176 | 137 | 0.632189 | 304 | 2,069 | 4.25 | 0.424342 | 0.074303 | 0.05805 | 0.024768 | 0.027864 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071914 | 0.146448 | 2,069 | 85 | 138 | 24.341176 | 0.659683 | 0.049783 | 0 | 0.070175 | 0 | 0 | 0.064796 | 0.021939 | 0 | 0 | 0.004082 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
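# Editor's sketch (standalone): the contour-centroid step from the loop above,
# run on a synthetic binary mask so no screen capture or segmentation model is
# needed. Assumes OpenCV >= 4, where findContours returns (contours, hierarchy).
import cv2
import numpy as np

demo = np.full((100, 100), 255, dtype=np.uint8)
cv2.rectangle(demo, (30, 40), (70, 80), 0, -1)                # draw one dark blob
cnts, _ = cv2.findContours(255 - demo, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
m = cv2.moments(cnts[0])
print(int(m['m10'] / m['m00']), int(m['m01'] / m['m00']))     # blob centroid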
0d30310fe782f189939b1f87e316277289556088 | 6,229 | py | Python | custom_components/sleepiq_custom/switch.py | brianlich/sleepiq-custom | 2610f945c9037d7f63213190e3f6aebcf91f172f | [
"MIT"
] | null | null | null | custom_components/sleepiq_custom/switch.py | brianlich/sleepiq-custom | 2610f945c9037d7f63213190e3f6aebcf91f172f | [
"MIT"
] | null | null | null | custom_components/sleepiq_custom/switch.py | brianlich/sleepiq-custom | 2610f945c9037d7f63213190e3f6aebcf91f172f | [
"MIT"
] | null | null | null |
import logging
from homeassistant import config_entries
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.components.switch import SwitchEntity, DEVICE_CLASS_SWITCH
from . import SleepIQDataUpdateCoordinator, SleepIQDevice
from .const import ATTRIBUTION_TEXT, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass, config_entry: config_entries.ConfigEntry, async_add_entities
):
"""Set up a bed from a config entry."""
coordinator: SleepIQDataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
switches = []
# if coordinator.data.light1 is not None:
switches.append(ResponsiveAirSwitch(coordinator, "left"))
switches.append(ResponsiveAirSwitch(coordinator, "right"))
switches.append(PrivacyModeSwitch(coordinator))
async_add_entities(switches)
class PrivacyModeSwitch(SleepIQDevice, SwitchEntity):
"""Representation of a SleepIQ responsive air switch."""
def __init__(self, coordinator: SleepIQDataUpdateCoordinator):
"""Initialize the sensor."""
super().__init__(coordinator)
self._coordinator = coordinator
self._unique_id = DOMAIN + "_" + self._coordinator.data.bedId + "_privacy_mode"
self._name = "Sleep Number privacy mode"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {
"accountId": self._coordinator.data.privacy_mode.accountId,
"bedId": self._coordinator.data.privacy_mode.bedId,
"pauseMode": self._coordinator.data.privacy_mode.pauseMode,
ATTR_ATTRIBUTION: ATTRIBUTION_TEXT,
}
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_SWITCH
async def async_turn_on(self):
"""Send the on command."""
_LOGGER.debug("Turning on privacy mode")
self._coordinator.data.privacy_mode.pauseMode = "on"
await self._coordinator.sleepiq.turn_on_privacy_mode()
async def async_turn_off(self, **kwargs):
"""Send the off command."""
_LOGGER.debug("Turning off privacy mode")
self._coordinator.data.privacy_mode.pauseMode = "off"
await self._coordinator.sleepiq.turn_off_privacy_mode()
@property
def is_on(self):
"""Get whether the switch is in on state."""
if self._coordinator.data.privacy_mode.pauseMode == "off":
return False
elif self._coordinator.data.privacy_mode.pauseMode == "on":
return True
class ResponsiveAirSwitch(SleepIQDevice, SwitchEntity):
"""Representation of a SleepIQ responsive air switch."""
def __init__(self, coordinator: SleepIQDataUpdateCoordinator, side):
"""Initialize the sensor."""
super().__init__(coordinator)
self._coordinator = coordinator
self._side = side
self._unique_id = (
DOMAIN
+ "_"
+ self._coordinator.data.bedId
+ "_"
+ self._side
+ "responsive_air"
)
if self._side.lower() == "left":
self._name = (
self._coordinator.data.left_side.sleeper.firstName + " responsive air"
)
elif self._side.lower() == "right":
self._name = (
self._coordinator.data.right_side.sleeper.firstName + " responsive air"
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {
"adjustmentThreshold": self._coordinator.data.responsive_air.adjustmentThreshold,
"inBedTimeout": self._coordinator.data.responsive_air.inBedTimeout,
"leftSideEnabled": self._coordinator.data.responsive_air.leftSideEnabled,
"outOfBedTimeout": self._coordinator.data.responsive_air.outOfBedTimeout,
"pollFrequency": self._coordinator.data.responsive_air.pollFrequency,
"prefSyncState": self._coordinator.data.responsive_air.prefSyncState,
"rightSideEnabled": self._coordinator.data.responsive_air.rightSideEnabled,
ATTR_ATTRIBUTION: ATTRIBUTION_TEXT,
}
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_SWITCH
async def async_turn_on(self, **kwargs):
"""Send the on command."""
_LOGGER.debug("Turning on %s", self._name)
if self._side.lower() == "left":
self._coordinator.data.responsive_air.leftSideEnabled = True
        elif self._side.lower() == "right":
            # fix: toggle the right side (was leftSideEnabled, a copy-paste slip)
            self._coordinator.data.responsive_air.rightSideEnabled = True
await self._coordinator.sleepiq.turn_on_responsive_air(self._side)
async def async_turn_off(self, **kwargs):
"""Send the off command."""
_LOGGER.debug("Turning off %s", self._name)
if self._side.lower() == "left":
self._coordinator.data.responsive_air.leftSideEnabled = False
        elif self._side.lower() == "right":
            # fix: toggle the right side (was leftSideEnabled, a copy-paste slip)
            self._coordinator.data.responsive_air.rightSideEnabled = False
await self._coordinator.sleepiq.turn_off_responsive_air(self._side)
# await self.tesla_device.stop_charge()
# self.async_write_ha_state()
@property
def is_on(self):
"""Get whether the switch is in on state."""
if self._side.lower() == "left":
return self._coordinator.data.responsive_air.leftSideEnabled
elif self._side.lower() == "right":
return self._coordinator.data.responsive_air.rightSideEnabled
else:
return None
# if self.tesla_device.is_charging() is None:
# return None
        # return self.tesla_device.is_charging()
 | 36.215116 | 93 | 0.655964 | 662 | 6,229 | 5.912387 | 0.15861 | 0.122637 | 0.116505 | 0.096321 | 0.699796 | 0.593255 | 0.484926 | 0.458866 | 0.394481 | 0.394481 | 0 | 0.000212 | 0.241451 | 6,229 | 172 | 94 | 36.215116 | 0.828148 | 0.10997 | 0 | 0.444444 | 0 | 0 | 0.063862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.051282 | 0 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
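# Editor's sketch (plain Python, not the Home Assistant API): the per-side
# dispatch used by ResponsiveAirSwitch above, isolated so the left/right
# copy-paste hazard fixed above is easy to unit-test.
from types import SimpleNamespace

def set_responsive_air(data, side, enabled):
    # mutate exactly the attribute that matches the requested side
    attr = "leftSideEnabled" if side.lower() == "left" else "rightSideEnabled"
    setattr(data.responsive_air, attr, enabled)

state = SimpleNamespace(responsive_air=SimpleNamespace(
    leftSideEnabled=False, rightSideEnabled=False))
set_responsive_air(state, "right", True)
assert state.responsive_air.rightSideEnabled and not state.responsive_air.leftSideEnabled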
0d304089ceaf983f4579b7204f7deafcca5ebf04 | 9,711 | py | Python | cdk/cdk/cdk_stack.py | yyolk/issue.cash | ba931032df833cf81065b6bdc33a7baf425c5a0d | [
"0BSD"
] | null | null | null | cdk/cdk/cdk_stack.py | yyolk/issue.cash | ba931032df833cf81065b6bdc33a7baf425c5a0d | [
"0BSD"
] | null | null | null | cdk/cdk/cdk_stack.py | yyolk/issue.cash | ba931032df833cf81065b6bdc33a7baf425c5a0d | [
"0BSD"
] | null | null | null |
from aws_cdk import core as cdk
# For consistency with other languages, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import aws_lambda as lambda_
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk import aws_stepfunctions_tasks as tasks
from aws_cdk import aws_dynamodb as dynamodb
from aws_cdk import aws_sam as sam
from aws_cdk import aws_iam as iam
bundle_python_function_with_requirements = cdk.BundlingOptions(
image=lambda_.Runtime.PYTHON_3_9.bundling_docker_image,
command=[
"/bin/bash",
"-c",
(
"python -m venv .venv &&"
".venv/bin/python -m pip install -r /asset-input/requirements.txt &&"
"cp -r .venv/lib/python3.9/site-packages/* /asset-output/"
"; cp /asset-input/*.py /asset-output/"
),
],
user="root",
)
class CdkStack(cdk.Stack):
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# issuers_table = dynamodb.Table(
# self,
# "IssuersTable",
# billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
# )
issuers_table_key_schema = dynamodb.CfnTable.KeySchemaProperty(
attribute_name="issuer_currency", key_type="HASH"
)
issuers_table = dynamodb.CfnTable(
self,
"IssuersTable",
table_name="IssuersTable",
key_schema=[issuers_table_key_schema],
attribute_definitions=[
dynamodb.CfnTable.AttributeDefinitionProperty(
attribute_name="issuer_currency", attribute_type="S"
),
],
billing_mode="PAY_PER_REQUEST",
)
# issuers_table = sam.CfnSimpleTable(
# self,
# "IssuersTable",
# table_name="IssuersTable",
# key_schema=[issuers_table_key_schema],
# attribute_definitions=[
# dynamodb.CfnTable.AttributeDefinitionProperty(
# attribute_name="issuer_currency", attribute_type="S"
# ),
# ],
# billing_mode="PAY_PER_REQUEST",
# )
generate_issuers_function = lambda_.Function(
self,
"GenerateIssuersFunction",
code=lambda_.Code.from_asset(
"functions/generate_issuers/",
bundling=bundle_python_function_with_requirements,
),
runtime=lambda_.Runtime.PYTHON_3_9,
handler="function.handler",
timeout=cdk.Duration.seconds(60),
environment={
"ISSUERS_TABLE_NAME": issuers_table.table_name,
},
)
persist_issuers_function = lambda_.Function(
self,
"PersistIssuersFunction",
code=lambda_.Code.from_asset(
"functions/persist_issuers/",
bundling=bundle_python_function_with_requirements,
),
runtime=lambda_.Runtime.PYTHON_3_9,
handler="function.handler",
timeout=cdk.Duration.seconds(19),
environment={
"ISSUERS_TABLE_NAME": issuers_table.table_name,
},
)
# add table r/w permissions to our issuer generator
issuers_table_dynamodb_crud_statement = iam.PolicyStatement(
actions=[
"dynamodb:BatchGetItem",
"dynamodb:GetItem",
"dynamodb:Query",
"dynamodb:Scan",
"dynamodb:BatchWriteItem",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
],
effect=iam.Effect.ALLOW,
resources=[
f"arn:aws:dynamodb:*:*:table/{issuers_table.table_name}",
],
)
generate_issuers_function.add_to_role_policy(
issuers_table_dynamodb_crud_statement
)
persist_issuers_function.add_to_role_policy(
issuers_table_dynamodb_crud_statement
)
generate_faucet_wallet_function = lambda_.Function(
self,
"GenerateFaucetWalletFunction",
code=lambda_.Code.from_asset(
"functions/faucet_wallet/",
bundling=bundle_python_function_with_requirements,
),
runtime=lambda_.Runtime.PYTHON_3_9,
handler="function.handler",
timeout=cdk.Duration.seconds(65),
memory_size=256,
)
grab_order_book_function = lambda_.Function(
self,
"GrabOrderBookFunction",
code=lambda_.Code.from_asset(
"functions/grab_order_book/",
bundling=bundle_python_function_with_requirements,
),
runtime=lambda_.Runtime.PYTHON_3_9,
handler="function.handler",
timeout=cdk.Duration.seconds(18),
)
generate_orders_function = lambda_.Function(
self,
"GenerateOrdersFunction",
code=lambda_.Code.from_asset(
"functions/generate_orders_function/",
bundling=bundle_python_function_with_requirements,
),
runtime=lambda_.Runtime.PYTHON_3_9,
handler="function.handler",
timeout=cdk.Duration.seconds(900),
memory_size=512,
)
state_machine = sfn.StateMachine(
self,
"CreateMarketClone",
# .next(
# tasks.LambdaInvoke(
# self,
# "GenerateFaucetWalletTask",
# lambda_function=generate_faucet_wallet_function,
# )
# )
definition=tasks.LambdaInvoke(
self,
"GenerateFaucetWalletTask",
lambda_function=generate_faucet_wallet_function,
).next(
tasks.LambdaInvoke(
self,
"GenerateIssuers",
input_path="$.Payload",
lambda_function=generate_issuers_function,
# what are we picking from the output?
result_selector={"issuers.$": "$.Payload.issuers"},
)
)
# .next(
# tasks.LambdaInvoke(
# self,
# "GenerateFaucetWalletTask",
# lambda_function=generate_faucet_wallet_function,
# )
# )
.next(
tasks.LambdaInvoke(
self,
"GrabOrderBookTask",
lambda_function=grab_order_book_function,
# what are we picking from output?
result_selector={
"work.$": "$.Payload.distinct_accounts",
},
# where do we put the output in the state?
result_path="$.orders",
)
)
.next(
sfn.Map(
self,
"GenerateOrderWallets",
# not relevant with output_path changed above
items_path="$.orders.work",
# parameters={
# "issuers.$": "$.issuers",
# "work.$": "$.orders.work",
# },
#
#
# concurrency
#
# works pretty good with the faucet endpoint, this is also
# the expected max txns the faucet can put in a single
# ledger
max_concurrency=3,
# max_concurrency=4,
# max_concurrency=5,
# max_concurrency=10,
# CRAZZZY
# max_concurrency=30,
#
#
# results
#
# does this work?
result_path=sfn.JsonPath.DISCARD,
).iterator(
tasks.LambdaInvoke(
self,
"GenerateOrderWalletFromFaucet",
lambda_function=generate_faucet_wallet_function,
# parameters=
# pick from the output
result_selector={
"seed.$": "$.Payload.seed",
"account.$": "$.Payload.account",
},
# place the output in the state
result_path="$.wallet",
)
.next(
tasks.LambdaInvoke(
self,
"GenerateOrdersFromState",
lambda_function=generate_orders_function,
)
)
.next(sfn.Succeed(self, "DistinctOrdersCreated"))
)
)
.next(
tasks.LambdaInvoke(
self,
"PersistIssuers",
lambda_function=persist_issuers_function,
)
)
.next(sfn.Succeed(self, "CreatedMarket")),
)
| 36.923954 | 82 | 0.500463 | 780 | 9,711 | 5.951282 | 0.294872 | 0.036191 | 0.036191 | 0.024128 | 0.496984 | 0.417062 | 0.361482 | 0.331754 | 0.311073 | 0.311073 | 0 | 0.006719 | 0.417568 | 9,711 | 262 | 83 | 37.064886 | 0.814003 | 0.169293 | 0 | 0.342246 | 0 | 0.010695 | 0.144089 | 0.067358 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005348 | false | 0 | 0.037433 | 0 | 0.048128 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
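# Editor's sketch: a minimal CDK v1 entry point (the usual app.py) that
# synthesizes the stack defined above. The import path and stack id are
# assumptions based on the repo layout, not taken from the repo itself.
from aws_cdk import core as cdk
from cdk.cdk_stack import CdkStack

app = cdk.App()
CdkStack(app, "cdk")
app.synth()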
0d31952ea492f717453060dc4a8247a16873ad5a | 2,356 | py | Python | v2/liveread.py | jelson/aqi | 96e3d9646130a8128aba9c190dcb85d7a7efba50 | [
"MIT"
] | 7 | 2021-08-25T08:00:22.000Z | 2022-01-10T19:04:08.000Z | v2/liveread.py | jelson/aqi | 96e3d9646130a8128aba9c190dcb85d7a7efba50 | [
"MIT"
] | null | null | null | v2/liveread.py | jelson/aqi | 96e3d9646130a8128aba9c190dcb85d7a7efba50 | [
"MIT"
] | 1 | 2021-11-03T04:20:05.000Z | 2021-11-03T04:20:05.000Z |
#!/usr/bin/env python3
# read PMS3001 data from the serial port. timestamp each line when it
# arrives. batch into 30-record chunks and insert all records into the
# database. also write json-formatted records to stdout.
import aqi
import argparse
import datetime
import json
import psycopg2
import psycopg2.extras
import sys
MAX_CACHE_SIZE = 30
logfile = sys.stdout
def say(s):
if logfile:
logfile.write(s)
logfile.write("\n")
def insert_batch(db, data):
sys.stderr.write(f"inserting {len(data)} records\n")
insert_query = 'insert into particulate (time, pm10, pm25, pm100, aqi) values %s'
cursor = db.cursor()
psycopg2.extras.execute_values(
cursor,
insert_query,
data,
template=None,
)
db.commit()
def line_arrived(cache, db, t, line):
data = json.loads(line)
printable_data = data.copy()
printable_data['time'] = t.timestamp()
printable_data['ftime'] = t.strftime("%Y-%m-%d %H:%M:%S.%f")
say(json.dumps(printable_data))
sys.stdout.flush()
data['time'] = t
data['aqi'] = int(aqi.to_iaqi(
aqi.POLLUTANT_PM25,
data['pm2.5'],
algo=aqi.ALGO_EPA))
db_record = [
data['time'],
data['pm1.0'],
data['pm2.5'],
data['pm10.0'],
data['aqi'],
]
cache.append(db_record)
if len(cache) >= MAX_CACHE_SIZE:
insert_batch(db, cache)
cache.clear()
def read_forever(db, f):
cache = []
while True:
line = f.readline()
if not line:
say("Got EOF! Terminating")
return
line = line.rstrip()
if line:
line_arrived(cache, db, datetime.datetime.now(), line)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--port",
help="Port to read from",
action='store',
required='true',
)
parser.add_argument(
"-l", "--log",
help='Filename to log to',
action='store'
)
args = parser.parse_args()
say(f"Starting; args: {args}")
if args.log:
global logfile
logfile = open(args.log, "a")
infile = open(args.port, "r")
say("Opened file")
db = psycopg2.connect(database="airquality")
read_forever(db, infile)
say("Read failed!")
main()
| 22.873786 | 85 | 0.58489 | 303 | 2,356 | 4.465347 | 0.419142 | 0.038433 | 0.017738 | 0.026608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018192 | 0.27674 | 2,356 | 102 | 86 | 23.098039 | 0.775822 | 0.090407 | 0 | 0.049383 | 0 | 0 | 0.141187 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061728 | false | 0 | 0.08642 | 0 | 0.160494 | 0.049383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
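# Editor's sketch (assumes it runs in the script's namespace): drive
# line_arrived() once with a fabricated sensor line and a MagicMock standing in
# for the database, so the JSON-parsing/AQI path is checkable without hardware
# or PostgreSQL. With fewer than MAX_CACHE_SIZE records, no insert is attempted.
import datetime as _dt
from unittest.mock import MagicMock

_cache = []
line_arrived(_cache, MagicMock(), _dt.datetime.now(),
             '{"pm1.0": 4, "pm2.5": 12, "pm10.0": 15}')
print(_cache[0])   # [timestamp, pm1.0, pm2.5, pm10.0, computed AQI]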
0d32f4ff6ee71312c513d4994af8ed77bd5fe5e9 | 165 | py | Python | examples/simple.py | TassieBruce/mplot-pybind | fbed1a131d9fead0dae363b9988daa57ca018330 | [
"MIT"
] | null | null | null | examples/simple.py | TassieBruce/mplot-pybind | fbed1a131d9fead0dae363b9988daa57ca018330 | [
"MIT"
] | null | null | null | examples/simple.py | TassieBruce/mplot-pybind | fbed1a131d9fead0dae363b9988daa57ca018330 | [
"MIT"
] | null | null | null |
#!/usr/bin/python3
import matplotlib.pyplot as plt
x = [0, 1, 2, 3]
y = [0, 1, 4, 9]
fig, ax = plt.subplots()
fig.suptitle("simple")
ax.plot(x, y, "r")
plt.show()
 | 15 | 31 | 0.6 | 32 | 165 | 3.09375 | 0.75 | 0.040404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065693 | 0.169697 | 165 | 11 | 32 | 15 | 0.656934 | 0.10303 | 0 | 0 | 0 | 0 | 0.047297 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d3467136f870f9065110eb11da1c3f9d8589a35 | 2,804 | py | Python | app/spiders/nowcoder_spider.py | Kyooooma/view-oj-backend | 3b2fc9ed0b8b52029b45cd30f90e8dd925a09d35 | [
"Apache-2.0"
] | 6 | 2019-08-05T13:01:19.000Z | 2021-07-16T09:59:45.000Z | app/spiders/nowcoder_spider.py | Kyooooma/view-oj-backend | 3b2fc9ed0b8b52029b45cd30f90e8dd925a09d35 | [
"Apache-2.0"
] | null | null | null | app/spiders/nowcoder_spider.py | Kyooooma/view-oj-backend | 3b2fc9ed0b8b52029b45cd30f90e8dd925a09d35 | [
"Apache-2.0"
] | 6 | 2019-12-05T13:04:38.000Z | 2020-07-05T15:05:40.000Z | import re
from bs4 import BeautifulSoup
from app.config.setting import DEFAULT_PROBLEM_RATING
from app.libs.spider_http import SpiderHttp
from app.spiders.base_spider import BaseSpider
class NowcoderSpider(BaseSpider):
def get_user_info(self, oj_username, accept_problems):
username = oj_username.oj_username
success = False
uid = NowcoderSpider._get_id_by_username(username)
if uid:
username = uid
index = 1
accept_problem_list = []
ok = False
while not ok:
url = 'https://ac.nowcoder.com/acm/contest/profile/{}/' \
'practice-coding?pageSize=200&statusTypeFilter=5&orderType=DESC&page={}'.format(
username, index)
res = SpiderHttp().get(url=url)
if res.status_code != 200:
break
if '<title>页面找不到了</title>' in res.text:
break
if '用户不存在' in res.text:
break
if '没有找到你想要的内容呢' in res.text:
break
success = True
soup = BeautifulSoup(res.text, 'lxml')
trs = soup.find_all('tr')[1:]
for tr in trs:
tds = tr.find_all('td')
accept_time = tds[8].text
problem_id = re.findall(r'/acm/problem/(\d+)', tds[1].find('a')['href'])[0]
if accept_problems.get('nowcoder-' + problem_id) == accept_time:
ok = True
continue
time = accept_problems.get('nowcoder-' + problem_id)
if time is None or time >= accept_time:
accept_problems['nowcoder-' + problem_id] = accept_time
accept_problem_list.append({
'oj': 'nowcoder',
'problem_pid': problem_id,
'accept_time': accept_time
})
index += 1
return {'success': success, 'data': accept_problem_list}
def get_problem_info(self, problem_id):
star_rating = [DEFAULT_PROBLEM_RATING, 800, 1200, 1600, 2000, 2400]
try:
url = 'https://ac.nowcoder.com/acm/problem/list?keyword={}'.format(problem_id)
res = SpiderHttp().get(url=url)
data = re.findall(r'<td>\n(\d+)星\n</td>', res.text)
            star = int(data[0])  # findall returns the captured digit group directly
rating = star_rating[star]
except:
rating = DEFAULT_PROBLEM_RATING
return {'rating': rating}
@staticmethod
def _get_id_by_username(username):
url = 'https://www.nowcoder.com/search?type=all&query={}'.format(username)
res = SpiderHttp().get(url=url)
result = re.findall(r'/profile/(\d+)', res.text)
if not result:
return None
return result[0]
| 37.386667 | 98 | 0.548146 | 320 | 2,804 | 4.640625 | 0.353125 | 0.042424 | 0.040404 | 0.038384 | 0.220875 | 0.078114 | 0 | 0 | 0 | 0 | 0 | 0.019355 | 0.336662 | 2,804 | 74 | 99 | 37.891892 | 0.779032 | 0 | 0 | 0.104478 | 0 | 0 | 0.140514 | 0.032454 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.074627 | 0 | 0.19403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
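# Editor's sketch: the two regular expressions used by the spider above,
# exercised on tiny made-up HTML fragments (illustration only).
import re

print(re.findall(r'/acm/problem/(\d+)', '/acm/problem/12345?from=list'))  # ['12345']
print(re.findall(r'<td>\n(\d+)星\n</td>', '<td>\n3星\n</td>'))              # ['3']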
0d352171e20c80fe211b08987f0ec929af6bf855 | 556 | py | Python | datasets/bspline.py | jiafeng5513/relaynet_pytorch | aa533f7bb08ec640baf5c5bdd3d806a6ec76e4f7 | [
"MIT"
] | null | null | null | datasets/bspline.py | jiafeng5513/relaynet_pytorch | aa533f7bb08ec640baf5c5bdd3d806a6ec76e4f7 | [
"MIT"
] | null | null | null | datasets/bspline.py | jiafeng5513/relaynet_pytorch | aa533f7bb08ec640baf5c5bdd3d806a6ec76e4f7 | [
"MIT"
] | null | null | null |
import numpy as np
import pylab as pl
from scipy import interpolate
import matplotlib.pyplot as plt
x = np.linspace(0, 2*np.pi+np.pi/4, 10)
y = np.sin(x)
x_new = np.linspace(0, 2*np.pi+np.pi/4, 100)
#f_linear = interpolate.interp1d(x, y)
tck = interpolate.splrep(x, y)  # fit to the original sample points (xi, yi)
y_bspline = interpolate.splev(x_new, tck)  # interpolated points are (x_new[i], y_bspline[i])
plt.xlabel(u'ampere / A')
plt.ylabel(u'volt / V')
plt.plot(x, y, "o", label=u"raw data")
#plt.plot(x_new, f_linear(x_new), label=u"linear interpolation")
plt.plot(x_new, y_bspline, label=u"B-spline interpolation")
pl.legend()
pl.show()
 | 25.272727 | 72 | 0.697842 | 114 | 556 | 3.307018 | 0.447368 | 0.05305 | 0.06366 | 0.06366 | 0.111406 | 0.111406 | 0.111406 | 0.111406 | 0.111406 | 0 | 0 | 0.02449 | 0.118705 | 556 | 22 | 73 | 25.272727 | 0.744898 | 0.223022 | 0 | 0 | 0 | 0 | 0.053613 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
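# Editor's sketch: splev can also return derivatives of the fitted spline via
# its `der` argument, which the demo above does not use.
import numpy as np
from scipy import interpolate

xs = np.linspace(0, 2 * np.pi, 10)
tck_demo = interpolate.splrep(xs, np.sin(xs))
xf = np.linspace(0, 2 * np.pi, 100)
dy = interpolate.splev(xf, tck_demo, der=1)      # approximates cos(x)
print(float(np.abs(dy - np.cos(xf)).max()))      # small interpolation error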
0d38c4e32571e359cdb435e64dddbc4b00e04991 | 3,794 | py | Python | meiduo_mall/apps/meiduo_admin/views/statistical.py | Wang-TaoTao/meiduo_project | f95f097c2a85f500d0fd264a58e2f0d92771fff6 | [
"MIT"
] | null | null | null | meiduo_mall/apps/meiduo_admin/views/statistical.py | Wang-TaoTao/meiduo_project | f95f097c2a85f500d0fd264a58e2f0d92771fff6 | [
"MIT"
] | null | null | null | meiduo_mall/apps/meiduo_admin/views/statistical.py | Wang-TaoTao/meiduo_project | f95f097c2a85f500d0fd264a58e2f0d92771fff6 | [
"MIT"
] | null | null | null |
from datetime import date, timedelta
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.goods.models import GoodsVisitCount
from apps.meiduo_admin.serializers.statistical import GoodsVisitCountSerialzer
from apps.orders.models import OrderInfo
from apps.users.models import User
# Daily product-visit count per category
class UserCategoryCountAPIView(APIView):
    def get(self, request):
        # 1. Get today's date
        now_date = date.today()
        # 2. Query today's visit count for every product category
        try:
            goods = GoodsVisitCount.objects.filter(date=now_date)
        except:
            return Response(404)
        # data = []
        # for good in goods:
        #     data.append({
        #         'count': good.count,
        #         'category': good.category.name,
        #     })
        # 3. Serialize the queryset
        s = GoodsVisitCountSerialzer(instance=goods, many=True)
        # 4. Return the result
        return Response(s.data)
# New users per day over the past month
class UserMonthCountAPIView(APIView):
    def get(self, request):
        # 1. Get today's date
        now_date = date.today()
        # 2. Compute the date 30 days before today
        month_start_date = now_date - timedelta(days=30)
        data = []
        # 3. Iterate over the 30 days
        for i in range(30):
            # 3.1 Start date of day i in the window
            start_date = month_start_date + timedelta(i)
            # 3.2 Start of the following day
            end_date = month_start_date + timedelta(i+1)
            # 3.3 Count users who joined within that day
            try:
                count = User.objects.filter(date_joined__gte=start_date, date_joined__lte=end_date).count()
            except:
                return Response(404)
            # 3.4 Append the daily count to the list
            data.append({
                'count': count,
                'date': start_date,
            })
        # 4. Return the result
        return Response(data)
# Users who placed an order today
class UserDailyOrderCountAPIView(APIView):
    # Admin-only permission
    permission_classes = [IsAdminUser]
    def get(self, request):
        # 1. Get today's date
        now_date = date.today()
        # 2. Get every user who placed an order today
        try:
            users = User.objects.filter(orderinfo__create_time__gte=now_date)
        except:
            return Response(404)
        count_list = []
        # 3. Walk the users and de-duplicate them
        for user in users:
            # 3.1 Check whether this user id was already seen
            if user.id not in count_list:
                # 3.2 If not, record the id
                count_list.append(user.id)
        # 4. The list length is the number of distinct users
        count = len(count_list)
        # 5. Return the result
        return Response({
            'count': count,
            'date': now_date,
        })
# Daily active users
class UserDailyActiveCountAPIView(APIView):
    def get(self, request):
        # 1. Get today's date
        now_date = date.today()
        # 2. Count users whose last login is today
        try:
            count = User.objects.filter(last_login__gte=now_date).count()
        except:
            return Response(404)
        # 3. Return the result
        return Response({
            'count': count,
            'date': now_date,
        })
# Users created today
class UserDailyCountAPIView(APIView):
    def get(self, request):
        # 1. Get today's date
        now_date = date.today()
        # 2. Count users whose join date is today
        try:
            count = User.objects.filter(date_joined__gte=now_date).count()
        except:
            return Response(404)
        # 3. Return the result
        return Response({
            'count': count,
            'date': now_date,
        })
# Total number of users
class UserTotalCountAPIView(APIView):
    def get(self, request):
        # 1. Get today's date
        now_date = date.today()
        # 2. Count all users
        try:
            count = User.objects.all().count()
        except:
            return Response(404)
        # 3. Return the result
        return Response({
            'count': count,
            'date': now_date,
        })
 | 22.317647 | 106 | 0.557986 | 390 | 3,794 | 5.294872 | 0.287179 | 0.050847 | 0.029056 | 0.049395 | 0.380145 | 0.359806 | 0.291525 | 0.275545 | 0.221308 | 0.221308 | 0 | 0.025516 | 0.349236 | 3,794 | 170 | 107 | 22.317647 | 0.810855 | 0.140749 | 0 | 0.571429 | 0 | 0 | 0.01398 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.095238 | 0 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
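# Editor's sketch (standalone, no Django required): the 30-day windowing used
# by UserMonthCountAPIView above, with the boundary dates made explicit.
from datetime import date, timedelta

today = date.today()
month_start = today - timedelta(days=30)
windows = [(month_start + timedelta(i), month_start + timedelta(i + 1))
           for i in range(30)]
print(windows[0])    # first window starts 30 days ago
print(windows[-1])   # last window ends today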
0d38e3ab88ea80e866640d3de02dfdd15dddc370 | 5,732 | py | Python | gdd-car-sales/explore.py | kgdunn/car-sales | 3d17f5c6a2ddc4d740c4298f7f3ec832565f8882 | [
"BSD-3-Clause"
] | null | null | null | gdd-car-sales/explore.py | kgdunn/car-sales | 3d17f5c6a2ddc4d740c4298f7f3ec832565f8882 | [
"BSD-3-Clause"
] | null | null | null | gdd-car-sales/explore.py | kgdunn/car-sales | 3d17f5c6a2ddc4d740c4298f7f3ec832565f8882 | [
"BSD-3-Clause"
] | null | null | null |
"""
https://towardsdatascience.com/custom-transformers-and-ml-data-pipelines-with-python-20ea2a7adb65
Download the dataset from Kaggle,
https://www.kaggle.com/harlfoxem/housesalesprediction?select=kc_house_data.csv
and place it in the 'data' directory of this repo.
"""
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import FeatureUnion, Pipeline
import numpy as np
import pandas as pd
data = pd.read_csv("data/kc_house_data.csv")
class FeatureSelector(BaseEstimator, TransformerMixin):
"""
Custom Transformer that extracts columns passed as argument to its constructor
"""
def __init__(self, feature_names):
self._feature_names = feature_names
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self._feature_names]
class CategoricalTransformer(BaseEstimator, TransformerMixin):
"""
    Custom transformer that breaks the dates column into separate year, month, and day
    columns, and converts certain features to binary.
"""
def __init__(self, use_dates=["year", "month", "day"]):
self._use_dates = use_dates
def fit(self, X, y=None):
return self
def get_year(self, obj):
return str(obj)[:4]
def get_month(self, obj):
return str(obj)[4:6]
def get_day(self, obj):
return str(obj)[6:8]
def create_binary(self, obj):
"""
Helper function that converts values to Binary depending on input
"""
if obj == 0:
return "No"
else:
return "Yes"
def transform(self, X, y=None):
"""
Depending on constructor argument break dates column into specified units
using the helper functions written above
"""
        for spec in self._use_dates:
            # getattr gives the same per-unit extraction as the original exec() call
            X.loc[:, spec] = X['date'].apply(getattr(self, 'get_' + spec))
# Drop unusable column
X = X.drop("date", axis=1)
# Convert these columns to binary for one-hot-encoding later
X.loc[:, "waterfront"] = X["waterfront"].apply(self.create_binary)
X.loc[:, "view"] = X["view"].apply(self.create_binary)
X.loc[:, "yr_renovated"] = X["yr_renovated"].apply(self.create_binary)
return X.values
class NumericalTransformer(BaseEstimator, TransformerMixin):
"""
Custom transformer we wrote to engineer features (bathrooms per bedroom and/or how old the
    house is in 2019) passed as boolean arguments to its constructor.
"""
def __init__(self, bath_per_bed=True, years_old=True):
self._bath_per_bed = bath_per_bed
self._years_old = years_old
# Return self, nothing else to do here
def fit(self, X, y=None):
return self
# Custom transform method we wrote that creates aformentioned features and drops redundant ones
def transform(self, X, y=None):
if self._bath_per_bed:
# create new column
X.loc[:, "bath_per_bed"] = X["bathrooms"] / X["bedrooms"]
# drop redundant column
X.drop("bathrooms", axis=1)
if self._years_old:
# create new column
X.loc[:, "years_old"] = 2019 - X["yr_built"]
# drop redundant column
X.drop("yr_built", axis=1)
# Converting any infinity values in the dataset to Nan
X = X.replace([np.inf, -np.inf], np.nan)
return X.values
# Categrical features to pass down the categorical pipeline
categorical_features = ["date", "waterfront", "view", "yr_renovated"]
# Numerical features to pass down the numerical pipeline
numerical_features = [
"bedrooms",
"bathrooms",
"sqft_living",
"sqft_lot",
"floors",
"condition",
"grade",
"sqft_basement",
"yr_built",
]
# Defining the steps in the categorical pipeline
categorical_pipeline = Pipeline(
steps=[
("cat_selector", FeatureSelector(categorical_features)),
("cat_transformer", CategoricalTransformer()),
("one_hot_encoder", OneHotEncoder(sparse=False)),
]
)
# Defining the steps in the numerical pipeline
numerical_pipeline = Pipeline(
steps=[
("num_selector", FeatureSelector(numerical_features)),
("num_transformer", NumericalTransformer()),
("imputer", SimpleImputer(strategy="median")),
("std_scaler", StandardScaler()),
]
)
# Combining numerical and categorical piepline into one full big pipeline horizontally
# using FeatureUnion
full_pipeline = FeatureUnion(
transformer_list=[
("categorical_pipeline", categorical_pipeline),
("numerical_pipeline", numerical_pipeline),
]
)
# Leave it as a dataframe becuase our pipeline is called on a
# pandas dataframe to extract the appropriate columns, remember?
X = data.drop("price", axis=1)
# You can covert the target variable to numpy
y = data["price"].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# The full pipeline as a step in another pipeline with an estimator as the final step
full_pipeline_m = Pipeline(steps=[("full_pipeline", full_pipeline), ("model", LinearRegression())])
# Can call fit on it just like any other pipeline
full_pipeline_m.fit(X_train, y_train)
# Can predict with it like any other pipeline
y_pred = full_pipeline_m.predict(X_test)
error = y_pred - y_test
print(error.describe())
| 30.328042 | 99 | 0.681089 | 735 | 5,732 | 5.159184 | 0.331973 | 0.023207 | 0.009494 | 0.015823 | 0.186709 | 0.108914 | 0.054325 | 0.054325 | 0.015295 | 0 | 0 | 0.006243 | 0.217551 | 5,732 | 188 | 100 | 30.489362 | 0.839242 | 0.315771 | 0 | 0.153061 | 0 | 0 | 0.123317 | 0.013203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132653 | false | 0 | 0.102041 | 0.071429 | 0.377551 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
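# Editor's sketch: a quick error summary for the fitted pipeline above, using
# standard scikit-learn metrics.
from sklearn.metrics import mean_absolute_error, r2_score

print("MAE:", mean_absolute_error(y_test, y_pred))
print("R^2:", r2_score(y_test, y_pred))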
0d3b796689fd089c3ff8084e44253e175cdfeb5a | 5,965 | py | Python | ir_axioms/modules/similarity.py | heinrichreimer/ir_axioms | f7349c4adde96cfa19c7247824a70a4662c07582 | [
"MIT"
] | 5 | 2022-03-11T15:28:04.000Z | 2022-03-11T15:28:58.000Z | ir_axioms/modules/similarity.py | heinrichreimer/ir_axioms | f7349c4adde96cfa19c7247824a70a4662c07582 | [
"MIT"
] | null | null | null | ir_axioms/modules/similarity.py | heinrichreimer/ir_axioms | f7349c4adde96cfa19c7247824a70a4662c07582 | [
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from functools import lru_cache, cached_property
from itertools import product, combinations
from statistics import mean
from typing import (
final, Final, Iterable, Dict, Collection, Optional, Tuple, Sequence
)
from nltk.corpus import wordnet
from pymagnitude import Magnitude
from ir_axioms import logger
from ir_axioms.utils.nltk import download_nltk_dependencies
@lru_cache(None)
def synonym_set(
term: str,
smoothing: int = 0
) -> Sequence[str]:
cutoff = smoothing + 1
return wordnet.synsets(term)[:cutoff]
@lru_cache(None)
def synonym_set_similarity(
term1: str,
term2: str,
smoothing: int = 0
) -> float:
synonyms_term1 = synonym_set(term1, smoothing)
synonyms_term2 = synonym_set(term2, smoothing)
n = 0
similarity_sum = 0
for synonym1, synonym2 in product(synonyms_term1, synonyms_term2):
similarity = wordnet.wup_similarity(synonym1, synonym2)
if similarity is not None:
similarity_sum += similarity
n += 1
if n == 0:
return 0
return similarity_sum / n
class TermSimilarityMixin(ABC):
@abstractmethod
def similarity(self, term1: str, term2: str) -> float:
pass
@final
def similarity_sums(self, terms: Iterable[str]) -> Dict[str, float]:
similarity_sums: Dict[str, float] = {
term: 0
for term in terms
}
for term1, term2 in combinations(similarity_sums.keys(), 2):
similarity = self.similarity(term1, term2)
similarity_sums[term1] += similarity
similarity_sums[term2] += similarity
return similarity_sums
@final
def average_similarity(
self,
terms1: Collection[str],
terms2: Collection[str]
) -> float:
if len(terms1) == 0 or len(terms2) == 0:
return 0
return mean(
self.similarity(term1, term2)
for term1 in terms1
for term2 in terms2
)
def _pair_similarity(self, terms: Tuple[str, str]) -> float:
term1, term2 = terms
return self.similarity(term1, term2)
@final
def most_similar_pair(
self,
terms1: Collection[str],
terms2: Collection[str]
) -> Optional[Tuple[str, str]]:
if len(terms1) == 0 or len(terms2) == 0:
return None
most_similar_pairs: Sequence[Tuple[str, str]] = tuple(sorted(
product(terms1, terms2),
key=self._pair_similarity,
reverse=True,
))
most_similar_pair = most_similar_pairs[0]
if (
len(most_similar_pairs) > 1 and
self._pair_similarity(most_similar_pair) ==
self._pair_similarity(most_similar_pairs[1])
):
# No definite winner.
logger.debug(
f"Cannot find most similar term pair. "
f"The following pairs were equally similar: "
f"{', '.join(str(pair) for pair in most_similar_pairs)}"
)
return None
return most_similar_pair
@final
def most_similar_term(
self,
terms: Collection[str],
) -> Optional[str]:
if len(terms) == 0:
return None
similarity_sums = self.similarity_sums(terms)
most_similar_terms: Sequence[str] = tuple(sorted(
terms,
key=lambda term: similarity_sums[term],
reverse=True,
))
most_similar_term = most_similar_terms[0]
if (
len(most_similar_terms) > 1 and
similarity_sums[most_similar_term] ==
similarity_sums[most_similar_terms[1]]
):
# No definite winner.
logger.debug(
f"Cannot find most similar term. "
f"The following terms were equally similar: "
f"{', '.join(most_similar_terms)}"
)
return None
return most_similar_term
@final
def least_similar_term(
self,
terms: Collection[str],
) -> Optional[str]:
if len(terms) == 0:
return None
similarity_sums = self.similarity_sums(terms)
least_similar_terms: Sequence[str] = tuple(sorted(
terms,
key=lambda term: similarity_sums[term],
reverse=False,
))
least_similar_term = least_similar_terms[0]
if (
len(least_similar_terms) > 1 and
similarity_sums[least_similar_term] ==
similarity_sums[least_similar_terms[1]]
):
# No definite winner.
logger.debug(
f"Cannot find least similar term. "
f"The following terms were equally similar: "
f"{', '.join(least_similar_terms)}"
)
return None
return least_similar_term
class WordNetSynonymSetTermSimilarityMixin(TermSimilarityMixin):
smoothing: int = 0
def __init__(self):
self.__post_init__()
# noinspection PyMethodMayBeStatic
def __post_init__(self):
download_nltk_dependencies("wordnet", "omw-1.4")
@final
@lru_cache(None)
def similarity(self, term1: str, term2: str) -> float:
return synonym_set_similarity(term1, term2, self.smoothing)
class MagnitudeTermSimilarityMixin(TermSimilarityMixin, ABC):
embeddings_path: str = NotImplemented
@cached_property
def _embeddings(self):
return Magnitude(self.embeddings_path)
@final
@lru_cache(None)
def similarity(self, term1: str, term2: str):
return float(self._embeddings.similarity(term1, term2))
class FastTextWikiNewsTermSimilarityMixin(MagnitudeTermSimilarityMixin):
embeddings_path: Final[str] = "fasttext/medium/wiki-news-300d-1M.magnitude"
| 29.529703 | 79 | 0.599162 | 642 | 5,965 | 5.370717 | 0.180685 | 0.063805 | 0.026102 | 0.017401 | 0.362819 | 0.295534 | 0.263631 | 0.239269 | 0.226798 | 0.209397 | 0 | 0.019834 | 0.315339 | 5,965 | 201 | 80 | 29.676617 | 0.824437 | 0.015423 | 0 | 0.385542 | 0 | 0 | 0.067825 | 0.016701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084337 | false | 0.006024 | 0.054217 | 0.018072 | 0.295181 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
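# Editor's sketch: composing the WordNet-backed mixin above. Instantiating it
# triggers the NLTK corpus downloads, so run this offline rather than in CI.
if __name__ == "__main__":
    sim = WordNetSynonymSetTermSimilarityMixin()
    print(sim.similarity("car", "automobile"))
    print(sim.most_similar_term(("car", "automobile", "banana")))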
0d3ee9cc24210f010fd549b409d1c00016953241 | 590 | py | Python | day03/vec_reg_linear_grad.py | elbourki1/Machine-Learning-bootcamp-42 | cf6a987ede555d8d208aed5b915cafe8078dd848 | [
"Apache-2.0"
] | null | null | null | day03/vec_reg_linear_grad.py | elbourki1/Machine-Learning-bootcamp-42 | cf6a987ede555d8d208aed5b915cafe8078dd848 | [
"Apache-2.0"
] | null | null | null | day03/vec_reg_linear_grad.py | elbourki1/Machine-Learning-bootcamp-42 | cf6a987ede555d8d208aed5b915cafe8078dd848 | [
"Apache-2.0"
] | null | null | null |
import numpy as np
def vec_reg_linear_grad(x, y, theta, lambda_):
    m = x.shape[0]
    x_t = x.transpose()
    error = x.dot(theta) - y
    nabla = x_t.dot(error) / m
    # the bias term theta[0] is left unregularized
    nabla[1:] = nabla[1:] + theta[1:] * (lambda_ / m)
    return nabla
if __name__ == "__main__":
X = np.array([
[ -6, -7, -9],
[ 13, -2, 14],
[ -7, 14, -1],
[ -8, -4, 6],
[ -5, -9, 6],
[ 1, -5, 11],
[ 9, -11, 8]])
Y = np.array([2, 14, -13, 5, 12, 4, -19])
Z = np.array([3,10.5,-6])
    print(vec_reg_linear_grad(X, Y, Z, 1))
 | 23.6 | 55 | 0.454237 | 97 | 590 | 2.57732 | 0.443299 | 0.084 | 0.096 | 0.128 | 0.144 | 0.144 | 0 | 0 | 0 | 0 | 0 | 0.11809 | 0.325424 | 590 | 25 | 56 | 23.6 | 0.51005 | 0.022034 | 0 | 0 | 0 | 0 | 0.013889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.15 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
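# Editor's sketch: numerically cross-check the analytic gradient above against
# central finite differences of the regularized least-squares cost, reusing
# X, Y, Z from the __main__ block.
def _reg_cost(x, y, theta, lambda_):
    m = x.shape[0]
    err = x.dot(theta) - y
    return (err.dot(err) + lambda_ * theta[1:].dot(theta[1:])) / (2 * m)

_eps = 1e-6
_num = np.array([(_reg_cost(X, Y, Z + _eps * np.eye(3)[j], 1) -
                  _reg_cost(X, Y, Z - _eps * np.eye(3)[j], 1)) / (2 * _eps)
                 for j in range(3)])
print(np.allclose(_num, vec_reg_linear_grad(X, Y, Z, 1)))   # expect True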
0d4039809a0d69d1289df06615122acf02856f92 | 12,032 | py | Python | client/src/dolbyio_rest_apis/communications/monitor/models.py | dolbyio-samples/dolbyio-rest-apis-client-python | 37354dc10f967c4656776f9e2651a2284a11f530 | [
"MIT"
] | 1 | 2021-12-23T17:55:06.000Z | 2021-12-23T17:55:06.000Z | client/src/dolbyio_rest_apis/communications/monitor/models.py | dolbyio-samples/dolbyio-rest-apis-client-python | 37354dc10f967c4656776f9e2651a2284a11f530 | [
"MIT"
] | null | null | null | client/src/dolbyio_rest_apis/communications/monitor/models.py | dolbyio-samples/dolbyio-rest-apis-client-python | 37354dc10f967c4656776f9e2651a2284a11f530 | [
"MIT"
] | null | null | null |
"""
dolbyio_rest_apis.communications.monitor.models
~~~~~~~~~~~~~~~
This module contains the models used by the Dolby.io APIs.
"""
from dolbyio_rest_apis.core.helpers import get_value_or_default, in_and_not_none
from typing import List
class PagedResponse(dict):
"""Representation of a paged response."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.first = get_value_or_default(self, 'first', None)
self.next = get_value_or_default(self, 'next', None)
class ConferenceOwner(dict):
"""Representation of a Conference Owner."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.user_id = get_value_or_default(self, 'userID', None)
if in_and_not_none(self, 'metadata'):
self.metadata = UserMetadata(self['metadata'])
class ConferenceStatisticsMaxParticipants(dict):
"""Representation of a Conference Statistics Max Participants."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.user = get_value_or_default(self, 'USER', 0)
self.listener = get_value_or_default(self, 'LISTENER', 0)
self.mixer = get_value_or_default(self, 'MIXER', 0)
self.pstn = get_value_or_default(self, 'PSTN', 0)
class ConferenceParticipant(dict):
"""Representation of a Conference Participant."""
def __init__(self, user_id, dictionary: dict):
self.user_id = user_id
dict.__init__(self, dictionary)
#if in_and_not_none(self, 'connections'):
# self.connections = ConferenceOwner(self['connections'])
#if in_and_not_none(self, 'stats'):
# self.stats = ConferenceStatistics(self['stats'])
class ConferenceParticipants(PagedResponse):
"""Representation of a Conference participants."""
def __init__(self, dictionary: dict):
PagedResponse.__init__(self, dictionary)
self.participants: List[ConferenceParticipant] = []
if in_and_not_none(self, 'participants'):
for key in self['participants'].keys():
participant = ConferenceParticipant(key, self['participants'][key])
self.participants.append(participant)
class ConferenceStatisticsMaxRate(dict):
"""Representation of a Conference Statistics Max Rate."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.dtls = get_value_or_default(self, 'DTLS', 0)
self.rtcp = get_value_or_default(self, 'RTCP', 0)
self.rtp = get_value_or_default(self, 'RTP', 0)
self.stun = get_value_or_default(self, 'STUN', 0)
class ConferenceStatisticsMaxStreams(dict):
"""Representation of a Conference Statistics Max Streams."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.audio = get_value_or_default(self, 'AUDIO', 0)
self.video = get_value_or_default(self, 'VIDEO', 0)
self.screenshare = get_value_or_default(self, 'SCREENSHARE', 0)
class ConferenceStatisticsNetwork(dict):
"""Representation of a Conference Statistics Network."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
if in_and_not_none(self, 'maxRxBytesRate'):
self.max_rx_bytes_rate = ConferenceStatisticsMaxRate(self['maxRxBytesRate'])
if in_and_not_none(self, 'maxRxPacketsRate'):
self.max_rx_packets_rate = ConferenceStatisticsMaxRate(self['maxRxPacketsRate'])
if in_and_not_none(self, 'maxRxStreams'):
self.max_rx_streams = ConferenceStatisticsMaxStreams(self['maxRxStreams'])
if in_and_not_none(self, 'maxTxBytesRate'):
self.max_tx_bytes_rate = ConferenceStatisticsMaxRate(self['maxTxBytesRate'])
if in_and_not_none(self, 'maxTxPacketsRate'):
self.max_tx_packets_rate = ConferenceStatisticsMaxRate(self['maxTxPacketsRate'])
if in_and_not_none(self, 'maxTxStreams'):
self.max_tx_streams = ConferenceStatisticsMaxStreams(self['maxTxStreams'])
class ConferenceStatistics(dict):
"""Representation of a Conference Statistics."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
if in_and_not_none(self, 'maxParticipants'):
self.max_participants = ConferenceStatisticsMaxParticipants(self['maxParticipants'])
if in_and_not_none(self, 'network'):
self.network = ConferenceStatisticsNetwork(self['network'])
class ConferenceSummary(dict):
"""Representation of a Conference Summary."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.conf_id = get_value_or_default(self, 'confId', None)
self.alias = get_value_or_default(self, 'alias', None)
self.region = get_value_or_default(self, 'region', None)
self.start = get_value_or_default(self, 'start', 0)
self.live = get_value_or_default(self, 'live', False)
self.end = get_value_or_default(self, 'end', 0)
self.duration = get_value_or_default(self, 'duration', 0)
self.type = get_value_or_default(self, 'type', None)
self.presence_duration = get_value_or_default(self, 'presenceDuration', 0)
self.recording_duration = get_value_or_default(self, 'recordingDuration', 0)
self.mixer_live_recording = get_value_or_default(self, 'mixerLiveRecording', 0)
self.mixer_hls_streaming = get_value_or_default(self, 'mixerHlsStreaming', 0)
self.mixer_rtmp_streaming = get_value_or_default(self, 'mixerRtmpStreaming', 0)
self.nb_users = get_value_or_default(self, 'nbUsers', 0)
self.nb_listeners = get_value_or_default(self, 'nbListeners', 0)
self.nb_pstn = get_value_or_default(self, 'nbPstn', 0)
if in_and_not_none(self, 'owner'):
self.mix = ConferenceOwner(self['owner'])
if in_and_not_none(self, 'statistics'):
self.statistics = ConferenceStatistics(self['statistics'])
class GetConferencesResponse(PagedResponse):
"""Representation of a Conferences response."""
def __init__(self, dictionary: dict):
PagedResponse.__init__(self, dictionary)
self.conferences: List[ConferenceSummary] = []
if in_and_not_none(self, 'conferences'):
for conference in self['conferences']:
self.conferences.append(ConferenceSummary(conference))
class RecordingMix(dict):
"""Representation of a Recording Mix."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.mp4 = get_value_or_default(self, 'mp4', 0)
self.mp3 = get_value_or_default(self, 'mp3', 0)
self.region = get_value_or_default(self, 'region', None)
class UserMetadata(dict):
"""Representation of a User Metadata."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.user_id = get_value_or_default(self, 'userID', None)
self.external_name = get_value_or_default(self, 'externalName', None)
self.external_id = get_value_or_default(self, 'externalId', None)
self.external_photo_url = get_value_or_default(self, 'externalPhotoUrl', None)
self.ip_address = get_value_or_default(self, 'ipAddress', None)
class RecordingSplit(dict):
"""Representation of a Recording Split."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.start_time = get_value_or_default(self, 'startTime', 0)
self.duration = get_value_or_default(self, 'duration', 0)
self.size = get_value_or_default(self, 'size', 0)
self.file_name = get_value_or_default(self, 'fileName', None)
self.url = get_value_or_default(self, 'url', None)
if in_and_not_none(self, 'metadata'):
self.metadata = UserMetadata(self['metadata'])
class RecordingRecord(dict):
"""Representation of a Recording Record."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.start_time = get_value_or_default(self, 'startTime', 0)
self.duration = get_value_or_default(self, 'duration', 0)
self.size = get_value_or_default(self, 'size', 0)
self.file_name = get_value_or_default(self, 'fileName', None)
self.url = get_value_or_default(self, 'url', None)
self.splits = []
if in_and_not_none(self, 'splits'):
for split in self['splits']:
self.splits.append(RecordingSplit(split))
class RecordingAudio(dict):
"""Representation of a Recording Audio."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.region = get_value_or_default(self, 'region', None)
if in_and_not_none(self, 'mix'):
self.mix = RecordingMix(self['mix'])
self.records = []
if in_and_not_none(self, 'records'):
for record in self['records']:
self.records.append(RecordingRecord(record))
class Recording(dict):
"""Representation of a Recording."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.conf_id = get_value_or_default(self, 'confId', None)
self.alias = get_value_or_default(self, 'alias', None)
self.duration = get_value_or_default(self, 'duration', 0)
self.ts = get_value_or_default(self, 'ts', 0)
self.region = get_value_or_default(self, 'region', None)
if in_and_not_none(self, 'mix'):
self.mix = RecordingMix(self['mix'])
if in_and_not_none(self, 'audio'):
self.audio = RecordingAudio(self['audio'])
class GetRecordingsResponse(PagedResponse):
"""Representation of a Recordings response."""
def __init__(self, dictionary: dict):
PagedResponse.__init__(self, dictionary)
self.recordings = []
if in_and_not_none(self, 'recordings'):
for recording in self['recordings']:
self.recordings.append(Recording(recording))
class DolbyVoiceRecording(dict):
"""Representation of a Dolby Voice Recording."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.region = get_value_or_default(self, 'region', None)
self.conf_id = None
self.conf_alias = None
        if in_and_not_none(self, 'conference'):
            conference = self['conference']
            self.conf_id = get_value_or_default(conference, 'confId', None)
            self.conf_alias = get_value_or_default(conference, 'confAlias', None)
self.records = []
if in_and_not_none(self, 'records'):
for record in self['records']:
self.records.append(RecordingRecord(record))
class WebHookResponse(dict):
"""Representation of a WebHook event response."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.status = get_value_or_default(self, 'status', None)
self.headers = get_value_or_default(self, 'headers', None)
class WebHook(dict):
"""Representation of a WebHook event."""
def __init__(self, dictionary: dict):
dict.__init__(self, dictionary)
self.id = get_value_or_default(self, 'id', None)
self.webhook = get_value_or_default(self, 'webhook', None)
self.url = get_value_or_default(self, 'url', None)
self.conf_id = get_value_or_default(self, 'confId', None)
self.third_party_id = get_value_or_default(self, 'thirdPartyId', None)
self.ts = get_value_or_default(self, 'ts', None)
if in_and_not_none(self, 'response'):
self.response = WebHookResponse(self['response'])
class GetWebHookResponse(PagedResponse):
"""Representation of a WebHook response."""
def __init__(self, dictionary: dict):
PagedResponse.__init__(self, dictionary)
self.webhooks = []
if in_and_not_none(self, 'webhooks'):
for wbk in self['webhooks']:
self.webhooks.append(WebHook(wbk))
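# A minimal usage sketch (hypothetical payload, for illustration only; the
# key names mirror those parsed by the classes above):
if __name__ == '__main__':
    _payload = {
        'confId': 'conf-123',
        'alias': 'weekly-standup',
        'duration': 3600,
        'ts': 1571234567,
        'region': 'us',
        'mix': {'mp4': 1, 'mp3': 0, 'region': 'us'},
    }
    _rec = Recording(_payload)
    print(_rec.conf_id, _rec.duration, _rec.mix.region)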
| 38.688103 | 96 | 0.673953 | 1,430 | 12,032 | 5.313986 | 0.116084 | 0.069483 | 0.086854 | 0.147651 | 0.587051 | 0.487169 | 0.387946 | 0.364785 | 0.357679 | 0.357679 | 0 | 0.003786 | 0.209691 | 12,032 | 310 | 97 | 38.812903 | 0.795352 | 0.102144 | 0 | 0.429293 | 0 | 0 | 0.084909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.010101 | 0 | 0.232323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d412b3852785d25c5a8a4284b141fa51cb72aac | 51,094 | py | Python | coverlovin2/test/test_coverlovin2.py | salexan2001/coverlovin2 | 1fdaf572d1729326e8ccdd428840ab51b08c0aac | [
"Apache-2.0"
] | null | null | null | coverlovin2/test/test_coverlovin2.py | salexan2001/coverlovin2 | 1fdaf572d1729326e8ccdd428840ab51b08c0aac | [
"Apache-2.0"
] | null | null | null | coverlovin2/test/test_coverlovin2.py | salexan2001/coverlovin2 | 1fdaf572d1729326e8ccdd428840ab51b08c0aac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Test the coverlovin2 project using pytest.
Technique and recommendations taken from https://docs.pytest.org/en/latest/
Parts of this file break formatting conventions. That is allowed since this is
test code, and long lines of repetitive test cases deserve an exemption.
"""
__author__ = 'James Thomas Moon'
__url__ = 'https://github.com/jtmoon79/coverlovin2/test'
# standard library imports
import os
import logging
from pathlib import Path
import pytest
import tempfile
import typing
import queue
# non-standard library imports
from mutagen.id3 import ID3NoHeaderError
from mutagen.asf import ASFHeaderError
from mutagen.flac import FLACNoHeaderError
# custom imports
from ..coverlovin2 import (
Artist,
Album,
ArtAlb,
ArtAlb_new,
ArtAlb_empty,
ArtAlb_is,
DirArtAlb,
DirArtAlb_List,
GoogleCSE_Opts,
ImageSize,
ImageType,
Result,
WrOpts,
URL,
str_AA,
str_ArtAlb,
func_name,
similar,
log_new,
LOGFORMAT,
get_artist_album_mp3,
get_artist_album_mp4,
get_artist_album_flac,
get_artist_album_ogg,
get_artist_album_asf,
get_artist_album,
ImageSearcher,
ImageSearcher_Medium_Disk,
ImageSearcher_Medium_Network,
ImageSearcher_LikelyCover,
ImageSearcher_EmbeddedMedia,
ImageSearcher_MusicBrainz,
ImageSearcher_GoogleCSE,
process_dir,
process_dirs,
parse_args_opts,
)
# all committed test resources should be under this directory
resources = Path.joinpath(Path(__file__).parent, 'test_resources')
emp_Art = Artist('')
emp_Alb = Album('')
class RequestClassNoop(object):
"""stub class to override actual requests"""
def __init__(self, *args, **kwargs):
self.full_url = ""
def exists_or_skip(*args) -> typing.Union[Path, None]:
"""helper for skipping a test if path is not available"""
fp = resources.joinpath(*args)
if not fp.exists():
pytest.skip('test resource not available "%s"' % fp)
return None
return fp
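# usage sketch: fp = exists_or_skip('some_dir', 'some_file.mp3') returns the
# resource Path, or skips the calling test when the resource is missing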
class Test_GoogleCSE_Opts(object):
@pytest.mark.parametrize('ti',
(
pytest.param(('', '', '',), id='empty param 1 2 3'),
pytest.param(('foo', '', '',), id='empty param 2 3'),
pytest.param(('foo', 'bar', '',), id='empty param 3'),
pytest.param(('foo', 'bar', None,), id='None param 3'),
pytest.param((None, 'bar', None,), id='None param 1 3'),
pytest.param((None, None, None,), id='None param 1 2 3'),
)
)
def test_init_False(self, ti):
gc = GoogleCSE_Opts(*ti)
assert not gc
@pytest.mark.parametrize('ti',
(
pytest.param(('foo', 'bar', 'baz',), id='basic case #1'),
pytest.param(('foo', r'as jo2u3 lj;las; :L@)(* ;23', 'baz',), id='basic case #2'),
)
)
def test_init_True(self, ti):
gc = GoogleCSE_Opts(*ti)
assert gc
@pytest.mark.parametrize('ti',
(
pytest.param((), id='()'),
pytest.param(('',), id='("")'),
pytest.param(('', ''), id='("","")'),
pytest.param(('', '', '', '',), id='("","","","")'),
)
)
def test_init_TypeError(self, ti):
with pytest.raises(TypeError):
GoogleCSE_Opts(*ti)
class Test_helpers(object):
@pytest.mark.parametrize('artist, album, ti_exp',
(
pytest.param('', '', '''[ "" • "" ]''', id='empty'),
pytest.param('Foo', 'Bar', '''[ "Foo" • "Bar" ]''', id='Foo Bar'),
)
)
def test_str_AA(self, artist, album, ti_exp):
saa1 = str_AA(Artist(artist), Album(album))
assert saa1 == ti_exp
@pytest.mark.parametrize('artalb, tf',
(
pytest.param(ArtAlb_empty, False, id='False: (empty)'),
pytest.param(ArtAlb_new('Foo', ''), True, id='True: Foo _'),
pytest.param(ArtAlb_new('', 'Foo'), True, id='True: _ Foo'),
            pytest.param(ArtAlb_new('Foo', 'Bar'), True, id='True: Foo Bar'),
)
)
def test_ArtAlb_is(self, artalb, tf):
assert ArtAlb_is(artalb) == tf
@pytest.mark.parametrize('artist, album, artalb',
(
pytest.param(Artist(''), Album(''), ArtAlb_empty),
pytest.param(Artist(''), Album(''), ArtAlb_new('', '')),
pytest.param(Artist('art'), Album(''), ArtAlb_new('art', '')),
pytest.param(Artist(''), Album('alb'), ArtAlb_new('', 'alb')),
pytest.param(Artist('art'), Album('alb'), ArtAlb_new('art', 'alb')),
)
)
def test_ArtAlb_new(self, artist, album, artalb):
assert (artist, album) == artalb
@pytest.mark.parametrize('ti',
(
pytest.param('http://', id='http'),
pytest.param('https://', id='https'),
pytest.param('https://foo', id='https://foo'),
)
)
def test_URL_init(self, ti):
URL(ti)
@pytest.mark.parametrize('ti',
(
pytest.param('foo', id='foo'),
pytest.param('', id='""'),
#pytest.param(bytes('https://', encoding='utf8'), id='type<bytes>'),
)
)
def test_URL_ValueError(self, ti):
with pytest.raises(ValueError):
URL(ti)
    def test_URL_TypeError(self):
with pytest.raises(TypeError):
URL(bytes('https://', encoding='utf8'))
def test_URL_False(self):
assert not URL()
def test_URL_True(self):
assert URL('https://foo.com')
def test_log_new_1(self):
log1 = log_new('log1', logging.DEBUG)
assert log1.hasHandlers()
def test_log_new_2(self):
log2a = log_new('log2a', logging.DEBUG)
log2b = log_new('log2b', logging.DEBUG)
assert log2a is log2b
def test_log_new_same_id(self):
log3a = log_new('log3', logging.DEBUG)
log3b = log_new('log3', logging.DEBUG)
assert log3a is log3b
assert id(log3a) == id(log3b)
def test_func_name_1(self):
assert func_name() == 'test_func_name_1'
def test_similar_type(self):
assert type(similar('', '')) is float
_str_odd1 = \
r'an874987()#&_@( 87398skjEQhe]w?a]fuheusn-09- klnknd\#(! njbBIOE'
_str_un2 = r'''¶棲摓Ⲫ⸙A'''
_str_long3 = 'abkjadliuewoijkblhlkjaoiquweaghbkjhkljhldkjhaldkh'
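    # expected overlap for 'abcdefg' vs 'defghijk' below (presumably
    # difflib.SequenceMatcher.ratio): 2*M/T with M=4 matching chars ('defg')
    # and T=7+8=15 total chars, i.e. 8/15 ~= 0.5333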
@pytest.mark.parametrize('ti_a, ti_b, ti_exp',
(
pytest.param('', '', 1.0, id='""≟"" == 1.0'),
pytest.param('a', 'a', 1.0, id='"a"≟"a" == 1.0'),
pytest.param(_str_odd1, _str_odd1, 1.0, id='_str_odd1 ≟ _str_odd1 == 1.0'),
pytest.param(_str_un2, _str_un2, 1.0, id='_str_un2 ≟ _str_un2 == 1.0'),
pytest.param('abcdefg', 'defghijk', (0.5333, 0.534), id='0.533 ≤ overlap ≤ 0.534'),
pytest.param('', _str_long3, (0, 0.001), id='""≟"jslkjsdlkjf…"'),
pytest.param(_str_long3, '', (0, 0.001), id='"jslkjsdlkjf…"≟""'),
)
)
def test_similar(self, ti_a, ti_b, ti_exp):
score = similar(ti_a, ti_b)
if type(ti_exp) is int or type(ti_exp) is float:
assert score == ti_exp
elif type(ti_exp) is tuple and len(ti_exp) == 2:
assert ti_exp[0] <= score <= ti_exp[1]
else:
raise TypeError('bad test case input type %s' % type(ti_exp))
class Test_ImageSize(object):
def test_list(self):
assert ImageSize.list()
class Test_ImageType(object):
@pytest.mark.parametrize('ti',
(
'unknown type',
'',
5,
{},
'.gif'
)
)
def test_init_ValueError(self, ti):
with pytest.raises(ValueError):
            ImageType(ti)
@pytest.mark.parametrize('ti',
(
'jpg',
ImageType.PNG,
ImageType.GIF,
ImageType.JPG
)
)
def test_init(self, ti):
ImageType(ti)
def test_check_len_types(self):
"""ensure previous tests cover all possible cases. If not then new test
cases will need to be added.
"""
assert len(ImageType.list()) == 3
jpg = ImageType.JPG
gif = ImageType.GIF
png = ImageType.PNG
class Test_overrides(object):
"""
    Cannot test @overrides on made-up functions because the @overrides check
    runs at class-definition (import) time, before pytest has collected any
    tests.
"""
def test_ImageSearcher_Medium_Disk(self):
with pytest.raises(TypeError):
ImageSearcher_Medium_Disk()
def test_ImageSearcher_Medium_Network(self):
with pytest.raises(TypeError):
ImageSearcher_Medium_Network()
# placeholder image url for testing downloading
image_url = 'http://via.placeholder.com/2'
class Test_ImageSearcher(object):
# make `log` class-wide (can not implement `__init__` for pytest processed
# class)
log = log_new(LOGFORMAT, logging.DEBUG, __qualname__)
@pytest.mark.dependency(name='net_access_ping')
def test_net_access_ping(self):
"""check Internet access. ping of known stable IP."""
# TODO: complete this!
pass
@pytest.mark.dependency(name='net_access_dns',
depends=['net_access_ping'])
def test_net_access_dns(self):
"""check Internet access. attempt DNS lookup."""
# TODO: complete this!
pass
@pytest.mark.dependency(name='net_access', depends=['net_access_ping',
'net_access_dns'])
def test_net_access(self):
"""Wrapper of two net access dependency for simpler `depends` params"""
pass
@pytest.mark.dependency(name='init_is')
def test_init(self):
with pytest.raises(TypeError):
ImageSearcher(ArtAlb_empty, '', False)
    def test_download_url_ValueError(self):
        """bad url should raise ValueError"""
        with pytest.raises(ValueError):
            ImageSearcher.download_url(URL(''), self.log)
def test_download_url_return_None(self):
"""non-exists download URL should return None"""
assert not ImageSearcher.download_url(r'http://NOTEXISTURL.TESTFOO',
self.log)
def test_download_url__1(self):
assert ImageSearcher.download_url(image_url, self.log)
def test_download_url__2(self):
data = ImageSearcher.download_url(image_url, self.log)
assert type(data) is bytes
class Test_ImageSearcher_LikelyCover(object):
def _new_imagesearcher_likelycover(self, image_type: ImageType = jpg) ->\
ImageSearcher_LikelyCover:
"""return a new bland instance of ImageSearcher_LikelyCover"""
return ImageSearcher_LikelyCover(ArtAlb_empty, image_type, Path(''), WrOpts(False, False), True)
@pytest.mark.dependency(name='init_likelyc')
def test_init(self):
self._new_imagesearcher_likelycover()
@pytest.mark.dependency(depends=['init_likelyc'])
def test_WrongUseError(self):
is_ = self._new_imagesearcher_likelycover()
with pytest.raises(ImageSearcher_LikelyCover.WrongUseError):
is_.write_album_image()
A1_Dir = 'test_ImageSearcher_LikelyCover1' # actual sub-directory
A1_Mp3 = 'ID3v1 [Bob Dylan] [Highway 61 Revisited].mp3' # actual test file
A1_fp = resources.joinpath(A1_Dir, A1_Mp3) # existent file path
@pytest.mark.dependency(name='test_res_A1')
def test_A1_resources_exist_and_correct(self):
"""test resources must exist"""
assert self.A1_fp
assert self.A1_fp.exists()
"""there should be no image files in the test resource directory"""
# TODO: check this
# pytest.param
# (
# ImageType,
# (
# Path_to_match1,
# Path_to_match2,
# ...
# ),
# Path_expected_to_match
# ),
@pytest.mark.parametrize('image_type, paths, image_path',
(
pytest.param
(
jpg,
[],
None,
id='empty List (returns None)'
),
pytest.param
(
jpg,
(),
None,
id='empty Tuple (returns None)'
),
*( # generate a simple test case for all ImageTypes
pytest.param
(
it,
(
Path.joinpath(resources, 'DOES NOT EXIST foo' + it.suffix),
Path.joinpath(resources, 'DOES NOT EXIST bar' + it.suffix),
),
None,
id='quick test of ImageType ' + it.value + ' (returns None)'
) for it in ImageType
),
pytest.param
(
jpg,
(
Path('nope' + jpg.suffix),
Path('nope' + png.suffix),
),
None,
id='(no match) nope' + png.suffix + ' (returns None)'
),
pytest.param
(
jpg,
(
Path('AlbumArt_Small' + jpg.suffix),
Path('AlbumArt_Large' + jpg.suffix)
),
Path('AlbumArt_Large' + jpg.suffix),
id='AlbumArt_Large' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('front-here' + jpg.suffix),
Path('here-front' + jpg.suffix)
),
Path('front-here' + jpg.suffix),
id='front-here' + jpg.suffix
),
pytest.param
(
png,
(
Path('fronthere' + png.suffix),
Path('here-front' + png.suffix)
),
Path('here-front' + png.suffix),
id='here-front' + png.suffix
),
pytest.param
(
jpg,
(
Path('foo (front)' + jpg.suffix),
Path('folder' + jpg.suffix)
),
Path('foo (front)' + jpg.suffix),
id='foo (front)' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('AlbumArt01' + jpg.suffix),
Path('foo (front)' + jpg.suffix)
),
Path('AlbumArt01' + jpg.suffix),
id='AlbumArt01' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('foo (front)' + jpg.suffix),
Path('AlbumArt01' + jpg.suffix)
),
Path('AlbumArt01' + jpg.suffix),
id='AlbumArt01' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('R-3512668-1489953889-2577 cover.jpeg' + jpg.suffix),
Path('nomatch' + jpg.suffix)
),
Path('R-3512668-1489953889-2577 cover.jpeg' + jpg.suffix),
id='R-3512668-1489953889-2577 cover.jpeg' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('album_cover.jpeg' + jpg.suffix),
Path('nomatch' + jpg.suffix)
),
Path('album_cover.jpeg' + jpg.suffix),
id='album_cover.jpeg' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('nomatch' + jpg.suffix),
Path('Something (front) blarg' + jpg.suffix)
),
Path('Something (front) blarg' + jpg.suffix),
id='Something (front) blarg' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('Something-front-blarg' + jpg.suffix),
Path('Something (front) blarg' + jpg.suffix)
),
Path('Something (front) blarg' + jpg.suffix),
id='Something (front) blarg' + jpg.suffix
),
pytest.param
(
png,
(
Path('Something-front-blarg' + png.suffix),
Path('Something (front) blarg' + png.suffix)
),
Path('Something (front) blarg' + png.suffix),
id='Something (front) blarg' + png.suffix
),
pytest.param
(
gif,
(
Path('Something-front-blarg' + gif.suffix),
Path('Something (front) blarg' + gif.suffix)
),
Path('Something (front) blarg' + gif.suffix),
id='Something (front) blarg' + gif.suffix
),
pytest.param
(
jpg,
(
Path('Something-front-blarg' + jpg.suffix),
Path('Something' + png.suffix),
Path('Something' + jpg.suffix),
Path('Something' + gif.suffix)
),
Path('Something-front-blarg' + jpg.suffix),
id='Something-front-blarg' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('folder' + png.suffix),
Path('folder' + jpg.suffix),
Path('folder' + gif.suffix)
),
Path('folder' + jpg.suffix),
id='folder' + jpg.suffix
),
pytest.param
(
png,
(
Path('folder' + png.suffix),
Path('folder' + jpg.suffix),
Path('folder' + gif.suffix)
),
Path('folder' + png.suffix),
id='folder' + png.suffix
),
pytest.param
(
jpg,
(
Path('Something-front-blarg' + png.suffix),
Path('Something-front-blarg' + jpg.suffix),
Path('Something-front-blarg' + gif.suffix),
Path('Something (front) blarg' + png.suffix),
Path('Something (front) blarg' + jpg.suffix),
Path('Something (front) blarg' + gif.suffix),
),
Path('Something (front) blarg' + jpg.suffix),
id='Something (front) blarg' + jpg.suffix
),
pytest.param
(
jpg,
(
Path('Something-front-blarg' + jpg.suffix),
Path('Something (front) blarg' + '.jpeg'),
),
Path('Something (front) blarg' + '.jpeg'),
id='Something (front) blarg' + '.jpeg'
),
)
)
@pytest.mark.dependency(depends=['init_likelyc'])
def test_match_likely_name__match(self, image_type, paths, image_path):
is_ = self._new_imagesearcher_likelycover(image_type)
m = is_._match_likely_name(paths)
assert m == image_path
    # abbreviated comparison helper: compare file names only
    B_cmp_name = lambda x, y: x.name == y.name
#
B2_Dir = 'test_ImageSearcher_LikelyCover2' # actual sub-directory
B2_Img = 'album.jpg' # actual test file in that sub-directory
B2_image_path = resources.joinpath(B2_Dir, B2_Img) # file path test resource .../album.jpg
    # these files need not exist
B_image_path_ne = Path(r'./THIS FILE DOES NOT EXIST 298389325 (album_cover)' + jpg.suffix)
B_image_path_1 = Path(r'./ACDC TNT/ACDC TNT' + png.suffix)
B_image_path_2 = Path(r'./Kraftwerk - Minimum Maximum/Minimum Maximum' + gif.suffix)
B_image_path_3 = Path(r'./Kraftwerk - Minimum Maximum/Kraftwerk' + jpg.suffix)
B_image_path_Xid = 'Do match similar file name to similar parent directory name: '
#
B4_Dir = 'test_ImageSearcher_LikelyCover4' # actual sub-directory
B4_Img = 'album4.jpg' # actual test file in that sub-directory
B4_Img_sz = 0
B4_image_path = resources.joinpath(B4_Dir, 'Covers', B4_Img) # file path test resource .../album4.jpg
B4_image_path_ne = resources.joinpath(B4_Dir, 'cover.jpg') # non-existent file
@pytest.mark.dependency(name='test_res_B2')
def test_B2_resources_exist(self): # XXX: this is unnecessary, just fail
assert self.B2_image_path.exists()
for fp in (self.B_image_path_ne,
self.B_image_path_1,
self.B_image_path_2,
self.B_image_path_3,):
assert not fp.exists()
@pytest.mark.parametrize(
'image_type, image_path, files, test_expect, special_cmp',
(
pytest.param
(
jpg, B2_image_path, (B2_image_path,), None, None,
id='same file exists (Do not match actual file to itself)' + jpg.suffix
),
pytest.param
(
jpg, B_image_path_ne, (B_image_path_ne,), B_image_path_ne, B_cmp_name,
id='same file not exist (Do match non-existent same file)'
),
pytest.param
(
png, B_image_path_1, (B_image_path_1,), B_image_path_1, B_cmp_name,
id=B_image_path_Xid + str(B_image_path_1)
),
pytest.param
(
gif, B_image_path_2, (B_image_path_2,), B_image_path_2, B_cmp_name,
id=B_image_path_Xid + str(B_image_path_2)
),
pytest.param
(
jpg, B_image_path_3, (B_image_path_3,), B_image_path_3, B_cmp_name,
id=B_image_path_Xid + str(B_image_path_3)
),
pytest.param
(
jpg, B4_image_path_ne, (B4_image_path,), B4_image_path, B_cmp_name,
id='image is down one sub-directory'
),
)
)
@pytest.mark.dependency(depends=['init_likelyc', 'test_res_B2'])
def test__match_likely_name(self, image_type, image_path, files, test_expect, special_cmp):
is_ = ImageSearcher_LikelyCover(ArtAlb_empty, image_type, image_path, WrOpts(False, False), True)
mln = is_._match_likely_name(files)
assert test_expect == mln
if special_cmp:
assert special_cmp(mln, test_expect)
B_Artist = Artist('Bob Dylan')
B_Album = Album('Biograph (Disc 1)')
B_ArtAlb = ArtAlb_new(B_Artist, B_Album)
B3_Dir = 'test_ImageSearcher_LikelyCover3' # actual sub-directory
B3_Img1 = 'album1.jpg' # actual test file in that sub-directory
B3_Img2 = 'album2.jpg' # actual test file in that sub-directory
B3_Img_ne = 'album-not-exists-file.jpg' # does not exist
B3_image_path1 = resources.joinpath(B3_Dir, B3_Img1)
B3_image_path1_sz = 0 # set this within a test in case it fails
B3_image_path2 = resources.joinpath(B3_Dir, B3_Img2)
B3_image_path2_sz = 0 # set this within a test in case it fails
B3_image_path_ne = resources.joinpath(B3_Dir, B3_Img_ne)
@pytest.mark.dependency(name='test_res_B3')
def test_B3_resources_exist(self):
assert self.B3_image_path1.exists()
assert self.B3_image_path2.exists()
assert not self.B3_image_path_ne.exists()
# set file sizes once
self.__class__.B3_image_path1_sz = self.B3_image_path1.stat().st_size
self.__class__.B3_image_path2_sz = self.B3_image_path2.stat().st_size
#assert self.__class__.B3_image_path1_sz # might be zero
#assert self.__class__.B3_image_path2_sz # might be zero
@pytest.mark.parametrize(
'image_type, image_path_src, image_path_dst',
(
pytest.param
(
jpg, B3_image_path1, B3_image_path2,
),
pytest.param
(
jpg, B3_image_path1, B3_image_path_ne,
id='happy path - copied'
),
pytest.param
(
jpg, B4_image_path, B4_image_path_ne,
id='happy path - copied'
)
)
)
@pytest.mark.dependency(depends=['init_likelyc', 'test_res_B3'])
def test_search_album_image(self, image_type, image_path_src, image_path_dst):
is_ = ImageSearcher_LikelyCover(ArtAlb_empty, image_type, image_path_dst, WrOpts(False, True), True)
assert is_.search_album_image()
@pytest.mark.parametrize(
'image_type, image_path_src, image_path_dst, overwrite, ' +
'result',
(
pytest.param
(
jpg, B3_image_path1, B3_image_path2, False,
Result.SkipDueToNoOverwrite(ArtAlb_empty, ImageSearcher_LikelyCover, B3_image_path2, WrOpts(False, True)),
id='destination image already exists - overwrite False, returns False'
),
pytest.param
(
jpg, B3_image_path1, B3_image_path2, True,
Result.Copied(ArtAlb_empty, ImageSearcher_LikelyCover, B3_image_path1_sz, B3_image_path1, B3_image_path2, WrOpts(True, True)),
id='destination image already exists - overwrite True, returns True'
),
pytest.param
(
jpg, B3_image_path1, B3_image_path_ne, False,
Result.Copied(ArtAlb_empty, ImageSearcher_LikelyCover, B3_image_path1_sz, B3_image_path1, B3_image_path_ne, WrOpts(False, True)),
id='happy path - copied'
),
pytest.param
(
jpg, B4_image_path, B4_image_path_ne, False,
Result.Copied(ArtAlb_empty, ImageSearcher_LikelyCover, B4_Img_sz, B4_image_path,
B4_image_path_ne, WrOpts(False, True)),
id='happy path - copied'
)
)
)
@pytest.mark.dependency(depends=['init_likelyc', 'test_res_B3'])
def test_write_album_image(self, image_type, image_path_src, image_path_dst, overwrite, result):
is_ = ImageSearcher_LikelyCover(ArtAlb_empty, image_type, image_path_dst, WrOpts(overwrite, True), True)
assert is_.search_album_image()
assert result == is_.write_album_image()
@pytest.mark.dependency(depends=['init_likelyc'])
def test_go(self):
"""basic test of .go()"""
# TODO: cover all code-branches
is_ = ImageSearcher_LikelyCover(self.B_ArtAlb, jpg, self.B3_image_path1, WrOpts(False, True), True)
assert is_.go()
class Test_ImageSearcher_EmbeddedMedia(object):
"""
Test the ImageSearcher_EmbeddedMedia class
"""
E_ArtAlb = ArtAlb_new('my artist', 'my album')
E_imagepath1 = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia1', 'cover.jpg')
E_imagepath2 = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia2', 'cover.jpg')
E_imagepath3jpg = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 JPG', 'cover.jpg')
E_imagepath3mp3 = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 JPG', 'ID3v1 ID3v2 jpg cover.mp3')
E_imagepath3mp3_sz = 100 # magic number: known ahead of time
E_imagepath3png = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 PNG', 'cover.png')
E_imagepath3e_mp3 = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 empty mp3', 'cover.png')
E_imagepath3e_mp4 = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 empty mp4', 'cover.png')
E_imagepath3e_ogg = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 empty ogg', 'cover.png')
E_imagepath3e_flac = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 empty flac', 'cover.png')
E_imagepath3e_wma = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 empty wma', 'cover.png')
E_imagepath3bi = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia3 bad image', 'cover.png')
E_imagepath4 = Path.joinpath(resources, 'test_ImageSearcher_EmbeddedMedia4 PNG multiple', 'cover.png')
# run a pytest assert some of these exist where expected
# @pytest.mark.dependency(name='test_res_E')
# @pytest.mark.parametrize('test_res_path',
# (
# D_res_brg,
# D_res_br1,
# D_res_br2,
# D_res_gil,
# D_res_grgil,
# D_res_sa
# )
# )
# def test_resources_exist(self, test_res_path):
# assert test_res_path.exists()
def _new_imagesearch_embeddedmedia(self, artalb: ArtAlb = ArtAlb_empty) ->\
ImageSearcher_EmbeddedMedia:
"""create a simple instance"""
return ImageSearcher_EmbeddedMedia(artalb, jpg, Path(), WrOpts(False, True), True)
    @pytest.mark.parametrize('debug', (True, False))
    def test_init(self, debug):
        ImageSearcher_EmbeddedMedia(self.E_ArtAlb, jpg, Path(), WrOpts(False, True), debug)
def test_WrongUseError(self):
is_ = self._new_imagesearch_embeddedmedia()
with pytest.raises(ImageSearcher_EmbeddedMedia.WrongUseError):
is_.write_album_image()
@pytest.mark.parametrize(
'image_type, image_path, artalb, test_expect',
(
pytest.param
(
jpg, E_imagepath1, E_ArtAlb, False,
id='empty dir'
),
pytest.param
(
jpg, E_imagepath2, E_ArtAlb, False,
id='normal path - no embedded image'
),
pytest.param
(
jpg, E_imagepath3jpg, E_ArtAlb, True,
id='happy path jpg'
),
pytest.param
(
png, E_imagepath3jpg, E_ArtAlb, True,
id='happy path - embedded image is jpg, image_type is png'
),
pytest.param
(
png, E_imagepath3png, E_ArtAlb, True,
id='happy path png'
),
pytest.param
(
jpg, E_imagepath3png, E_ArtAlb, True,
id='happy path - embedded image is png, image_type is jpg'
),
pytest.param
(
png, E_imagepath3e_mp3, E_ArtAlb, False,
id='zero size mp3 file'
),
pytest.param
(
png, E_imagepath3e_mp4, E_ArtAlb, False,
id='zero size mp4'
),
pytest.param
(
png, E_imagepath3e_flac, E_ArtAlb, False,
id='zero size flac'
),
pytest.param
(
png, E_imagepath3e_ogg, E_ArtAlb, False,
id='zero size ogg file'
),
pytest.param
(
png, E_imagepath3e_wma, E_ArtAlb, False,
id='zero size wma'
),
pytest.param
(
png, E_imagepath3bi, E_ArtAlb, False,
id='mp3 file has zero byte image embedded'
),
pytest.param
(
jpg, E_imagepath4, E_ArtAlb, True,
id='mp3 file has multiple images embedded'
)
)
)
def test_search_album_image(self, image_type, image_path, artalb, test_expect):
is_ = ImageSearcher_EmbeddedMedia(artalb, image_type, image_path, WrOpts(False, True), True)
assert test_expect == is_.search_album_image()
@pytest.mark.parametrize(
'image_type, image_path, artalb, overwrite, ' +
'result',
( # TODO: test against an actual Result class?
pytest.param
(
jpg, E_imagepath3jpg, E_ArtAlb, False,
#Result.SkipDueToNoOverwrite(E_ArtAlb, ImageSearcher_EmbeddedMedia, E_imagepath3jpg, False, True),
False,
id='image already exists - overwrite False, returns False'
),
pytest.param
(
jpg, E_imagepath3jpg, E_ArtAlb, True,
#Result.Extracted(E_ArtAlb, ImageSearcher_EmbeddedMedia, E_imagepath3mp3_sz, E_imagepath3jpg, E_imagepath3jpg, True, True),
True,
id='image already exists - overwrite True, returns True'
)
)
)
def test_write_album_image(self, image_type, image_path, artalb,
overwrite, result):
assert image_path.exists()
is_ = ImageSearcher_EmbeddedMedia(artalb, image_type, image_path, WrOpts(overwrite, True), True)
assert is_.search_album_image()
assert is_.write_album_image()
def test_go(self):
"""basic test of .go()"""
# TODO: cover all code-branches
is_ = ImageSearcher_EmbeddedMedia(ArtAlb_empty, jpg, self.E_imagepath1, WrOpts(False, True), True)
assert None is is_.go()
class Test_ImageSearcher_GoogleCSE(object):
"""
Google CSE is tedious to test live so just use dummy data. Requires
secret values for Key and Search ID. Which then requires adding secret
data to this project.
"""
C_Dir = 'test_ImageSearcher_GoogleCSE1' # actual sub-directory
C_Img = 'album.jpg' # actual test file in that sub-directory
C_fp = resources.joinpath(C_Dir, C_Img)
# create these once with short names
C_gopt = GoogleCSE_Opts('fake+key', 'fake+ID', ImageSize.SML)
C_sz = ImageSize.SML
C_ArtAlb = ArtAlb_new('Bob Dylan', 'Biograph (Disc 1)')
test_res1 = resources.joinpath('googlecse-response1.json')
test_res2 = resources.joinpath('googlecse-response2.json')
test_res3 = resources.joinpath('googlecse-response3-onlygooglecacheimage.json')
@pytest.mark.dependency(name='test_res_C')
@pytest.mark.parametrize('test_res', (test_res1, test_res2, test_res3))
def test_resources_exist(self, test_res):
assert test_res.exists()
@pytest.mark.parametrize('debug', (True, False))
def test_init(self, debug):
gco = GoogleCSE_Opts('fake+key', 'fake+ID', self.C_sz)
ImageSearcher_GoogleCSE(ArtAlb_empty, jpg, Path(), gco, 'referrer!', WrOpts(False, True), debug)
def test_GoogleCSE_Opts_False(self):
gco = GoogleCSE_Opts('', '', self.C_sz)
assert not ImageSearcher_GoogleCSE(ArtAlb_empty, jpg, Path(), gco, 'referrer!', WrOpts(False, True), True)
def _stub_response1(*args, **kwargs):
"""To replace `ImageSearcher_GoogleCSE._search_response_json`"""
return open(str(Test_ImageSearcher_GoogleCSE.test_res1))
def _stub_download_url(*args, **kwargs):
"""To replace `ImageSearcher_GoogleCSE.download_url`"""
        return bytes('this is fake image data', encoding='utf8')
@pytest.mark.parametrize('artalb, image_type, result',
(
pytest.param(C_ArtAlb, jpg, True, id=str_ArtAlb(C_ArtAlb)),
pytest.param(ArtAlb_new('A', 'B'), jpg, True, id=str_AA(Artist('A'), Album('B'))),
pytest.param(ArtAlb_new('A', ''), jpg, True, id=str_AA(Artist('A'), Album(''))),
pytest.param(ArtAlb_new('', 'B'), jpg, True, id=str_AA(Artist(''), Album('B'))),
pytest.param(ArtAlb_new('', ''), jpg, False, id=str_AA(Artist(''), Album(''))),
)
)
def test_search_album_image(self, artalb, image_type, result):
# create ImageSearcher_GoogleCSE with stubbed methods
C_isg = ImageSearcher_GoogleCSE(artalb, image_type, Path(), self.C_gopt, 'referrer!', WrOpts(False, True), True)
C_isg._search_response_json = Test_ImageSearcher_GoogleCSE._stub_response1
C_isg.download_url = Test_ImageSearcher_GoogleCSE._stub_download_url
assert C_isg.search_album_image() == result
    def _stub_response2(*args, **kwargs):
        """To replace `ImageSearcher_GoogleCSE._search_response_json`"""
        return open(str(Test_ImageSearcher_GoogleCSE.test_res3))
# XXX: presuming only one instance of this test runs at a time
_6_testfile = Path(tempfile.gettempdir(), tempfile.gettempprefix() +
__qualname__)
#
# use a fixture finalizer to remove test file after test runs
#
def _6_rm_testfile(self):
try:
os.remove(self._6_testfile)
except OSError:
pass
@pytest.fixture()
def _6_fixture(self, request):
request.addfinalizer(self._6_rm_testfile)
@pytest.mark.skip(reason="TODO: fix up for newer Google CSE Search")
@pytest.mark.dependency(depends=['net_access'])
@pytest.mark.usefixtures("_6_fixture")
def test_search_album_image__use_altgooglecache(self, _6_fixture):
"""test download from alternate google image cache location
write an actual file (test=False)
"""
is_ = ImageSearcher_GoogleCSE(ArtAlb_new('my artist', 'my album'), jpg, self._6_testfile, self.C_gopt, 'referrer!', WrOpts(False, False), True)
is_.RequestClass = RequestClassNoop
is_._search_response_json = self._stub_response2
# XXX: hopefully the image URL within the test file remains valid!
assert is_.search_album_image()
assert is_.write_album_image(self._6_testfile)
# XXX: hopefully the image never changes! (not ideal)
assert 2000 < os.path.getsize(self._6_testfile) < 2500
# TODO: XXX: need tests for other ImageSearcher_likely functions:
# write_album_image
# TODO: XXX: need tests for other ImageSearcher classes
def test_go(self):
"""basic test of .go()"""
# TODO: cover all code-branches
# create ImageSearcher_GoogleCSE with stubbed methods
C_isg = ImageSearcher_GoogleCSE(self.C_ArtAlb, jpg, self.C_fp, self.C_gopt, 'referrer!', WrOpts(False, True), True)
C_isg._search_response_json = Test_ImageSearcher_GoogleCSE._stub_response1
C_isg.download_url = Test_ImageSearcher_GoogleCSE._stub_download_url
assert C_isg.go()
class Test_ImageSearcher_MusicBrainz(object):
"""
Test the ImageSearcher_MusicBrainz class
"""
D_ArtAlb = ArtAlb_new('Bob Dylan', 'Biograph (Disc 1)')
D_res_brg = resources.joinpath('musicbrainz-response-browse_release_groups.json')
D_res_br1 = resources.joinpath('musicbrainz-response-browse_releases1.json')
D_res_br2 = resources.joinpath('musicbrainz-response-browse_releases2.json')
D_res_gil = resources.joinpath('musicbrainz-response-get_image_list.json')
D_res_grgil = resources.joinpath('musicbrainz-response-get_release_group_image_list.json')
D_res_sa = resources.joinpath('musicbrainz-response-search_artists.json')
    @pytest.mark.dependency(name='test_res_D')
@pytest.mark.parametrize('test_res_path',
(
D_res_brg,
D_res_br1,
D_res_br2,
D_res_gil,
D_res_grgil,
D_res_sa
)
)
def test_resources_exist(self, test_res_path):
assert test_res_path.exists()
@pytest.mark.parametrize('debug', (True, False))
def test_init(self, debug):
ImageSearcher_MusicBrainz(ArtAlb_empty, jpg, Path(), WrOpts(False, True), debug)
def test_search_album_image_ArtAlb_empty(self):
ismb = ImageSearcher_MusicBrainz(ArtAlb_empty, jpg, Path(), WrOpts(False, True), True)
assert not ismb.search_album_image()
@pytest.mark.parametrize('search_artists, browse_releases',
(
pytest.param(None, None, id='None'),
pytest.param([], None, id='[]'),
pytest.param({}, None, id='{}'),
pytest.param({'a': 'A'}, None, id='{"a":"A"}'),
pytest.param({'a': 'A', 'b': 'B'}, None, id='{"a":"A",…}'),
pytest.param(
{
'artist-list': [
'foo',
]
},
None,
id='artist-list: "foo"'
),
pytest.param(
{
'artist-list': [
'foo',
'bar',
]
},
None,
id='artist-list: "foo" "bar"'
),
pytest.param(
{
'artist-list': [
{'a': 'A'},
{'b': 'B'},
]
},
None,
id='artist-list: "a:A" "b:B"'
),
pytest.param(
{
'artist-list': [
{'a': 'A',
'id': 'id of a'},
{'b': 'B',
'id': 'id of b'},
]
},
None,
id='artist-list: "a:A" "b:B" with "id"'
),
# TODO: add more test cases that exercise more of the function
# at this point, add values for browse_releases
)
)
def test_search_album_image(self, search_artists, browse_releases):
ismb = ImageSearcher_MusicBrainz(self.D_ArtAlb, jpg, Path(), WrOpts(False, True), True)
def _stub_search_artists(*args, **kwargs):
return search_artists
def _stub_browse_releases(*args, **kwargs):
return browse_releases
ismb._search_artists = _stub_search_artists
ismb._browse_releases = _stub_browse_releases
assert not ismb.search_album_image()
def test_go(self):
"""basic test of .go()"""
# TODO: cover all code-branches
ismb = ImageSearcher_MusicBrainz(self.D_ArtAlb, jpg, Path(), WrOpts(False, True), True)
def _stub_search_artists(*args, **kwargs):
return {}
def _stub_browse_releases(*args, **kwargs):
return {}
ismb._search_artists = _stub_search_artists
ismb._browse_releases = _stub_browse_releases
assert None is ismb.go()
# TODO: test ImageSearcher_MusicBrainz.search_album_image without a stub
# somehow just check it returns some value and does not raise,
# depends on success of test_net_ping, test_net_dns, etc.
# (it will likely choke if either artist or album is blank)
# TODO: test remaining functions of ImageSearcher_MusicBrainz
class Test_complex_funcs(object):
@pytest.mark.parametrize('dirp, image_nt',
(
pytest.param(resources.joinpath('test_process_dir_1_empty'),
'not exist.jpg',
id='test_process_dir_1_empty'),
)
)
def test_process_dir__empty(self, dirp, image_nt):
daa_list = []
sq = queue.SimpleQueue()
daa_list = process_dir(dirp, image_nt, False, sq, daa_list)
assert not daa_list
assert sq.empty()
res2 = resources.joinpath('test_process_dir_2')
res2a1 = res2.joinpath('artist1 - album1')
res2a2 = res2.joinpath('artist2 -- album2')
res3 = resources.joinpath('test_process_dir_3')
res3a1 = res3.joinpath('artist1 - album1')
res3a2a = res3.joinpath('artist2', 'album2a')
res3a2b = res3.joinpath('artist2', 'album2b')
res3a3 = res3.joinpath('artist3 -- 2003 -- album3')
res3a4 = res3.joinpath('artist4 ! -- 2004 -- album4 !')
res4 = resources.joinpath('test_process_dir_4')
    res4a1 = res4.joinpath('artist1 - album has cover')  # TODO: add a test that this resource exists
res4a2 = res4.joinpath('artist2 -- 2002 -- album2')
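    # directory-name patterns exercised below: 'artist - album' and
    # 'artist -- year -- album' parse into (Artist, Album), while a nested
    # artist/album/ layout does not parse and yields an empty ArtAlb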
@pytest.mark.parametrize('dirp, image_nt, qsize, daa_list_expect',
(
pytest.param
(
res2, 'cover.jpg', 0, [
(res2a1, ArtAlb_new('artist1', 'album1')),
(res2a2, ArtAlb_new('artist2', 'album2')),
],
id=res2.name
),
pytest.param
(
res3, 'cover.jpg', 0, [
(res3a1, ArtAlb_new('artist1', 'album1')),
# unable to parse path structure artist/album/song.mp3 so these Artist Album are empty
(res3a2a, ArtAlb_new('', '')),
(res3a2b, ArtAlb_new('', '')),
(res3a3, ArtAlb_new('artist3', 'album3')),
(res3a4, ArtAlb_new('artist4 !', 'album4 !')),
],
id=res3.name
),
pytest.param
(
res4, 'cover.jpg', 1, [
# should not include res4a1
(res4a2, ArtAlb_new('artist2', 'album2')),
],
id=res4.name
),
)
)
def test_process_dir(self, dirp: Path, image_nt: str, qsize, daa_list_expect):
sq = queue.SimpleQueue()
assert dirp.is_dir()
daa_list = process_dir(dirp, image_nt, False, sq, [])
assert daa_list == daa_list_expect
assert qsize == sq.qsize()
# TODO: add testing of process_dir that exercises more code
# need to add test "album" directories
#def test_process_dirs(self):
# return True
@pytest.mark.parametrize('args',
(
pytest.param([], id='(empty)'),
pytest.param(['--help'], id='--help'),
pytest.param(['.'], id='no search methods selected'),
pytest.param(['-sg', '.'], id='Google missing gkey gid'),
pytest.param(['-sg', '--sgkey', 'foobar', '.'], id='Google missing gid'),
pytest.param(['-sg', '--sgid ', 'foobar', '.'], id='Google missing gkey'),
)
)
def test_parse_args_raises_SystemExit(self, args):
with pytest.raises(SystemExit):
parse_args_opts(args=args)
# These tests do not need to be elaborate. Enough confidence can be had of
# the argparse.ArgumentParser setup via code inspection; not worth the time
# trade-off. These tests are to increase code coverage score.
argtest1 = ['-se', '.']
argtest2 = ['-se', '--test', '.', '..']
argtest3 = ['-sg', '--sgid', 'my id', '--sgkey', 'my key', '.']
argtest4 = ['-sg', '--sgid', 'my id', '--sgkey', 'my key', '.', '.']
argtest5 = ['.', '-sg', '--sgid', 'my id', '--sgkey', 'my key', '.', '.']
@pytest.mark.parametrize('args',
(
pytest.param(argtest1, id=str(argtest1)),
pytest.param(argtest2, id=str(argtest2)),
pytest.param(argtest3, id=str(argtest3)),
pytest.param(argtest4, id=str(argtest4)),
pytest.param(argtest5, id=str(argtest5)),
)
)
def test_parse_args(self, args):
assert parse_args_opts(args=args)
@pytest.mark.parametrize('args, ret_expect',
(
pytest.param(['-s-', '.'],
(['.'], None, None, (True, True, True, False, False), None, None, None, logging.WARNING),
id='-s- .'),
pytest.param(['.', '-se', '.', '..'],
(['.', '.', '..'], None, None, (False, True, False, False, False), None, None, None, logging.WARNING),
id='. -se . ..'),
pytest.param(['-s*', '.', '--sgkey', 'my key', '--sgid', 'my id'],
(['.'], None, None, (True, True, True, True, True), None, None, None, logging.WARNING),
id='-s* . …'),
)
)
def test_parse_args_more(self, args, ret_expect):
"""only compare expected return values that are not None"""
ret = parse_args_opts(args=args)
for i in range(len(ret_expect)):
if ret_expect[i] is None:
continue
assert ret[i] == ret_expect[i]
class Test_media(object):
@pytest.mark.parametrize('ti_fname, ti_ar, ti_al',
(
# mp3
pytest.param('ID3v1 _.mp3', '', '', id='mp3 ID3v1 "" ""'),
pytest.param('ID3v1 artist album.mp3', 'my artist', 'my album', id='mp3 ID3v1 "my artist" "my album"'),
pytest.param('ID3v1 artist.mp3', 'my artist', '', id='mp3 ID3v1 "my artist" ""'),
pytest.param('ID3v1 ID3v2 artist album.mp3', 'my artist', 'my album', id='mp3 ID3v1 ID3v2 "my artist" "my album"'),
pytest.param('ID3v2 artist album.mp3', 'my artist', 'my album', id='mp3 ID3v2 "my artist" "my album"'),
            pytest.param('ID3v1 albumartist album.mp3', 'my albumartist', 'my album', id='mp3 ID3v1 "my albumartist" "my album"'),
pytest.param('_.mp3', '', '', id='mp3 no ID'),
# m4a
pytest.param('_.m4a', '', '', id='m4a "" ""'),
pytest.param('artist.m4a', 'my artist', '', id='m4a "my artist" ""'),
pytest.param('album.m4a', '', 'my album', id='m4a "" "my album"'),
pytest.param('artist album.m4a', 'my artist', 'my album', id='m4a "my artist" "my album"'),
# ogg
pytest.param('_.ogg', '', '', id='ogg "" ""'),
pytest.param('artist.ogg', 'my artist', '', id='ogg "my artist" ""'),
pytest.param('album.ogg', '', 'my album', id='ogg "" "my album"'),
pytest.param('artist album.ogg', 'my artist', 'my album', id='ogg "my artist" "my album"'),
# wma
pytest.param('_.wma', '', '', id='wma "" ""'),
pytest.param('author.wma', 'my artist', '', id='wma "my artist" ""'),
pytest.param('WM-AlbumTitle.wma', '', 'my album', id='wma "" "my album"'),
pytest.param('author WM-AlbumTitle.wma', 'my artist', 'my album', id='wma "my artist" "my album"'),
# flac
pytest.param('_.flac', '', '', id='flac "" ""'),
pytest.param('ARTIST.flac', 'my artist', '', id='flac "my artist" ""'),
pytest.param('ALBUM.flac', '', 'my album', id='flac "" "my album"'),
pytest.param('ARTIST ALBUM.flac', 'my artist', 'my album', id='flac "my artist" "my album"'),
#pytest.param('', '', '', id='"" ""'),
)
)
def test_parse_media_file(self, ti_fname, ti_ar, ti_al):
fp = exists_or_skip(ti_fname)
ar, al = get_artist_album[fp.suffix](fp)
assert ar == ti_ar
assert al == ti_al
def test_ogg_as_mp3_fail(self):
fp = exists_or_skip('_.ogg')
assert ArtAlb_empty == get_artist_album_mp3(fp)
def test_ogg_as_wma_fail(self):
fp = exists_or_skip('_.ogg')
assert ArtAlb_empty == get_artist_album_asf(fp)
def test_ogg_as_flac_fail(self):
fp = exists_or_skip('_.ogg')
assert ArtAlb_empty == get_artist_album_flac(fp)
def test_bad_file_suffix(self):
with pytest.raises(KeyError):
_ = get_artist_album['foo.bad']
| 37.569118 | 151 | 0.55621 | 5,729 | 51,094 | 4.738349 | 0.111014 | 0.057541 | 0.017535 | 0.019487 | 0.498379 | 0.400243 | 0.328557 | 0.261843 | 0.235136 | 0.195314 | 0 | 0.018859 | 0.320233 | 51,094 | 1,359 | 152 | 37.596762 | 0.762035 | 0.100638 | 0 | 0.27406 | 0 | 0 | 0.166572 | 0.029467 | 0.000917 | 0 | 0 | 0.002943 | 0.056829 | 1 | 0.073327 | false | 0.003666 | 0.010082 | 0.004583 | 0.179652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d44fefde3f6349cad5b1889e79b5643cbf2434d | 7,740 | py | Python | zmqpy/tests/pyzmq_tests/test_poller.py | felipecruz/zmqpy | 91b55bf631c3b96e6f71fc3b26a3e435ae6289df | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2015-02-13T05:17:45.000Z | 2017-12-19T17:16:35.000Z | zmqpy/tests/pyzmq_tests/test_poller.py | felipecruz/zmqpy | 91b55bf631c3b96e6f71fc3b26a3e435ae6289df | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | zmqpy/tests/pyzmq_tests/test_poller.py | felipecruz/zmqpy | 91b55bf631c3b96e6f71fc3b26a3e435ae6289df | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2017-06-15T09:02:21.000Z | 2017-12-19T17:16:07.000Z | #
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
import unittest
import zmqpy
from zmqpy.utils.strtypes import asbytes
from zmqpy import Poller
from .__init__ import BaseZMQTestCase
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class PollerTest(BaseZMQTestCase):
def test_poller_init(self):
poller = Poller()
assert poller
def test_poller_register(self):
poller = Poller()
socket1, socket2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)
poller.register(socket1)
assert poller.sockets[socket1] == zmqpy.POLLIN | zmqpy.POLLOUT
def test_poller_register_no_flags(self):
poller = Poller()
socket1, socket2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)
poller.register(socket1)
#register with no flags unregister the socket
poller.register(socket1, flags=None)
assert poller.sockets == {}
def test_poller_unregister(self):
poller = Poller()
socket1, socket2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)
poller.register(socket1)
#register with no flags unregister the socket
poller.unregister(socket1)
assert poller.sockets == {}
def test_poller_modify(self):
poller = Poller()
socket1, socket2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)
poller.register(socket1)
#register with no flags unregister the socket
poller.modify(socket1, flags=zmqpy.POLLOUT)
assert poller.sockets[socket1] == zmqpy.POLLOUT
def wait():
time.sleep(.25)
class TestPoll(BaseZMQTestCase):
Poller = zmqpy.Poller
# This test is failing due to this issue:
# http://github.com/sustrik/zeromq2/issues#issue/26
def test_pair(self):
s1, s2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)
# Sleep to allow sockets to connect.
wait()
poller = self.Poller()
poller.register(s1, zmqpy.POLLIN|zmqpy.POLLOUT)
poller.register(s2, zmqpy.POLLIN|zmqpy.POLLOUT)
# Poll result should contain both sockets
socks = dict(poller.poll())
# Now make sure that both are send ready.
self.assertEquals(socks[s1], zmqpy.POLLOUT)
self.assertEquals(socks[s2], zmqpy.POLLOUT)
# Now do a send on both, wait and test for zmqpy.POLLOUT|zmqpy.POLLIN
s1.send(b'msg1')
s2.send(b'msg2')
wait()
socks = dict(poller.poll())
self.assertEquals(socks[s1], zmqpy.POLLOUT|zmqpy.POLLIN)
self.assertEquals(socks[s2], zmqpy.POLLOUT|zmqpy.POLLIN)
# Make sure that both are in POLLOUT after recv.
s1.recv()
s2.recv()
socks = dict(poller.poll())
self.assertEquals(socks[s1], zmqpy.POLLOUT)
self.assertEquals(socks[s2], zmqpy.POLLOUT)
poller.unregister(s1)
poller.unregister(s2)
# Wait for everything to finish.
wait()
def test_reqrep(self):
s1, s2 = self.create_bound_pair(zmqpy.REP, zmqpy.REQ)
# Sleep to allow sockets to connect.
wait()
poller = self.Poller()
poller.register(s1, zmqpy.POLLIN|zmqpy.POLLOUT)
poller.register(s2, zmqpy.POLLIN|zmqpy.POLLOUT)
# Make sure that s1 is in state 0 and s2 is in POLLOUT
socks = dict(poller.poll())
self.assertEquals(s1 in socks, 0)
self.assertEquals(socks[s2], zmqpy.POLLOUT)
# Make sure that s2 goes immediately into state 0 after send.
s2.send(b'msg1')
socks = dict(poller.poll())
self.assertEquals(s2 in socks, 0)
# Make sure that s1 goes into POLLIN state after a time.sleep().
time.sleep(0.5)
socks = dict(poller.poll())
self.assertEquals(socks[s1], zmqpy.POLLIN)
# Make sure that s1 goes into POLLOUT after recv.
s1.recv()
socks = dict(poller.poll())
self.assertEquals(socks[s1], zmqpy.POLLOUT)
# Make sure s1 goes into state 0 after send.
s1.send(b'msg2')
socks = dict(poller.poll())
self.assertEquals(s1 in socks, 0)
# Wait and then see that s2 is in POLLIN.
time.sleep(0.5)
socks = dict(poller.poll())
self.assertEquals(socks[s2], zmqpy.POLLIN)
# Make sure that s2 is in POLLOUT after recv.
s2.recv()
socks = dict(poller.poll())
self.assertEquals(socks[s2], zmqpy.POLLOUT)
poller.unregister(s1)
poller.unregister(s2)
# Wait for everything to finish.
wait()
def test_no_events(self):
s1, s2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)
poller = self.Poller()
poller.register(s1, zmqpy.POLLIN|zmqpy.POLLOUT)
poller.register(s2, 0)
self.assertTrue(s1 in poller.sockets)
self.assertFalse(s2 in poller.sockets)
poller.register(s1, 0)
self.assertFalse(s1 in poller.sockets)
def test_pubsub(self):
s1, s2 = self.create_bound_pair(zmqpy.PUB, zmqpy.SUB)
s2.setsockopt(zmqpy.SUBSCRIBE, b'')
# Sleep to allow sockets to connect.
wait()
poller = self.Poller()
poller.register(s1, zmqpy.POLLIN|zmqpy.POLLOUT)
poller.register(s2, zmqpy.POLLIN)
# Now make sure that both are send ready.
socks = dict(poller.poll())
self.assertEquals(socks[s1], zmqpy.POLLOUT)
self.assertEquals(s2 in socks, 0)
# Make sure that s1 stays in POLLOUT after a send.
s1.send(b'msg1')
socks = dict(poller.poll())
self.assertEquals(socks[s1], zmqpy.POLLOUT)
# Make sure that s2 is POLLIN after waiting.
wait()
socks = dict(poller.poll())
self.assertEquals(socks[s2], zmqpy.POLLIN)
# Make sure that s2 goes into 0 after recv.
s2.recv()
socks = dict(poller.poll())
self.assertEquals(s2 in socks, 0)
poller.unregister(s1)
poller.unregister(s2)
# Wait for everything to finish.
wait()
def test_timeout(self):
"""make sure Poller.poll timeout has the right units (milliseconds)."""
s1, s2 = self.create_bound_pair(zmqpy.PAIR, zmqpy.PAIR)
poller = self.Poller()
poller.register(s1, zmqpy.POLLIN)
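        # timeout=.005 means 0.005 ms -- effectively zero, so poll() should
        # return almost immediately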
tic = time.time()
evt = poller.poll(timeout=.005)
toc = time.time()
self.assertTrue(toc-tic < 0.1)
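        # timeout=5 means 5 ms: elapsed time should land between 1 ms and 100 ms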
tic = time.time()
evt = poller.poll(timeout=5)
toc = time.time()
self.assertTrue(toc-tic < 0.1)
self.assertTrue(toc-tic > .001)
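        # timeout=500 means 500 ms: elapsed time should land between 100 ms and 1 s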
tic = time.time()
evt = poller.poll(timeout=500)
toc = time.time()
self.assertTrue(toc-tic < 1)
self.assertTrue(toc-tic > 0.1)
| 32.658228 | 79 | 0.603488 | 970 | 7,740 | 4.774227 | 0.185567 | 0.054416 | 0.0393 | 0.057439 | 0.669834 | 0.622976 | 0.571583 | 0.530123 | 0.501188 | 0.481537 | 0 | 0.023337 | 0.25814 | 7,740 | 236 | 80 | 32.79661 | 0.783177 | 0.289922 | 0 | 0.647482 | 0 | 0 | 0.003676 | 0 | 0 | 0 | 0 | 0 | 0.230216 | 1 | 0.079137 | false | 0 | 0.043165 | 0 | 0.143885 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d4561c1593e781ac498d60ae3cfa527c1997b58 | 1,654 | py | Python | chiscore/_data/_files.py | limix/skat | 64884196a15a1ae3b576a37c86bdb8f32335d4f4 | [
"MIT"
] | 3 | 2019-10-03T15:58:20.000Z | 2021-11-02T16:46:36.000Z | chiscore/_data/_files.py | limix/skat | 64884196a15a1ae3b576a37c86bdb8f32335d4f4 | [
"MIT"
] | 1 | 2019-01-25T14:49:37.000Z | 2019-05-13T16:45:32.000Z | chiscore/_data/_files.py | limix/skat | 64884196a15a1ae3b576a37c86bdb8f32335d4f4 | [
"MIT"
] | null | null | null | import shutil
import tempfile
import warnings
from os.path import dirname, join, realpath
_filenames = [
"davies_pvalue.npz",
"optimal_davies_pvalue.npz",
"danilo_nan.npz",
"bound.npz",
"inf.npz",
]
class data_file(object):
def __init__(self, filenames):
global _filenames
self._unlist = False
if not isinstance(filenames, (tuple, list)):
filenames = [filenames]
self._unlist = True
for fn in filenames:
if fn not in _filenames:
raise ValueError(
"Unrecognized file name {}. Choose one of these: {}".format(
fn, _filenames
)
)
self._dirpath = tempfile.mkdtemp()
self._filenames = filenames
def __enter__(self):
import pkg_resources
filepaths = [join(self._dirpath, fn) for fn in self._filenames]
for fn, fp in zip(self._filenames, filepaths):
if __name__ == "__main__":
shutil.copy(join(dirname(realpath(__file__)), fn), fp)
else:
resource_path = "_data/{}".format(fn)
content = pkg_resources.resource_string(
__name__.split(".")[0], resource_path
)
with open(fp, "wb") as f:
f.write(content)
if self._unlist:
return filepaths[0]
return filepaths
def __exit__(self, *_):
try:
shutil.rmtree(self._dirpath)
except PermissionError as e:
warnings.warn(str(e) + "\n. I will ignore it and proceed.")
| 27.114754 | 80 | 0.541717 | 171 | 1,654 | 4.929825 | 0.497076 | 0.061684 | 0.035587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001892 | 0.360943 | 1,654 | 60 | 81 | 27.566667 | 0.795648 | 0 | 0 | 0 | 0 | 0 | 0.1052 | 0.015115 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.104167 | 0 | 0.229167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d49b3c282c0d5aaafc4cee1e7dc907315c8b1b1 | 4,329 | py | Python | app/tests/test_eventactions.py | twatchy/cito_engine | a62dce3c76567dd36b7efcaa70e03728b335f44e | [
"Apache-2.0"
] | null | null | null | app/tests/test_eventactions.py | twatchy/cito_engine | a62dce3c76567dd36b7efcaa70e03728b335f44e | [
"Apache-2.0"
] | null | null | null | app/tests/test_eventactions.py | twatchy/cito_engine | a62dce3c76567dd36b7efcaa70e03728b335f44e | [
"Apache-2.0"
] | null | null | null | """Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from time import time
from mock import patch, call
from django.test import TestCase
from cito_engine.models import Incident, IncidentLog, EventActionCounter
from cito_engine.poller.event_poller import EventPoller
from . import factories
class TestEventActions(TestCase):
"""
    X = threshold_count = 2, Y = threshold_timer = 100 seconds
    Case 1
    * First incident arrives at time T
    * 2nd at T+10, 3rd at T+11, 4th at T+51
    * Assert we have a single incident, 4 logs, and the event action executed once
    * 5th incident occurs at T+101, outside the Y-second window
    * Assert counters are reset
    * 6th incident occurs at T+121
    * Assert the event action is executed for the second time
"""
def setUp(self):
self.event = factories.EventFactory.create()
self.eventaction = factories.EventActionFactory.create(event=self.event,threshold_count=2, threshold_timer=100)
@patch('cito_engine.actions.incidents.requests')
def test__single_event_action_execution(self, mock_requests):
T = int(time())
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (self.event.id, T)
eventpoller = EventPoller()
self.assertTrue(eventpoller.parse_message(raw_incident))
incident = Incident.objects.filter()[0]
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertFalse(eacounter.is_triggered)
# 2nd incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T+10)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
#3rd incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 11)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
# 4th incident
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 51)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
#We should have one incident and 4 incident logs
self.assertEqual(Incident.objects.count(), 1)
self.assertEqual(IncidentLog.objects.count(), 4)
# Assert we only execute plugin once
self.assertEqual(mock_requests.post.call_count, 1)
# 5th incident after time window
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 101)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertFalse(eacounter.is_triggered)
# Assert we did not execute plugin yet
self.assertEqual(mock_requests.post.call_count, 1)
# 6th incident after time window
raw_incident = '{ "event": {"eventid":"%s", "element":"foo", "message":"omgwtfbbq"}, "timestamp": %d}' % (
self.event.id, T + 121)
self.assertTrue(eventpoller.parse_message(raw_incident))
eacounter = EventActionCounter.objects.get(incident=incident)
self.assertTrue(eacounter.is_triggered)
# Assert event action occurred for the second time
self.assertEqual(mock_requests.post.call_count, 2)
#todo create tests to check use cases mentioned in the comments | 42.441176 | 131 | 0.682144 | 530 | 4,329 | 5.492453 | 0.315094 | 0.045345 | 0.032978 | 0.047406 | 0.493301 | 0.493301 | 0.493301 | 0.463071 | 0.434902 | 0.434902 | 0 | 0.017417 | 0.204204 | 4,329 | 102 | 132 | 42.441176 | 0.827576 | 0.275583 | 0 | 0.510204 | 0 | 0.122449 | 0.178327 | 0.057273 | 0 | 0 | 0 | 0.009804 | 0.346939 | 1 | 0.040816 | false | 0 | 0.122449 | 0 | 0.183673 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d4c948f5a6b82f9e20993ba76cb0021d715d245 | 8,016 | py | Python | code.py | dnkorte/skating_stopwatch | 6cde91471782298a60d2a43f52470730bb94a3ae | [
"MIT"
] | null | null | null | code.py | dnkorte/skating_stopwatch | 6cde91471782298a60d2a43f52470730bb94a3ae | [
"MIT"
] | null | null | null | code.py | dnkorte/skating_stopwatch | 6cde91471782298a60d2a43f52470730bb94a3ae | [
"MIT"
] | null | null | null | """
# PyPortal referee stopwatch for figure skating competitions
# Author(s): Don Korte
# Module: code.py is mainline initialization and master loop
#
# github: https://github.com/dnkorte/skating_stopwatch.git
#
# MIT License
#
# Copyright (c) 2019 Don Korte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# series 7 moves buttons into display_xxx modules
# series 6 adds second screen (for tod set) using separate classes for each screen
# moved initialization for screen textboxes into display classes
# series 5 this incorporates barry enhancements (interruption timer)
# 20190816 has piezo beeper instead of audio .wav (controlled from mainline)
#
"""
import time
from collections import namedtuple
import board
from digitalio import DigitalInOut, Direction, Pull
import displayio
from adafruit_display_text.label import Label
from adafruit_bitmap_font import bitmap_font
import terminalio # added by dnk per https://learn.adafruit.com/circuitpython-display-support-using-displayio?view=all
from adafruit_display_shapes.rect import Rect
from adafruit_button import Button
import adafruit_touchscreen
from analogio import AnalogIn
from display_main import Display_Main
from display_todset import Display_Todset
from skating_info import Skating_Info
from controller import Controller
from beeper import Beep_Manager
from real_time_clock import RealTimeClock
import myconstants
import battery_checker
# initial splash screen just so it doesn't look dead for so long while it loads fonts
# cwd = ("/"+__file__).rsplit('/', 1)[0] # the current working directory (where this file is)
# startup_background = cwd+"/pyportal_splash.bmp"
splash = displayio.Group()
board.DISPLAY.show(splash)
f = open("boot_splash_stopwatch.bmp", "rb")
background = displayio.OnDiskBitmap(f)
face = displayio.TileGrid(background, pixel_shader=displayio.ColorConverter(), x=0, y=0)
splash.append(face)
board.DISPLAY.wait_for_frame()
Coords = namedtuple("Point", "x y")
ts = adafruit_touchscreen.Touchscreen(board.TOUCH_XL, board.TOUCH_XR,
board.TOUCH_YD, board.TOUCH_YU,
calibration=((5200, 59000), (5800, 57000)),
size=(320, 240))
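# note: the raw calibration min/max readings above are panel-specific; they
# map touch ADC values onto the 320x240 pixel grid of this particular unit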
# Load the font
font = bitmap_font.load_font("/fonts/Arial-12.bdf")
fontBig = bitmap_font.load_font("/fonts/Roboto-Bold-75.bdf")
# fontBig = bitmap_font.load_font("/fonts/RobotoMono-Bold-78.bdf")
# now preload the fonts so they display more quickly the first time
glyphs = b'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-,.: '
font.load_glyphs(glyphs)
fontBig.load_glyphs(glyphs)
# ======================== Make the main display context (watch) ========================
# Make a background color fill
color_bitmap = displayio.Bitmap(320, 240, 1)
color_palette = displayio.Palette(1)
color_palette[0] = myconstants.BLACK
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
watch_group = displayio.Group(max_size=35)
watch_group.append(bg_sprite)
# ============ create secondary screen for TOD Clock Setting (not initially shown) ==============
todset_group = displayio.Group(max_size=35)
bg_sprite_tod = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
todset_group.append(bg_sprite_tod)
# =========================== setup the classes for item management ========================
display_main = Display_Main(watch_group, font, fontBig)
display_todset = Display_Todset(todset_group, font, fontBig)
beep_manager = Beep_Manager()
rtc_manager = RealTimeClock()
skating_info = Skating_Info(display_main, beep_manager, rtc_manager)
controller = Controller(display_main, display_todset, skating_info, beep_manager, rtc_manager)
controller.set_current_screen("watch")
display_main.set_text_tod(rtc_manager.get_formatted_tod())
cur_button_label = "" # will hold "label" (the display text) of most recently clicked button
cur_button_id = None # will hold id of most recently clicked button
screensaver_timer = 0 # counts how long before screen dims if no touches
batt_counter = 0       # counts how long between battery updates
tod_timer = 0 # update time of day display only every 60 sec
watch_timer = 0        # update big (main) timer DISPLAY every other loop pass (~0.2 s) to reduce lagtime at startup
while True:
point = ts.touch_point
# if the screen is currently being touched (probably a button being pressed)
if point is not None:
screensaver_timer = 0 # register the touch for screensaver countdown
if controller.get_current_screen() == "watch":
cur_button_id = display_main.see_if_any_button_clicked(point)
if cur_button_id != None:
cur_button_label = display_main.get_button_label(cur_button_id)
elif controller.get_current_screen() == "todset":
cur_button_id = display_todset.see_if_any_button_clicked(point)
if cur_button_id != None:
cur_button_label = display_todset.get_button_label(cur_button_id)
# here, no button is pressed, so we check to see if a button was recently pressed/released
# but has not been processed yet. if an unprocessed command is pending, then deselect
# the button and then process the command, then indicate that it has been processed
elif cur_button_id != None:
cur_button_id.selected = False
if controller.get_current_screen() == "watch":
controller.process_command_watch(cur_button_label)
elif controller.get_current_screen() == "todset":
controller.process_command_todset(cur_button_label)
cur_button_label = ""
cur_button_id = None
watch_timer += 1
if watch_timer >= 2:
if controller.get_current_screen() == "watch":
skating_info.display_time() # only in watch mode
        skating_info.display_notes_panel()  # only in watch mode
watch_timer = 0
tod_timer = tod_timer + 1
if tod_timer >= 600:
if controller.get_current_screen() == "watch":
display_main.set_text_tod(rtc_manager.get_formatted_tod())
tod_timer = 0
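    # screensaver: at the ~0.1 s tick from time.sleep() below, 5900 ticks is
    # roughly ten minutes of no touches; the backlight first drops to 0.1,
    # then to 0.02, and snaps back to full brightness on the next touch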
screensaver_timer = screensaver_timer + 1
if screensaver_timer > 6000:
board.DISPLAY.brightness = 0.02
elif screensaver_timer > 5900:
board.DISPLAY.brightness = 0.1
else:
board.DISPLAY.brightness = 1
batt_counter = batt_counter + 1
# if batt_counter > 9: # update battery voltage once per second for testing...
if batt_counter > 600: # update battery voltage every 1 minute for real
batt_counter = 0
raw_volts = battery_checker.get_voltage()
batt_percent = battery_checker.get_battery_pct()
# display_main.set_text_wnb3("Vbat:"+str(raw_volts)+" PCT:"+str(batt_percent))
display_main.show_battery_status(batt_percent)
beep_manager.process_beep()
time.sleep(0.1) | 45.288136 | 120 | 0.721806 | 1,091 | 8,016 | 5.112741 | 0.342805 | 0.025816 | 0.01972 | 0.027967 | 0.166009 | 0.134098 | 0.077447 | 0.065615 | 0.065615 | 0.065615 | 0 | 0.018185 | 0.190494 | 8,016 | 177 | 121 | 45.288136 | 0.841424 | 0.424651 | 0 | 0.207547 | 0 | 0 | 0.039983 | 0.025344 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.188679 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d4ca2293273a6038abc8c6538534335292f79f2 | 2,746 | py | Python | examples/h2co_mm_example_despotic.py | glangsto/pyspeckit | 346b24fb828d1d33c7891cdde7609723e51af34c | [
"MIT"
] | 79 | 2015-03-03T15:06:20.000Z | 2022-03-27T21:29:47.000Z | examples/h2co_mm_example_despotic.py | glangsto/pyspeckit | 346b24fb828d1d33c7891cdde7609723e51af34c | [
"MIT"
] | 240 | 2015-01-04T02:59:12.000Z | 2021-11-13T15:11:14.000Z | examples/h2co_mm_example_despotic.py | glangsto/pyspeckit | 346b24fb828d1d33c7891cdde7609723e51af34c | [
"MIT"
] | 68 | 2015-03-02T12:23:12.000Z | 2022-02-28T10:26:36.000Z | import pyspeckit as psk
from pyspeckit.spectrum import models
from astropy.table import Table
from spectral_cube import SpectralCube
import numpy as np
import matplotlib.pyplot as plt
import despotic
import pyspeckit.spectrum.readers.read_class
import os
import shutil
if not os.path.exists('ph2cogrid.fits'):
if not os.path.exists('protostellarCore.desp'):
despotic_install_path = (os.path.split(despotic.__file__))[0]
shutil.copy(despotic_install_path+'/cloudfiles/protostellarCore.desp',os.getcwd())
models.formaldehyde_mm.build_despotic_grids(gridfile='ph2cogrid.fits', DvUpper=10)
t = Table.read('ph2cogrid.fits')
# This returns interpolating functions that take physical parameters
# and returns values for Tex, Tau for the three mm transitions.
f1, f2, f3 = models.formaldehyde_mm.formaldehyde_mm_despotic_functions(t)
# Instantiate that fitter!
formaldehyde_fitter = models.model.SpectralModel(models.formaldehyde_mm.formaldehyde_mm_despotic,
5, parnames=['temperature', 'column', 'density',
'center', 'width'],
parvalues=[50,12,5.0,0,2],
parlimited=[(True, True), (True, True),
(True, True), (False, False),
(True, False)],
parlimits=[(5,205), (10,17),
(2,7), (0,0), (0,0)],
parsteps=[0.01, 0.01, 0.1, 0, 0],
fitunits='Hz',
h2co_303_202=f1, # interpolation of (Tex, tau)
h2co_322_221=f2,
h2co_321_220=f3,
shortvarnames=("T", "N", "n", "v", "\\sigma"))
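# the five entries of parnames above map positionally onto parvalues,
# parlimited, parlimits and parsteps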
sp = pyspeckit.spectrum.readers.read_class.class_to_spectra('example_h2co_mm_spectrum.apex')
sp.data *= 1/0.75 # T_A* -> T_MB
sp.unit = "$T_{MB}$"
# estimate the error from the data
# sp.error[:] = sp.stats((2.183e2,2.184e2))['std']
sp.Registry.add_fitter('formaldehyde_mm_despotic', formaldehyde_fitter, 5)
#plot fit for all 3 ('both')
sp.plotter(figure=1)
sp.specfit(fittype='formaldehyde_mm_despotic',
guesses=[95, 14.5, 4, 0.0, 4.0],
limits=[(10,300), (11,15), (2,7), (-20,150), (1, 10)],
limited=[(True, True)]*5,
fixed=[False, False, True, False, False])
sp.plotter.savefig('test_fitting_figure_01.png')
| 47.344828 | 95 | 0.531682 | 307 | 2,746 | 4.609121 | 0.482085 | 0.069258 | 0.062191 | 0.033922 | 0.100353 | 0.059364 | 0 | 0 | 0 | 0 | 0 | 0.064913 | 0.349235 | 2,746 | 57 | 96 | 48.175439 | 0.726917 | 0.110342 | 0 | 0 | 0 | 0 | 0.104766 | 0.064503 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.232558 | 0 | 0.232558 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d4cf207716d10d789ee993c688b562c50706913 | 730 | py | Python | flask_app/app.py | DmytroKomisar/DevOpsTestTask | 41391a3566da7bc619251bac73578e2c33ba6b10 | [
"Apache-2.0",
"MIT"
] | null | null | null | flask_app/app.py | DmytroKomisar/DevOpsTestTask | 41391a3566da7bc619251bac73578e2c33ba6b10 | [
"Apache-2.0",
"MIT"
] | null | null | null | flask_app/app.py | DmytroKomisar/DevOpsTestTask | 41391a3566da7bc619251bac73578e2c33ba6b10 | [
"Apache-2.0",
"MIT"
] | null | null | null | from flask import Flask, request, abort, jsonify
app = Flask(__name__)
@app.route('/', methods=['POST'])
def upload():
if not request.is_json:
abort(400)
content = request.get_json()
security_groups = set()
for module in content['modules']:
resources = module['resources']
sgs = [x for x in resources.values() if 'type' in x and x['type'] == 'aws_security_group']
security_groups.update(x['primary']['id'] for x in sgs)
return jsonify(list(security_groups))
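# Illustrative shape of the JSON body upload() expects (a Terraform tfstate in
# the pre-0.12 "modules" layout; the resource name and id below are made up):
# {
#   "modules": [
#     {"resources": {"aws_security_group.web": {
#         "type": "aws_security_group",
#         "primary": {"id": "sg-0123456789"}}}}
#   ]
# }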
@app.route('/', methods=['GET'])
def resp():
return 'Use curl -X POST -H "Content-Type: application/json" -d @terraform.tfstate http://app.local/ \n'
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
| 26.071429 | 108 | 0.646575 | 105 | 730 | 4.314286 | 0.542857 | 0.092715 | 0.066225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011745 | 0.183562 | 730 | 27 | 109 | 27.037037 | 0.748322 | 0 | 0 | 0 | 0 | 0.055556 | 0.232877 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0.055556 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d4d30b6e6303645af1e2596d61513feb8c14510 | 7,537 | py | Python | neurosity/neurosity.py | neurosity/neurosity-python-sdk | 1c8b6412c80711a23a4d677b00e00a0972da6278 | [
"MIT"
] | 3 | 2022-03-16T21:02:44.000Z | 2022-03-24T20:22:21.000Z | neurosity/neurosity.py | neurosity/neurosity-python-sdk | 1c8b6412c80711a23a4d677b00e00a0972da6278 | [
"MIT"
] | 1 | 2022-03-18T15:42:35.000Z | 2022-03-18T15:42:35.000Z | neurosity/neurosity.py | neurosity/neurosity-python-sdk | 1c8b6412c80711a23a4d677b00e00a0972da6278 | [
"MIT"
] | null | null | null | import pyrebase
import atexit
from neurosity.config import PyRebase
class neurosity_sdk:
def __init__(self, options):
if ("device_id" not in options):
raise ValueError("Neurosity SDK: A device ID is required to use the SDK")
options.setdefault("environment", "production")
self.options = options
pyrebase_config = PyRebase.STAGING if options["environment"] == "staging" else PyRebase.PRODUCTION
self.firebase = pyrebase.initialize_app(pyrebase_config)
self.auth = self.firebase.auth()
self.db = self.firebase.database()
self.subscription_ids = []
atexit.register(self.exit_handler)
def exit_handler(self):
self.remove_client()
self.remove_all_subscriptions()
def get_server_timestamp(self):
return {".sv": "timestamp"}
def login(self, credentials):
if (hasattr(self, "user") and hasattr(self, "token")):
print("Neurosity SDK: The SDK is already authenticated.")
return
self.user = self.auth.sign_in_with_email_and_password(
credentials["email"], credentials["password"])
self.token = self.user['idToken']
if (not hasattr(self, "client_id")):
self.add_client()
def add_client(self):
device_id = self.options["device_id"]
clients_path = f"devices/{device_id}/clients"
timestamp = self.get_server_timestamp()
push_result = self.db.child(clients_path).push(timestamp, self.token)
self.client_id = push_result["name"]
def remove_client(self):
client_id = self.client_id
        if client_id:
device_id = self.options["device_id"]
client_path = f"devices/{device_id}/clients/{client_id}"
self.db.child(client_path).remove(self.token)
            # @TODO: handle response
def add_action(self, action):
if ("command" not in action):
raise ValueError("A command is required for actions")
if ("action" not in action):
raise ValueError("An action is required for actions")
device_id = self.options["device_id"]
actions_path = f"devices/{device_id}/actions"
action.setdefault("responseRequired", False)
action.setdefault("responseTimeout", None)
push_result = self.db.child(actions_path).push(action, self.token)
return push_result
def add_subscription(self, metric, label, atomic):
client_id = self.client_id
device_id = self.options["device_id"]
subscription_id = self.db.generate_key()
subscription_path = f"devices/{device_id}/subscriptions/{subscription_id}"
subscription_payload = {
"atomic": atomic,
"clientId": client_id,
"id": subscription_id,
"labels": [label],
"metric": metric,
"serverType": "firebase",
}
self.db.child(subscription_path).set(
subscription_payload, self.token)
# caching subscription ids locally for unsubscribe teardown on exit
self.subscription_ids.append(subscription_id)
return subscription_id
def remove_subscription(self, subscription_id):
device_id = self.options["device_id"]
subscription_path = f"devices/{device_id}/subscriptions/{subscription_id}"
self.db.child(subscription_path).remove(self.token)
def remove_all_subscriptions(self):
device_id = self.options["device_id"]
subscriptions_path = f"devices/{device_id}/subscriptions"
data = {}
for subscription_id in self.subscription_ids:
data[subscription_id] = None
self.db.child(subscriptions_path).update(data, self.token)
def stream_metric(self, callback, metric, label, atomic):
subscription_id = self.add_subscription(metric, label, atomic)
if (atomic):
metric_path = f"metrics/{metric}"
else:
metric_path = f"metrics/{metric}/{label}"
def teardown(subscription_id):
self.remove_subscription(subscription_id)
self.subscription_ids.remove(subscription_id)
return self.stream_from_path(callback, metric_path, teardown, subscription_id)
def stream_from_path(self, callback, path_name, teardown=None, subscription_id=None):
device_id = self.options["device_id"]
path = f"devices/{device_id}/{path_name}"
stream_id = subscription_id or self.db.generate_key()
initial_message = {}
def stream_handler(message):
if (message["path"] == "/"):
initial_message[message["stream_id"]] = message
full_payload = message["data"]
else:
child = message["path"][1:]
full_payload = initial_message[message["stream_id"]]["data"]
if (message["data"] == None):
                    # delete key if value is `None`
full_payload.pop(child, None)
else:
full_payload[child] = message["data"]
callback(full_payload)
stream = self.db.child(path).stream(
stream_handler, self.token, stream_id=stream_id)
def unsubscribe():
if (teardown):
teardown(stream_id)
stream.close()
return unsubscribe
def get_from_path(self, path_name):
device_id = self.options["device_id"]
path = f"devices/{device_id}/{path_name}"
snapshot = self.db.child(path).get(self.token)
return snapshot.val()
def add_marker(self, label):
if (not label):
raise ValueError("A label is required for markers")
return self.add_action({
"command": "marker",
"action": "add",
"message": {
"label": label,
"timestamp": self.get_server_timestamp()
}
})
def brainwaves_raw(self, callback):
return self.stream_metric(callback, "brainwaves", "raw", False)
def brainwaves_raw_unfiltered(self, callback):
return self.stream_metric(callback, "brainwaves", "rawUnfiltered", False)
def brainwaves_psd(self, callback):
return self.stream_metric(callback, "brainwaves", "psd", False)
def brainwaves_power_by_band(self, callback):
return self.stream_metric(callback, "brainwaves", "powerByBand", False)
def signal_quality(self, callback):
return self.stream_metric(callback, "signalQuality", None, True)
def accelerometer(self, callback):
return self.stream_metric(callback, "accelerometer", None, True)
def calm(self, callback):
return self.stream_metric(callback, "awareness", "calm", False)
def focus(self, callback):
return self.stream_metric(callback, "awareness", "focus", False)
def kinesis(self, label, callback):
return self.stream_metric(callback, "kinesis", label, False)
def kinesis_predictions(self, label, callback):
return self.stream_metric(callback, "predictions", label, False)
def status(self, callback):
return self.stream_from_path(callback, "status")
def settings(self, callback):
return self.stream_from_path(callback, "settings")
def status_once(self):
return self.get_from_path("status")
def settings_once(self):
return self.get_from_path("settings")
def get_info(self):
return self.get_from_path("info")
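
# A minimal usage sketch (the device id and credentials below are placeholders,
# not real values): authenticate, then stream calm scores until the returned
# unsubscribe function is called.
if __name__ == "__main__":
    neurosity = neurosity_sdk({"device_id": "YOUR_DEVICE_ID"})
    neurosity.login({"email": "you@example.com", "password": "your-password"})
    unsubscribe = neurosity.calm(lambda data: print(data))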
| 34.732719 | 106 | 0.631949 | 852 | 7,537 | 5.388498 | 0.166667 | 0.045306 | 0.045306 | 0.062731 | 0.342845 | 0.262688 | 0.225441 | 0.181006 | 0.049662 | 0.02396 | 0 | 0.000179 | 0.25806 | 7,537 | 216 | 107 | 34.893519 | 0.820815 | 0.015789 | 0 | 0.10625 | 0 | 0 | 0.141759 | 0.042352 | 0 | 0 | 0 | 0.00463 | 0 | 1 | 0.2 | false | 0.0125 | 0.01875 | 0.1 | 0.36875 | 0.00625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d4e51a479e1c13cd74a1e9a830b7c2a69de31d5 | 2,624 | py | Python | ChiantiPy/tools/sources.py | kdere/ChiantiPy | 2d17585d64dd1ed5a92edc645d6c85176899c185 | [
"0BSD",
"MIT"
] | 56 | 2016-01-14T15:34:50.000Z | 2022-03-09T10:41:36.000Z | ChiantiPy/tools/sources.py | kdere/ChiantiPy | 2d17585d64dd1ed5a92edc645d6c85176899c185 | [
"0BSD",
"MIT"
] | 163 | 2015-11-12T16:01:22.000Z | 2022-03-23T14:19:59.000Z | ChiantiPy/tools/sources.py | chianti-atomic/ChiantiPy | 0d47cc1c5855ab0290d0c6bd43628722651a77c5 | [
"0BSD",
"MIT"
] | 36 | 2015-11-12T16:03:47.000Z | 2022-02-09T17:53:39.000Z | """
Blackbody temperature calculations
"""
import numpy as np
import ChiantiPy.tools.constants as const
class blackStar:
"""
Calculate blackbody radiation
Parameters
----------
temperature : `~numpy.ndarray`
Temperature in Kelvin
radius : `~numpy.ndarray`
Stellar radius in cm
Attributes
----------
Temperature : `~numpy.ndarray`
Temperature in Kelvin
Radius : `~numpy.ndarray`
Stellar radius in cm
Incident : `~numpy.ndarray`
Blackbody photon distribution
"""
def __init__(self, temperature, radius):
self.Temperature = temperature
self.Radius = radius
def incident(self, distance, energy):
"""
Calculate photon distribution times the visible cross-sectional area.
Parameters
----------
distance : `~numpy.ndarray`
Distance to the stellar object in cm
energy : `~numpy.ndarray`
Energy range in erg
Notes
-----
        The photon distribution is multiplied by the geometric factor
        pi*(Radius/distance)**2 and the result is stored in the `Incident`
        attribute.
"""
        print(' distance %10.2e' % (distance))
        bb = blackbody(self.Temperature, energy)
        out = const.pi*(self.Radius/distance)**2*bb['photons']
        self.Incident = out
def blackbody(temperature, variable, hnu=1):
"""
Calculate the blackbody photon distribution as a function of energy (`hnu` = 1) or as a function of wavelength (`hnu` = 0) in units of :math:`\mathrm{photons}\,\mathrm{cm}^{-2}\,\mathrm{s}^{-1}\,\mathrm{str}^{-1}\,\mathrm{erg}^{-1}`
Parameters
----------
temperature : `~numpy.float64`
Temperature at which to calculate the blackbody photon distribution
variable : `~numpy.ndarray`
Either energy (in erg) or wavelength (in angstrom)
hnu : `int`
If 1, calculate distribution as a function of energy. Otherwise, calculate it as a function of wavelength
Returns
-------
{'photons', 'temperature', 'energy'} or {'photons', 'temperature', 'wvl'} : `dict`
"""
if hnu:
energy = variable
        bb = (2./(const.planck*(const.hc**2)))*energy**2/(np.exp(energy/(const.boltzmann*temperature)) - 1.)
return {'photons':bb, 'temperature':temperature, 'energy':energy}
else:
wvl = 1.e-8*variable
bb = ((2.*const.pi*const.light)/wvl**4)/(np.exp(const.hc/(wvl*const.boltzmann*temperature)) - 1.)
return {'photons':bb, 'temperature':temperature, 'wvl':wvl}
| 32.8 | 237 | 0.61814 | 299 | 2,624 | 5.411371 | 0.311037 | 0.059333 | 0.027194 | 0.032138 | 0.280593 | 0.211372 | 0.173053 | 0.173053 | 0.173053 | 0.095179 | 0 | 0.01161 | 0.245046 | 2,624 | 79 | 238 | 33.21519 | 0.805149 | 0.556402 | 0 | 0 | 0 | 0 | 0.082707 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.4 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d4e92239a42a1e07fc1974d6169fd9d61b14b1d | 6,854 | py | Python | plugins/module_utils/dnac_path_trace.py | takamitsu-iida/ansible_collections.iida.dnac | 943c9e0b954087d82db934865605bf7eb7608659 | [
"MIT"
] | null | null | null | plugins/module_utils/dnac_path_trace.py | takamitsu-iida/ansible_collections.iida.dnac | 943c9e0b954087d82db934865605bf7eb7608659 | [
"MIT"
] | null | null | null | plugins/module_utils/dnac_path_trace.py | takamitsu-iida/ansible_collections.iida.dnac | 943c9e0b954087d82db934865605bf7eb7608659 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=missing-module-docstring
import json
import logging
import datetime
try:
HAS_TABULATE = True
import tabulate
except ImportError:
HAS_TABULATE = False
try:
from dnac_rest_client import DnacRestClient
except ImportError:
from ansible_collections.iida.dnac.plugins.module_utils.dnac_rest_client import DnacRestClient
logger = logging.getLogger(__name__)
class DnacPathTrace(DnacRestClient):
"""Manage Path Trace
"""
def get_path_trace(self):
"""Retrives all previous Pathtraces summary
version 1.2
/dna/intent/api/v1/flow-analysis
Returns:
list -- List of path trace object
"""
api_path = '/dna/intent/api/v1/flow-analysis'
get_result = self.get(api_path=api_path)
return self.extract_data_response(get_result)
def get_path_trace_by_id(self, path_id):
"""Get path trace by id
version 1.2
/dna/intent/api/v1/flow-analysis/{path_id}
Arguments:
            path_id {str} -- ID of an existing path trace
Returns:
dict -- Object of the path trace
"""
api_path = '/dna/intent/api/v1/flow-analysis/{}'.format(path_id)
get_result = self.get(api_path=api_path)
return self.extract_data_response(get_result)
def show_path_trace(self, path_trace=None):
"""Print path_trace
Keyword Arguments:
path_trace {dict} -- Object of the path_trace (default: {None})
"""
if path_trace is None:
logger.error("no path_trace found to show path trace")
return
# print(json.dumps(path_trace, indent=2))
# networkElementsInfo is the list of trace
# see data-structure-memo.txt
headers = ['name', 'ip', 'type', 'ingress', 'egress']
table = []
for element in path_trace['networkElementsInfo']:
element_name = element.get('name')
element_ip = element.get('ip')
element_type = element.get('type')
ingress_name = element.get('ingressInterface', {}).get('physicalInterface', {}).get('name') or '-'
egress_name = element.get('egressInterface', {}).get('physicalInterface', {}).get('name') or '-'
table.append([element_name, element_ip, element_type, ingress_name, egress_name])
print(tabulate.tabulate(table, headers, tablefmt='simple'))
def show_path_trace_list(self, path_trace_list):
"""Print path trace list
Arguments:
path_trace_list {list} -- List of path trace object
"""
if not path_trace_list:
logger.error("no path_trace found to show the list of path trace")
return
# sort by createTime
path_trace_list = sorted(path_trace_list, key=lambda path: path.get('createTime'))
headers = ['sourceIP', 'destIP', 'status', 'createTime', 'id', 'inclusions']
table = []
for path in path_trace_list:
source_ip = path.get('sourceIP') or '-'
dest_ip = path.get('destIP') or '-'
status = path.get('status')
create_time = path.get('createTime') # this is int
create_time /= 1000 # from msec to sec
create_time = datetime.datetime.fromtimestamp(create_time)
create_time = create_time.strftime('%Y-%m-%d %H:%M:%S')
inclusions = path.get('inclusions') or []
inclusions = ', '.join(inclusions)
path_id = path.get('id')
table.append([source_ip, dest_ip, status, create_time, path_id, inclusions])
print(tabulate.tabulate(table, headers, tablefmt='simple'))
def create_path_trace(self, src_ip=None, dst_ip=None, src_port=None, dst_port=None):
"""Initiate a new Pathtrace
version 1.2
/dna/intent/api/v1/flow-analysis
Keyword Arguments:
            src_ip {str} -- source IP address of the flow (default: {None})
            dst_ip {str} -- destination IP address of the flow (default: {None})
            src_port {str} -- source port of the flow (default: {None})
            dst_port {str} -- destination port of the flow (default: {None})
Returns:
            None -- the task outcome is logged via the module logger
"""
if not all((src_ip, dst_ip)):
logger.error('src_ip and dst_ip are required to create path trace')
return
payload = {
'sourceIP': src_ip,
'destIP': dst_ip,
'periodicRefresh': False,
'inclusions': ['INTERFACE-STATS', 'DEVICE-STATS']
}
if src_port is not None:
payload['sourcePort'] = src_port
if dst_port is not None:
payload['destPort'] = dst_port
api_path = '/dna/intent/api/v1/flow-analysis'
post_result = self.post(api_path=api_path, data=payload)
if post_result.get('failed'):
status_code = post_result.get('status_code')
if status_code == 403:
logging.error('The server recognizes the authentication credentials, but the client is not authorized to perform this request.')
elif status_code == 404:
logging.error('The client made a request for a resource that does not exist.')
elif status_code == 409:
logging.error('The target resource is in a conflicted state. Retrying the request later might succeed.')
elif status_code == 415:
logging.error('The client sent a request body in a format that the server does not support')
return
data = self.extract_data_response(post_result)
task_id = data.get('taskId')
wait_result = self.wait_for_task(task_id)
if wait_result.get('failed'):
logger.error('wait failed')
return
logger.info(wait_result.get('progress'))
    def delete_path_trace_by_id(self, path_trace_id=None):
        """Delete a path trace by its id
        /dna/intent/api/v1/flow-analysis/{path_trace_id}
        """
        api_path = '/dna/intent/api/v1/flow-analysis/{}'.format(path_trace_id)
        delete_result = self.delete_object(api_path)
        logger.info(json.dumps(delete_result, indent=2))
if __name__ == '__main__':
import sys
from dnac_sandbox import sandbox_params
def main():
"""main function for test"""
logging.basicConfig(level=logging.INFO)
params = sandbox_params.get('always-on-lab')
params = sandbox_params.get('hardware-lab')
# DnacRestClient object
drc = DnacPathTrace(params)
# get path_trace list
path_trace_list = drc.get_path_trace()
drc.show_path_trace_list(path_trace_list)
# # for example
# # select first path_trace object
# path_trace_id = path_trace_list[0].get('id')
# path_trace_id = '7916708d-be09-40d9-b73f-0b71eb9575b0'
# path_trace = drc.get_path_trace_by_id(path_trace_id)
# drc.show_path_trace(path_trace)
# create a new path trace
src_ip = '10.10.20.81'
dst_ip = '10.10.20.82'
# drc.create_path_trace(src_ip=src_ip, dst_ip=dst_ip)
# get path_trace list
path_trace_list = drc.get_path_trace()
drc.show_path_trace_list(path_trace_list)
# drc.delete_path_trace_by_id(path_trace_id='b95f7fcd-31d9-4f4a-9d94-1c0a5181de6e')
# drc.delete_path_trace_by_id(path_trace_id='4f9f5ca5-a8d7-49a8-a038-278fd5576049')
# drc.delete_path_trace_by_id(path_trace_id='4dc87a90-bbfb-4922-94dc-64bcb8e06ce6')
return 0
sys.exit(main())
| 30.061404 | 136 | 0.678144 | 943 | 6,854 | 4.700954 | 0.246023 | 0.119783 | 0.049853 | 0.022107 | 0.291902 | 0.22152 | 0.189939 | 0.184525 | 0.13219 | 0.086172 | 0 | 0.023714 | 0.200175 | 6,854 | 227 | 137 | 30.193833 | 0.784933 | 0.25445 | 0 | 0.209091 | 0 | 0 | 0.212447 | 0.027164 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063636 | false | 0 | 0.090909 | 0 | 0.236364 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d54823200e0f754f7348757b18b30672ceb3d3f | 2,460 | py | Python | src/main/python/khaiii/train/tagger.py | cjdans5545/khaiii | 328d5a8af456a5941130383354c07d1cd0e47cf5 | [
"Apache-2.0"
] | 1,235 | 2018-11-30T01:35:13.000Z | 2022-03-31T03:47:48.000Z | src/main/python/khaiii/train/tagger.py | cjdans5545/khaiii | 328d5a8af456a5941130383354c07d1cd0e47cf5 | [
"Apache-2.0"
] | 91 | 2018-11-30T05:19:28.000Z | 2022-03-14T12:38:44.000Z | src/main/python/khaiii/train/tagger.py | cjdans5545/khaiii | 328d5a8af456a5941130383354c07d1cd0e47cf5 | [
"Apache-2.0"
] | 332 | 2018-11-30T00:49:04.000Z | 2022-03-30T01:57:54.000Z | # -*- coding: utf-8 -*-
"""
part-of-speech tagger
__author__ = 'Jamie (jamie.lim@kakaocorp.com)'
__copyright__ = 'Copyright (C) 2019-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
from argparse import Namespace
import json
import logging
import re
import torch.nn.functional as F
from khaiii.resource.resource import Resource
from khaiii.train.dataset import PosSentTensor
from khaiii.train.models import Model
#########
# types #
#########
class PosTagger:
"""
part-of-speech tagger
"""
def __init__(self, model_dir: str, gpu_num: int = -1):
"""
Args:
model_dir: model dir
gpu_num: GPU number to override
"""
        with open('{}/config.json'.format(model_dir), 'r', encoding='UTF-8') as cfg_file:
            cfg_dict = json.load(cfg_file)
self.cfg = Namespace()
for key, val in cfg_dict.items():
setattr(self.cfg, key, val)
setattr(self.cfg, 'gpu_num', gpu_num)
self.rsc = Resource(self.cfg)
self.model = Model(self.cfg, self.rsc)
self.model.load('{}/model.state'.format(model_dir))
self.model.eval()
def tag_raw(self, raw_sent: str, enable_restore: bool = True) -> PosSentTensor:
"""
part-of-speech tagging at raw sentence
Args:
raw_sent: raw input sentence
Returns:
PosSentTensor object
"""
pos_sent = PosSentTensor(raw_sent)
contexts = pos_sent.get_contexts(self.cfg, self.rsc)
left_spc_masks, right_spc_masks = pos_sent.get_spc_masks(self.cfg, self.rsc, False)
outputs, _ = self.model(PosSentTensor.to_tensor(contexts, self.cfg.gpu_num), # pylint: disable=no-member
PosSentTensor.to_tensor(left_spc_masks, self.cfg.gpu_num), # pylint: disable=no-member
PosSentTensor.to_tensor(right_spc_masks, self.cfg.gpu_num)) # pylint: disable=no-member
_, predicts = F.softmax(outputs, dim=1).max(1)
tags = [self.rsc.vocab_out[t.item()] for t in predicts]
pos_sent.set_pos_result(tags, self.rsc.restore_dic if enable_restore else None)
if logging.getLogger().isEnabledFor(logging.DEBUG):
raw_nospc = re.sub(r'\s+', '', raw_sent)
for idx, (tag, pred) in enumerate(zip(tags, predicts)):
logging.debug('[%2d]%s: %5s(%d)', idx, raw_nospc[idx], tag, pred.data[0])
return pos_sent
| 33.69863 | 122 | 0.610976 | 319 | 2,460 | 4.526646 | 0.410658 | 0.048476 | 0.027701 | 0.036011 | 0.110803 | 0.110803 | 0.110803 | 0.110803 | 0.110803 | 0.110803 | 0 | 0.006504 | 0.25 | 2,460 | 72 | 123 | 34.166667 | 0.776152 | 0.187398 | 0 | 0 | 0 | 0 | 0.032698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.235294 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d55134a6274e95c13bfff3b7a7a64f396c1258d | 2,614 | py | Python | hot_coffee_brewers/queries.py | pr222/1dv503-pa2-hot-coffee-brewers | 0f51363a0413391eee5720245625685709e4d21a | [
"MIT"
] | null | null | null | hot_coffee_brewers/queries.py | pr222/1dv503-pa2-hot-coffee-brewers | 0f51363a0413391eee5720245625685709e4d21a | [
"MIT"
] | null | null | null | hot_coffee_brewers/queries.py | pr222/1dv503-pa2-hot-coffee-brewers | 0f51363a0413391eee5720245625685709e4d21a | [
"MIT"
] | null | null | null | '''
List number of reviews for all Coffee Shops.
'''
def query_all_shop_reviews(cursor, cnx):
query = "SELECT reviews.shopID, coffee_shops.name, COUNT(*)FROM reviews JOIN coffee_shops ON reviews.shopID = coffee_shops.id GROUP BY shopID;"
cursor.execute(query)
myresult = cursor.fetchall()
for result in myresult:
print(result)
'''
List all Coffee Shops with their coffee sorts
that has an average rating of 4.5 or above.
'''
def query_most_liked_coffee_all_shops(cursor, cnx):
query2 = "SELECT avgRate.Shop, avgRate.Coffee, avgRate.Rating " \
"FROM (" \
"SELECT " \
"s.name as Shop, " \
"c.name as Coffee, " \
"AVG(r.rating) as Rating " \
"FROM coffee_reviews.reviews r " \
"JOIN coffee_reviews.coffee c " \
"ON r.coffeeID=c.id " \
"JOIN coffee_reviews.coffee_shops s " \
"ON r.shopID=s.id " \
"GROUP BY " \
"s.name, " \
"c.name " \
") avgRate " \
"WHERE avgRate.Rating >= 4.5 " \
"GROUP BY avgRate.Shop, avgRate.Coffee;"
cursor.execute(query2)
myresult = cursor.fetchall()
for result in myresult:
print(result)
'''
List average ratings for all different coffees
for a specific Coffee Shop.
'''
def query_reviews_coffee_specific_shop(shop_name, cursor, cnx):
    query3 = "SELECT coffee.name, AVG(reviews.rating) " \
        "FROM reviews " \
        "JOIN coffee_shops ON reviews.shopID = coffee_shops.id " \
        "JOIN coffee ON reviews.coffeeID = coffee.id " \
        "WHERE coffee_shops.name = %s " \
        "GROUP BY coffee.name " \
        "ORDER BY AVG(reviews.rating) DESC;"
    cursor.execute(query3, (shop_name,))
myresult = cursor.fetchall()
for result in myresult:
print(result)
'''
Shows the one coffee sort with the most ratings for all countries from the view table
'''
def query_most_rated_coffee(cursor, cnx):
query4 = "SELECT name, country, ratings " \
"FROM reviewtimes " \
"WHERE ratings = (SELECT MAX(ratings) FROM reviewtimes) "
cursor.execute(query4)
myresult = cursor.fetchall()
for result in myresult:
print(result)
'''
Shows the one coffee sort with the least ratings for all countries from the view table
'''
def query_least_rated_coffee(cursor, cnx):
query5 = "SELECT name, country, ratings " \
"FROM reviewtimes " \
"WHERE ratings = (SELECT MIN(ratings) FROM reviewtimes) "
cursor.execute(query5)
myresult = cursor.fetchall()
for result in myresult:
print(result) | 31.878049 | 147 | 0.628921 | 327 | 2,614 | 4.941896 | 0.235474 | 0.061262 | 0.068069 | 0.077351 | 0.438119 | 0.394802 | 0.394802 | 0.394802 | 0.394802 | 0.292079 | 0 | 0.006231 | 0.263198 | 2,614 | 82 | 148 | 31.878049 | 0.832814 | 0.016832 | 0 | 0.303571 | 0 | 0.017857 | 0.427189 | 0.032258 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089286 | false | 0 | 0 | 0 | 0.089286 | 0.089286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
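
# A minimal usage sketch (connection parameters are placeholders; assumes the
# mysql-connector-python driver and the coffee_reviews schema queried above):
if __name__ == '__main__':
    import mysql.connector
    cnx = mysql.connector.connect(user='user', password='password',
                                  host='127.0.0.1', database='coffee_reviews')
    cursor = cnx.cursor()
    query_all_shop_reviews(cursor, cnx)
    cursor.close()
    cnx.close()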
0d57187b1fe621fa44ec3e0138c833d459861dc4 | 41,128 | py | Python | lib/eapeak/parse.py | InfamousSYN/eapeak | 62f8989b23c723f952cab462b326d59c7f16faec | [
"BSD-3-Clause"
] | null | null | null | lib/eapeak/parse.py | InfamousSYN/eapeak | 62f8989b23c723f952cab462b326d59c7f16faec | [
"BSD-3-Clause"
] | null | null | null | lib/eapeak/parse.py | InfamousSYN/eapeak | 62f8989b23c723f952cab462b326d59c7f16faec | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# lib/eapeak/parse.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import datetime
import os
import sys
import struct
import time
from xml.dom import minidom
from xml.etree import ElementTree
try:
import curses
CURSES_CAPABLE = True
except ImportError:
CURSES_CAPABLE = False
from M2Crypto import X509
from scapy.utils import PcapReader
import scapy.packet # pylint: disable=unused-import
import scapy.layers.all # pylint: disable=unused-import
from scapy.layers.eap import EAP
from eapeak.scapylayers.l2 import eap_types as EAP_TYPES
from eapeak.common import get_bssid, get_source, get_destination, EXPANDED_EAP_VENDOR_IDS, __version__
import eapeak.networks
import eapeak.clients
# Statics
UNKNOWN_SSID_NAME = 'UNKNOWN_SSID'
XML_FILE_NAME = 'eapeak.xml'
SSID_SEARCH_RECURSION = 5
CURSES_LINE_BREAK = (0, '')
CURSES_REFRESH_FREQUENCY = 0.10
CURSES_LOWER_REFRESH_FREQUENCY = 5  # Also used for calls to export_xml
CURSES_MIN_X = 99
CURSES_MIN_Y = 25
TAB_LENGTH = 4
TAB_DEPTH_2 = 2 * TAB_LENGTH
TAB_DEPTH_3 = 3 * TAB_LENGTH
TAB_DEPTH_4 = 4 * TAB_LENGTH
USER_MARKER = '=> '
USER_MARKER_OFFSET = 8
SSID_MAX_LENGTH = 32
EAP_TYPES[0] = 'NONE'
def merge_wireless_networks(source, destination):
"""
Merge information about two wireless networks, used to preserve
information when one is un-orphaned.
"""
for bssid in source.bssids:
destination.add_BSSID(bssid)
for clientobj in source.clients.values():
destination.add_client(clientobj)
for eaptype in source.eapTypes:
destination.addEapType(eaptype)
for cert in source.x509certs:
destination.add_certificate(cert)
return destination
class wpsDataHolder(dict):
"""
This wraps a dictionary and a few key methods to allow types to be
retreived from either their numerical cylon value or thier alphabetical
human value
Keys are not case sensitive because I like it that way.
"""
__h_to_c__ = {
'authentication type flags': 0x1004,
'authenticator': 0x1005,
'configuration error': 0x1009,
'encryption type flags': 0x1010,
'device name': 0x1011,
'encrypted settings': 0x1018,
'enrollee nonce': 0x101a,
'manufacturer': 0x1021,
'message type': 0x1022,
'model name': 0x1023,
'model number': 0x1024,
'os version': 0x102d,
'registrar nonce': 0x1039,
'uuid': 0x1048,
'version': 0x104a,
}
def __getitem__(self, index):
if isinstance(index, str):
if index.lower() in self.__h_to_c__:
index = self.__h_to_c__[index.lower()]
else:
raise KeyError(index)
return dict.__getitem__(self, index)
def __setitem__(self, name, value):
if isinstance(name, str):
if name.lower() in self.__h_to_c__:
name = self.__h_to_c__[name.lower()]
else:
raise KeyError(name)
return dict.__setitem__(self, name, value)
def get(self, item):
if isinstance(item, str):
if item.lower() in self.__h_to_c__:
item = self.__h_to_c__[item.lower()]
else:
return None
return dict.get(self, item)
def has_key(self, item):
if isinstance(item, str):
if item.lower() in self.__h_to_c__:
item = self.__h_to_c__[item.lower()]
else:
return False
return dict.has_key(self, item)
def keys(self):
keys = dict.keys(self)
new_keys = []
for key, value in self.__h_to_c__.items():
if value in keys:
new_keys.append(key)
keys.extend(new_keys)
return keys
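# Illustration of the dual indexing described above (synthetic value): after
#   d = wpsDataHolder(); d['manufacturer'] = 'ACME'
# both d['manufacturer'] and d[0x1021] return 'ACME', and since lookups are
# not case sensitive, d['Manufacturer'] works as well.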
def parse_wps_data(wpsdata, trimStrings=True):
"""
Take raw WPS data string and return a dictionary of types and values
"""
data = wpsDataHolder()
while wpsdata:
if len(wpsdata) < 4:
raise Exception('invalid/corrupted WPS data')
_type = struct.unpack('>H', wpsdata[:2])[0]
length = struct.unpack('>H', wpsdata[2:4])[0]
if len(wpsdata) < (length + 4):
raise Exception('invalid/corrupted WPS data')
value = wpsdata[4:(4 + length)]
wpsdata = wpsdata[(4 + length):]
if trimStrings and _type in [0x1011, 0x1021, 0x1023, 0x1024]:
value = value.replace('\x00', '')
if not len(value):
continue
data[_type] = value
return data
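# A minimal illustration of the TLV layout parse_wps_data expects (synthetic
# bytes, not from a real capture): each element is a 2-byte big-endian type,
# a 2-byte big-endian length, then `length` bytes of value. For example
#   parse_wps_data('\x10\x21\x00\x04ACME')
# yields one element of type 0x1021 (manufacturer) with the value 'ACME'.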
def parse_rsn_data(rsndata):
"""
	Take raw RSN data and return a dictionary representing its values
Tag Number and Tag length are removed
"""
rsn = {}
rsn['version'] = struct.unpack('<H', rsndata[:2])[0]
rsn['grp_cipher'] = rsndata[2:6]
pair_ciphers = []
nbr_pair_cipher = struct.unpack('<H', rsndata[6:8])[0]
rsndata = rsndata[8:]
while nbr_pair_cipher and len(rsndata):
pair_ciphers.append(rsndata[:4])
rsndata = rsndata[4:]
nbr_pair_cipher -= 1
rsn['pair_ciphers'] = pair_ciphers
auth_key_mgmt = []
nbr_auth_key_mgmt = struct.unpack('<H', rsndata[:2])[0]
rsndata = rsndata[2:]
while nbr_auth_key_mgmt and len(rsndata):
auth_key_mgmt.append(rsndata[:4])
rsndata = rsndata[4:]
nbr_auth_key_mgmt -= 1
rsn['auth_key_mgmts'] = auth_key_mgmt
rsn['capabilities'] = rsndata
return rsn
def build_rsn_data(rsn):
version = rsn.get('version') or 1
rsndata = struct.pack('<H', version)
rsndata += rsn['grp_cipher']
rsndata += struct.pack('<H', 1)
rsndata += rsn['pair_ciphers'][0]
rsndata += struct.pack('<H', 1)
rsndata += rsn['auth_key_mgmts'][0]
rsndata += rsn.get('capabilities') or '\x00\x00'
return rsndata
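# Note on the parse/build pair above: build_rsn_data always emits exactly one
# pairwise cipher and one auth key management suite (the first of each list),
# so round-tripping an RSN element through parse_rsn_data/build_rsn_data only
# preserves it byte for byte when the element advertises one of each.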
class EapeakParsingEngine:
"""
This is the main parsing engine that manages all of the networks.
Notable attributes:
KnownNetworks: holds wireless network objects, indexed by SSID if available, BSSID if orphaned
BSSIDToSSIDMap: holds SSIDs, indexed by BSSIDS, so you can obtain network objects by BSSID
OrphanedBSSIDs: holds BSSIDs that are not associated with a known SSID
fragment_buffer: holds buffers (lists), indexed by connection strings (src_mac + ' ' + dst_mac)
"""
def __init__(self, targetSSIDs=None, targetBSSIDs=None):
self.KnownNetworks = {} # Holds wireless network objects, indexed by SSID if available, BSSID if orphaned
self.BSSIDToSSIDMap = {} # Holds SSIDs, indexed by BSSIDS, so you can obtain network objects by BSSID
self.OrphanedBSSIDs = [] # holds BSSIDs that are not associated with a known SSID
self.packets = []
self.targetSSIDs = targetSSIDs
self.targetBSSIDs = targetBSSIDs
self.packetCounter = 0
self.fragment_buffer = {} # Holds buffers (lists), indexed by connection strings (src_mac + ' ' + dst_mac)
def parse_live_capture(self, packet, quite=True):
"""
Function is meant to be passed to Scapy's sniff() function similar to:
		lambda packet: eapeakParser.parse_live_capture(packet, use_curses)
		sniff(iface='mon0', prn=lambda packet: eapeakParser.parse_live_capture(packet, False))
"""
self.parse_wireless_packet(packet)
if quite:
return
sys.stdout.write('Packets: ' + str(self.packetCounter) + ' Wireless Networks: ' + str(len(self.KnownNetworks)) + '\r')
sys.stdout.flush()
def parse_pcap_files(self, pcapFiles, quite=True):
"""
		Take one or more (a list or tuple of) pcap files and parse them
		into the engine.
"""
if not hasattr(pcapFiles, '__iter__'):
if isinstance(pcapFiles, str):
pcapFiles = [pcapFiles]
else:
return
for i in range(0, len(pcapFiles)):
pcap = pcapFiles[i]
pcapName = os.path.split(pcap)[1]
if not quite:
sys.stdout.write("Reading PCap File: {0}\r".format(pcapName))
sys.stdout.flush()
if not os.path.isfile(pcap):
if not quite:
sys.stdout.write("Skipping File {0}: File Not Found\n".format(pcap))
sys.stdout.flush()
continue
elif not os.access(pcap, os.R_OK):
if not quite:
sys.stdout.write("Skipping File {0}: Permissions Issue\n".format(pcap))
sys.stdout.flush()
continue
pcapr = PcapReader(pcap) # pylint: disable=no-value-for-parameter
packet = pcapr.read_packet()
i = 1
try:
while packet:
if not quite:
sys.stdout.write('Parsing File: ' + pcap + ' Packets Done: ' + str(i) + '\r')
sys.stdout.flush()
self.parse_wireless_packet(packet)
packet = pcapr.read_packet()
i += 1
i -= 1
if not quite:
sys.stdout.write((' ' * len('Parsing File: ' + pcap + ' Packets Done: ' + str(i))) + '\r')
sys.stdout.write('Done With File: ' + pcap + ' Read ' + str(i) + ' Packets\n')
sys.stdout.flush()
except KeyboardInterrupt:
if not quite:
sys.stdout.write("Skipping File {0} Due To Ctl+C\n".format(pcap))
sys.stdout.flush()
except: # pylint: disable=bare-except
if not quite:
sys.stdout.write("Skipping File {0} Due To Scapy Exception\n".format(pcap))
sys.stdout.flush()
self.fragment_buffer = {}
pcapr.close()
def parse_xml_files(self, xmlFiles, quite=True):
"""
Load EAPeak/Kismet style XML files for information. This is
faster than parsing large PCap files.
"""
if not hasattr(xmlFiles, '__iter__'):
if isinstance(xmlFiles, str):
xmlFiles = [xmlFiles]
else:
return
for xmlfile in xmlFiles:
if not os.path.isfile(xmlfile):
if not quite:
sys.stdout.write("Skipping File {0}: File Not Found\n".format(xmlfile))
sys.stdout.flush()
continue
elif not os.access(xmlfile, os.R_OK):
if not quite:
sys.stdout.write("Skipping File {0}: Permissions Issue\n".format(xmlfile))
sys.stdout.flush()
continue
sys.stdout.write("Parsing XML File: {0}".format(xmlfile))
sys.stdout.flush()
e = ElementTree.parse(xmlfile)
for network in e.findall('wireless-network'):
ssid = network.find('SSID')
if not ElementTree.iselement(ssid) or not ElementTree.iselement(ssid.find('type')):
continue
elif ssid.find('type').text.strip() != 'Beacon':
continue
ssid = ssid.find('essid')
if ElementTree.iselement(ssid):
if ssid.text is None:
ssid = UNKNOWN_SSID_NAME
else:
ssid = ssid.text.strip()
newNetwork = eapeak.networks.WirelessNetwork(ssid)
else:
continue
self.get_network_info(network, newNetwork, ElementTree, ssid)
for client in network.findall('wireless-client'):
bssid = client.find('client-bssid')
if ElementTree.iselement(bssid):
bssid = bssid.text.strip()
else:
continue
client_mac = client.find('client-mac').text.strip()
newClient = eapeak.clients.WirelessClient(bssid, client_mac)
self.get_client_info(client, newClient, ElementTree)
newNetwork.add_client(newClient)
self.find_certs(network, newNetwork)
if ssid != UNKNOWN_SSID_NAME:
self.KnownNetworks[ssid] = newNetwork
else:
self.KnownNetworks[bssid] = newNetwork
# if ssid == UNKNOWN_SSID_NAME and len(network.findall('BSSID')) > 1:
# there will be an issue with where to store the single network object.
# If there is a client and the network is added to KnownNetworks each time this occurs then the client will appear to under each network but only
# be associated with the single BSSID. This problem needs to be addressed and throughly tested.
sys.stdout.write(" Done\n")
sys.stdout.flush()
def get_network_info(self, network, newNetwork, _ElementTree, ssid):
for bssid in network.findall('BSSID'):
bssid = bssid.text.strip()
newNetwork.add_BSSID(bssid)
if ssid != UNKNOWN_SSID_NAME:
self.BSSIDToSSIDMap[bssid] = ssid
else:
self.BSSIDToSSIDMap[bssid] = bssid
self.OrphanedBSSIDs.append(bssid)
eaptypes = network.find('SSID').find('eap-types')
if ElementTree.iselement(eaptypes):
for eaptype in eaptypes.text.strip().split(','):
if eaptype.isdigit():
newNetwork.addEapType(int(eaptype))
expandedVendorIDs = network.find('SSID').find('expanded-vendor-ids')
if ElementTree.iselement(expandedVendorIDs):
for vendorid in expandedVendorIDs.text.strip().split(','):
if vendorid.isdigit():
newNetwork.add_expanded_vendor_id(int(vendorid))
wpsXMLData = network.find('wps-data')
if ElementTree.iselement(wpsXMLData):
wpsData = wpsDataHolder()
for elem in wpsXMLData:
key = elem.tag.replace('-', ' ')
value = elem.text.strip()
encoding = elem.get('encoding')
if encoding == 'hex':
wpsData[key] = binascii.a2b_hex(value)
elif encoding == 'base64':
wpsData[key] = base64.standard_b64decode(value)
else:
wpsData[key] = value
if len(wpsData):
newNetwork.wpsData = wpsData
def get_client_info(self, client, newClient, _ElementTree):
eaptypes = client.find('eap-types')
if ElementTree.iselement(eaptypes):
eaptypes = eaptypes.text
			if eaptypes is not None:
for eaptype in eaptypes.strip().split(','):
if eaptype.isdigit():
newClient.addEapType(int(eaptype))
identities = client.findall('identity') or []
for identity in identities:
tmp = identity.get('eap-type')
if tmp.isdigit():
newClient.add_identity(int(tmp), identity.text.strip())
mschaps = client.findall('mschap') or []
for mschap in mschaps:
newClient.add_ms_chap_info(
int(mschap.get('eap-type')),
binascii.a2b_hex(mschap.find('challenge').text.strip().replace(':', '')),
binascii.a2b_hex(mschap.find('response').text.strip().replace(':', '')),
mschap.get('identity')
)
wpsXMLData = client.find('wps-data')
if ElementTree.iselement(wpsXMLData):
wpsData = wpsDataHolder()
for elem in wpsXMLData:
key = elem.tag.replace('-', ' ')
value = elem.text.strip()
if elem.get('encoding') == 'hex':
wpsData[key] = binascii.a2b_hex(value)
elif elem.get('encoding') == 'base64':
wpsData[key] = base64.standard_b64decode(value)
else:
wpsData[key] = value
if len(wpsData):
newClient.wpsData = wpsData
def find_certs(self, network, newNetwork):
for cert in network.findall('certificate'):
if cert.get('encoding') == 'DER':
newNetwork.add_certificate(X509.load_cert_string(base64.standard_b64decode(cert.text.strip()), X509.FORMAT_DER))
elif cert.get('encoding') == 'PEM':
newNetwork.add_certificate(X509.load_cert_string(base64.standard_b64decode(cert.text.strip()), X509.FORMAT_PEM))
def export_xml(self, filename=XML_FILE_NAME):
"""
		Exports an XML file that can be reimported with the
		parse_xml_files function.
"""
eapeakXML = ElementTree.Element('detection-run')
eapeakXML.set('eapeak-version', __version__)
eapeakXML.append(ElementTree.Comment(' Summary: Found ' + str(len(self.KnownNetworks)) + ' Network(s) '))
eapeakXML.append(ElementTree.Comment(datetime.datetime.now().strftime(' Created %A %m/%d/%Y %H:%M:%S ')))
networks = self.KnownNetworks.keys()
if not networks:
return
networks.sort()
for network in networks:
eapeakXML.append(self.KnownNetworks[network].get_xml())
xmldata = minidom.parseString(ElementTree.tostring(eapeakXML)).toprettyxml()
if xmldata:
tmpfile = open(filename, 'w')
tmpfile.write(xmldata)
tmpfile.close()
def update_maps(self, packet):
tmp = packet
for x in range(0, SSID_SEARCH_RECURSION): # pylint: disable=unused-variable
if 'ID' in tmp.fields and tmp.fields['ID'] == 0 and 'info' in tmp.fields: # Verifies that we found an SSID
if tmp.fields['info'] == '\x00':
break
bssid = get_bssid(packet)
if (self.targetSSIDs and tmp.fields['info'] not in self.targetSSIDs) or (self.targetBSSIDs and bssid not in self.targetBSSIDs): # Obi says: These are not the SSIDs you are looking for...
break
if not bssid:
return
ssid = ''.join([c for c in tmp.fields['info'] if (ord(c) > 31 or ord(c) == 9) and ord(c) < 128])
if self.targetBSSIDs:
if not self.targetSSIDs:
self.targetSSIDs = []
if ssid not in self.targetSSIDs:
self.targetSSIDs.append(ssid)
if not ssid:
return
if bssid in self.OrphanedBSSIDs: # If this info is relating to a BSSID that was previously considered to be orphaned
newNetwork = self.KnownNetworks[bssid] # Retrieve the old one
del self.KnownNetworks[bssid] # Delete the old network's orphaned reference
self.OrphanedBSSIDs.remove(bssid)
self.BSSIDToSSIDMap[bssid] = ssid # Changes the map from BSSID -> BSSID (for orphans) to BSSID -> SSID
newNetwork.update_SSID(ssid)
if ssid in self.KnownNetworks:
newNetwork = merge_wireless_networks(newNetwork, self.KnownNetworks[ssid])
elif bssid in self.BSSIDToSSIDMap:
continue
elif ssid in self.KnownNetworks: # If this is a BSSID from a probe for an SSID we've seen before
					newNetwork = self.KnownNetworks[ssid]  # Pick up where we left off by using the current state of the WirelessNetwork object
elif bssid:
newNetwork = eapeak.networks.WirelessNetwork(ssid)
self.BSSIDToSSIDMap[bssid] = ssid
newNetwork.add_BSSID(bssid)
self.KnownNetworks[ssid] = newNetwork
del bssid, ssid
break
tmp = tmp.payload
if tmp is None:
break
def parse_wireless_packet(self, packet):
"""
This is the core packet parsing routine. It takes a Scapy style
packet object as an argument.
"""
if packet.name == 'RadioTap dummy':
packet = packet.payload # Offset it so we start with the Dot11 header
shouldStop = False
self.packetCounter += 1
		# This section finds SSIDs in Beacons, Probe Responses and Association Requests
if packet.haslayer('Dot11Beacon') or packet.haslayer('Dot11ProbeResp') or packet.haslayer('Dot11AssoReq'):
self.update_maps(packet)
shouldStop = True
if shouldStop:
return
# This section extracts useful EAP info
cert_layer = None
if 'EAP' in packet:
fields = packet.getlayer('EAP').fields
if fields['code'] not in [1, 2]:
return
eaptype = fields['type']
for x in range(1, 4):
addr = 'addr' + str(x)
if not addr in packet.fields:
return
bssid = get_bssid(packet)
if not bssid:
return
if bssid and not bssid in self.BSSIDToSSIDMap:
self.BSSIDToSSIDMap[bssid] = bssid
self.OrphanedBSSIDs.append(bssid)
self.KnownNetworks[bssid] = eapeak.networks.WirelessNetwork(UNKNOWN_SSID_NAME)
self.KnownNetworks[bssid].add_BSSID(bssid)
network = self.KnownNetworks[self.BSSIDToSSIDMap[bssid]]
client_mac = get_source(packet)
from_AP = False
if client_mac == bssid:
client_mac = get_destination(packet)
from_AP = True
if not bssid or not client_mac:
return
if network.has_client(client_mac):
client = network.get_client(client_mac)
else:
client = eapeak.clients.WirelessClient(bssid, client_mac)
if from_AP:
network.addEapType(eaptype)
elif eaptype > 4:
client.addEapType(eaptype)
elif eaptype == 3 and fields['code'] == 2: # Parses NAKs and attempts to harvest the desired EAP types, RFC 3748
self.get_client_eap_types(fields, client)
if eaptype == 254 and packet.haslayer('EAP_Expanded'):
network.add_expanded_vendor_id(packet.getlayer('EAP_Expanded').vendor_id)
if from_AP:
if packet.haslayer('LEAP'):
self.get_leap_from_ap_data(packet, client)
elif packet.getlayer(EAP).payload.name in ['EAP_TLS', 'EAP_TTLS', 'PEAP', 'EAP_Fast']:
cert_layer = self.get_eap_data(packet, bssid, client_mac)
elif packet.haslayer('EAP_Expanded') and packet.getlayer('EAP_Expanded').vendor_type == 1 and packet.haslayer('WPS') and packet.getlayer('WPS').opcode == 4:
try:
self.get_wps_data(packet, network)
except: # pylint: disable=bare-except
pass
else:
if eaptype == 1 and 'identity' in fields:
client.add_identity(1, fields['identity'])
if packet.haslayer('LEAP'):
self.get_leap_data(packet, client)
elif packet.haslayer('EAP_Expanded') and packet.getlayer('EAP_Expanded').vendor_type == 1 and packet.haslayer('WPS') and packet.getlayer('WPS').opcode == 4:
try:
self.get_client_wps_data(packet, client)
except: # pylint: disable=bare-except
pass # Data is corrupted
network.add_client(client)
if not cert_layer:
shouldStop = True
if shouldStop:
return
if cert_layer and 'certificate' in cert_layer.fields:
self.get_cert_data(network, cert_layer)
return
def get_cert_data(self, network, cert_layer):
cert_data = cert_layer.certificate[3:]
tmp_certs = []
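# TLS Certificate message body: the 3-byte total length was skipped above;
# what remains is a sequence of entries, each a 3-byte big-endian length
# followed by that many bytes of DER-encoded certificate.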
while cert_data:
if len(cert_data) < 4:
break # A 3-byte length field plus at least 1 byte of data requires 4 bytes
tmp_length = struct.unpack('!I', '\x00' + cert_data[:3])[0]
cert_data = cert_data[3:]
if len(cert_data) < tmp_length:
break # I smell corruption
tmp_certs.append(cert_data[:tmp_length])
cert_data = cert_data[tmp_length:]
for certificate in tmp_certs:
try:
certificate = X509.load_cert_string(certificate, X509.FORMAT_DER)
except X509.X509Error:
continue # Skip certificates that fail to parse instead of storing the raw bytes
network.add_certificate(certificate)
def get_client_eap_types(self, fields, client):
if 'eap_types' in fields:
for eap in fields['eap_types']:
client.addEapType(eap)
del eap # pylint: disable=undefined-loop-variable
def get_client_wps_data(self, packet, client):
wpsData = parse_wps_data(packet.getlayer('WPS').data)
if client.wpsData is None:
client.wpsData = wpsData
else:
client.wpsData.update(wpsData)
def get_wps_data(self, packet, network):
wpsData = parse_wps_data(packet.getlayer('WPS').data)
if network.wpsData is None:
network.wpsData = wpsData
else:
network.wpsData.update(wpsData)
def get_eap_data(self, packet, bssid, client_mac):
cert_layer = None
eap_layer = packet.getlayer(EAP).payload
conn_string = bssid + ' ' + client_mac
frag_flag, len_flag = {'EAP_TLS':(64, 128), 'EAP_TTLS':(8, 16), 'PEAP':(16, 32), 'EAP_Fast':(8, 16)}[eap_layer.name]
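# Each EAP method lays out its flags field differently, so the fragmented
# and length-included bit masks are looked up per method above.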
if eap_layer.flags & frag_flag and eap_layer.flags & len_flag:
self.fragment_buffer[conn_string] = [eap_layer]
elif eap_layer.flags & frag_flag:
if conn_string in self.fragment_buffer:
self.fragment_buffer[conn_string].append(eap_layer.payload)
elif eap_layer.flags == 0 and conn_string in self.fragment_buffer:
eap_layer = eap_layer.__class__(''.join([x.do_build() for x in self.fragment_buffer[conn_string]]) + eap_layer.payload.do_build()) # Rebuild the complete message from the buffered fragments plus this final piece
del self.fragment_buffer[conn_string]
if eap_layer.haslayer('TLSv1Certificate'): # At this point, if possible, we should have a fully assembled packet
cert_layer = eap_layer.getlayer('TLSv1Certificate')
del eap_layer, conn_string, frag_flag, len_flag
return cert_layer
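# Reassembly flow implemented above: a packet with both the fragment and
# length bits set starts a new buffer, fragment-only packets are appended
# to it, and a packet with flags == 0 closes the buffer and rebuilds the
# complete message before the certificate layer is extracted.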
def get_leap_data(self, packet, client):
leap_fields = packet.getlayer('LEAP').fields
identity = None
if 'name' in leap_fields:
identity = leap_fields['name']
client.add_identity(17, identity)
if 'data' in leap_fields and len(leap_fields['data']) == 24:
client.add_ms_chap_info(17, response=leap_fields['data'], identity=identity)
del leap_fields, identity
def get_leap_from_ap_data(self, packet, client):
leap_fields = packet.getlayer('LEAP').fields
if 'data' in leap_fields and len(leap_fields['data']) == 8:
client.add_ms_chap_info(17, challenge=leap_fields['data'], identity=leap_fields['name'])
del leap_fields
class CursesEapeakParsingEngine(EapeakParsingEngine):
"""
This engine contains additional methods necessary for the Curses UI.
It is separate from the other class so as not to degrade performance
when Curses is not being used.
"""
def init_curses(self):
"""
This initializes the screen for curses usage. It must be
called before Curses can be used.
"""
self.user_marker_pos = 1 # Used with curses
self.curses_row_offset = 0 # Used for marking the visible rows on the screen to allow scrolling
self.curses_row_offset_store = 0 # Used for storing the row offset when switching from detailed to non-detailed view modes
self.curses_detailed = None # Used with curses
self.screen = curses.initscr()
curses.start_color()
curses.init_pair(1, curses.COLOR_BLUE, curses.COLOR_WHITE)
size = self.screen.getmaxyx()
if size[0] < CURSES_MIN_Y or size[1] < CURSES_MIN_X:
curses.endwin()
return 1
self.curses_max_rows = size[0] - 2 # Minus 2 for the border on the top and bottom
self.curses_max_columns = size[1] - 2
self.screen.border(0)
self.screen.addstr(2, TAB_LENGTH, 'EAPeak Capturing Live')
self.screen.addstr(3, TAB_LENGTH, 'Found 0 Networks')
self.screen.addstr(4, TAB_LENGTH, 'Processed 0 Packets')
self.screen.addstr(self.user_marker_pos + USER_MARKER_OFFSET, TAB_LENGTH, USER_MARKER)
self.screen.refresh()
try:
curses.curs_set(1)
curses.curs_set(0)
except curses.error: # Ignore exceptions from terminals that don't support setting the cursor's visibility
pass
curses.noecho()
curses.cbreak()
self.curses_enabled = True
self.curses_lower_refresh_counter = 1
return 0
def curses_interaction_handler(self, garbage=None):
"""
This is a function meant to be run in a separate thread to
handle human interaction with the curses interface.
"""
while self.curses_enabled:
c = self.screen.getch()
if self.curses_lower_refresh_counter == 0:
continue
size = self.screen.getmaxyx()
if size[0] < CURSES_MIN_Y or size[1] < CURSES_MIN_X:
if not self.resize_dialog():
break
continue
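# 65/66 ('A'/'B') are the final bytes of the up/down arrow escape
# sequences, so the arrow keys work alongside u/U and d/D.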
if c in [65, 117, 85] and len(self.KnownNetworks): # 117 = ord('u')
if self.curses_detailed:
if self.curses_row_offset > 0:
self.curses_row_offset -= 1
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY # Trigger a redraw by adjusting the counter
else:
self.screen.addstr(self.user_marker_pos + USER_MARKER_OFFSET, TAB_LENGTH, ' ' * len(USER_MARKER))
if self.user_marker_pos == 1 and self.curses_row_offset == 0:
pass # Ceiling
elif self.user_marker_pos == 1 and self.curses_row_offset:
self.curses_row_offset -= 1
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY
else:
self.user_marker_pos -= 1
self.screen.addstr(self.user_marker_pos + USER_MARKER_OFFSET, TAB_LENGTH, USER_MARKER)
elif c in [66, 100, 68] and len(self.KnownNetworks): # 100 = ord('d')
if self.curses_detailed:
self.curses_row_offset += 1
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY # Trigger a redraw by adjusting the counter
else:
if self.user_marker_pos + self.curses_row_offset == len(self.KnownNetworks):
continue # Floor
self.screen.addstr(self.user_marker_pos + USER_MARKER_OFFSET, TAB_LENGTH, ' ' * len(USER_MARKER))
if self.user_marker_pos + USER_MARKER_OFFSET == self.curses_max_rows - 1:
self.curses_row_offset += 1
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY
else:
self.user_marker_pos += 1
self.screen.addstr(self.user_marker_pos + USER_MARKER_OFFSET, TAB_LENGTH, USER_MARKER)
elif c in [10, 105, 73]: # 105 = ord('i')
self.curses_row_offset_store = (self.curses_row_offset_store ^ self.curses_row_offset)
self.curses_row_offset = (self.curses_row_offset ^ self.curses_row_offset_store)
self.curses_row_offset_store = (self.curses_row_offset_store ^ self.curses_row_offset)
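# The three XOR assignments above swap curses_row_offset and
# curses_row_offset_store without a temporary variable.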
if self.curses_detailed:
self.curses_detailed = None
self.screen.addstr(self.user_marker_pos + USER_MARKER_OFFSET, TAB_LENGTH, USER_MARKER)
self.screen.refresh()
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY # Trigger a redraw by adjusting the counter
elif 0 <= (self.user_marker_pos - 1 + self.curses_row_offset) < len(self.KnownNetworks):
self.curses_detailed = self.KnownNetworks.keys()[(self.user_marker_pos - 1) + self.curses_row_offset_store]
self.screen.refresh()
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY # Trigger a redraw by adjusting the counter
elif c in [113, 81]: # 113 = ord('q')
self.curses_lower_refresh_counter = 0
subwindow = self.screen.subwin(6, 40, (self.curses_max_rows / 2 - 3), (self.curses_max_columns / 2 - 20))
subwindow.erase()
subwindow.addstr(2, 11, 'Really Quit? (y/N)')
subwindow.border(0)
subwindow.refresh()
subwindow.overlay(self.screen)
c = subwindow.getch()
if c in [121, 89]:
break
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY
elif c in [104, 72]: # 104 = ord('h')
self.curses_lower_refresh_counter = 0
subwindow = self.screen.subwin(10, 40, (self.curses_max_rows / 2 - 5), (self.curses_max_columns / 2 - 20))
subwindow.erase()
subwindow.addstr(1, 15, 'Help Menu')
subwindow.addstr(2, 9, 'EAPeak Version: ' + __version__)
subwindow.addstr(4, 2, 'i/Enter : Toggle View')
subwindow.addstr(5, 2, 'q : Quit')
subwindow.addstr(6, 2, 'e : Export Users For The')
subwindow.addstr(7, 2, ' Selected Network')
subwindow.border(0)
subwindow.refresh()
subwindow.overlay(self.screen)
c = subwindow.getch()
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY
elif c in [101, 69]: # 101 = ord('e')
usernames = []
if self.curses_detailed in self.KnownNetworks:
network = self.KnownNetworks[self.curses_detailed]
else:
network = self.KnownNetworks.values()[self.user_marker_pos - 1 + self.curses_row_offset]
filename = network.ssid + '_users.txt'
if network.clients:
for client in network.clients.values():
usernames.extend(client.identities.keys())
try:
filehandle = open(filename, 'w')
filehandle.write("\n".join(usernames) + '\n')
filehandle.close()
message = 'Successfully Saved'
except: # pylint: disable=bare-except
message = 'Failed To Save'
else:
message = 'No ID Strings'
self.curses_lower_refresh_counter = 0
subwindow = self.screen.subwin(10, 40, (self.curses_max_rows / 2 - 5), (self.curses_max_columns / 2 - 20))
subwindow.erase()
subwindow.addstr(2, 2, 'File: ' + filename)
subwindow.addstr(3, 2, message)
subwindow.addstr(6, 8, 'Press Any Key To Continue')
subwindow.border(0)
subwindow.refresh()
subwindow.overlay(self.screen)
c = subwindow.getch()
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY
self.cleanup_curses()
return
def curses_screen_draw_handler(self, save_to_xml):
"""
This is a function meant to be run in a separate thread to
handle drawing the curses interface to the screen.
"""
while self.curses_enabled:
time.sleep(CURSES_REFRESH_FREQUENCY)
if self.curses_lower_refresh_counter == 0: # used to trigger pauses
continue
size = self.screen.getmaxyx()
if size[0] < CURSES_MIN_Y or size[1] < CURSES_MIN_X:
if not self.resize_dialog():
break
continue
self.screen.refresh()
self.screen.addstr(2, 4, 'EAPeak Capturing Live') # This is all static, so don't use the messages queue
self.screen.addnstr(3, 4, 'Found ' + str(len(self.KnownNetworks)) + ' Networks', 25)
self.screen.addnstr(4, 4, "Processed {0} Packets".format(self.packetCounter), 30)
self.screen.addstr(6, 4, 'Network Information:')
if self.curses_lower_refresh_counter == CURSES_LOWER_REFRESH_FREQUENCY:
self.curses_lower_refresh_counter = 1
self.screen.move(7, 0)
self.screen.clrtobot()
if save_to_xml:
self.export_xml()
else:
self.curses_lower_refresh_counter += 1
continue
messages = []
ssids = self.KnownNetworks.keys()
if self.curses_detailed and self.curses_detailed in self.KnownNetworks:
network = self.KnownNetworks[self.curses_detailed]
messages.append((TAB_LENGTH, 'SSID: ' + network.ssid))
messages.append(CURSES_LINE_BREAK)
messages.append((TAB_LENGTH, 'BSSIDs:'))
for bssid in network.bssids:
messages.append((TAB_DEPTH_2, bssid))
messages.append(CURSES_LINE_BREAK)
self.get_network_info(messages, network)
messages.append(CURSES_LINE_BREAK)
self.get_network_data(messages, network)
if network.x509certs:
messages.append(CURSES_LINE_BREAK)
messages.append((TAB_LENGTH, 'Certificates:'))
i = 1
self.get_certs(messages, network, i)
messages.pop() # trash the trailing line break
# message queue is built, now adjust it to be printed to the screen
self.set_max_offset(len(messages) - (self.curses_max_rows - 7))
for i in range(0, self.curses_row_offset):
messages.pop(0)
self.screen.border(0)
else:
messages.append((TAB_DEPTH_2, 'SSID:' + ' ' * (SSID_MAX_LENGTH + 2) + 'EAP Types:'))
if self.curses_row_offset:
messages.append((TAB_DEPTH_2, '[ MORE ]'))
else:
messages.append((TAB_DEPTH_2, ' '))
for i in range(self.curses_row_offset, len(ssids)):
if len(messages) > self.curses_max_rows - 8:
messages.append((TAB_DEPTH_2, '[ MORE ]'))
break
network = self.KnownNetworks[ssids[i]]
self.get_network_eap(network, messages, i)
if not len(messages) > self.curses_max_rows - 2:
messages.append((TAB_DEPTH_2, ' '))
self.screen.border(0)
self.screen.addstr(self.user_marker_pos + USER_MARKER_OFFSET, TAB_LENGTH, USER_MARKER)
line = 7
try:
for message in messages:
self.screen.addnstr(line, message[0], message[1], self.curses_max_columns - message[0])
line += 1
if line > self.curses_max_rows:
break # Fail safe
except curses.error:
pass
self.cleanup_curses()
return
def get_network_data(self, messages, network):
if network.wpsData:
the_cheese_stands_alone = True
for piece in ['Manufacturer', 'Model Name', 'Model Number', 'Device Name']:
if network.wpsData.has_key(piece):
if the_cheese_stands_alone:
messages.append((TAB_LENGTH, 'WPS Information:'))
the_cheese_stands_alone = False
messages.append((TAB_DEPTH_2, piece + ': ' + network.wpsData[piece]))
if not the_cheese_stands_alone:
messages.append(CURSES_LINE_BREAK)
del the_cheese_stands_alone, piece # pylint: disable=undefined-loop-variable
if network.clients:
messages.append((TAB_LENGTH, 'Clients: '))
clients = network.clients.values()
for i in range(0, len(clients)):
client = clients[i]
messages.append((TAB_DEPTH_2, 'Client ' + str(i + 1) + ') MAC: ' + client.mac))
if client.eapTypes:
self.get_client_eap(client, messages)
else:
messages.append((TAB_DEPTH_2, 'EAP Types: [ UNKNOWN ]'))
if client.identities:
messages.append((TAB_DEPTH_2, 'Identities:'))
for ident, eap in client.identities.items():
messages.append((TAB_DEPTH_3, '(' + EAP_TYPES[eap] + ') ' + ident))
if client.mschap:
first = True
for value in client.mschap:
if 'r' not in value:
continue
if first:
messages.append((TAB_DEPTH_2, 'MSChap:'))
first = False
messages.append((TAB_DEPTH_3, 'EAP Type: ' + EAP_TYPES[value['t']] + ', Identity: ' + value['i']))
messages.append((TAB_DEPTH_3, 'C: ' + value['c']))
messages.append((TAB_DEPTH_3, 'R: ' + value['r']))
del first
if client.wpsData:
the_cheese_stands_alone = True
for piece in ['Manufacturer', 'Model Name', 'Model Number', 'Device Name']:
if client.wpsData.has_key(piece):
if the_cheese_stands_alone:
messages.append((TAB_DEPTH_2, 'WPS Information:'))
the_cheese_stands_alone = False
messages.append((TAB_DEPTH_3, piece + ': ' + client.wpsData[piece]))
del the_cheese_stands_alone, piece # pylint: disable=undefined-loop-variable
messages.append(CURSES_LINE_BREAK)
messages.pop() # trash the trailing line break
del clients # pylint: disable=undefined-loop-variable
else:
messages.append((TAB_LENGTH, 'Clients: [ NONE ]'))
def get_network_info(self, messages, network):
tmpEapTypes = []
if network.eapTypes:
for eType in network.eapTypes:
if eType in EAP_TYPES:
tmpEapTypes.append(EAP_TYPES[eType])
else:
tmpEapTypes.append(str(eType))
if tmpEapTypes:
messages.append((TAB_LENGTH, 'EAP Types: ' + ", ".join(tmpEapTypes)))
else:
messages.append((TAB_LENGTH, 'EAP Types: [ NONE ]'))
tmpVendorIDs = []
if network.expandedVendorIDs:
for vType in network.expandedVendorIDs:
if vType in EXPANDED_EAP_VENDOR_IDS:
tmpVendorIDs.append(EXPANDED_EAP_VENDOR_IDS[vType])
else:
tmpVendorIDs.append(str(vType))
if tmpVendorIDs:
messages.append((TAB_LENGTH, 'Expanded Vendor IDs: ' + ", ".join(tmpVendorIDs)))
del tmpEapTypes, tmpVendorIDs
def set_max_offset(self, max_offset):
if max_offset < 0:
max_offset = 0
if self.curses_row_offset > max_offset:
self.curses_row_offset = max_offset
def get_network_eap(self, network, messages, i):
tmpEapTypes = []
if network.eapTypes:
for eType in network.eapTypes:
if eType in EAP_TYPES:
tmpEapTypes.append(EAP_TYPES[eType])
else:
tmpEapTypes.append(str(eType))
messages.append((TAB_DEPTH_2, str(i + 1) + ') ' + network.ssid + ' ' * (SSID_MAX_LENGTH - len(network.ssid) + 3) + ", ".join(tmpEapTypes)))
def get_client_eap(self, client, messages):
tmpEapTypes = []
for y in client.eapTypes:
if y in EAP_TYPES:
tmpEapTypes.append(EAP_TYPES[y])
else:
tmpEapTypes.append(str(y))
messages.append((TAB_DEPTH_2, 'EAP Types: ' + ", ".join(tmpEapTypes)))
def get_certs(self, messages, network, i):
for cert in network.x509certs:
messages.append((TAB_DEPTH_2, 'Certificate ' + str(i) + ') Expiration Date: ' + str(cert.get_not_after())))
data = cert.get_issuer()
messages.append((TAB_DEPTH_2, 'Issuer:'))
for X509_Name_Entry_inst in data.get_entries_by_nid(13): # 13 is CN
messages.append((TAB_DEPTH_3, 'CN: ' + X509_Name_Entry_inst.get_data().as_text()))
for X509_Name_Entry_inst in data.get_entries_by_nid(18): # 18 is OU
messages.append((TAB_DEPTH_3, 'OU: ' + X509_Name_Entry_inst.get_data().as_text()))
data = cert.get_subject()
messages.append((TAB_DEPTH_2, 'Subject:'))
for X509_Name_Entry_inst in data.get_entries_by_nid(13): # 13 is CN
messages.append((TAB_DEPTH_3, 'CN: ' + X509_Name_Entry_inst.get_data().as_text()))
for X509_Name_Entry_inst in data.get_entries_by_nid(18): # 18 is OU
messages.append((TAB_DEPTH_3, 'OU: ' + X509_Name_Entry_inst.get_data().as_text()))
del data
i += 1
messages.append(CURSES_LINE_BREAK)
def parse_live_capture(self, packet, quiet=True):
"""
This function is meant to be passed to Scapy's sniff() function, e.g.:
sniff(iface = 'mon0', prn = lambda packet: eapeakParser.parse_live_capture(packet, False))
"""
self.parse_wireless_packet(packet)
if self.curses_enabled or quiet:
return
sys.stdout.write('Packets: ' + str(self.packetCounter) + ' Wireless Networks: ' + str(len(self.KnownNetworks)) + '\r')
sys.stdout.flush()
def resize_dialog(self):
"""
This is a dialog used to warn the user when a screen resize
event has made the screen too small for use.
"""
self.curses_lower_refresh_counter = 0
size = self.screen.getmaxyx()
self.screen.erase()
self.screen.addstr(0, 0, 'Screen Too Small, Requires')
self.screen.addstr(1, 0, 'At Least: ' + str(CURSES_MIN_X) + 'x' + str(CURSES_MIN_Y))
self.screen.refresh()
while size[0] < CURSES_MIN_Y or size[1] < CURSES_MIN_X:
if size[0] < 2 or size[1] < 26:
return False
size = self.screen.getmaxyx()
self.screen.refresh() # This has to be here
self.screen.erase()
self.screen.refresh()
self.curses_lower_refresh_counter = CURSES_LOWER_REFRESH_FREQUENCY # Trigger a redraw by adjusting the counter
self.curses_max_rows = size[0] - 2 # Minus 2 for the border on the top and bottom
self.curses_max_columns = size[1] - 2
return True
def cleanup_curses(self):
"""
This cleans up the curses interface and resets things back to
normal.
"""
if not self.curses_enabled:
return
self.screen.erase()
del self.screen
curses.endwin()
curses.echo()
self.curses_enabled = False
| 37.905991 | 195 | 0.705335 | 5,791 | 41,128 | 4.840442 | 0.136419 | 0.02854 | 0.021833 | 0.018301 | 0.4322 | 0.367771 | 0.322108 | 0.292141 | 0.280547 | 0.26674 | 0 | 0.017524 | 0.179999 | 41,128 | 1,084 | 196 | 37.940959 | 0.81364 | 0.165605 | 0 | 0.379956 | 0 | 0 | 0.072758 | 0 | 0 | 0 | 0.003353 | 0 | 0 | 1 | 0.04185 | false | 0.006608 | 0.022026 | 0 | 0.104626 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0d578e27a40851cab52e76a2682cae56e7f828c8 | 50,422 | py | Python | site.py | ahmetfurkaann/Firma-Website | 6f4318f31a8c660de0e41c61929ea6d09c722600 | ["Apache-2.0"] | null | null | null | site.py | ahmetfurkaann/Firma-Website | 6f4318f31a8c660de0e41c61929ea6d09c722600 | ["Apache-2.0"] | null | null | null | site.py | ahmetfurkaann/Firma-Website | 6f4318f31a8c660de0e41c61929ea6d09c722600 | ["Apache-2.0"] | null | null | null | from re import U
from sys import unraisablehook
from flask import Flask, g,render_template,flash,redirect,url_for,session,logging,request
from flask_mysqldb import MySQL
from wtforms import Form,StringField,TextAreaField,PasswordField,validators
from passlib.hash import sha256_crypt
from functools import wraps
# Login-required decorator
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if "logged_in" in session:
return f(*args, **kwargs)
else:
flash("Bu sayfayı görüntülemek için lütfen giriş yapın.","danger")
return redirect(url_for("forbidden")) # YETKİSİZ ERİŞİMDE TEKRARDAN LOGIN SAYFASINA YÖNLENDİRİLİYORUZ
return decorated_function
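# A minimal usage sketch for the decorator (hypothetical route, for illustration only):
# @app.route("/ornek")
# @login_required
# def ornek():
#     return render_template("ornek.html")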
app = Flask(__name__)
app.secret_key = "ahmetfurkandb"
app.config["MYSQL_HOST"] = "localhost"
app.config["MYSQL_USER"] = "root"
app.config["MYSQL_PASSWORD"] = ""
app.config["MYSQL_DB"] = "ahmet_furkan_db"
app.config["MYSQL_CURSORCLASS"] = "DictCursor"
mysql = MySQL(app)
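# With DictCursor, fetchone()/fetchall() return dicts keyed by column name
# (e.g. data["password"] in login below) instead of plain tuples.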
# Dashboard
@app.route("/dashboard")
@login_required
def dashboard():
return render_template("dashboard.html")
@app.errorhandler(404)
def page_not_found(error):
return render_template('404notfound.html'), 404
@app.route("/403forbidden")
def forbidden():
return render_template("403forbidden.html")
# Logout
@app.route("/logout")
def logout():
session.clear() # ALWAYS CLEAR THE SESSION HERE, DO NOT FORGET
return redirect(url_for("index"))
# Index
@app.route("/")
def index():
cursor = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
elemansayisi = "Select * From eleman"
koronasayisi = "select DISTINCT tc_no from covid"
result = cursor.execute(elemansayisi)
result2 = cursor2.execute(koronasayisi)
if result > 0 or result2 > 0:
data = cursor.fetchall()
data2 = cursor2.fetchall()
eleman_sayisi = len(data)
korona_sayisi = len(data2)
return render_template("index.html",eleman_sayisi = eleman_sayisi,korona_sayisi=korona_sayisi)
else:
return render_template("index.html")
# Registration
@app.route("/register",methods = ["GET", "POST"])
@login_required
def register():
if request.method == "POST": # KAYIT OLMA GİBİ BİR VERİ YOLLAMAYA POST REQUEST DENİR
name = request.form.get('name')
email = request.form.get('eposta')
username = request.form.get('username')
password = sha256_crypt.encrypt(request.form.get('password'))
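# sha256_crypt.encrypt produces a salted hash; only the hash is stored, and
# sha256_crypt.verify(candidate, stored_hash) checks it at login below.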
cursor = mysql.connection.cursor()
sorgu = "Insert into users(name,email,username,password) VALUES(%s,%s,%s,%s)"
cursor.execute(sorgu,(name,email,username,password))
mysql.connection.commit()
cursor.close()
flash("Başarılı bir şekilde kayıt oldunuz","success")
return redirect(url_for("login")) # LOGİN FONKSİYONUNA İLİŞKİN URL ADRESE GİTTİK
else: # SUNUCUDAN BİR VERİ İSTERSSEK DE BUNA GET REQUEST DENİR
return render_template("register.html")
# Login
@app.route("/login",methods = ["GET","POST"])
def login():
if request.method == "POST":
username = request.form.get('username')
password_entered= request.form.get('password')
cursor = mysql.connection.cursor() # PROVIDES A CURSOR FOR TRAVERSING MYSQL
sorgu = "Select * From users where username = %s"
result = cursor.execute(sorgu,(username,)) # PARAMETERS MUST BE PASSED AS A TUPLE, HENCE THE COMMA AFTER username
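# cursor.execute returns the number of rows the SELECT matched, so a
# positive result means the username exists.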
if result > 0:
data = cursor.fetchone() # WE NOW HAVE ALL OF THE USER'S DATA (NAME, USERNAME, PASSWORD, EMAIL)
real_password = data["password"] # ŞİFRELENMİŞ ŞİFRE
if(sha256_crypt.verify(password_entered,real_password)):
flash("Başarılı bir şekilde giriş yaptınız.","success")
session["logged_in"] = True
session["username"] = username
return redirect(url_for("index"))
else:
flash("Parolayı yanlış girdiniz","danger")
return redirect(url_for("login"))
else:
flash("Böyle bir kullanıcı bulunmuyor...","danger")
return redirect(url_for("login"))
return render_template("login.html")
# Add employee
@app.route("/veri/1",methods = ["GET","POST"])
@login_required
def istatistik1():
if request.method == "POST":
tcno = request.form.get('tcno')
isim = request.form.get('isim')
soyisim = request.form.get('soyisim')
kangrubu = request.form.get('kangrubu')
sehir = request.form.get('sehir')
pozisyon = request.form.get('pozisyon')
maas = request.form.get('maas')
lisans = request.form.get('lisans')
yukseklisans = request.form.get('yukseklisans')
doktora = request.form.get('doktora')
asidurumu = request.form.get('asidurumu')
cursor = mysql.connection.cursor()
sorgu = "Insert into eleman(tc_no,isim,soyisim,kan_grubu,dogum_yeri,pozisyon,maas,lisans,yuksek_lisans,doktora,asi_id) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
cursor.execute(sorgu,(tcno,isim,soyisim,kangrubu,sehir,pozisyon,maas,lisans,yukseklisans,doktora,asidurumu))
mysql.connection.commit()
cursor.close()
flash("Çalışan başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik1"))
else:
return render_template("veri1.html")
# Deleting and updating employees
@app.route("/veri/2")
@login_required
def istatistik2():
cursor = mysql.connection.cursor()
sorgu = "Select * From eleman order by tc_no"
result = cursor.execute(sorgu)
if result > 0:
elemanlar = cursor.fetchall()
return render_template("veri2.html",elemanlar = elemanlar)
else:
return render_template("veri2.html")
@app.route("/veri/2/delete/<string:id>")
@login_required
def delete(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from eleman where eleman_id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from eleman where eleman_id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik2"))
else:
flash("Bu numaralı id'de eleman bulunmamaktadır.")
return redirect(url_for("index"))
# THE EDIT VIEW AT VERI/2/EDIT/X FETCHES THE RECORD AND SENDS IT TO THE TABLE IN VERI3.HTML,
# SO VERI3.HTML IS REQUIRED. DO NOT DELETE IT.
@app.route("/veri/2/edit/<string:id>",methods=["GET","POST"])
@login_required
def edit(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM eleman WHERE eleman_id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
return render_template('veri3.html',employee=data[0])
else:
newtcno = request.form['tcno']
newisim = request.form['isim']
newsoyisim = request.form['soyisim']
newkangrubu = request.form['kangrubu']
newdogumyeri = request.form['sehir']
newpozisyon = request.form['pozisyon']
newmaas = request.form['maas']
newlisans = request.form['lisans']
newyukseklisans = request.form['yukseklisans']
newdoktora = request.form['doktora']
newasidurumu = request.form['asidurumu']
cursor = mysql.connection.cursor()
sorgu = "UPDATE eleman SET tc_no = %s, isim = %s, soyisim = %s, kan_grubu = %s, dogum_yeri = %s, pozisyon = %s, maas = %s, lisans = %s, yuksek_lisans = %s,doktora = %s,asi_id = %s WHERE eleman_id = %s"
result = cursor.execute(sorgu,(newtcno,newisim,newsoyisim,newkangrubu,newdogumyeri,newpozisyon,newmaas,newlisans,newyukseklisans,newdoktora,newasidurumu,id))
mysql.connection.commit()
if result > 0:
flash("Çalışan başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik2"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik2"))
# ADD ILLNESS RECORD
@app.route("/veri/4",methods=["GET", "POST"])
@login_required
def istatistik4():
if request.method == "POST":
tcno = request.form.get('tcno')
hastalikadi = request.form.get('hastalikadi')
hastaliktarihi = request.form.get('hastaliktarihi')
hastaliktarihiTrue = hastaliktarihi.replace(".", "-") # The illness date arrives as DD.MM.YYYY; convert it to DD-MM-YYYY
ilac = request.form.get('ilac')
doz = request.form.get('doz')
semptom = request.form.get('semptom')
cursor = mysql.connection.cursor()
sorgu = "Insert into hasta(tc_no,hastalik_adi,hastalik_tarihi,ilac,doz,semptomlar) VALUES(%s,%s,%s,%s,%s,%s)"
cursor.execute(sorgu,(tcno,hastalikadi,hastaliktarihiTrue,ilac,doz,semptom))
mysql.connection.commit()
cursor.close()
flash("Çalışanın hastalığı başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik4"))
else:
return render_template("veri4.html")
# Illness table: view, delete and update
# ILLNESS DELETION IS WORKING
@app.route("/veri/5")
@login_required
def istatistik5():
cursor = mysql.connection.cursor()
sorgu = "Select * From hasta order by tc_no"
result = cursor.execute(sorgu)
if result > 0:
hastalik = cursor.fetchall()
return render_template("veri5.html",hastalik = hastalik)
else:
return render_template("veri5.html")
@app.route("/veri/5/delete/<string:id>")
@login_required
def hastaliksil(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from hasta where id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from hasta where id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik4"))
else:
flash("Bu numaralı id'de hastalık bulunmamaktadır.")
return redirect(url_for("index"))
@app.route("/veri/5/edit/<string:id>",methods=["GET","POST"])
@login_required
def hastalikduzenle(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM hasta WHERE id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
print(data[0])
return render_template('veri6.html',hasta=data[0])
else:
newtcno = request.form['tcno']
newhastalikadi = request.form['hastalikadi']
newhastaliktarihi = request.form['hastaliktarihi']
newilac = request.form['ilac']
newdoz = request.form['doz']
newsemptom = request.form.get("semptomlar")
cursor = mysql.connection.cursor()
sorgu = "UPDATE hasta SET tc_no = %s, hastalik_adi = %s, hastalik_tarihi = %s, ilac = %s, doz = %s, semptomlar = %s WHERE id = %s"
result = cursor.execute(sorgu,(newtcno,newhastalikadi,newhastaliktarihi,newilac,newdoz,newsemptom,id))
mysql.connection.commit()
if result > 0:
flash("Hastalık başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik5"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik5"))
# ADD COVID RECORD
@app.route("/veri/7",methods=["GET", "POST"])
@login_required
def istatistik7():
if request.method == "POST":
tcno = request.form.get('tcno')
yakalandigitarih = request.form.get('yakalandigitarih')
negatiftarih = request.form.get('negatiftarih')
yakalandigitarihTrue = yakalandigitarih.replace(".", "-") # The date arrives as DD.MM.YYYY; convert it to DD-MM-YYYY
negatiftarihTrue = negatiftarih.replace(".", "-") # Same DD.MM.YYYY to DD-MM-YYYY conversion
asidurumu = request.form.get('asidurumu')
cursor = mysql.connection.cursor()
sorgu = "Insert into covid(tc_no,yakalandigi_tarih,negatif_tarihi,asi_id) VALUES(%s,%s,%s,%s)"
cursor.execute(sorgu,(tcno,yakalandigitarihTrue,negatiftarihTrue,asidurumu))
mysql.connection.commit()
cursor.close()
flash("Çalışanın covid bilgisi başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik7"))
else:
return render_template("veri7.html")
# View, update and delete an employee's COVID record
@app.route("/veri/8")
@login_required
def istatistik8():
cursor = mysql.connection.cursor()
sorgu = "Select * From covid order by tc_no"
result = cursor.execute(sorgu)
if result > 0:
covid = cursor.fetchall()
return render_template("veri8.html",covid = covid)
else:
return render_template("veri8.html")
@app.route("/veri/8/delete/<string:id>")
@login_required
def covidsil(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from covid where id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from covid where id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik8"))
else:
flash("Bu numaralı id'de hastalık bulunmamaktadır.")
return redirect(url_for("istatistik8"))
@app.route("/veri/8/edit/<string:id>",methods=["GET","POST"])
@login_required
def covidduzenle(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM covid WHERE id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
print(data[0])
return render_template('veri9.html',covid=data[0])
else:
newtcno = request.form['tcno']
newyakalanigitarih = request.form['yakalandigitarih']
newnegatiftarih = request.form['negatiftarih']
newasidurumu = request.form['asidurumu']
cursor = mysql.connection.cursor()
sorgu = "UPDATE covid SET tc_no = %s, yakalandigi_tarih = %s, negatif_tarihi = %s, asi_id = %s WHERE id = %s"
result = cursor.execute(sorgu,(newtcno,newyakalanigitarih,newnegatiftarih,newasidurumu,id))
mysql.connection.commit()
if result > 0:
flash("COVID bilgisi başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik8"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik8"))
# Add contact-tracing record
@app.route("/veri/23",methods = ["GET","POST"])
@login_required
def istatistik23():
if request.method == "POST":
tcno = request.form.get('tcno')
temaslitc = request.form.get('temaslitc')
sorgu = "Insert into temasli_calisanlar(tc_no,temasli_tcno) VALUES(%s,%s)"
cursor = mysql.connection.cursor()
cursor.execute(sorgu,(tcno,temaslitc,))
mysql.connection.commit()
cursor.close()
flash("Temaslı bilgisi başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik23"))
else:
return render_template("veri23.html")
# View, update and delete an employee's contact-tracing records
@app.route("/veri/21")
@login_required
def istatistik21():
cursor = mysql.connection.cursor()
sorgu = "Select * From temasli_calisanlar"
result = cursor.execute(sorgu)
if result > 0:
temasli = cursor.fetchall()
return render_template("veri21.html",temasli = temasli)
else:
return render_template("veri21.html")
@app.route("/veri/21/delete/<string:id>")
@login_required
def temaslisil(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from temasli_calisanlar where id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from temasli_calisanlar where id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik21"))
else:
flash("Bu numaralı id'e ait bir kişi bulunmamaktadır.")
return redirect(url_for("istatistik21"))
@app.route("/veri/21/edit/<string:id>",methods=["GET","POST"])
@login_required
def temasliduzenle(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM temasli_calisanlar WHERE id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
return render_template('veri22.html',temasli=data[0])
else:
newtcno = request.form['tcno']
newtemaslitc = request.form['temaslitc']
cursor = mysql.connection.cursor()
sorgu = "UPDATE covid SET tc_no = %s, temasli_tcno = %s WHERE id = %s"
result = cursor.execute(sorgu,(newtcno,newtemaslitc,id))
mysql.connection.commit()
if result > 0:
flash("Temaslı bilgisi başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik21"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik21"))
# Add symptom record
@app.route("/veri/24",methods=["GET", "POST"])
@login_required
def istatistik24():
if request.method == "POST":
tcno = request.form.get('tcno')
belirti = request.form.get('belirti')
cursor = mysql.connection.cursor()
sorgu = "Insert into belirtiler(tc_no,belirti_ismi) VALUES(%s,%s)"
cursor.execute(sorgu,(tcno,belirti))
mysql.connection.commit()
cursor.close()
flash("Çalışanın belirti bilgisi başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik24"))
else:
return render_template("veri24.html")
# Updating and deleting symptom records
@app.route("/veri/25")
@login_required
def istatistik25():
cursor = mysql.connection.cursor()
sorgu = "Select * From belirtiler order by tc_no"
result = cursor.execute(sorgu)
if result > 0:
belirtiler = cursor.fetchall()
return render_template("veri25.html",belirtiler = belirtiler)
else:
return render_template("veri25.html")
@app.route("/veri/25/delete/<string:id>")
@login_required
def belirtisil(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from belirtiler where id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from belirtiler where id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik25"))
else:
flash("Bu numaralı id'de bir belirti bulunmamaktadır.")
return redirect(url_for("istatistik25"))
@app.route("/veri/25/edit/<string:id>",methods=["GET","POST"])
@login_required
def belirtiduzenle(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM belirtiler WHERE id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
return render_template('veri26.html',belirtiler=data[0])
else:
newtcno = request.form['tcno']
newbelirti = request.form['belirti']
cursor = mysql.connection.cursor()
sorgu = "UPDATE belirtiler SET tc_no = %s, belirti_ismi = %s WHERE id = %s"
result = cursor.execute(sorgu,(newtcno,newbelirti,id))
mysql.connection.commit()
if result > 0:
flash("Belirti bilgisi başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik25"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik25"))
# Add chronic illness record
@app.route("/veri/27",methods=["GET", "POST"])
@login_required
def istatistik27():
if request.method == "POST":
tcno = request.form.get('tcno')
kronik = request.form.get('kronik')
cursor = mysql.connection.cursor()
sorgu = "Insert into kronik_hastaliklar(tc_no,kronik_hastaligi) VALUES(%s,%s)"
cursor.execute(sorgu,(tcno,kronik))
mysql.connection.commit()
cursor.close()
flash("Kronik hastalık bilgisi başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik27"))
else:
return render_template("veri27.html")
# Updating and deleting chronic illness records
@app.route("/veri/28")
@login_required
def istatistik28():
cursor = mysql.connection.cursor()
sorgu = "Select * From kronik_hastaliklar"
result = cursor.execute(sorgu)
if result > 0:
kronik = cursor.fetchall()
return render_template("veri28.html",kronik = kronik)
else:
return render_template("veri28.html")
@app.route("/veri/28/delete/<string:id>")
@login_required
def kroniksil(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from kronik_hastaliklar where id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from kronik_hastaliklar where id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik28"))
else:
flash("Bu numaralı id'de bir belirti bulunmamaktadır.")
return redirect(url_for("istatistik28"))
@app.route("/veri/28/edit/<string:id>",methods=["GET","POST"])
@login_required
def kronikduzenle(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM kronik_hastaliklar WHERE id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
return render_template('veri29.html',kronik=data[0])
else:
newtcno = request.form['tcno']
newkronik = request.form['kronik']
cursor = mysql.connection.cursor()
sorgu = "UPDATE kronik_hastaliklar SET tc_no = %s, kronik_hastaligi = %s WHERE id = %s"
result = cursor.execute(sorgu,(newtcno,newkronik,id))
mysql.connection.commit()
if result > 0:
flash("Kronik hastalık bilgisi başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik28"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik28"))
# Add an employee's working-hours record
@app.route("/veri/10",methods=["GET", "POST"])
@login_required
def istatistik10():
if request.method == "POST":
tcno = request.form.get('tcno')
haftaicigiris = request.form.get('haftaicigiris')
haftaicicikis = request.form.get('haftaicicikis')
cumartesigiris = request.form.get('cumartesigiris')
cumartesicikis = request.form.get('cumartesicikis')
pazargiris = request.form.get('pazargiris')
pazarcikis = request.form.get('pazarcikis')
cursor = mysql.connection.cursor()
sorgu = "Insert into calisma_sureleri(tc_no,haftaicigiris,haftaicicikis,cumartesigiris,cumartesicikis,pazargiris,pazarcikis) VALUES(%s,%s,%s,%s,%s,%s,%s)"
cursor.execute(sorgu,(tcno,haftaicigiris,haftaicicikis,cumartesigiris,cumartesicikis,pazargiris,pazarcikis))
mysql.connection.commit()
cursor.close()
flash("Çalışanın çalışma bilgisi başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik10"))
else:
return render_template("veri10.html")
# Updating and deleting an employee's working-hours record
@app.route("/veri/11")
@login_required
def istatistik11():
cursor = mysql.connection.cursor()
sorgu = "Select * From calisma_sureleri order by tc_no"
result = cursor.execute(sorgu)
if result > 0:
gun = cursor.fetchall()
return render_template("veri11.html",gun = gun)
else:
return render_template("veri11.html")
@app.route("/veri/11/delete/<string:id>")
@login_required
def calismasaatisil(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from calisma_sureleri where id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from calisma_sureleri where id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik11"))
else:
flash("Bu numaralı id'de bir çalışma süresi bulunmamaktadır.")
return redirect(url_for("istatistik11"))
@app.route("/veri/11/edit/<string:id>",methods=["GET","POST"])
@login_required
def calismasaatiduzenle(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM calisma_sureleri WHERE id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
return render_template('veri12.html',gun=data[0])
else:
newtcno = request.form['tcno']
newhaftaicigiris = request.form['haftaicigiris']
newhaftaicicikis = request.form['haftaicicikis']
newcumartesigiris = request.form['cumartesigiris']
newcumartesicikis = request.form['cumartesicikis']
newpazargiris = request.form['pazargiris']
newpazarcikis = request.form['pazarcikis']
cursor = mysql.connection.cursor()
sorgu = "UPDATE calisma_sureleri SET tc_no = %s, haftaicigiris = %s, haftaicicikis = %s, cumartesigiris = %s, cumartesicikis = %s, pazargiris = %s, pazarcikis = %s WHERE id = %s"
result = cursor.execute(sorgu,(newtcno,newhaftaicigiris,newhaftaicicikis,newcumartesigiris,newcumartesicikis,newpazargiris,newpazarcikis,id))
mysql.connection.commit()
if result > 0:
flash("Çalışma saati bilgisi başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik11"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik11"))
# Add hobby record
@app.route("/veri/13",methods=["GET", "POST"])
@login_required
def istatistik13():
if request.method == "POST":
tcno = request.form.get('tcno')
hobi_ismi = request.form.get('hobi_ismi')
cursor = mysql.connection.cursor()
sorgu = "Insert into eleman_hobileri(tc_no,hobi_ismi) VALUES(%s,%s)"
cursor.execute(sorgu,(tcno,hobi_ismi))
mysql.connection.commit()
cursor.close()
flash("Çalışanın hobi bilgisi başarılı bir şekilde kaydedildi","success")
return redirect(url_for("istatistik13"))
else:
return render_template("veri13.html")
@app.route("/veri/14")
@login_required
def istatistik14():
cursor = mysql.connection.cursor()
sorgu = "Select * From eleman_hobileri order by tc_no"
result = cursor.execute(sorgu)
if result > 0:
hobi = cursor.fetchall()
return render_template("veri14.html",hobi = hobi)
else:
return render_template("veri14.html")
@app.route("/veri/14/delete/<string:id>")
@login_required
def hobisil(id):
cursor = mysql.connection.cursor()
sorgu = "Select * from eleman_hobileri where id = %s"
result = cursor.execute(sorgu,(id,))
if result > 0:
sorgu2 = "Delete from eleman_hobileri where id = %s"
cursor.execute(sorgu2,(id,))
mysql.connection.commit()
return redirect(url_for("istatistik14"))
else:
flash("Bu numaralı id'de hastalık bulunmamaktadır.")
return redirect(url_for("istatistik14"))
@app.route("/veri/14/edit/<string:id>",methods=["GET","POST"])
@login_required
def hobiduzenle(id):
if request.method == "GET":
cursor = mysql.connection.cursor()
sorgu = "SELECT * FROM eleman_hobileri WHERE id = %s"
result = cursor.execute(sorgu,(id,))
data = cursor.fetchall()
cursor.close()
print(data[0])
return render_template('veri15.html',hobi=data[0])
else:
newtcno = request.form['tcno']
newhobi = request.form['hobi_ismi']
cursor = mysql.connection.cursor()
sorgu = "UPDATE eleman_hobileri SET tc_no = %s, hobi_ismi = %s WHERE id = %s"
result = cursor.execute(sorgu,(newtcno,newhobi,id))
mysql.connection.commit()
if result > 0:
flash("Hobi bilgisi başarılı bir şekilde güncellendi","success")
return redirect(url_for("istatistik14"))
else:
flash("Bir hata ile karşılaşıldı. Lütfen tekrar deneyiniz.","danger")
return redirect(url_for("istatistik14"))
# Statistics relating education level to COVID cases (1)
@app.route("/veri/16")
@login_required
def istatistik16():
cursor = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
cursor3 = mysql.connection.cursor()
sorgu = "select DISTINCT e.* from eleman e, covid c where doktora = '0' and yuksek_lisans = '0' and e.tc_no in (Select c.tc_no from covid);" #Lisans
sorgu2 = "select DISTINCT e.* from eleman e, covid c where doktora = '0' and yuksek_lisans <> '0' and e.tc_no in (Select c.tc_no from covid);" #Yüksek Lisans
sorgu3 = "select DISTINCT e.* from eleman e, covid c where doktora <> '0' and e.tc_no in (Select c.tc_no from covid);" # Doktora
result = cursor.execute(sorgu)
result2 = cursor2.execute(sorgu2)
result3 = cursor3.execute(sorgu3)
if result > 0 or result2 > 0 or result3 > 0:
lisans = cursor.fetchall()
ylisans = cursor2.fetchall()
doktora = cursor3.fetchall()
lisansadet = len(lisans)
yukseklisansadet = len(ylisans)
doktoraadet = len(doktora)
return render_template("veri16.html",lisans = lisans, ylisans = ylisans, doktora = doktora,lisansadet=lisansadet,yukseklisansadet=yukseklisansadet,doktoraadet=doktoraadet)
else:
flash("Bir hata oluştu!","danger")
return render_template("veri16.html")
# The three most common illnesses among employees, and the employees who have each one
@app.route("/veri/17", methods=["GET","POST"])
@login_required
def istatistik17():
if request.method == "GET" or request.method == "POST":
cursor = mysql.connection.cursor()
cursor0 = mysql.connection.cursor()
cursor1 = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
sorgu = "SELECT hastalik_adi, COUNT(hastalik_adi) AS hastaliklar FROM hasta GROUP BY hastalik_adi ORDER BY hastaliklar DESC LIMIT 3;"
result = cursor.execute(sorgu)
if result > 0:
data = cursor.fetchall()
hastalik_adi0 = data[0]['hastalik_adi']
hastalik_adi1 = data[1]['hastalik_adi']
hastalik_adi2 = data[2]['hastalik_adi']
sorgu0 = "SELECT DISTINCT e.tc_no, e.isim, e.soyisim from eleman e, hasta h where e.tc_no in (SELECT DISTINCT tc_no from hasta where hastalik_adi= '" + hastalik_adi0 + "');"
sorgu1 = "SELECT DISTINCT e.tc_no, e.isim, e.soyisim from eleman e, hasta h where e.tc_no in (SELECT DISTINCT tc_no from hasta where hastalik_adi= '" + hastalik_adi1 + "');"
sorgu2 = "SELECT DISTINCT e.tc_no, e.isim, e.soyisim from eleman e, hasta h where e.tc_no in (SELECT DISTINCT tc_no from hasta where hastalik_adi= '" + hastalik_adi2 + "');"
result0 = cursor0.execute(sorgu0)
result1 = cursor1.execute(sorgu1)
result2 = cursor2.execute(sorgu2)
if result0 and result1 and result2 > 0:
data0 = cursor0.fetchall()
data1 = cursor1.fetchall()
data2 = cursor2.fetchall()
return render_template("veri17.html", data=data, data0=data0, data1= data1, data2= data2)
else:
flash("Bu sayfada henüz herhangi bir veri bulunmamaktadır","danger")
return redirect(url_for("dashboard"))
@app.route("/veri/18", methods=["GET","POST"])
@login_required
def istatistik18():
if request.method == "POST":
sehiradi = request.form.get('sehiradi')
cursor = mysql.connection.cursor()
sorgu = "SELECT h.hastalik_adi, COUNT(hastalik_adi) as hastaliklar from eleman e, hasta h where h.tc_no in (Select e.tc_no from eleman where e.dogum_yeri= '"+ str(sehiradi) +"') GROUP BY hastalik_adi order by hastaliklar desc limit 3;"
result = cursor.execute(sorgu)
if result == 0:
flash("Aranan şehire ait bir hastalık bulunamadı...","warning")
return render_template("veri18.html")
else:
data = cursor.fetchall()
return render_template("veri18.html",data = data, sehiradi=sehiradi)
else:
return render_template("veri18.html") # render_template ile yazmadığım için 1 saat boşa gitti
@app.route("/veri/19")
@login_required
def istatistik19():
if request.method == "GET" or request.method == "POST":
cursor = mysql.connection.cursor()
cursor0 = mysql.connection.cursor()
cursor1 = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
sorgu = "select ilac, COUNT(ilac) as adet_sayisi from hasta GROUP BY ilac ORDER BY adet_sayisi desc LIMIT 3;"
result = cursor.execute(sorgu)
if result > 0:
data = cursor.fetchall()
ilac_adi0 = data[0]['ilac']
ilac_adi1 = data[1]['ilac']
ilac_adi2 = data[2]['ilac']
sorgu0 = "Select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih,c.negatif_tarihi, h.ilac from eleman e, covid c, hasta h where e.tc_no in (Select c.tc_no from covid where c.tc_no in (Select h.tc_no from hasta where h.ilac = '" + ilac_adi0 + "'));"
sorgu1 = "Select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih,c.negatif_tarihi, h.ilac from eleman e, covid c, hasta h where e.tc_no in (Select c.tc_no from covid where c.tc_no in (Select h.tc_no from hasta where h.ilac = '" + ilac_adi1 + "'));"
sorgu2 = "Select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih,c.negatif_tarihi, h.ilac from eleman e, covid c, hasta h where e.tc_no in (Select c.tc_no from covid where c.tc_no in (Select h.tc_no from hasta where h.ilac = '" + ilac_adi2 + "'));"
result0 = cursor0.execute(sorgu0)
result1 = cursor1.execute(sorgu1)
result2 = cursor2.execute(sorgu2)
if result0 and result1 and result2 > 0:
data0 = cursor0.fetchall()
data1 = cursor1.fetchall()
data2 = cursor2.fetchall()
bir = len(data0)
iki = len(data1)
uc = len(data2)
return render_template("veri19.html", data=data, data0=data0, data1= data1, data2= data2,bir = bir, iki = iki, uc = uc)
else:
flash("Bu sayfada henüz herhangi bir veri bulunmamaktadır","danger")
return redirect(url_for("dashboard"))
@app.route("/veri/20",methods=["GET","POST"])
@login_required
def istatistik20():
if request.method == "POST":
ilacadi = request.form.get('ilacadi')
cursor = mysql.connection.cursor()
sorgu = "select DISTINCT c.tc_no, c.yakalandigi_tarih, c.negatif_tarihi, h.ilac from covid c, hasta h where c.tc_no in (Select h.tc_no from hasta where h.ilac = '" + ilacadi + "');"
result = cursor.execute(sorgu)
if result == 0:
flash("Bu ilacı kullanıp korona olan çalışan bulunmamaktadır","warning")
return render_template("veri20.html")
else:
data = cursor.fetchall()
return render_template("veri20.html",data = data, ilacadi=ilacadi)
else:
return render_template("veri20.html") # render_template ile yazmadığım için 1 saat boşa gitti
# COVID infection status and rate by vaccination status
@app.route("/veri/30")
@login_required
def istatistik30():
cursor = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
cursor3 = mysql.connection.cursor()
sorgu = "select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih,c.negatif_tarihi,c.asi_id from covid c, eleman e where e.tc_no in (Select DISTINCT c.tc_no from covid where c.asi_id = '0' GROUP BY tc_no)" #Aşı olmayıp korona olanlar
sorgu2 = "select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih,c.negatif_tarihi,c.asi_id from covid c, eleman e where e.tc_no in (Select DISTINCT c.tc_no from covid where c.asi_id = '1' GROUP BY tc_no)" #Sinovac aşısı olup korona olanlar
sorgu3 = "select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih,c.negatif_tarihi,c.asi_id from covid c, eleman e where e.tc_no in (Select DISTINCT c.tc_no from covid where c.asi_id = '2' GROUP BY tc_no)" #Biontech aşısı olup korona olanlar
result = cursor.execute(sorgu)
result2 = cursor2.execute(sorgu2)
result3 = cursor3.execute(sorgu3)
if result > 0 or result2 > 0 or result3 > 0:
asisiz = cursor.fetchall()
sinovac = cursor2.fetchall()
biontech = cursor3.fetchall()
bir = len(asisiz)
iki = len(sinovac)
uc = len(biontech)
return render_template("veri30.html",asisiz = asisiz, sinovac = sinovac, biontech = biontech, bir = bir, iki = iki, uc = uc)
else:
flash("Bir hata oluştur","danger")
return render_template("veri30.html")
# Query showing how long COVID took to pass, for a given chronic illness
@app.route("/veri/31",methods=["GET","POST"])
@login_required
def istatistik31():
if request.method == "POST":
hastalikadi = request.form.get('hastalikadi')
cursor = mysql.connection.cursor()
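# TIMESTAMPDIFF(DAY, yakalandigi_tarih, negatif_tarihi) gives the number of
# whole days between testing positive and testing negative.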
sorgu = "Select c.tc_no,c.yakalandigi_tarih,c.negatif_tarihi,TIMESTAMPDIFF(DAY,c.yakalandigi_tarih,c.negatif_tarihi) as gecen_sure ,k.kronik_hastaligi from covid c, kronik_hastaliklar k where c.tc_no in (SELECT k.tc_no from kronik_hastaliklar where k.kronik_hastaligi = '"+ str(hastalikadi) +"');"
result = cursor.execute(sorgu)
if result == 0:
flash("Bu hastalığa sahip korona geçirmiş çalışan bulunmamaktadır","warning")
return render_template("veri31.html")
else:
data = cursor.fetchall()
return render_template("veri31.html",data = data, hastalikadi=hastalikadi)
else:
return render_template("veri31.html")
# COVID infection status by blood type
@app.route("/veri/32")
@login_required
def istatistik32():
cursor = mysql.connection.cursor() #A-
cursor2 = mysql.connection.cursor() #A+
cursor3 = mysql.connection.cursor() #B-
cursor4 = mysql.connection.cursor() #B+
cursor5 = mysql.connection.cursor() #AB-
cursor6 = mysql.connection.cursor() #AB+
cursor7 = mysql.connection.cursor() #0-
cursor8 = mysql.connection.cursor() #0+
sorgu = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = 'A-')"
sorgu2 = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = 'A+')"
sorgu3 = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = 'B-')"
sorgu4 = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = 'B+')"
sorgu5 = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = 'AB-')"
sorgu6 = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = 'AB+')"
sorgu7 = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = '0-')"
sorgu8 = "select e.tc_no, e.isim, e.soyisim, e.kan_grubu, c.yakalandigi_tarih,c.negatif_tarihi from eleman e, covid c where c.tc_no in (Select e.tc_no from eleman where e.kan_grubu = '0+')"
result = cursor.execute(sorgu) #A-
result2 = cursor2.execute(sorgu2) #A+
result3 = cursor3.execute(sorgu3) #B-
result4 = cursor4.execute(sorgu4) #B+
result5 = cursor5.execute(sorgu5) #AB-
result6 = cursor6.execute(sorgu6) #AB+
result7 = cursor7.execute(sorgu7) #0-
result8 = cursor8.execute(sorgu8) #0+
if result > 0 or result2 > 0 or result3 > 0 or result4 > 0 or result5 > 0 or result6 > 0 or result7 > 0 or result8 > 0:
a1 = cursor.fetchall()
a2 = cursor2.fetchall()
b1 = cursor3.fetchall()
b2 = cursor4.fetchall()
ab1 = cursor5.fetchall()
ab2 = cursor6.fetchall()
sıfır1 = cursor7.fetchall()
sıfır2 = cursor8.fetchall()
ua1 = len(a1)
ua2 = len(a2)
ub1 = len(b1)
ub2 = len(b2)
uab1 = len(ab1)
uab2 = len(ab2)
usıfır1 = len(sıfır1)
usıfır2 = len(sıfır2)
return render_template("veri32.html",a1 = a1, a2= a2, b1 = b1, b2 = b2, ab1= ab1, ab2= ab2, sıfır1 = sıfır1, sıfır2 = sıfır2, ua1=ua1,ua2=ua2,ub1=ub1,ub2=ub2,uab1=uab1,uab2=uab2,usıfır1 = usıfır1, usıfır2 = usıfır2)
else:
flash("Bir hata oluştur","danger")
return render_template("veri32.html")
# The three most common symptoms among COVID cases, and the employees who had each one
@app.route("/veri/33", methods=["GET","POST"])
@login_required
def istatistik33():
if request.method == "GET" or request.method == "POST":
cursor = mysql.connection.cursor()
cursor0 = mysql.connection.cursor()
cursor1 = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
sorgu = "Select belirti_ismi, COUNT(belirti_ismi) as gorulme_sayisi from belirtiler GROUP BY belirti_ismi ORDER BY gorulme_sayisi desc LIMIT 3;"
result = cursor.execute(sorgu)
if result > 0:
data = cursor.fetchall()
belirti_adi0 = data[0]['belirti_ismi']
belirti_adi1 = data[1]['belirti_ismi']
belirti_adi2 = data[2]['belirti_ismi']
sorgu0 = "Select DISTINCT e.tc_no, e.isim, e.soyisim, b.belirti_ismi from eleman e, belirtiler b where e.tc_no in (Select b.tc_no where b.belirti_ismi = '" + belirti_adi0 + "');"
sorgu1 = "Select DISTINCT e.tc_no, e.isim, e.soyisim, b.belirti_ismi from eleman e, belirtiler b where e.tc_no in (Select b.tc_no where b.belirti_ismi = '" + belirti_adi1 + "');"
sorgu2 = "Select DISTINCT e.tc_no, e.isim, e.soyisim, b.belirti_ismi from eleman e, belirtiler b where e.tc_no in (Select b.tc_no where b.belirti_ismi = '" + belirti_adi2 + "');"
result0 = cursor0.execute(sorgu0)
result1 = cursor1.execute(sorgu1)
result2 = cursor2.execute(sorgu2)
if result0 > 0 and result1 > 0 and result2 > 0:
data0 = cursor0.fetchall()
data1 = cursor1.fetchall()
data2 = cursor2.fetchall()
sifir = len(data0)
bir = len(data1)
iki = len(data2)
return render_template("veri33.html", data=data, data0=data0, data1= data1, data2= data2,sifir=sifir,bir=bir,iki=iki)
else:
flash("Bu sayfada henüz herhangi bir veri bulunmamaktadır","danger")
return redirect(url_for("dashboard"))
# List of contacting and contacted people
@app.route("/veri/34")
@login_required
def istatistik34():
cursor = mysql.connection.cursor()
sorgu = "Select e.tc_no, e.isim, e.soyisim, t.temasli_tcno from eleman e, temasli_calisanlar t where e.tc_no in (SELECT t.tc_no from temasli_calisanlar) order by e.isim;"
result = cursor.execute(sorgu)
if result > 0:
data = cursor.fetchall()
print(len(data))
return render_template("veri34.html",data = data)
else:
flash("Bir hata oluştur","danger")
return render_template("veri34.html")
# Number of days taken to recover from COVID, by vaccine type and vaccination status
@app.route("/veri/35")
@login_required
def istatistik35():
cursor = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
cursor3 = mysql.connection.cursor()
sorgu = "Select DISTINCT e.tc_no, e.isim, e.soyisim, TIMESTAMPDIFF(DAY,c.yakalandigi_tarih,c.negatif_tarihi) as fark, c.asi_id from covid c, eleman e where e.tc_no in (SELECT c.tc_no from covid where c.asi_id = '0');" #Aşı olmayanlar
sorgu2 = "Select DISTINCT e.tc_no, e.isim, e.soyisim, TIMESTAMPDIFF(DAY,c.yakalandigi_tarih,c.negatif_tarihi) as fark, c.asi_id from covid c, eleman e where e.tc_no in (SELECT c.tc_no from covid where c.asi_id = '1');" #Sinovac aşısı olanlar
sorgu3 = "Select DISTINCT e.tc_no, e.isim, e.soyisim, TIMESTAMPDIFF(DAY,c.yakalandigi_tarih,c.negatif_tarihi) as fark, c.asi_id from covid c, eleman e where e.tc_no in (SELECT c.tc_no from covid where c.asi_id = '2');" #Biontech aşısı olanlar
result = cursor.execute(sorgu)
result2 = cursor2.execute(sorgu2)
result3 = cursor3.execute(sorgu3)
if result > 0 or result2 > 0 or result3 > 0:
asisiz = cursor.fetchall()
sinovac = cursor2.fetchall()
biontech = cursor3.fetchall()
bir = len(asisiz)
iki = len(sinovac)
uc = len(biontech)
return render_template("veri35.html",asisiz = asisiz, sinovac = sinovac, biontech = biontech, bir=bir, iki=iki, uc=uc)
else:
flash("Bir hata oluştur","danger")
return render_template("veri35.html")
# List of employees who worked on the weekend and caught COVID
@app.route("/veri/36")
@login_required
def istatistik36():
cursor = mysql.connection.cursor()
cursor2 = mysql.connection.cursor()
sorgu = "Select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih, c.negatif_tarihi, cs.cumartesigiris,cs.cumartesicikis,cs.pazargiris, cs.pazarcikis from eleman e, covid c, calisma_sureleri cs where e.tc_no in (Select c.tc_no from covid where c.tc_no in (Select cs.tc_no from calisma_sureleri where cs.cumartesigiris <> '0:00:00' or cs.cumartesicikis <> '0:00:00' or cs.pazargiris <> '0:00:00' or cs.pazarcikis <> '0:00:00')) order by tc_no"
sorgu2 = "Select * from calisma_sureleri where cumartesigiris <> '0:00:00' or cumartesicikis <> '0:00:00' or pazargiris <> '0:00:00' or pazarcikis <> '0:00:00'"
result = cursor.execute(sorgu)
result2 = cursor2.execute(sorgu2)
if result > 0 or result2 > 0:
data = cursor.fetchall()
data2 = cursor2.fetchall()
korona_olan = len(data)
toplamkisisayisi = len(data2)
return render_template("veri36.html",data = data,korona_olan=korona_olan,toplamkisisayisi=toplamkisisayisi)
else:
flash("Bir hata oluştur","danger")
return render_template("veri36.html")
@app.route("/veri/37")
@login_required
def istatistik37():
cursor = mysql.connection.cursor()
sorgu = "select DISTINCT e.tc_no,e.isim, e.soyisim,c.yakalandigi_tarih,c.negatif_tarihi from eleman e, hasta h, covid c where e.tc_no in (select c.tc_no from covid where c.tc_no in (SELECT tc_no from hasta GROUP BY tc_no ORDER BY COUNT(tc_no) desc) and timestampdiff(month,c.negatif_tarihi,curdate())<=1)"
result = cursor.execute(sorgu)
if result > 0:
data = cursor.fetchall()
return render_template("veri37.html",data = data)
else:
flash("Bir hata oluştur","danger")
return render_template("veri37.html")
@app.route("/veri/38", methods=["GET","POST"])
@login_required
def istatistik38():
if request.method == "POST":
hastalikadi = request.form.get('hastalikadi')
cursor = mysql.connection.cursor()
sorgu = "select e.tc_no, e.isim, e.soyisim, c.yakalandigi_tarih, c.negatif_tarihi,h.hastalik_adi, c.asi_id from eleman e, covid c, hasta h where e.tc_no in (Select c.tc_no from covid where c.asi_id = '2' and c.tc_no in (Select h.tc_no from hasta where h.hastalik_adi = '"+hastalikadi+"')) order by e.tc_no"
result = cursor.execute(sorgu)
if result == 0:
flash("Aranan hastalığa ait bir veri bulunamadı...","warning")
return render_template("veri38.html")
else:
data = cursor.fetchall()
return render_template("veri38.html",data = data, hastalikadi=hastalikadi)
else:
return render_template("veri38.html")
if __name__ == "__main__":
app.run(debug=True,port=5000)
| 42.948893 | 446 | 0.637618 | 6,240 | 50,422 | 5.073397 | 0.099679 | 0.01731 | 0.05373 | 0.048613 | 0.670857 | 0.604302 | 0.55114 | 0.509824 | 0.47015 | 0.404132 | 0 | 0.019878 | 0.240728 | 50,422 | 1,173 | 447 | 42.985507 | 0.805637 | 0.046805 | 0 | 0.520249 | 0 | 0.047767 | 0.328755 | 0.042101 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057113 | false | 0.009346 | 0.007269 | 0.003115 | 0.187954 | 0.004154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b48f48a86a480a643b98de1d8c4d0bbf244c1ccb | 6,116 | py | Python | 20180107-Causality/chmp-app-causality/src/chmp/app/causality/dataset/customer.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 6 | 2017-10-31T20:54:37.000Z | 2020-10-23T19:03:00.000Z | 20180107-Causality/chmp-app-causality/src/chmp/app/causality/dataset/customer.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 7 | 2020-03-24T16:14:34.000Z | 2021-03-18T20:51:37.000Z | 20180107-Causality/chmp-app-causality/src/chmp/app/causality/dataset/customer.py | chmp/misc-exp | 2edc2ed598eb59f4ccb426e7a5c1a23343a6974b | [
"MIT"
] | 1 | 2019-07-29T07:55:49.000Z | 2019-07-29T07:55:49.000Z | import argparse
import logging
import os.path
import numpy as np
import pandas as pd
import patsy
_logger = logging.getLogger(__name__)
_basedir = os.path.abspath(os.path.dirname(__file__))
def create(data_path):
target_fname = os.path.join(data_path, "customer.parquet")
if os.path.exists(target_fname):
_logger.info("skip %s, does already exist", target_fname)
return
_logger.info("create %s", target_fname)
generate_customer_data(n_samples=100_000).to_parquet(
target_fname, engine="pyarrow", compression="brotli"
)
def generate_customer_data(
n_samples=100_000, n_occupations=5, seed=24, p_random=0.05, train_ratio=0.75
):
"""Generate a dataset designed to resemble customer datasets.
"""
np.random.seed(seed)
age_latent = sigmoid(np.random.normal(size=n_samples))
gender_latent = sigmoid(np.random.normal(size=n_samples))
occupation_latent = sample_categorical(n_occupations, size=n_samples, alpha=0.9)
generic_latent = sigmoid(np.random.normal(size=n_samples))
salary_latent = spline(
sample_cauchy(size=(1, n_occupations))
+ np.random.normal(loc=1, scale=0.75, size=(10, n_occupations)).cumsum(axis=0),
age_latent,
) + spline(
sample_cauchy(size=(1, n_occupations))
+ np.random.normal(loc=3, scale=0.75, size=(10, n_occupations)).cumsum(axis=0),
gender_latent,
)
salary_latent = salary_latent[np.arange(n_samples), occupation_latent]
dist_city_latent = spline(
np.random.laplace(loc=+1, scale=0.4, size=(10, n_occupations)).cumsum(axis=0),
age_latent,
)
dist_city_latent = dist_city_latent[np.arange(n_samples), occupation_latent]
count_mean = normalize(generic_latent) * np.random.gamma(10, 10 / 5, size=n_samples)
count = np.random.poisson(count_mean)
occupation_delta = np.random.laplace(size=n_occupations, loc=0.5, scale=1.5)
effect_noise_0 = np.random.normal(scale=0.4, size=n_samples)
effect_noise_1 = np.random.normal(scale=0.4, size=n_samples)
generic = spline(
np.random.laplace(loc=+0, scale=0.4, size=10).cumsum(axis=0), generic_latent
)
data = pd.DataFrame()
data["age"] = spline(
[20, 25, 30, 45, 60, 85],
normalize(age_latent) + np.random.normal(scale=0.05, size=n_samples),
)
data["gender"] = (
(gender_latent + np.random.normal(scale=0.05, size=n_samples)) > 0.5
).astype(float)
data["salary"] = spline(
[30, 35, 45, 60, 70, 80, 90, 100],
normalize(salary_latent) + np.random.normal(scale=0.05, size=n_samples),
)
data["dist_city"] = sigmoid(
3 * normalize(dist_city_latent)
- 1.5
+ np.random.normal(scale=0.05, size=n_samples)
)
data["occupation"] = random_cat_swaps(occupation_latent, eps=5e-2)
data["generic"] = generic
data["count"] = count
data["outcome_mean_p_det"] = sigmoid(
-0.0
+ 2.5 * (normalize(age_latent) - 0.35)
+ 3 * (normalize(salary_latent) - 0.5)
)
data["outcome_delta_p_det"] = sigmoid(
0.55
+ -0.15 * occupation_delta[occupation_latent] * normalize(salary_latent) ** 2
+ -0.5 * normalize(age_latent) ** 2
+ +1.0 * (normalize(gender_latent) - 0.5) * (normalize(generic_latent) - 0.15)
)
data["outcome_0_p_det"] = sigmoid(
logit(data["outcome_mean_p_det"]) - 0.5 * logit(data["outcome_delta_p_det"])
)
data["outcome_1_p_det"] = sigmoid(
logit(data["outcome_mean_p_det"]) + 0.5 * logit(data["outcome_delta_p_det"])
)
data["outcome_0_p"] = sigmoid(
logit(data["outcome_0_p_det"])
- 0.2 * effect_noise_0 * occupation_delta[occupation_latent]
)
data["outcome_1_p"] = sigmoid(
logit(data["outcome_1_p_det"])
+ 0.2 * effect_noise_1 * occupation_delta[occupation_latent]
)
cutoff = logit(p_random)
data["action_p"] = (data["age"] - 40) / 1.5
data["action_p"] = sigmoid(np.clip(data["action_p"], -cutoff, +cutoff))
data["action"] = sample_bernoulli(data["action_p"])
data["outcome_p"] = (data["action"] == 1) * data["outcome_1_p"] + (
data["action"] == 0
) * data["outcome_0_p"]
data["outcome"] = sample_bernoulli(data["outcome_p"])
data["train"] = 1
data.iloc[int(train_ratio * len(data)) :, data.columns.get_loc("train")] = 0
return data
def reject(proposal, accept, max_iter=10):
res = []
for _ in range(max_iter):
cand = proposal()
n_target = cand.shape[0]
sel = accept(cand)
cand = cand[sel]
res = np.concatenate([res, cand])
n_current = res.shape[0]
if n_current >= n_target:
return res[:n_target]
raise ValueError("rejection sampling did not accept enough samples within max_iter iterations")
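# Example use of the rejection sampler above (a minimal sketch, not part of
# the original module): draw a standard normal truncated to positive values,
# retrying with fresh proposals up to max_iter times.
def sample_positive_normal(n=1000):
    return reject(
        proposal=lambda: np.random.normal(size=n),
        accept=lambda x: x > 0,
    )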
def sample_bernoulli(p):
u = np.random.uniform(size=np.shape(p))
return (u < p).astype(float)
def sigmoid(x):
x = np.clip(x, -1e2, +1e2)
return 1. / (1. + np.exp(-x))
def logit(x):
x = np.clip(x, 1e-6, 1 - 1e-6)
return np.log(x / (1 - x))
def sample_categorical(n_categories, size, alpha=0.5):
p = np.random.dirichlet([alpha] * n_categories)
return np.random.choice(np.arange(n_categories), size=size, p=p)
def spline(w, x):
w = np.asarray(w)
x = np.asarray(x)
splines = patsy.bs(
x,
df=w.shape[0],
lower_bound=np.min(x),
upper_bound=np.max(x),
include_intercept=True,
)
return np.dot(splines, w)
def sample_cauchy(loc=0, scale=1, size=1):
u = np.random.uniform(size=size)
return loc + scale * np.tan(np.pi * (u + 0.5))
def normalize(x):
return (x - np.min(x)) / np.ptp(x)
def random_cat_swaps(x, eps=0.05):
n_categories = np.max(x) + 1
u = np.random.uniform(size=np.size(x))
sel = u < eps
x = x.copy()
x[sel] = np.random.randint(0, n_categories, size=sel.sum())
return x
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("data_path")
args = parser.parse_args()
create(args.data_path)
| 28.184332 | 88 | 0.630968 | 892 | 6,116 | 4.115471 | 0.206278 | 0.050123 | 0.04195 | 0.031054 | 0.315445 | 0.264233 | 0.236993 | 0.197766 | 0.165895 | 0.136747 | 0 | 0.040893 | 0.216318 | 6,116 | 216 | 89 | 28.314815 | 0.725016 | 0.009483 | 0 | 0.025478 | 0 | 0 | 0.071275 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070064 | false | 0 | 0.038217 | 0.006369 | 0.178344 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b48f53faa5d22345fc859ae5353bddfdc3d64abb | 1,745 | py | Python | Data Collection/Apparatus/Air Pollution/air_pollution_multithreading.py | NathanDai5287/air-pollution-covid-19 | dbf030bba7df22efc53d2262cea469309c884791 | [
"MIT"
] | null | null | null | Data Collection/Apparatus/Air Pollution/air_pollution_multithreading.py | NathanDai5287/air-pollution-covid-19 | dbf030bba7df22efc53d2262cea469309c884791 | [
"MIT"
] | null | null | null | Data Collection/Apparatus/Air Pollution/air_pollution_multithreading.py | NathanDai5287/air-pollution-covid-19 | dbf030bba7df22efc53d2262cea469309c884791 | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import datetime
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import pandas as pd
from pollution import start_date, start_date_string, end_date, end_date_string, parameters
from pollution import location_to_code, county_average, county_air_pollution
import zip_conversion
def complete_air_pollution(zip_code, parameters, start: str, end: str):
print(zip_code)
state, county = zip_conversion.zip_to_location(zip_code)
state, county = location_to_code(state, county)
df = pd.DataFrame()
for parameter in parameters:
data = county_average(county_air_pollution(
    parameter, start, end, state, county), parameters[parameter])
if (isinstance(data, pd.DataFrame)):
df = data.join(df, how='outer')
# with open(r'Data Collection\Data\extra\\' + str(zip_code) + '.csv', 'w', newline='') as f:
with open(r'C:\Users\natha\Programming\long-term-air-pollution\Data\Air Pollution\2015\\' + str(zip_code) + '.csv', 'w', newline='') as f:
f.write(df.to_csv())
print('Export ' + zip_code + ' Completed')
if __name__ == "__main__":
with open(r'Data Collection\Apparatus\Docs\zip_codes.csv') as f:
zip_codes = [i.strip() for i in f.readlines()]
# complete_air_pollution('92130', parameters, start_date_string, end_date_string)
# exit(0)
start_date_string = datetime.date(2015, 1, 1).strftime('%Y%m%d')
end_date_string = datetime.date(2015, 12, 31).strftime('%Y%m%d')
with ThreadPoolExecutor() as executor:
_ = [executor.submit(complete_air_pollution, zip_code, parameters, start_date_string, end_date_string) for zip_code in zip_codes]
| 37.934783 | 142 | 0.711748 | 246 | 1,745 | 4.792683 | 0.345528 | 0.067854 | 0.050891 | 0.045802 | 0.309584 | 0.155216 | 0.111959 | 0.040712 | 0 | 0 | 0 | 0.016416 | 0.162178 | 1,745 | 45 | 143 | 38.777778 | 0.790014 | 0.081375 | 0 | 0 | 0 | 0.034483 | 0.105691 | 0.061288 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.275862 | 0 | 0.310345 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4921f6ec217a9592cadd47f882fbef1d2caa7a8 | 1,015 | py | Python | test/settings.py | movermeyer/django-response-timeout | 38f7462ab71d967749efc3be914e2a7a2df80f33 | [
"MIT"
] | 1 | 2018-06-17T19:54:47.000Z | 2018-06-17T19:54:47.000Z | test/settings.py | movermeyer/django-response-timeout | 38f7462ab71d967749efc3be914e2a7a2df80f33 | [
"MIT"
] | null | null | null | test/settings.py | movermeyer/django-response-timeout | 38f7462ab71d967749efc3be914e2a7a2df80f33 | [
"MIT"
] | 1 | 2018-03-03T16:17:38.000Z | 2018-03-03T16:17:38.000Z | import django
from os import path
SECRET_KEY = 'not secret'
INSTALLED_APPS = ('response_timeout', 'test')
TEMPLATE_DEBUG = DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'response_timeout.db',
},
}
ROOT_URLCONF = 'test.urls'
# Testing
if django.VERSION[:2] < (1, 6):
INSTALLED_APPS += ('discover_runner',)
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = path.dirname(path.dirname(__file__))
# Cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'response_timeout'
},
}
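# Order matters for Django's per-site cache: UpdateCacheMiddleware must come
# first (it runs last on the response) and FetchFromCacheMiddleware last (it
# runs first on the request). SetCacheTimeoutMiddleware sits between them so
# it can adjust the cache timeout before the response is stored.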
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'response_timeout.middleware.SetCacheTimeoutMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 1
RESPONSE_CACHE_SECONDS = 2
| 24.756098 | 67 | 0.714286 | 106 | 1,015 | 6.566038 | 0.528302 | 0.086207 | 0.071839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007018 | 0.157635 | 1,015 | 40 | 68 | 25.375 | 0.807018 | 0.012808 | 0 | 0.0625 | 0 | 0 | 0.423423 | 0.288288 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4986e96e1f0b1e90e25754d1b183dee07129893 | 23,748 | py | Python | src/vtra/preprocess/province_roads_access_od_creation.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
] | 3 | 2018-07-09T12:15:46.000Z | 2020-12-03T07:02:23.000Z | src/vtra/preprocess/province_roads_access_od_creation.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
] | 1 | 2019-05-09T21:57:20.000Z | 2019-05-09T21:57:20.000Z | src/vtra/preprocess/province_roads_access_od_creation.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
] | 2 | 2018-07-23T12:49:21.000Z | 2021-06-03T11:00:44.000Z | """Pre-process accessibility-based provincial OD matrix
Purpose
-------
Create province scale OD matrices between roads connecting villages to nearest communes:
- Net revenue estimates of commune villages
- IFPRI crop data at 1km resolution
Input data requirements
-----------------------
1. Correct paths to all files and correct input parameters
2. Geotiff files with IFPRI crop data:
- tons - Float values of production tonnage at each grid cell
- geometry - Raster grid cell geometry
3. Shapefile of RiceAtlas data:
- month production columns - tonnage of rice for each month
- geometry - Shapely Polygon geometry of Provinces
4. Shapefile of Provinces
- od_id - Integer Province ID corresponding to OD ID
- name_eng - String name of Province in English
- geometry - Shapely Polygon geometry of Provinces
5. Shapefile of Communes
- population - Float values of populations in Communes
- nfirm - Float values of number of firms in Communes
- netrevenue - Float values of Net Revenue in Communes
- agri_prop - Float values of proportion of agriculture firms in Communes
- geometry - Shapely Polygon geometry of Communes
6. Shapefiles of network nodes
- node_id - String node ID
- geometry - Shapely point geometry of nodes
7. Shapefiles of network edges
- vehicle_co - Count of vehicles only for roads
- geometry - Shapely LineString geometry of edges
8. Shapefiles of Commune center points
- object_id - Integer ID of point
- geometry - Shapely point geometry of points
9. Shapefiles of Village center points
- object_id - Integer ID of points
- geometry - Shapely point geometry of points
Results
-------
1. Excel workbook with sheet of province-wise OD flows
- origin - String node ID of origin node
- destination - String node ID of destination node
- crop_names - Float values of daily tonnages of IFPRI crops (except rice) between OD nodes
- min_rice - Float values of minimum daily tonnages of rice between OD nodes
- max_rice - Float values of maximum daily tonnages of rice between OD nodes
- min_croptons - Float values of minimum daily tonnages of crops between OD nodes
- max_croptons - Float values of maximum daily tonnages of crops between OD nodes
- min_agrirev - Float value of Minimum daily revenue of agriculture firms between OD nodes
- max_agrirev - Float value of Maximum daily revenue of agriculture firms between OD nodes
- min_noagrirev - Float value of Minimum daily revenue of non-agriculture firms between OD nodes
- max_noagrirev - Float value of Maximum daily revenue of non-agriculture firms between OD nodes
- min_netrev - Float value of Minimum daily revenue of all firms between OD nodes
- max_netrev - Float value of Maximum daily revenue of all firms between OD nodes
References
----------
1. Pant, R., Koks, E.E., Russell, T., Schoenmakers, R. & Hall, J.W. (2018).
Analysis and development of model for addressing climate change/disaster risks in multi-modal transport networks in Vietnam.
Final Report, Oxford Infrastructure Analytics Ltd., Oxford, UK.
2. All input data folders and files referred to in the code below.
"""
import os
import subprocess
import sys
import geopandas as gpd
import igraph as ig
import numpy as np
import pandas as pd
from shapely.geometry import Point
from vtra.utils import *
def netrev_od_pairs(start_points, end_points):
"""Assign crop tonnages to OD pairs
Parameters
- start_points - GeoDataFrame of start points for Origins
- end_points - GeoDataFrame of potential end points for Destinations
Outputs
od_pairs_df - Pandas DataFrame with columns:
- origin - Origin node ID
- destination - Destination node ID
- netrev_argi - Net revenue of agriculture firms
- netrev_noargi - Net revenue of non-agriculture firms
"""
save_paths = []
for iter_, place in start_points.iterrows():
try:
closest_center = end_points.loc[end_points['OBJECTID']
== place['NEAREST_C_CENTER']]['NEAREST_G_NODE'].values[0]
save_paths.append(
(closest_center, place['NEAREST_G_NODE'], place['netrev_agri'], place['netrev_noagri']))
except IndexError:  # no matching commune center found for this point
print(iter_)
od_pairs_df = pd.DataFrame(
save_paths, columns=['origin', 'destination', 'netrev_agri', 'netrev_noagri'])
od_pairs_df = od_pairs_df.groupby(['origin', 'destination'])[
    ['netrev_agri', 'netrev_noagri']].sum().reset_index()
return od_pairs_df
def crop_od_pairs(start_points, end_points, crop_name):
"""Assign crop tonnages to OD pairs
Parameters
- start_points - GeoDataFrame of start points for Origins
- end_points - GeoDataFrame of potential end points for Destinations
- crop_name - String name of crop
Outputs
od_pairs_df - Pandas DataFrame with columns:
- origin - Origin node ID
- destination - Destination node ID
- crop - Tonnage values for the named crop
- netrev_argi - Daily Net revenue of agriculture firms in USD
- netrev_noargi - Daily Net revenue of non-agriculture firms in USD
"""
save_paths = []
for iter_, place in start_points.iterrows():
try:
closest_center = end_points.loc[end_points['OBJECTID']
== place['NEAREST_C_CENTER']]['NEAREST_G_NODE'].values[0]
save_paths.append((closest_center, place['NEAREST_G_NODE'], place['tons']))
except IndexError:  # no matching commune center found for this point
print(iter_)
od_pairs_df = pd.DataFrame(save_paths, columns=['origin', 'destination', crop_name])
od_pairs_df = od_pairs_df.groupby(['origin', 'destination'])[crop_name].sum().reset_index()
return od_pairs_df
def assign_monthly_tons_crops(x,rice_prod_file,crop_month_fields,province,x_cols):
"""Assign crop tonnages to OD pairs
Parameters
- x - Pandas DataFrame of values
- rice_prod_file - Shapefile of RiceAtlas monthly production value
- crop_month_fields - List of strings of month columns in the Rice Atlas shapefile
- province - String name of province
- x_cols - List of string names of crops
Outputs
- min_croptons - Float value of Minimum daily tonnages of crops
- max_croptons - Float value of Maximum daily tonnages of crops
"""
# find the crop production months for the province
rice_prod_months = gpd.read_file(rice_prod_file)
rice_prod_months = rice_prod_months.loc[rice_prod_months.SUB_REGION == province]
rice_prod_months = rice_prod_months[crop_month_fields].values.tolist()
rice_prod_months = np.array(rice_prod_months[0])/sum(rice_prod_months[0])
rice_prod_months = rice_prod_months[rice_prod_months > 0]
rice_prod_months = rice_prod_months.tolist()
min_croptons = 0
max_croptons = 0
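    # Rice is assumed to move only during its harvest months (each month's
    # share of annual tonnage spread over ~30 days); all other crops are
    # assumed to ship uniformly over 365 days.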
for x_name in x_cols:
if x_name == 'rice':
min_croptons += (1.0*min(rice_prod_months)*x[x_name])/30.0
max_croptons += (1.0*max(rice_prod_months)*x[x_name])/30.0
else:
min_croptons += (1.0*x[x_name])/365.0
max_croptons += (1.0*x[x_name])/365.0
return min_croptons, max_croptons
def assign_io_rev_costs_crops(x, cost_dataframe,rice_prod_file,crop_month_fields,province, x_cols, ex_rate):
"""Assign crop tonnages to daily net revenues
Parameters
- x - Pandas DataFrame of values
- cost_dataframe - Pandas DataFrame of conversion of tonnages to net revenues
- rice_prod_file - Shapefile of RiceAtlas monthly production value
- province - String name of province
- x_cols - List of string names of crops
- ex_rate - Exchange rate from VND millions to USD
Outputs
- min_croprev - Float value of Minimum daily revenue of crops
- max_croprev - Float value of Maximum daily revenue of crops
"""
# find the crop production months for the province
rice_prod_months = gpd.read_file(rice_prod_file)
rice_prod_months = rice_prod_months.loc[rice_prod_months.SUB_REGION == province]
rice_prod_months = rice_prod_months[crop_month_fields].values.tolist()
rice_prod_months = np.array(rice_prod_months[0])/sum(rice_prod_months[0])
rice_prod_months = rice_prod_months[rice_prod_months > 0]
rice_prod_months = rice_prod_months.tolist()
min_croprev = 0
max_croprev = 0
cost_list = list(cost_dataframe.itertuples(index=False))
for cost_param in cost_list:
if cost_param.crop_code in x_cols:
if cost_param.crop_code == 'rice':
min_croprev += (1.0*min(rice_prod_months)*ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))/30.0
max_croprev += (1.0*max(rice_prod_months)*ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))/30.0
else:
min_croprev += 1.0/365.0 * \
(ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))
max_croprev += 1.0/365.0 * \
(ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))
return min_croprev, max_croprev
def netrevenue_values_to_province_od_nodes(province_ods_df,prov_communes,commune_sindex,netrevenue,
n_firms,agri_prop,prov_pop,prov_pop_sindex,nodes,sindex_nodes,prov_commune_center,
sindex_commune_center,node_id,object_id,exchange_rate):
"""Assign commune level netrevenue values to OD nodes in provinces
- Based on finding nearest nodes to village points with netrevenues as Origins
- And finding nearest commune centers as Destinations
Parameters
- province_ods_df - List of lists of Pandas dataframes
- prov_communes - GeoDataFrame of commune level statistics
- commune_sindex - Spatial index of communes
- netrevenue - String name of column for netrevenue of communes in VND millions
- n_firms - String name of column for number of firms in communes
- agri_prop - String name of column for proportion of agriculture firms in communes
- prov_pop - GeoDataFrame of population points in Province
- prov_pop_sindex - Spatial index of population points in Province
- nodes - GeoDataFrame of province road nodes
- sindex_nodes - Spatial index of province road nodes
- prov_commune_center - GeoDataFrame of province commune center points
- sindex_commune_center - Spatial index of commune center points
- node_id - String name of Node ID column
- object_id - String name of commune ID column
- exchange_rate - Float value for exchange rate from VND million to USD
Outputs
province_ods_df - List of Lists of Pandas dataframes with columns:
- origin - Origin node ID
- destination - Destination node ID
- netrev_argi - Net revenue of agriculture firms
- netrev_noargi - Net revenue of non-agriculture firms
"""
# create new column in prov_communes with amount of villages
prov_communes['n_villages'] = prov_communes.geometry.apply(
lambda x: count_points_in_polygon(x, prov_pop_sindex))
prov_communes['netrev_village'] = exchange_rate * \
(prov_communes[netrevenue]*prov_communes[n_firms])/prov_communes['n_villages']
# also get the net revenue of the agriculture sector which is called nongnghiep
prov_communes['netrev_village_agri'] = 1.0/365.0 * \
(prov_communes[agri_prop]*prov_communes['netrev_village'])
prov_communes['netrev_village_noagri'] = 1.0/365.0 * \
(prov_communes['netrev_village'] - prov_communes['netrev_village_agri'])
# give each village a net revenue based on average per village in commune
prov_pop['netrev_agri'] = prov_pop.geometry.apply(lambda x: extract_value_from_gdf(
x, commune_sindex, prov_communes, 'netrev_village_agri'))
prov_pop['netrev_noagri'] = prov_pop.geometry.apply(lambda x: extract_value_from_gdf(
x, commune_sindex, prov_communes, 'netrev_village_noagri'))
# get nearest node in network for all start and end points
prov_pop['NEAREST_G_NODE'] = prov_pop.geometry.apply(
lambda x: get_nearest_node(x, sindex_nodes, nodes, node_id))
prov_pop['NEAREST_C_CENTER'] = prov_pop.geometry.apply(
lambda x: get_nearest_node(x, sindex_commune_center, prov_commune_center, object_id))
# find all OD pairs of the revenues
netrev_ods = netrev_od_pairs(prov_pop, prov_commune_center)
province_ods_df.append(netrev_ods)
return province_ods_df
def crop_values_to_province_od_nodes(province_ods_df,province_geom,calc_path,
crop_data_path,crop_names,nodes,sindex_nodes,prov_commune_center,sindex_commune_center,node_id,object_id):
"""Assign IFPRI crop values to OD nodes in provinces
- Based on finding nearest nodes to crop production sites as Origins
- And finding nearest commune centers as Destinations
Parameters
- province_ods_df - List of lists of Pandas dataframes
- province_geom - Shapely Geometry of province
- calc_path - Path to store intermediary calculations
- crop_data_path - Path to crop datasets
- crop_names - List of string of crop names in IFPRI datasets
- nodes - GeoDataFrame of province road nodes
- sindex_nodes - Spatial index of province road nodes
- prov_commune_center - GeoDataFrame of province commune center points
- sindex_commune_center - Spatial index of commune center points
- node_id - String name of Node ID column
- object_id - String name of commune ID column
Outputs
province_ods_df - List of Lists of Pandas dataframes with columns:
- origin - Origin node ID
- destination - Destination node ID
- crop - Tonnage values for the named crop
"""
# all the crop OD pairs
for file in os.listdir(crop_data_path):
if file.endswith(".tif") and 'spam_p' in file.lower().strip():
fpath = os.path.join(crop_data_path, file)
crop_name = [cr for cr in crop_names if cr in file.lower().strip()][0]
outCSVName = os.path.join(calc_path, 'crop_concentrations.csv')
subprocess.run(["gdal2xyz.py", '-csv', fpath, outCSVName])
# Load points and convert to geodataframe with coordinates
load_points = pd.read_csv(outCSVName, header=None, names=[
'x', 'y', 'tons'], index_col=None)
load_points = load_points[load_points['tons'] > 0]
geometry = [Point(xy) for xy in zip(load_points.x, load_points.y)]
load_points = load_points.drop(['x', 'y'], axis=1)
crop_points = gpd.GeoDataFrame(load_points, crs={'init': 'epsg:4326'}, geometry=geometry)
del load_points
# clip all to province
prov_crop = gdf_geom_clip(crop_points, province_geom)
if len(prov_crop.index) > 0:
prov_crop_sindex = prov_crop.sindex
prov_crop['NEAREST_G_NODE'] = prov_crop.geometry.apply(
lambda x: get_nearest_node(x, sindex_nodes, nodes, node_id))
prov_crop['NEAREST_C_CENTER'] = prov_crop.geometry.apply(
lambda x: get_nearest_node(x, sindex_commune_center, prov_commune_center, object_id))
crop_ods = crop_od_pairs(prov_crop, prov_commune_center, crop_name)
province_ods_df.append(crop_ods)
return province_ods_df
def main():
"""Pre-process provincial-scale OD
1. Specify the paths from where to read and write:
- Input data
- Intermediate calculations data
- Output results
2. Supply input data and parameters
- Names of the Provinces: List of strings
- Exchange rate to convert 2012 Net revenue in million VND values to USD in 2016
- Names of crops in IFPRI crop data
- Names of months in Rice Atlas data
- Name of column for netrevenue of communes in VND millions
- Name of column for number of firms in communes
- Name of column for proportion of agriculture firms in communes
- Name of Node ID column
- Name of commune ID column
3. Give the paths to the input data files:
- Network nodes files
- IFPRI crop data files
- Rice Atlas data shapefile
- Province boundary and stats data shapefile
- Commune boundary and stats data shapefile
- Population points shapefile for locations of villages
- Commune center points shapefile
"""
data_path, calc_path, output_path = load_config()['paths']['data'], load_config()[
'paths']['calc'], load_config()['paths']['output']
# Supply input data and parameters
province_list = ['Lao Cai', 'Binh Dinh', 'Thanh Hoa']
exchange_rate = 1.05*(1000000/21000)
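    # VND millions -> USD at ~21,000 VND per USD (hence 1000000/21000), with a
    # 1.05 factor that presumably adjusts 2012 revenues to 2016 values (the
    # module docstring mentions the 2012 -> 2016 conversion).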
crop_names = ['rice', 'cash', 'cass', 'teas',
'maiz', 'rubb', 'swpo', 'acof', 'rcof', 'pepp']
crop_month_fields = ['P_Jan', 'P_Feb', 'P_Mar', 'P_Apr', 'P_May',
'P_Jun', 'P_Jul', 'P_Aug', 'P_Sep', 'P_Oct', 'P_Nov', 'P_Dec']
netrevenue = 'netrevenue'
n_firms = 'nfirm'
agri_prop = 'nongnghiep'
node_id = 'NODE_ID'
object_id = 'OBJECTID'
# Give the paths to the input data files
network_data_path = os.path.join(data_path,'post_processed_networks')
crop_data_path = os.path.join(data_path, 'Agriculture_crops', 'crop_data')
rice_month_file = os.path.join(data_path, 'rice_atlas_vietnam', 'rice_production.shp')
province_path = os.path.join(data_path, 'Vietnam_boundaries',
'boundaries_stats', 'province_level_stats.shp')
commune_path = os.path.join(data_path, 'Vietnam_boundaries',
'boundaries_stats', 'commune_level_stats.shp')
population_points_in = os.path.join(
data_path, 'Points_of_interest', 'population_points.shp')
commune_center_in = os.path.join(
data_path, 'Points_of_interest', 'commune_committees_points.shp')
# Specify the output files and paths to be created
output_dir = os.path.join(output_path, 'flow_ods')
if os.path.exists(output_dir) == False:
os.mkdir(output_dir)
flow_output_excel = os.path.join(
output_dir, 'province_roads_commune_center_flow_ods.xlsx')
excl_wrtr = pd.ExcelWriter(flow_output_excel)
# Start the province OD allocations
for prn in range(len(province_list)):
province = province_list[prn]
province_name = province.replace(' ', '').lower()
# load provinces and get geometry of the right province
provinces = gpd.read_file(province_path)
provinces = provinces.to_crs({'init': 'epsg:4326'})
province_geom = provinces.loc[provinces.name_eng == province].geometry.values[0]
# clip all the populations to the province
prov_pop = gdf_clip(population_points_in, province_geom)
# create sindex of all villages to count number of villages in commune
prov_pop_sindex = prov_pop.sindex
# clip all the commune centers to the province
prov_commune_center = gdf_clip(commune_center_in, province_geom)
if object_id not in prov_commune_center.columns.values.tolist():
prov_commune_center[object_id] = prov_commune_center.index
sindex_commune_center = prov_commune_center.sindex
# clip all the communes to the province
prov_communes = gdf_clip(commune_path, province_geom)
commune_sindex = prov_communes.sindex
# load nodes of the network
nodes_in = os.path.join(network_data_path, '{}_roads_nodes.shp'.format(province_name))
nodes = gpd.read_file(nodes_in)
nodes = nodes.to_crs({'init': 'epsg:4326'})
sindex_nodes = nodes.sindex
province_ods_df = []
prov_commune_center['NEAREST_G_NODE'] = prov_commune_center.geometry.apply(
lambda x: get_nearest_node(x, sindex_nodes, nodes, node_id))
# Assign revenue values for each village to nearest road nodes
# And commune center point to nearest road nodes
# For Net Revenue OD pairs
print ('* Assigning revenue OD values for each village in {}'.format(province))
province_ods_df = netrevenue_values_to_province_od_nodes(
province_ods_df,prov_communes,commune_sindex,netrevenue,n_firms,
agri_prop,prov_pop,prov_pop_sindex,nodes,sindex_nodes,
prov_commune_center,sindex_commune_center,
node_id,object_id,exchange_rate)
# Get crop values and assign to the nearest road nodes
# And assign commune centers to nearest road nodes
# For crop OD pairs
print ('* Getting crop OD values in {}'.format(province))
province_ods_df = crop_values_to_province_od_nodes(
province_ods_df,province_geom,calc_path,
crop_data_path,crop_names,nodes,sindex_nodes,
prov_commune_center,sindex_commune_center,
node_id,object_id)
# Combine the Net Revenue abd Crop OD results
print ('* Combining OD values in {}'.format(province))
# Get totals across all crops
all_ods = pd.concat(province_ods_df, axis=0, sort='False', ignore_index=True).fillna(0)
all_ods_crop_cols = [c for c in all_ods.columns.values.tolist() if c in crop_names]
all_ods['crop_tot'] = all_ods[all_ods_crop_cols].sum(axis=1)
all_ods_val_cols = [c for c in all_ods.columns.values.tolist()
if c not in ('origin', 'destination')]
all_ods = all_ods.groupby(['origin', 'destination'])[
all_ods_val_cols].sum().reset_index()
# Find minimum and maximum crop daily tonnages
all_ods['croptons'] = all_ods.apply(lambda x: assign_monthly_tons_crops(
x, rice_month_file,crop_month_fields,province, all_ods_crop_cols), axis=1)
all_ods[['min_croptons', 'max_croptons']] = all_ods['croptons'].apply(pd.Series)
all_ods.drop('croptons', axis=1, inplace=True)
# Translate crop tonnages to netrevenues and compared with max netrevenue of firms
cost_values_df = pd.read_excel(os.path.join(
crop_data_path, 'crop_unit_costs.xlsx'), sheet_name='io_rev')
all_ods['croprev'] = all_ods.apply(lambda x: assign_io_rev_costs_crops(
x, cost_values_df,rice_month_file,crop_month_fields,province,
all_ods.columns.values.tolist(), exchange_rate), axis=1)
all_ods[['min_agrirev', 'max_croprev']] = all_ods['croprev'].apply(pd.Series)
all_ods.drop('croprev', axis=1, inplace=True)
all_ods['max_agrirev'] = all_ods[['max_croprev', 'netrev_agri']].max(axis=1)
all_ods.drop(['max_croprev', 'netrev_agri'], axis=1, inplace=True)
all_ods['min_netrev'] = all_ods['min_agrirev'] + all_ods['netrev_noagri']
all_ods['max_netrev'] = all_ods['max_agrirev'] + all_ods['netrev_noagri']
print ('* Writing {} values to Excel'.format(province))
all_ods.to_excel(excl_wrtr, province_name, index=False)
excl_wrtr.save()
if __name__ == '__main__':
main()
| 45.934236 | 127 | 0.677531 | 3,239 | 23,748 | 4.724606 | 0.129052 | 0.019865 | 0.029275 | 0.011762 | 0.533686 | 0.483957 | 0.424623 | 0.385741 | 0.35908 | 0.315036 | 0 | 0.007672 | 0.242547 | 23,748 | 516 | 128 | 46.023256 | 0.843062 | 0.423362 | 0 | 0.207373 | 0 | 0 | 0.121251 | 0.017311 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.041475 | 0 | 0.101382 | 0.02765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b498ac276e2df68b96a6609f47852d8c76bbc685 | 2,478 | py | Python | codes/train.py | pettod/lyft-kaggle | 352e6e54f16622a4fac4e698828c11148ead8c7e | [
"Apache-2.0"
] | null | null | null | codes/train.py | pettod/lyft-kaggle | 352e6e54f16622a4fac4e698828c11148ead8c7e | [
"Apache-2.0"
] | null | null | null | codes/train.py | pettod/lyft-kaggle | 352e6e54f16622a4fac4e698828c11148ead8c7e | [
"Apache-2.0"
] | null | null | null | import argparse
from datetime import datetime
import os
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as sched
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger as tb
from module import LyftModule
from net import LyftNet
from dataset import LyftLDM
def train_args(parent_parser):
parser = argparse.ArgumentParser(
parents=[parent_parser], add_help=False)
# Dataset options
parser.add_argument(
"--shuffle", type=bool, default=True)
# Model options
parser.add_argument(
"--history_num_frames", "-hnf", type=int, default=10)
parser.add_argument(
"--future_num_frames", "-fnf", type=int, default=50)
# Train options
parser.add_argument(
"--batch_size", "-bs", type=int, default=4)
parser.add_argument(
"--distributed_backend", "-db", type=str, default="dp")
parser.add_argument(
"--epochs", type=int, default=4)
parser.add_argument(
"--iterations_per_epoch", "-ipe", type=int)
parser.add_argument(
"--experiment_name", "-exn", type=str,
default=datetime.now().strftime("%d_%m_%Y_%H_%M_%S"))
parser.add_argument(
"--resume", action="store_true")
parser.add_argument(
"--pretrained_path", "-pp", type=str)
args = parser.parse_args()
return args
def get_module(args):
model = LyftNet(args.history_num_frames, args.future_num_frames)
optimizer = optim.Adam(
model.parameters(), lr=1e-4)
scheduler = sched.CosineAnnealingWarmRestarts(
optimizer, 150)
criterion = nn.MSELoss()
return LyftModule(model, optimizer, scheduler, criterion)
def train(args, parser):
args = train_args(parser)
tb_logger = tb(".", "experiments", version=args.experiment_name)
trainer = Trainer(
gpus=args.gpu,
logger=tb_logger,
num_sanity_val_steps=1,
deterministic=True,
limit_train_batches=1.0 if args.iterations_per_epoch is None
else args.iterations_per_epoch,
limit_val_batches=1.0 if args.iterations_per_epoch is None
else args.iterations_per_epoch,
row_log_interval=1,
log_save_interval=1,
resume_from_checkpoint=args.pretrained_path if args.resume else None,
distributed_backend=args.distributed_backend,
)
trainer.fit(get_module(args), datamodule=LyftLDM(args, os.environ["L5KIT_DATA_FOLDER"]))
| 31.769231 | 92 | 0.686441 | 314 | 2,478 | 5.194268 | 0.382166 | 0.060699 | 0.104231 | 0.053955 | 0.118945 | 0.118945 | 0.118945 | 0.079706 | 0.079706 | 0.079706 | 0 | 0.00962 | 0.202986 | 2,478 | 77 | 93 | 32.181818 | 0.816203 | 0.017353 | 0 | 0.190476 | 0 | 0 | 0.097119 | 0.017695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.174603 | 0 | 0.253968 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b49aaea7ec39afb87fb3a52cd84feb0d5ae86507 | 2,459 | py | Python | Examples/example_crysalispro_cuts.py | DanPorter/Dans_Diffaction | 74aea3d2b54d841271f22841f405a9a7c6fa1c81 | [
"Apache-2.0"
] | 22 | 2018-05-03T13:13:43.000Z | 2022-02-28T16:55:45.000Z | Examples/example_crysalispro_cuts.py | DanPorter/Dans_Diffaction | 74aea3d2b54d841271f22841f405a9a7c6fa1c81 | [
"Apache-2.0"
] | 7 | 2018-05-21T06:01:13.000Z | 2022-03-25T10:39:35.000Z | Examples/example_crysalispro_cuts.py | DanPorter/Dans_Diffaction | 74aea3d2b54d841271f22841f405a9a7c6fa1c81 | [
"Apache-2.0"
] | 6 | 2020-03-08T17:40:50.000Z | 2022-02-28T04:49:31.000Z | """
Example: read reciprocal space cuts from CrysAlisPro
"""
import sys, os
import re
import numpy as np
import matplotlib.pyplot as plt
cf = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(cf, '..'))
import Dans_Diffraction as dif
print(dif.version_info())
def read_image(filename, resolution=0.8):
"""
Read uncompressed image file from CrysAlisPro
In CrysAlisPro, write an uncompressed image with:
>> wd inc "image.img"
In Python, read the image with:
>> qx, qy, data = read_image("image.img")
>> plt.pcolormesh(qx, qy, data)
"""
# Get the file size from the header
with open(filename, 'rb') as file:
header = file.read()
NHEADER = int(re.findall(b'NHEADER=\s*\d+', header)[0].strip(b'NHEADER='))
NX = int(re.findall(b'NX=\s*\d+', header)[0].strip(b'NX='))
NY = int(re.findall(b'NY=\s*\d+', header)[0].strip(b'NY='))
# Separate header from data
with open(filename, 'rb') as file:
header = file.read(NHEADER)
data = np.fromfile(file, np.int32)
data = np.reshape(data, [NY, NX])
# Determine the pixel coordinates
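    # q_max = 2*pi / d_min is the scattering-vector magnitude at the stated
    # resolution limit, so the image spans -q_max..+q_max across NX pixels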
qmax = 2 * np.pi / resolution
qpixel = 2 * qmax / NX
qxrange = np.arange(-qpixel * (NX / 2.), qpixel * (NX / 2.), qpixel)
qyrange = np.arange(-qpixel * (NY / 2.), qpixel * (NY / 2.), qpixel)
qx, qy = np.meshgrid(qxrange, qyrange)
return qx, qy, data
cif_file = r"C:\Users\dgpor\OneDrive - Diamond Light Source Ltd\Projects\NaFeMnO2\P2-NaFeMnO2_icsd194731_fixed.cif"
img_file = r"C:\Users\dgpor\OneDrive - Diamond Light Source Ltd\Projects\NaFeMnO2\correct_super_uncomp.img"
qx, qy, img = read_image(img_file, 0.8)
xtl = dif.Crystal(cif_file)
P = [[4, 2, 0], [2, 4, 0], [0, 0, 3]] # [a', b', c']=P*[a, b, c] 1/6th Supercell: a'=4a+2b, b'=2a+4b, c'=c
sup = xtl.generate_superstructure(P)
# Generate all the supercell lattice points in our reciprocal space plane
Qx, Qy, hkl = sup.Cell.reciprocal_space_plane(
x_axis=sup.parenthkl2super([1, 1, 0]),
y_axis=sup.parenthkl2super([0, 0, 1]),
centre=sup.parenthkl2super([-0.5, 0.5, 0]),
q_max=8.5,
cut_width=0.05,
)
plt.figure()
plt.pcolormesh(qx, qy, img, cmap=plt.get_cmap('hot_r'))
plt.clim([-1, 1e1])
plt.scatter(Qx, Qy, s=10, facecolors='none', edgecolors='b')
xtl.Plot.axis_reciprocal_lattice_lines([1, 1, 0], [0, 0, 1], [-0.5, 0.5, 0], lw=0.5, c='grey', q_max=8)
plt.axis('image')
plt.axis([-4.5, 4.5, -4.5, 4.5])
plt.show()
| 32.786667 | 115 | 0.640504 | 407 | 2,459 | 3.796069 | 0.361179 | 0.020712 | 0.007767 | 0.025243 | 0.177994 | 0.171521 | 0.137217 | 0.137217 | 0.137217 | 0.137217 | 0 | 0.043241 | 0.181781 | 2,459 | 74 | 116 | 33.22973 | 0.724652 | 0.209435 | 0 | 0.044444 | 0 | 0.022222 | 0.139694 | 0.076964 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.111111 | 0 | 0.155556 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b49ac37fe88c794fc2754068fb491f153b63e256 | 4,339 | py | Python | qkeras/qtools/quantized_operators/fused_bn_factory.py | mkettn/qkeras | 9ea16325db86ba1dae465c4e3f1ef0575c5f3af5 | [
"Apache-2.0"
] | null | null | null | qkeras/qtools/quantized_operators/fused_bn_factory.py | mkettn/qkeras | 9ea16325db86ba1dae465c4e3f1ef0575c5f3af5 | [
"Apache-2.0"
] | null | null | null | qkeras/qtools/quantized_operators/fused_bn_factory.py | mkettn/qkeras | 9ea16325db86ba1dae465c4e3f1ef0575c5f3af5 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""quantized batch normliaztion quantizer implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import numpy as np
from qkeras.qtools.quantized_operators import adder_factory
from qkeras.qtools.quantized_operators import divider_factory
from qkeras.qtools.quantized_operators import multiplier_factory
from qkeras.qtools.quantized_operators import quantizer_impl
class FusedBNFactory:
"""determine which quantizer implementation to use.
Create a fused bn instance. The type and bit width of the output_quantizer
is determined from both the previous layer and the batchnorm weight types:
z = bn(y) = bn_inv * x - fused_bias is the output of the previous
layer and the following bn layer, with:
bn_inv = gamma * rsqrt(variance + epsilon) is computed from the
bn layer weights with inverse_quantizer datatype
x is the previous layer's output
fused_bias = bn_inv * bias + beta - bn_inv*mean where bias is
the bias term from the previous layer, beta and mean are the bn
layer weights.
"""
def make_quantizer(
self, prev_output_quantizer: quantizer_impl.IQuantizer,
beta_quantizer: quantizer_impl.IQuantizer,
mean_quantizer: quantizer_impl.IQuantizer,
inverse_quantizer: quantizer_impl.IQuantizer,
prev_bias_quantizer: quantizer_impl.IQuantizer,
use_beta: bool,
use_bias: bool,
):
"""Makes a fused_bn quantizer.
Args:
prev_output_quantizer: IQuantizer type. Previous layer output quantizer
beta_quantizer: IQuantizer type. bn layer beta quantizer
mean_quantizer: IQuantizer type. layer mean quantizer
inverse_quantizer: IQuantizer type. bn layer inverse quantizer
prev_bias_quantizer: IQuantizer type. conv layer bias quantizer
use_beta: Bool. whether enabling beta in batch_normalization layer
use_bias: Bool. Whether bias is used in conv layer.
Returns:
None
"""
assert not isinstance(inverse_quantizer, quantizer_impl.FloatingPoint), (
"inverse_quantizer in batchnorm layer has to be set for "
"fused bn inference in hardware!")
# bn_inv * x
multiplier_instance = multiplier_factory.MultiplierFactory()
multiplier_x = multiplier_instance.make_multiplier(
inverse_quantizer, prev_output_quantizer)
# fused_bias = bn_inv * bias + beta - bn_inv*mean
# This step derives the datatype for bn_inv * mean
multiplier_mean = multiplier_instance.make_multiplier(
inverse_quantizer, mean_quantizer)
adder_instance = adder_factory.IAdder()
if use_bias:
# Derives datatype of bn_inv*bias
multiplier_bias = multiplier_instance.make_multiplier(
inverse_quantizer, prev_bias_quantizer)
# Derives datatype of bn_inv*bias - bn_inv*mean
adder_1 = adder_instance.make_quantizer(
multiplier_bias.output, multiplier_mean.output)
else:
# There is no bias from the previous layer,
# therefore datatype of bn_inv*bias - bn_inv*mean is the same
# as bn_inv*mean
adder_1 = multiplier_mean
if use_beta:
# Derives datatype of fused_bias = bn_inv * bias + beta - bn_inv*mean
adder_bias = adder_instance.make_quantizer(
adder_1.output, beta_quantizer)
else:
# Since beta is not used, fused_bias = bn_inv * bias - bn_inv*mean
adder_bias = adder_1
# bn_inv * x - fused_bias
adder = adder_instance.make_quantizer(
multiplier_x.output, adder_bias.output)
self.internal_accumulator = adder
self.internal_output = adder
| 37.730435 | 80 | 0.726896 | 579 | 4,339 | 5.238342 | 0.283247 | 0.031322 | 0.023739 | 0.052753 | 0.263436 | 0.186614 | 0.139136 | 0.049126 | 0.030663 | 0 | 0 | 0.004033 | 0.200046 | 4,339 | 114 | 81 | 38.061404 | 0.869778 | 0.52201 | 0 | 0.086957 | 0 | 0 | 0.043788 | 0 | 0 | 0 | 0 | 0 | 0.021739 | 1 | 0.021739 | false | 0 | 0.23913 | 0 | 0.282609 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b49c513be34b4f17b0d736578157499c3c26fc69 | 3,878 | py | Python | src/process/rpc/OperationProcess.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | src/process/rpc/OperationProcess.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | src/process/rpc/OperationProcess.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | import time
import traceback
from IocManager import IocManager
from datetime import datetime
from domain.operation.execution.services.OperationExecution import OperationExecution
from domain.operation.services.DataOperationJobService import DataOperationJobService
from infrastructor.data.RepositoryProvider import RepositoryProvider
from infrastructor.data.decorators.TransactionHandler import transaction_handler
from infrastructor.logging.SqlLogger import SqlLogger
from multiprocessing.context import Process
from models.dao.operation import DataOperation
class OperationProcess:
@transaction_handler
def start(self, data_operation_id: int, job_id: int, data_operation_job_execution_id: int):
start = time.time()
start_datetime = datetime.now()
sql_logger = SqlLogger()
sql_logger.info(f"{data_operation_id}-{job_id} Data Operations Started",
job_id=data_operation_job_execution_id)
try:
IocManager.injector.get(OperationExecution).start(data_operation_id=data_operation_id, job_id=job_id,
data_operation_job_execution_id=data_operation_job_execution_id)
sql_logger.info(
f"{data_operation_id}-{job_id} Data Operations Finished",
job_id=data_operation_job_execution_id)
except Exception as ex:
exc = traceback.format_exc() + '\n' + str(ex)
sql_logger.info(
f"{data_operation_id}-{job_id} Data Operations Finished With Error: {exc}",
job_id=data_operation_job_execution_id)
finally:
IocManager.injector.get(DataOperationJobService).check_removed_job(ap_scheduler_job_id=job_id)
end_datetime = datetime.now()
end = time.time()
sql_logger.info(
f"{data_operation_id}-{job_id} Start :{start_datetime} - End :{end_datetime} - ElapsedTime :{end - start}",
job_id=data_operation_job_execution_id)
del sql_logger
@staticmethod
def start_process(data_operation_id: int, job_id: int, data_operation_job_execution_id: int):
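        # Entry point of the freshly spawned child process: the IoC container
        # must be initialized again here before any services can be resolved.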
IocManager.initialize()
operation_process = OperationProcess()
operation_process.start(data_operation_id=data_operation_id, job_id=job_id,
data_operation_job_execution_id=data_operation_job_execution_id)
del operation_process
@transaction_handler
def start_operation_process(self, data_operation_id: int, job_id: int, data_operation_job_execution_id: int):
"""
:param job_id: Ap Scheduler Job Id
:param data_operation_id: Data Operation Id
:param data_operation_job_execution_id: Data Operation Job Execution Id
:return:
"""
start = time.time()
start_datetime = datetime.now()
sql_logger = SqlLogger()
data_operation_query = IocManager.injector.get(RepositoryProvider).get(DataOperation).filter_by(
Id=data_operation_id)
data_operation = data_operation_query.first()
if data_operation is None:
raise Exception('Operation Not Found')
sql_logger.info(f"{data_operation_id}-{job_id}-{data_operation.Name} Execution Create started",
job_id=data_operation_job_execution_id)
operation_process = Process(target=OperationProcess.start_process,
args=(data_operation_id, job_id, data_operation_job_execution_id))
operation_process.start()
end_datetime = datetime.now()
end = time.time()
sql_logger.info(
f"{data_operation_id}-{job_id}-{data_operation.Name} Execution Create finished. Start :{start_datetime} - End :{end_datetime} - ElapsedTime :{end - start}",
job_id=data_operation_job_execution_id)
IocManager.injector.get(RepositoryProvider).close()
return
| 45.093023 | 168 | 0.691594 | 445 | 3,878 | 5.680899 | 0.186517 | 0.190269 | 0.106804 | 0.138449 | 0.524921 | 0.513845 | 0.501978 | 0.469146 | 0.450158 | 0.412184 | 0 | 0 | 0.232336 | 3,878 | 85 | 169 | 45.623529 | 0.849177 | 0.022434 | 0 | 0.358209 | 0 | 0.029851 | 0.140496 | 0.056518 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0 | 0.164179 | 0 | 0.238806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b49c9eb3c3eb69f27f9c4ba7b02c96c6ca9089e4 | 404 | py | Python | L_3_dz3.py | Malamut86/2075_Python | a78443d4de741cdca0c67c0b014f23daf10428c0 | [
"MIT"
] | null | null | null | L_3_dz3.py | Malamut86/2075_Python | a78443d4de741cdca0c67c0b014f23daf10428c0 | [
"MIT"
] | 2 | 2022-03-13T13:10:36.000Z | 2022-03-20T12:08:26.000Z | L_3_dz3.py | Malamut86/2075_Python | a78443d4de741cdca0c67c0b014f23daf10428c0 | [
"MIT"
] | null | null | null | def thesaurus(*args, bool=True) -> dict:
if bool:
args = sorted(list(args))
dict_out = {}
for words in args:
dict_value = dict_out.setdefault(words[0], list())
if words not in dict_value:
dict_value.append(words)
dict_out[words[0]] = dict_value
return dict_out
print(thesaurus("Иван", "Мария", "Петр", "Илья", "Анна"))
| 23.764706 | 59 | 0.564356 | 52 | 404 | 4.230769 | 0.480769 | 0.127273 | 0.118182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007092 | 0.30198 | 404 | 16 | 60 | 25.25 | 0.77305 | 0 | 0 | 0 | 0 | 0 | 0.054264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4a37dfcf3562862645afaf93f9f77b2926dd541 | 12,886 | py | Python | robonet/video_prediction/models/deterministic_generator.py | russellmendonca/RoboNet | de30fa069dacb2888e62bd239e7a3471ea3aaa9d | [
"MIT"
] | 140 | 2019-10-25T03:05:04.000Z | 2022-03-07T17:41:56.000Z | robonet/video_prediction/models/deterministic_generator.py | russellmendonca/RoboNet | de30fa069dacb2888e62bd239e7a3471ea3aaa9d | [
"MIT"
] | 9 | 2019-12-22T20:52:47.000Z | 2022-02-22T07:56:43.000Z | robonet/video_prediction/models/deterministic_generator.py | russellmendonca/RoboNet | de30fa069dacb2888e62bd239e7a3471ea3aaa9d | [
"MIT"
] | 26 | 2019-10-21T04:49:55.000Z | 2021-09-17T15:50:17.000Z | """
Boiled down version of SAVP model from https://github.com/alexlee-gk/video_prediction
"""
from robonet.video_prediction.models.base_model import BaseModel
from robonet.video_prediction.utils import tf_utils
import tensorflow as tf
from collections import OrderedDict
from robonet.video_prediction import losses
from robonet.video_prediction import metrics
from robonet.video_prediction.models.deterministc_embedding_utils import onestep_encoder_fn, average_and_repeat, split_model_inference
import logging
from robonet.video_prediction.ops import pool2d  # assumed home of pool2d used by the multi-scale losses below
def host_summary_fn(summary_dir, summary_queue_len, image_summary_freq, **summary_dict):
gs = summary_dict.pop('global_step')[0] # the 0 index here is crucial, will error on TPU otherwise
real_vs_gen = summary_dict.pop('real_vs_gen')
with tf.contrib.summary.create_file_writer(summary_dir, max_queue=summary_queue_len).as_default():
with tf.contrib.summary.record_summaries_every_n_global_steps(image_summary_freq, global_step=gs):
tf.contrib.summary.image("real_vs_gen", real_vs_gen, step=gs)
with tf.contrib.summary.always_record_summaries():
for k, v in summary_dict.items():
tf.contrib.summary.scalar(k, v, step=gs)
return tf.contrib.summary.all_summary_ops()
def wrap_host(summary_dir, summary_queue_len, image_summary_freq, fn):
def fn1(**kwargs):
return fn(summary_dir, summary_queue_len, image_summary_freq, **kwargs)
return fn1
class DeterministicModel(BaseModel):
def _model_default_hparams(self):
return {
"lr": 0.001,
"end_lr": 1e-8,
"decay_steps": [200000, 800000],
"beta1": 0.9,
"beta2": 0.999,
'l1_weight': 1.0,
'l2_weight': 0.0,
'num_scales': 1,
'vgg_cdist_weight': 0.0,
'state_weight': 0.0,
'tv_weight': 0.0,
"tpu_log_pad": 5
}
def _model_fn(self, model_inputs, model_targets, mode):
# prep inputs here
logger = logging.getLogger(__name__)
inputs, targets = {}, {}
inputs['actions'], inputs['images'] = tf.transpose(model_inputs['actions'], [1, 0, 2]), tf.transpose(model_inputs['images'], [1, 0, 2, 3, 4])
if mode == tf.estimator.ModeKeys.TRAIN:
targets['images'] = tf.transpose(model_targets['images'][:, self._hparams.context_frames:], [1, 0, 2, 3, 4])
if self._hparams.use_states:
inputs['states'] = tf.transpose(model_inputs['states'][:, self._hparams.context_frames:], [1, 0, 2])
if self._hparams.state_weight and mode == tf.estimator.ModeKeys.TRAIN:
targets['states'] = tf.transpose(model_targets['state'], [1, 0, 2])
else:
logger.warning('states supplied but state_weight=0 so no loss will be computed on predicted states')
elif self._hparams.state_weight > 0:
raise ValueError("states not supplied but state_weight > 0")
# if annotations are present construct 'pixel flow error metric'
if 'annotations' in model_inputs or 'pixel_distributions' in model_inputs:
if mode == tf.estimator.ModeKeys.TRAIN:
inputs['pix_distribs'] = tf.transpose(model_inputs['annotations'], [1, 0, 2, 3, 4])
targets['pix_distribs'] = tf.transpose(model_targets['annotations'][:, self._hparams.context_frames:], [1, 0, 2, 3, 4])
else:
inputs['pix_distribs'] = tf.transpose(model_inputs['pixel_distributions'], [1, 0, 2, 3, 4])
if 'encoder' in self._hparams and self._hparams.encoder == 'one_step':
assert mode == tf.estimator.ModeKeys.TRAIN
tlen = inputs['images'].get_shape().as_list()[0]
inputs_tr_inf, targets_tr_inf = split_model_inference(inputs, targets, self._hparams)
outputs_enc = onestep_encoder_fn(inputs_tr_inf['inference'], self._hparams)
self._hparams.e_dim = outputs_enc.get_shape().as_list()[2]
outputs_enc = average_and_repeat(outputs_enc, self._hparams, tlen)
inputs = inputs_tr_inf['train']
targets = targets_tr_inf['train']
else:
outputs_enc = None
inputs['outputs_enc'] = outputs_enc
# build the graph
self._model_graph = model_graph = self._graph_class()
outputs = model_graph.build_graph(mode, inputs, self._hparams, self._num_gpus, self._graph_scope)
        pred_frames = tf.transpose(outputs["gen_images"], [1, 0, 2, 3, 4])
# if train build the loss function (don't support multi-gpu training)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
lr, optimizer = tf_utils.build_optimizer(self._hparams.lr, self._hparams.beta1, self._hparams.beta2,
decay_steps=self._hparams.decay_steps,
end_lr=self._hparams.end_lr,
global_step=global_step)
if self._tpu_mode and self._use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
gen_losses = OrderedDict()
if not (self._hparams.l1_weight or self._hparams.l2_weight or self._hparams.vgg_cdist_weight):
logger.error('no image loss is being created!')
raise ValueError
gen_images = outputs.get('gen_images_enc', outputs['gen_images'])
target_images = targets['images']
scalar_summaries = {'learning_rate': lr}
tensor_summaries = {'pred_frames': pred_frames}
if 'encoder' in self._hparams and self._hparams.encoder == 'one_step':
tensor_summaries['inference_images'] = inputs_tr_inf['inference']['images']
tensor_summaries['pred_targets'] = target_images
tensor_summaries['pred_target_dists'] = targets_tr_inf['train']['pix_distribs']
if 'annotations' in model_inputs:
tensor_summaries['pred_distrib'] = tf.transpose(outputs['gen_pix_distribs'], [1, 0, 2, 3, 4])
expected_dist = metrics.expected_pixel_distance(targets['pix_distribs'], outputs['gen_pix_distribs'])
expected_square_dist = metrics.expected_square_pixel_distance(targets['pix_distribs'], outputs['gen_pix_distribs'])
var_dist = expected_square_dist - tf.square(expected_dist)
expected_dist, var_dist = [tf.reduce_sum(x, 0) for x in [expected_dist, var_dist]]
scalar_summaries['robot_pixel_distance'] = tf.reduce_mean(expected_dist[:, 0])
scalar_summaries['robot_pixel_var'] = tf.reduce_mean(var_dist[:, 0])
if expected_dist.get_shape().as_list()[-1] > 1:
for o in range(1, expected_dist.get_shape().as_list()[-1]):
scalar_summaries['object{}_pixel_distance'.format(o)] = tf.reduce_mean(expected_dist[:, o])
scalar_summaries['object{}_pixel_var'.format(o)] = tf.reduce_mean(var_dist[:, o])
if 'ground_truth_sampling_mean' in outputs:
scalar_summaries['ground_truth_sampling_mean'] = outputs['ground_truth_sampling_mean']
if self._hparams.l1_weight:
gen_l1_loss = losses.l1_loss(gen_images, target_images)
gen_losses["gen_l1_loss"] = (gen_l1_loss, self._hparams.l1_weight)
scalar_summaries['l1_loss'] = gen_l1_loss
if self._hparams.l2_weight:
gen_l2_loss = losses.l2_loss(gen_images, target_images)
gen_losses["gen_l2_loss"] = (gen_l2_loss, self._hparams.l2_weight)
scalar_summaries['l2_loss'] = gen_l2_loss
if (self._hparams.l1_weight or self._hparams.l2_weight) and self._hparams.num_scales > 1:
for i in range(1, self._hparams.num_scales):
scale_factor = 2 ** i
gen_images_scale = tf_utils.with_flat_batch(pool2d)(gen_images, scale_factor, scale_factor, pool_mode='avg')
target_images_scale = tf_utils.with_flat_batch(pool2d)(target_images, scale_factor, scale_factor, pool_mode='avg')
if self._hparams.l1_weight:
gen_l1_scale_loss = losses.l1_loss(gen_images_scale, target_images_scale)
gen_losses["gen_l1_scale%d_loss" % i] = (gen_l1_scale_loss, self._hparams.l1_weight)
scalar_summaries['l1_loss_scale{}'.format(i)] = gen_l1_scale_loss
if self._hparams.l2_weight:
gen_l2_scale_loss = losses.l2_loss(gen_images_scale, target_images_scale)
gen_losses["gen_l2_scale%d_loss" % i] = (gen_l2_scale_loss, self._hparams.l2_weight)
scalar_summaries['l2_loss_scale{}'.format(i)] = gen_l2_scale_loss
if self._hparams.vgg_cdist_weight:
gen_vgg_cdist_loss = metrics.vgg_cosine_distance(gen_images, target_images)
gen_losses['gen_vgg_cdist_loss'] = (gen_vgg_cdist_loss, self._hparams.vgg_cdist_weight)
scalar_summaries['vgg_cdist_loss'] = gen_vgg_cdist_loss
if self._hparams.state_weight:
gen_states = outputs.get('gen_states_enc', outputs['gen_states'])
target_states = targets['states']
gen_state_loss = losses.l2_loss(gen_states, target_states)
gen_losses["gen_state_loss"] = (gen_state_loss, self._hparams.state_weight)
                scalar_summaries['state_loss'] = gen_state_loss
if self._hparams.tv_weight:
gen_flows = outputs.get('gen_flows_enc', outputs['gen_flows'])
flow_diff1 = gen_flows[..., 1:, :, :, :] - gen_flows[..., :-1, :, :, :]
flow_diff2 = gen_flows[..., :, 1:, :, :] - gen_flows[..., :, :-1, :, :]
# sum over the multiple transformations but take the mean for the other dimensions
gen_tv_loss = tf.reduce_mean(tf.reduce_sum(tf.abs(flow_diff1), axis=(-2, -1))) + \
tf.reduce_mean(tf.reduce_sum(tf.abs(flow_diff2), axis=(-2, -1)))
gen_losses['gen_tv_loss'] = (gen_tv_loss, self._hparams.tv_weight)
scalar_summaries['tv_loss'] = gen_tv_loss
loss = sum(loss * weight for loss, weight in gen_losses.values())
print('computing gradient and train_op')
g_gradvars = optimizer.compute_gradients(loss, var_list=model_graph.vars, colocate_gradients_with_ops=True)
g_train_op = optimizer.apply_gradients(g_gradvars, global_step=global_step)
if self._tpu_mode:
import numpy as np
try:
parameter_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
print("parameter_count =", parameter_count)
except TypeError:
pass
log_summaries = {}
log_summaries['global_step'] = tf.reshape(global_step, [1])
for k in scalar_summaries.keys():
                    log_summaries[k] = tf.reshape(scalar_summaries[k], [1])
reals, gen = [tf.split(tf.transpose(tens, [1, 0, 2, 3, 4]), tens.get_shape().as_list()[1], axis=0) for tens in [target_images, gen_images]]
reals, gen = [[tf.concat(tf.split(i[0], i.get_shape().as_list()[1], axis=0), axis=-2)[0] for i in img] for img in (reals, gen)]
pad = tf.ones([self._hparams.tpu_log_pad] + reals[0].get_shape().as_list()[1:])
real_gen = [tf.concat((r, pad, g), axis=0) for r, g in zip(reals, gen)]
log_tensor = [real_gen[0]]
for rg in real_gen[1:]:
log_tensor.extend([pad, pad, rg])
log_tensor = tf.concat(log_tensor, axis=0)[None]
log_summaries['real_vs_gen'] = tf.clip_by_value(log_tensor, 0, 1)
host_fn = wrap_host(self._summary_dir, self._summary_queue_len, self._image_summary_freq, host_summary_fn)
return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=g_train_op, host_call=(host_fn, log_summaries))
est = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=g_train_op)
return est, scalar_summaries, tensor_summaries
ret_dict = {'predicted_frames': pred_frames[:, :, None]}
if 'gen_pix_distribs' in outputs:
ret_dict['predicted_pixel_distributions'] = tf.transpose(outputs['gen_pix_distribs'], [1, 0, 2, 3, 4])[:, :, None]
return ret_dict
| 57.526786 | 155 | 0.619122 | 1,648 | 12,886 | 4.496966 | 0.17051 | 0.06234 | 0.004858 | 0.004858 | 0.353259 | 0.252058 | 0.222237 | 0.173121 | 0.112805 | 0.036162 | 0 | 0.019406 | 0.268198 | 12,886 | 223 | 156 | 57.784753 | 0.76649 | 0.03011 | 0 | 0.067416 | 0 | 0 | 0.103965 | 0.010412 | 0 | 0 | 0 | 0 | 0.005618 | 1 | 0.02809 | false | 0.005618 | 0.050562 | 0.011236 | 0.123596 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
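A hedged NumPy restatement of the multi-scale L1 loss computed above; the [T, B, H, W, C] tensor layout and divisible spatial sizes are assumptions, and avg_pool2d here is a sketch of the idea, not the repo's pool2d.

import numpy as np

def avg_pool2d(x, k):
    # x: [T, B, H, W, C]; average-pool both spatial dims by factor k
    T, B, H, W, C = x.shape
    return x.reshape(T, B, H // k, k, W // k, k, C).mean(axis=(3, 5))

def multiscale_l1(gen, target, num_scales=3):
    loss = np.abs(gen - target).mean()
    for i in range(1, num_scales):
        k = 2 ** i
        loss += np.abs(avg_pool2d(gen, k) - avg_pool2d(target, k)).mean()
    return loss

gen = np.random.rand(4, 1, 32, 32, 3)
target = np.random.rand(4, 1, 32, 32, 3)
print(multiscale_l1(gen, target))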
b4a5bbf08a7b51f662f40c48b225c0d4fcbb721e | 8,200 | py | Python | models/nerf_net.py | peihaowang/nerf-pytorch | 24c11a42d65c381150d2bb6caa6d160920d6cae7 | [
"MIT"
] | 23 | 2021-02-03T07:59:22.000Z | 2022-03-28T07:13:45.000Z | models/nerf_net.py | peihaowang/nerf-pytorch | 24c11a42d65c381150d2bb6caa6d160920d6cae7 | [
"MIT"
] | null | null | null | models/nerf_net.py | peihaowang/nerf-pytorch | 24c11a42d65c381150d2bb6caa6d160920d6cae7 | [
"MIT"
] | 4 | 2021-12-06T12:18:44.000Z | 2022-03-29T16:08:12.000Z | import os, sys
import numpy as np
import imageio
import json
import random
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
from models.sampler import StratifiedSampler, ImportanceSampler
from models.renderer import VolumetricRenderer
from models.nerf_mlp import NeRFMLP
from utils.error import *
class NeRFNet(nn.Module):
def __init__(self, netdepth=8, netwidth=256, netdepth_fine=8, netwidth_fine=256, N_samples=64, N_importance=64,
                 viewdirs=True, use_embed=True, multires=10, multires_views=4, conv_embed=False, ray_chunk=1024*32, pts_chunk=1024*64,
perturb=1., raw_noise_std=0., white_bkgd=False):
super().__init__()
# Create sampler
self.N_samples, self.N_importance = N_samples, N_importance
self.point_sampler = StratifiedSampler(N_samples, perturb=perturb, lindisp=False, pytest=False)
self.importance_sampler = None
if N_importance > 0:
self.importance_sampler = ImportanceSampler(N_importance, perturb=perturb, lindisp=False, pytest=False)
# Ray renderer
self.renderer = VolumetricRenderer(raw_noise_std=raw_noise_std, white_bkgd=white_bkgd)
# Maximum number of rays to process simultaneously. Used to control maximum memory usage. Does not affect final results.
self.chunk = ray_chunk
# Save if use view directions (which cannot be changed after building networks)
self.use_viewdirs = viewdirs
# create nerf mlps
self.nerf = NeRFMLP(input_dim=3, output_dim=4, net_depth=netdepth, net_width=netwidth, skips=[4],
viewdirs=viewdirs, use_embed=use_embed, multires=multires, multires_views=multires_views,
                            conv_embed=conv_embed, netchunk=pts_chunk)
self.nerf_fine = self.nerf
if N_importance > 0:
self.nerf_fine = NeRFMLP(input_dim=3, output_dim=4, net_depth=netdepth_fine, net_width=netwidth_fine, skips=[4],
viewdirs=viewdirs, use_embed=use_embed, multires=multires, multires_views=multires_views,
                                conv_embed=conv_embed, netchunk=pts_chunk)
# render parameters
self.render_kwargs_train = {
'N_importance': N_importance,
'N_samples': N_samples,
'perturb': perturb,
'raw_noise_std': raw_noise_std,
'retraw': True, 'retpts': False
}
# copy from train rendering first
self.render_kwargs_test = self.render_kwargs_train.copy()
# no perturbation
self.render_kwargs_test['perturb'] = 0.
self.render_kwargs_test['raw_noise_std'] = 0.
def render_rays(self, rays_o, rays_d, near, far, viewdirs=None, raw_noise_std=0.,
verbose=False, retraw = False, retpts=False, pytest=False, **kwargs):
"""Volumetric rendering.
Args:
ray_o: origins of rays. [N_rays, 3]
ray_d: directions of rays. [N_rays, 3]
near: the minimal distance. [N_rays, 1]
far: the maximal distance. [N_rays, 1]
            raw_noise_std: float. Std of Gaussian noise added to the raw network output (0 disables it).
verbose: bool. If True, print more debugging info.
Returns:
rgb: [N_rays, 3]. Estimated RGB color of a ray. Comes from fine model.
raw: [N_rays, N_samples, C]. Raw predictions from model.
pts: [N_rays, N_samples, 3]. Sampled points.
rgb0: See rgb_map. Output for coarse model.
raw0: See raw. Output for coarse model.
            pts0: See pts. Output for coarse model.
z_std: [N_rays]. Standard deviation of distances along ray for each sample.
"""
bounds = torch.cat([near, far], -1) # [N_rays, 2]
# Primary sampling
pts, z_vals, _ = self.point_sampler(rays_o, rays_d, bounds, **kwargs) # [N_rays, N_samples, 3]
viewdirs_c = viewdirs[..., None, :].expand(pts.shape) # [N_rays, 3] -> [N_rays, N_samples, 3]
raw = self.nerf(pts, viewdirs_c)
ret = self.renderer(raw, z_vals, rays_d, raw_noise_std=raw_noise_std, pytest=pytest)
# Buffer raw/pts
if retraw:
ret['raw'] = raw
if retpts:
ret['pts'] = pts
# Secondary sampling
N_importance = kwargs.get('N_importance', self.N_importance)
if (self.importance_sampler is not None) and (N_importance > 0):
# backup coarse model output
ret0 = ret
# resample
pts, z_vals, sampler_extras = self.importance_sampler(rays_o, rays_d, z_vals, **ret, **kwargs) # [N_rays, N_samples + N_importance, 3]
viewdirs_f = viewdirs[..., None, :].expand(pts.shape) # [N_rays, 3] -> [N_rays, N_samples, 3]
# obtain raw data
raw = self.nerf_fine(pts, viewdirs_f)
# render raw data
ret = self.renderer(raw, z_vals, rays_d, raw_noise_std=raw_noise_std, pytest=pytest)
# Buffer raw/pts
if retraw:
ret['raw'] = raw
if retpts:
ret['pts'] = pts
# compute std of resampled point along rays
ret['z_std'] = torch.std(sampler_extras['z_samples'], dim=-1, unbiased=False) # [N_rays]
# buffer coarse model output
for k in ret0:
ret[k+'0'] = ret0[k]
return ret
def forward(self, ray_batch, bound_batch, **kwargs):
"""Render rays
Args:
ray_batch: array of shape [2, batch_size, 3]. Ray origin and direction for
each example in batch.
Returns:
ret_all includes the following returned values:
rgb_map: [batch_size, 3]. Predicted RGB values for rays.
raw: [batch_size, N_sample, C]. Raw data of each point.
weight_map: [batch_size, N_sample, C]. Convert raw to weight scale (0-1).
acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
"""
# Render settings
if self.training:
render_kwargs = self.render_kwargs_train.copy()
render_kwargs.update(kwargs)
else:
render_kwargs = self.render_kwargs_test.copy()
render_kwargs.update(kwargs)
# Disentangle ray batch
rays_o, rays_d = ray_batch
assert rays_o.shape == rays_d.shape
# Flatten ray batch
old_shape = rays_d.shape # [..., 3(+id)]
rays_o = torch.reshape(rays_o, [-1,rays_o.shape[-1]]).float()
rays_d = torch.reshape(rays_d, [-1,rays_d.shape[-1]]).float()
# Provide ray directions as input
if self.use_viewdirs:
viewdirs = rays_d
viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
viewdirs = torch.reshape(viewdirs, [-1, viewdirs.shape[-1]]).float()
# Disentangle bound batch
near, far = bound_batch
if isinstance(near, int) or isinstance(near, float):
near = near * torch.ones_like(rays_d[...,:1], dtype=torch.float)
if isinstance(far, int) or isinstance(far, float):
far = far * torch.ones_like(rays_d[...,:1], dtype=torch.float)
# Batchify rays
all_ret = {}
for i in range(0, rays_o.shape[0], self.chunk):
end = min(i+self.chunk, rays_o.shape[0])
chunk_o, chunk_d = rays_o[i:end], rays_d[i:end]
chunk_n, chunk_f = near[i:end], far[i:end]
chunk_v = viewdirs[i:end] if self.use_viewdirs else None
# Render function
ret = self.render_rays(chunk_o, chunk_d, chunk_n, chunk_f, viewdirs=chunk_v, **render_kwargs)
for k in ret:
if k not in all_ret:
all_ret[k] = []
all_ret[k].append(ret[k])
all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret}
# Unflatten
for k in all_ret:
k_sh = list(old_shape[:-1]) + list(all_ret[k].shape[1:])
all_ret[k] = torch.reshape(all_ret[k], k_sh) # [input_rays_shape, per_ray_output_shape]
return all_ret
| 42.051282 | 146 | 0.617805 | 1,113 | 8,200 | 4.339623 | 0.224618 | 0.016563 | 0.027329 | 0.016149 | 0.261077 | 0.166874 | 0.142443 | 0.142443 | 0.142443 | 0.128364 | 0 | 0.014232 | 0.280244 | 8,200 | 194 | 147 | 42.268041 | 0.804134 | 0.245976 | 0 | 0.163636 | 0 | 0 | 0.01889 | 0 | 0 | 0 | 0 | 0 | 0.009091 | 1 | 0.027273 | false | 0 | 0.236364 | 0 | 0.290909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
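A self-contained sketch of the ray-chunking pattern used in forward() above, which processes a large batch in fixed-size slices and concatenates the per-key outputs; the lambda is a toy stand-in for render_rays.

import torch

def batchify(fn, inputs, chunk):
    out = {}
    for i in range(0, inputs.shape[0], chunk):
        ret = fn(inputs[i:i + chunk])
        for k, v in ret.items():
            out.setdefault(k, []).append(v)
    return {k: torch.cat(v, 0) for k, v in out.items()}

rays = torch.randn(1000, 3)
res = batchify(lambda r: {'norm': r.norm(dim=-1)}, rays, chunk=256)
print(res['norm'].shape)  # torch.Size([1000])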
b4a65d97b1e73201ba7a61c0189b55eeb9aa09c5 | 1,970 | py | Python | code/data_wash.py | ylf2002/lol | 476bea227227434fc19c012047243afbeab9d099 | [
"MIT"
] | 1 | 2021-05-26T03:03:33.000Z | 2021-05-26T03:03:33.000Z | code/data_wash.py | ylf2002/lol | 476bea227227434fc19c012047243afbeab9d099 | [
"MIT"
] | null | null | null | code/data_wash.py | ylf2002/lol | 476bea227227434fc19c012047243afbeab9d099 | [
"MIT"
] | null | null | null | #######################################
# School of Software Technology #
# Dalian University of Technology #
# yang lifan #
# 2862506026@qq.com #
#######################################
import numpy as np
import pandas as pd
from ast import literal_eval
df = pd.read_excel(r"lol\data\first_lol_role_data.xls")
''' *—————— Method 1 for a hero with multiple roles: turn one column into many columns ——————* '''
temp_list = [literal_eval(x) for x in df["职业"].tolist()]  # safely parse the stringified role lists
tag_all = set([j for i in temp_list for j in i])
print(tag_all)
zeros_data = pd.DataFrame(np.zeros((df.shape[0], len(tag_all))), columns=list(tag_all))
for i in range(df.shape[0]):
zeros_data.loc[i, temp_list[i]] = 1
data = pd.concat([df, zeros_data], axis=1).drop(labels="职业", axis=1)
data.to_csv("lol\\data\\clean_lol_role_data.csv",index=False)
''' **—————— Method 2 for a hero with multiple roles: turn one row into many rows ——————** '''
df.head()
df['职业']=df['职业'].map(lambda x:x.split(','))
df_new=df.explode('职业')
# build the role-name -> numeric-code mapping
roles_mapping = {'战士':0 ,'法师':1 , '坦克':2 ,'刺客':3 ,'辅助':4 ,'ADC':5,
' 战士':0 ,' 法师':1 , ' 坦克':2 ,' 刺客':3 ,' 辅助':4 ,' ADC':5}
# strip characters left over from the list literal
df_new['职业'] = df_new['职业'].str.replace("'","")
df_new['职业'] = df_new['职业'].str.replace("[","")
df_new['职业'] = df_new['职业'].str.replace("]","")
# map role names to numeric codes
df_new['职业'] = df_new['职业'].map(roles_mapping)
df_new.to_csv("lol\\data\\clean_lol_role_data_length.csv",index=False)
''' ***—————— Data cleaning for machine-learning analysis ——————***'''
# print(df_new)
# drop columns that are not needed
df_new = df_new.drop(columns = ['编号'])
df_new = df_new.drop(columns = ['名称'])
df_new = df_new.drop(columns = ['英文名'])
df_new = df_new.drop(columns = ['中文名'])
df_new = df_new.drop(columns = ['点卷价格'])
df_new = df_new.drop(columns = ['蓝色精粹'])
df_new = df_new.drop(columns = ['周免'])
# build the numeric-code -> English-role-name mapping
roles_mapping = {0:'fighter' ,1:'mage' , 2:'tank' ,3:'assassin' ,4:'support' ,5:'marksman'}
# map codes back to English role names
df_new['职业'] = df_new['职业'].map(roles_mapping)
df_new.to_csv("lol\\data\\roles.csv", index=False, header=False)
| 35.178571 | 92 | 0.561421 | 310 | 1,970 | 3.516129 | 0.335484 | 0.12844 | 0.06422 | 0.06422 | 0.36422 | 0.36422 | 0.229358 | 0.229358 | 0.188991 | 0.188991 | 0 | 0.020171 | 0.169543 | 1,970 | 55 | 93 | 35.818182 | 0.621638 | 0.09797 | 0 | 0.064516 | 0 | 0 | 0.168683 | 0.071909 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.064516 | 0 | 0.064516 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
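A self-contained toy version of the split-and-explode step above; the column and values are invented for illustration.

import pandas as pd

toy = pd.DataFrame({'hero': ['A', 'B'], 'role': ['fighter,mage', 'tank']})
toy['role'] = toy['role'].str.split(',')
print(toy.explode('role'))  # one row per (hero, role) pair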
b4a68eef0211ea8367075b0a591513830e5f31fa | 1,116 | py | Python | development/models/layers/mesh_conv.py | atomicsulfate/meshcnn-4-cadseg | c0d91ec593293cb58eec422556d1322a3b4f6183 | [
"MIT"
] | 7 | 2021-04-07T06:31:58.000Z | 2022-01-27T09:49:51.000Z | development/models/layers/mesh_conv.py | atomicsulfate/meshcnn-4-cadseg | c0d91ec593293cb58eec422556d1322a3b4f6183 | [
"MIT"
] | null | null | null | development/models/layers/mesh_conv.py | atomicsulfate/meshcnn-4-cadseg | c0d91ec593293cb58eec422556d1322a3b4f6183 | [
"MIT"
] | 2 | 2021-05-19T03:39:04.000Z | 2021-08-12T08:20:19.000Z | import torch
from meshcnn.models.layers import mesh_conv
class MeshConv(mesh_conv.MeshConv):
def create_GeMM(self, x, Gi):
Gishape = Gi.shape
# pad the first row of every sample in batch with zeros
padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device)
# padding = padding.to(x.device)
x = torch.cat((padding, x), dim=2)
Gi = Gi + 1 # shift
# first flatten indices
Gi = self.flatten_gemm_inds(Gi)
Gi = Gi.view(-1).long()
#
odim = x.shape
x = x.permute(0, 2, 1).contiguous()
x = x.view(odim[0] * odim[2], odim[1])
x = torch.index_select(x, dim=0, index=Gi)
del Gi
x = x.view(Gishape[0], Gishape[1], Gishape[2], -1)
x = x.permute(0, 3, 1, 2)
# apply the symmetric functions for an equivariant conv
x[:, :, :, 1] += x[:, :, :, 3]
x[:, :, :, 2] += x[:, :, :, 4]
x[:, :, :, 3] = torch.abs(x[:, :, :, 1] - 2 * x[:, :, :, 3])
x[:, :, :, 4] = torch.abs(x[:, :, :, 2] - 2 * x[:, :, :, 4])
return x | 34.875 | 95 | 0.49552 | 163 | 1,116 | 3.349693 | 0.380368 | 0.014652 | 0.032967 | 0.03663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043025 | 0.312724 | 1,116 | 32 | 96 | 34.875 | 0.66884 | 0.149642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
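A hedged out-of-place restatement of the symmetric neighbor features computed in create_GeMM above: with the four ring neighbors a, b, c, d, the result is equivalent to [e, a+c, b+d, |a-c|, |b-d|]. The tensor shape is illustrative.

import torch

x = torch.randn(2, 8, 100, 5)  # [batch, channels, edges, 1 edge + 4 neighbors]
e, a, b, c, d = [x[..., i] for i in range(5)]
sym = torch.stack([e, a + c, b + d, (a - c).abs(), (b - d).abs()], dim=-1)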
b4a7840aed45d076df5f2476093d1ece342ead3d | 3,389 | py | Python | miossl/schedulers.py | miossl/miossl | cf1d6e5375803e46c74d361ae650403f70fc2b4c | [
"Apache-2.0"
] | null | null | null | miossl/schedulers.py | miossl/miossl | cf1d6e5375803e46c74d361ae650403f70fc2b4c | [
"Apache-2.0"
] | null | null | null | miossl/schedulers.py | miossl/miossl | cf1d6e5375803e46c74d361ae650403f70fc2b4c | [
"Apache-2.0"
] | null | null | null | import math
import warnings
from typing import List
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class LinearWarmupCosineAnnealingLR(_LRScheduler):
def __init__(
self,
optimizer: Optimizer,
warmup_epochs: int,
max_epochs: int,
warmup_start_lr: float = 0.0,
eta_min: float = 0.0,
last_epoch: int = -1,
        verbose: bool = True
) -> None:
"""
Args:
optimizer (Optimizer): Wrapped optimizer.
warmup_epochs (int): Maximum number of iterations for linear warmup
max_epochs (int): Maximum number of iterations
warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
"""
self.warmup_epochs = warmup_epochs
self.max_epochs = max_epochs
self.warmup_start_lr = warmup_start_lr
self.eta_min = eta_min
super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self) -> List[float]:
"""
Compute learning rate using chainable form of the scheduler
"""
if not self._get_lr_called_within_step:
warnings.warn(
"To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.",
UserWarning,
)
if self.last_epoch == 0:
return [self.warmup_start_lr] * len(self.base_lrs)
elif self.last_epoch < self.warmup_epochs:
return [
group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
]
elif self.last_epoch == self.warmup_epochs:
return self.base_lrs
elif (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0:
return [
group["lr"] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)
]
return [
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) /
(
1 +
math.cos(math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs))
) * (group["lr"] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups
]
def _get_closed_form_lr(self) -> List[float]:
"""
Called when epoch is passed as a param to the `step` function of the scheduler.
"""
if self.last_epoch < self.warmup_epochs:
return [
self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)
for base_lr in self.base_lrs
]
return [
self.eta_min + 0.5 * (base_lr - self.eta_min) *
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
for base_lr in self.base_lrs
]
| 39.406977 | 119 | 0.588374 | 424 | 3,389 | 4.457547 | 0.200472 | 0.100529 | 0.118519 | 0.060317 | 0.455556 | 0.421164 | 0.330688 | 0.293651 | 0.22963 | 0.22963 | 0 | 0.009466 | 0.314252 | 3,389 | 85 | 120 | 39.870588 | 0.803787 | 0.151372 | 0 | 0.147541 | 0 | 0 | 0.032632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0 | 0.081967 | 0 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4a7ca4b7618be6bc620a7d348026e8218320c74 | 4,305 | py | Python | compiler/lexer/lexer.py | KrishnaKanth1729/FireScript | 90c42163bfabff3b4e105f337f39da1f48dc4f3a | [
"MIT"
] | null | null | null | compiler/lexer/lexer.py | KrishnaKanth1729/FireScript | 90c42163bfabff3b4e105f337f39da1f48dc4f3a | [
"MIT"
] | null | null | null | compiler/lexer/lexer.py | KrishnaKanth1729/FireScript | 90c42163bfabff3b4e105f337f39da1f48dc4f3a | [
"MIT"
] | null | null | null | from typing import Tuple
from compiler.errors.errors import FEOLError, FParsingError, FEOFError
from compiler.lexer.tokens import *
from compiler.lexer.readers import Reader
class Lexer:
def __init__(self, reader: Reader):
self.reader = reader
def lex_identifier(self) -> str:
"""Grab an identifier"""
ident = self.reader.current_character()
while True:
self.reader.advance_pointer()
current = self.reader.current_character()
if not current.isalnum() or current == "EOF":
self.reader.retreat_pointer()
return ident
else:
ident += current
def lex_numeric(self) -> Tuple[str, str]:
"""Parse a number, and return the type (float/int), value"""
is_int = True
numeric = self.reader.current_character()
while True:
self.reader.advance_pointer()
current = self.reader.current_character()
if (
not (current.isdigit() or (current == "." and is_int))
or current == "EOF"
):
self.reader.retreat_pointer()
return ["float", "int"][is_int], numeric
else:
numeric += current
if current == ".":
is_int = False
def lex_string(self) -> str:
quote = self.reader.current_character()
string = ""
while True:
self.reader.advance_pointer()
current = self.reader.current_character()
            # raise an error if a newline/EOF appears before the closing quote
if current == quote:
return string
elif current == "EOF":
FEOFError(
self.reader.current_line_number(), "EOF while scanning string!"
).raise_error()
elif current == "\n":
FEOLError(
self.reader.current_line_number(), "EOL while scanning string!"
                ).raise_error()
else:
string += current
def next_token(self) -> Token:
"""Lex, and return the next token from a reader"""
while True:
self.reader.advance_pointer()
current = self.reader.current_character()
if current == "EOF":
return EOF("", self.reader.current_line_number())
elif current.isspace() or current == "\n":
continue
elif current.isalpha():
ident = self.lex_identifier()
if ident in ["true", "false"]:
return Bool(ident, self.reader.current_line_number())
return Identifier(ident, self.reader.current_line_number())
elif current.isdigit() or current == "-":
self.reader.advance_pointer()
if self.reader.current_character().isdigit() or current != "-":
self.reader.retreat_pointer()
numeric_type, value = self.lex_numeric()
return [Float, Integer][numeric_type == "int"](
value, self.reader.current_line_number()
)
if current in "\"'":
return String(self.lex_string(), self.reader.current_line_number())
elif current == ";":
# Comment
while self.reader.current_character() != "\n":
self.reader.advance_pointer()
elif current in "+-*/":
return Operator(current, self.reader.current_line_number())
elif current == "=":
return EqualTo(current, self.reader.current_line_number())
elif current in "()":
return Bracket(current, self.reader.current_line_number())
elif current in "[]":
return SquareBracket(current, self.reader.current_line_number())
elif current in "<>":
return AngleBracket(current, self.reader.current_line_number())
else:
line = self.reader.current_line_number()
FParsingError(
line,
f"Unexpected '{current}' on line {line}!",
).raise_error()
| 35.286885 | 83 | 0.523577 | 412 | 4,305 | 5.315534 | 0.191748 | 0.150685 | 0.170776 | 0.124658 | 0.483105 | 0.372146 | 0.339726 | 0.287671 | 0.228767 | 0.228767 | 0 | 0 | 0.371661 | 4,305 | 121 | 84 | 35.578512 | 0.809612 | 0.039024 | 0 | 0.247312 | 0 | 0 | 0.035194 | 0 | 0 | 0 | 0 | 0.008264 | 0 | 1 | 0.053763 | false | 0 | 0.043011 | 0 | 0.247312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
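A hedged toy Reader stand-in to exercise the lexer; the real Reader lives in compiler.lexer.readers, and the method names below only mirror the calls Lexer makes.

class StringReader:
    def __init__(self, text):
        self.text, self.pos = text, -1
    def advance_pointer(self):
        self.pos += 1
    def retreat_pointer(self):
        self.pos -= 1
    def current_character(self):
        return self.text[self.pos] if 0 <= self.pos < len(self.text) else "EOF"
    def current_line_number(self):
        return 1

lexer = Lexer(StringReader("foo = 12.5"))
# successive next_token() calls yield Identifier('foo'), EqualTo('='), Float('12.5'), EOF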
b4a89b047d9526903b1666439bd632bebb9235df | 36,093 | py | Python | src/aceinna/devices/openrtk/uart_provider.py | xhaidong/python-openimu | 9cd20ed61f62d0abd964e37700972bc97e3d0e8c | [
"Apache-2.0"
] | null | null | null | src/aceinna/devices/openrtk/uart_provider.py | xhaidong/python-openimu | 9cd20ed61f62d0abd964e37700972bc97e3d0e8c | [
"Apache-2.0"
] | null | null | null | src/aceinna/devices/openrtk/uart_provider.py | xhaidong/python-openimu | 9cd20ed61f62d0abd964e37700972bc97e3d0e8c | [
"Apache-2.0"
] | null | null | null | import os
import time
import json
import datetime
import threading
import math
import re
import collections
import serial
import serial.tools.list_ports
from .ntrip_client import NTRIPClient
from ...framework.utils import (
helper, resource
)
from ...framework.context import APP_CONTEXT
from ..base.provider_base import OpenDeviceBase
from ..configs.openrtk_predefine import (
APP_STR, get_app_names
)
from ..decorator import with_device_message
from .firmware_parser import parser as firmware_content_parser
from ...models import InternalCombineAppParseRule
from ..upgrade_workers import (
FirmwareUpgradeWorker,
SDKUpgradeWorker
)
from ..upgrade_center import UpgradeCenter
from ..parsers.open_field_parser import encode_value
from ...framework.utils.print import print_green
from ...framework.utils.print import print_yellow
from ...framework.utils.print import print_red
class Provider(OpenDeviceBase):
'''
OpenRTK UART provider
'''
def __init__(self, communicator, *args):
super(Provider, self).__init__(communicator)
self.type = 'RTK'
self.server_update_rate = 100
self.sky_data = []
self.pS_data = []
self.ps_dic = collections.OrderedDict()
self.inspva_flag = 0
self.bootloader_baudrate = 115200
self.app_config_folder = ''
self.device_info = None
self.app_info = None
self.parameters = None
self.setting_folder_path = None
self.data_folder = None
self.debug_serial_port = None
self.rtcm_serial_port = None
self.user_logf = None
self.debug_logf = None
self.rtcm_logf = None
self.debug_c_f = None
self.enable_data_log = False
self.is_app_matched = False
self.ntrip_client_enable = False
self.nmea_buffer = []
self.nmea_sync = 0
self.prepare_folders()
self.ntripClient = None
self.connected = True
def prepare_folders(self):
'''
Prepare folders for data storage and configuration
'''
executor_path = resource.get_executor_path()
setting_folder_name = 'setting'
config_file_name = 'openrtk.json'
data_folder_path = os.path.join(executor_path, 'data')
if not os.path.isdir(data_folder_path):
os.makedirs(data_folder_path)
self.data_folder = data_folder_path
# copy contents of app_config under executor path
self.setting_folder_path = os.path.join(
executor_path, setting_folder_name, 'openrtk')
for app_name in get_app_names():
app_name_path = os.path.join(self.setting_folder_path, app_name)
app_name_config_path = os.path.join(
app_name_path, config_file_name)
if not os.path.isfile(app_name_config_path):
if not os.path.isdir(app_name_path):
os.makedirs(app_name_path)
app_config_content = resource.get_content_from_bundle(
setting_folder_name, os.path.join('openrtk', app_name, config_file_name))
if app_config_content is None:
continue
with open(app_name_config_path, "wb") as code:
code.write(app_config_content)
def bind_device_info(self, device_access, device_info, app_info):
self._build_device_info(device_info)
self._build_app_info(app_info)
self.connected = True
port_name = device_access.port
return '# Connected {0} with UART on {1} #\nDevice:{2} \nFirmware:{3}'\
.format('OpenRTK', port_name, device_info, app_info)
def _build_device_info(self, text):
'''
Build device info
'''
split_text = [x for x in text.split(' ') if x != '']
sn = split_text[4]
# remove the prefix of SN
if sn.find('SN:') == 0:
sn = sn[3:]
self.device_info = {
'name': split_text[0],
'imu': split_text[1],
'pn': split_text[2],
'firmware_version': split_text[3],
'sn': sn
}
def _build_app_info(self, text):
'''
Build app info
'''
app_version = text
split_text = app_version.split(' ')
app_name = next(
(item for item in APP_STR if item in split_text), None)
if not app_name:
app_name = 'INS'
self.is_app_matched = False
else:
self.is_app_matched = True
self.app_info = {
'app_name': app_name,
'version': text
}
def load_properties(self):
# Load config from user working path
local_config_file_path = os.path.join(os.getcwd(), 'openrtk.json')
if os.path.isfile(local_config_file_path):
with open(local_config_file_path) as json_data:
self.properties = json.load(json_data)
return
# Load the openimu.json based on its app
app_name = self.app_info['app_name']
app_file_path = os.path.join(
self.setting_folder_path, app_name, 'openrtk.json')
with open(app_file_path) as json_data:
self.properties = json.load(json_data)
def ntrip_client_thread(self):
self.ntripClient = NTRIPClient(self.properties, self.communicator)
self.ntripClient.run()
def build_connected_serial_port_info(self):
if not self.communicator.serial_port:
return None, None
user_port = self.communicator.serial_port.port
user_port_num = ''
port_name = ''
for i in range(len(user_port)-1, -1, -1):
if (user_port[i] >= '0' and user_port[i] <= '9'):
user_port_num = user_port[i] + user_port_num
else:
port_name = user_port[:i+1]
break
return user_port_num, port_name
def after_setup(self):
set_user_para = self.cli_options and self.cli_options.set_user_para
self.ntrip_client_enable = self.cli_options and self.cli_options.ntrip_client
# with_raw_log = self.cli_options and self.cli_options.with_raw_log
if set_user_para:
result = self.set_params(
self.properties["initial"]["userParameters"])
if (result['packetType'] == 'success'):
self.save_config()
if self.ntrip_client_enable:
t = threading.Thread(target=self.ntrip_client_thread)
t.start()
# if with_raw_log:
connection = None
debug_port = ''
rtcm_port = ''
try:
if (self.properties["initial"]["useDefaultUart"]):
user_port_num, port_name = self.build_connected_serial_port_info()
if not user_port_num or not port_name:
return False
debug_port = port_name + str(int(user_port_num) + 2)
rtcm_port = port_name + str(int(user_port_num) + 1)
else:
for x in self.properties["initial"]["uart"]:
if x['enable'] == 1:
if x['name'] == 'DEBUG':
debug_port = x["value"]
elif x['name'] == 'GNSS':
rtcm_port = x["value"]
if self.data_folder is not None:
dir_time = time.strftime("%Y%m%d_%H%M%S", time.localtime())
file_time = time.strftime(
"%Y_%m_%d_%H_%M_%S", time.localtime())
file_name = self.data_folder + '/' + 'openrtk_log_' + dir_time
os.mkdir(file_name)
self.user_logf = open(
file_name + '/' + 'user_' + file_time + '.bin', "wb")
if rtcm_port != '':
print_green('OpenRTK log GNSS UART {0}'.format(rtcm_port))
self.rtcm_serial_port = serial.Serial(
rtcm_port, '460800', timeout=0.1)
if self.rtcm_serial_port.isOpen():
self.rtcm_logf = open(
file_name + '/' + 'rtcm_rover_' + file_time + '.bin', "wb")
t = threading.Thread(
target=self.thread_rtcm_port_receiver, args=(file_name,))
t.start()
if debug_port != '':
print_green('OpenRTK log DEBUG UART {0}'.format(debug_port))
self.debug_serial_port = serial.Serial(
debug_port, '460800', timeout=0.1)
if self.debug_serial_port.isOpen():
if self.app_info['app_name'] == 'RAWDATA':
self.debug_logf = open(
file_name + '/' + 'rtcm_base_' + file_time + '.bin', "wb")
elif self.app_info['app_name'] == 'RTK':
self.debug_logf = open(
file_name + '/' + 'rtcm_base_' + file_time + '.bin', "wb")
else:
self.debug_logf = open(
file_name + '/' + 'rtcm_base_' + file_time + '.bin', "wb")
t = threading.Thread(
target=self.thread_debug_port_receiver, args=(file_name,))
t.start()
except Exception as e:
if self.debug_serial_port is not None:
if self.debug_serial_port.isOpen():
self.debug_serial_port.close()
if self.rtcm_serial_port is not None:
if self.rtcm_serial_port.isOpen():
self.rtcm_serial_port.close()
self.debug_serial_port = None
self.rtcm_serial_port = None
print_red('Can not log GNSS UART or DEBUG UART, pls check uart driver and connection!')
return False
def after_bootloader_switch(self):
self.communicator.serial_port.baudrate = self.bootloader_baudrate
def nmea_checksum(self, data):
data = data.replace("\r", "").replace("\n", "").replace("$", "")
        nmeadata, cksum = re.split(r'\*', data)
calc_cksum = 0
for s in nmeadata:
calc_cksum ^= ord(s)
return int(cksum, 16), calc_cksum
def on_read_raw(self, data):
for bytedata in data:
if bytedata == 0x24:
self.nmea_buffer = []
self.nmea_sync = 0
self.nmea_buffer.append(chr(bytedata))
else:
self.nmea_buffer.append(chr(bytedata))
if self.nmea_sync == 0:
if bytedata == 0x0D:
self.nmea_sync = 1
elif self.nmea_sync == 1:
if bytedata == 0x0A:
try:
str_nmea = ''.join(self.nmea_buffer)
cksum, calc_cksum = self.nmea_checksum(
str_nmea)
if cksum == calc_cksum:
if str_nmea.find("$GPGGA") != -1:
#print()
if self.ntrip_client_enable and self.ntripClient != None:
self.ntripClient.send(str_nmea)
#print(str_nmea, end='')
APP_CONTEXT.get_print_logger().info(str_nmea.replace('\r\n',''))
# else:
# print("nmea checksum wrong {0} {1}".format(cksum, calc_cksum))
except Exception as e:
# print('NMEA fault:{0}'.format(e))
pass
self.nmea_buffer = []
self.nmea_sync = 0
if self.user_logf is not None:
self.user_logf.write(data)
def thread_debug_port_receiver(self, *args, **kwargs):
if self.debug_logf is None:
return
is_get_configuration = 0
file_name = args[0]
self.debug_c_f = open(file_name + '/' + 'configuration.json', "w")
while True:
if is_get_configuration:
break
cmd_configuration = 'get configuration\r\n'
self.debug_serial_port.write(cmd_configuration.encode())
try_times = 20
for i in range(try_times):
data_buffer = self.debug_serial_port.read(700)
if len(data_buffer):
try:
#print('len = {0}'.format(len(data_buffer)))
str_data = bytes.decode(data_buffer)
# print('{0}'.format(str_data))
json_data = json.loads(str_data)
for key in json_data.keys():
if key == 'openrtk configuration':
APP_CONTEXT.get_print_logger().info('{0}'.format(json_data))
if self.debug_c_f:
self.debug_c_f.write(str_data)
self.debug_c_f.close()
is_get_configuration = 1
if is_get_configuration:
break
except Exception as e:
#print('DEBUG PORT Thread:json error:', e)
# the json will not be completed
pass
cmd_log = 'log debug on\r\n'
self.debug_serial_port.write(cmd_log.encode())
# log data
while True:
try:
data = bytearray(self.debug_serial_port.read_all())
except Exception as e:
print_red('DEBUG PORT Thread error: {0}'.format(e))
return # exit thread receiver
if len(data):
self.debug_logf.write(data)
else:
time.sleep(0.001)
def thread_rtcm_port_receiver(self, *args, **kwargs):
if self.rtcm_logf is None:
return
while True:
try:
data = bytearray(self.rtcm_serial_port.read_all())
except Exception as e:
print_red('RTCM PORT Thread error: {0}'.format(e))
return # exit thread receiver
if len(data):
self.rtcm_logf.write(data)
else:
time.sleep(0.001)
def on_receive_output_packet(self, packet_type, data, error=None):
'''
Listener for getting output packet
'''
# $GPGGA,080319.00,3130.4858508,N,12024.0998832,E,4,25,0.5,12.459,M,0.000,M,2.0,*46
if packet_type == 'gN':
if self.ntrip_client_enable:
# $GPGGA
gpgga = '$GPGGA'
# time
timeOfWeek = float(data['GPS_TimeofWeek'])
dsec = int(timeOfWeek)
msec = timeOfWeek - dsec
sec = dsec % 86400
hour = int(sec / 3600)
minute = int(sec % 3600 / 60)
second = sec % 60
gga_time = format(hour*10000 + minute*100 +
second + msec - 18, '09.2f')
gpgga = gpgga + ',' + gga_time
# latitude
latitude = float(data['latitude']) * 180 / 2147483648.0
if latitude >= 0:
latflag = 'N'
else:
latflag = 'S'
latitude = math.fabs(latitude)
lat_d = int(latitude)
lat_m = (latitude-lat_d) * 60
lat_dm = format(lat_d*100 + lat_m, '012.7f')
gpgga = gpgga + ',' + lat_dm + ',' + latflag
# longitude
longitude = float(data['longitude']) * 180 / 2147483648.0
if longitude >= 0:
lonflag = 'E'
else:
lonflag = 'W'
longitude = math.fabs(longitude)
lon_d = int(longitude)
lon_m = (longitude-lon_d) * 60
lon_dm = format(lon_d*100 + lon_m, '013.7f')
gpgga = gpgga + ',' + lon_dm + ',' + lonflag
# positionMode
gpgga = gpgga + ',' + str(data['positionMode'])
# svs
gpgga = gpgga + ',' + str(data['numberOfSVs'])
# hop
gpgga = gpgga + ',' + format(float(data['hdop']), '03.1f')
# height
gpgga = gpgga + ',' + \
format(float(data['height']), '06.3f') + ',M'
            # geoidal separation (fixed at 0.000 below)
gpgga = gpgga + ',0.000,M'
# diffage
gpgga = gpgga + ',' + \
format(float(data['diffage']), '03.1f') + ','
# ckm
checksum = 0
for i in range(1, len(gpgga)):
checksum = checksum ^ ord(gpgga[i])
str_checksum = hex(checksum)
if str_checksum.startswith("0x"):
str_checksum = str_checksum[2:]
gpgga = gpgga + '*' + str_checksum + '\r\n'
APP_CONTEXT.get_print_logger().info(gpgga)
if self.ntripClient != None:
self.ntripClient.send(gpgga)
return
elif packet_type == 'pS':
try:
if data['latitude'] != 0.0 and data['longitude'] != 0.0:
if self.pS_data:
if self.pS_data['GPS_Week'] == data['GPS_Week']:
if data['GPS_TimeofWeek'] - self.pS_data['GPS_TimeofWeek'] >= 0.2:
self.add_output_packet('stream', 'pos', data)
self.pS_data = data
if data['insStatus'] >= 3 and data['insStatus'] <= 5:
ins_status = 'INS_INACTIVE'
if data['insStatus'] == 3:
ins_status = 'INS_SOLUTION_GOOD'
elif data['insStatus'] == 4:
ins_status = 'INS_SOLUTION_FREE'
elif data['insStatus'] == 5:
ins_status = 'INS_ALIGNMENT_COMPLETE'
ins_pos_type = 'INS_INVALID'
if data['insPositionType'] == 1:
ins_pos_type = 'INS_SPP'
elif data['insPositionType'] == 4:
ins_pos_type = 'INS_RTKFIXED'
elif data['insPositionType'] == 5:
ins_pos_type = 'INS_RTKFLOAT'
inspva = '#INSPVA,%s,%10.2f, %s, %s,%12.8f,%13.8f,%8.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f' %\
(data['GPS_Week'], data['GPS_TimeofWeek'], ins_status, ins_pos_type,
data['latitude'], data['longitude'], data['height'],
data['velocityNorth'], data['velocityEast'], data['velocityUp'],
data['roll'], data['pitch'], data['heading'])
APP_CONTEXT.get_print_logger().info(inspva)
else:
self.add_output_packet('stream', 'pos', data)
self.pS_data = data
else:
self.add_output_packet('stream', 'pos', data)
self.pS_data = data
except Exception as e:
pass
elif packet_type == 'sK':
if self.sky_data:
if self.sky_data[0]['timeOfWeek'] == data[0]['timeOfWeek']:
self.sky_data.extend(data)
else:
self.add_output_packet('stream', 'skyview', self.sky_data)
self.add_output_packet('stream', 'snr', self.sky_data)
self.sky_data = []
self.sky_data.extend(data)
else:
self.sky_data.extend(data)
elif packet_type == 'g1':
self.ps_dic['positionMode'] = data['position_type']
self.ps_dic['numberOfSVs'] = data['number_of_satellites_in_solution']
self.ps_dic['hdop'] = data['hdop']
self.ps_dic['age'] = data['diffage']
if self.inspva_flag == 0:
self.ps_dic['GPS_Week'] = data['GPS_Week']
self.ps_dic['GPS_TimeofWeek'] = data['GPS_TimeOfWeek'] * 0.001
self.ps_dic['latitude'] = data['latitude']
self.ps_dic['longitude'] = data['longitude']
self.ps_dic['height'] = data['height']
self.ps_dic['velocityMode'] = 1
self.ps_dic['velocityNorth'] = data['north_vel']
self.ps_dic['velocityEast'] = data['east_vel']
self.ps_dic['velocityUp'] = data['up_vel']
self.ps_dic['latitude_std'] = data['latitude_standard_deviation']
self.ps_dic['longitude_std'] = data['longitude_standard_deviation']
self.ps_dic['height_std'] = data['height_standard_deviation']
self.ps_dic['north_vel_std'] = data['north_vel_standard_deviation']
self.ps_dic['east_vel_std'] = data['east_vel_standard_deviation']
self.ps_dic['up_vel_std'] = data['up_vel_standard_deviation']
self.add_output_packet('stream', 'pos', self.ps_dic)
elif packet_type == 'i1':
self.inspva_flag = 1
if data['GPS_TimeOfWeek'] % 200 == 0:
self.ps_dic['GPS_Week'] = data['GPS_Week']
self.ps_dic['GPS_TimeofWeek'] = data['GPS_TimeOfWeek'] * 0.001
self.ps_dic['latitude'] = data['latitude']
self.ps_dic['longitude'] = data['longitude']
self.ps_dic['height'] = data['height']
if data['ins_position_type'] != 1 and data['ins_position_type'] != 4 and data['ins_position_type'] != 5:
self.ps_dic['velocityMode'] = 2
else:
self.ps_dic['velocityMode'] = 1
self.ps_dic['insStatus'] = data['ins_status']
self.ps_dic['insPositionType'] = data['ins_position_type']
self.ps_dic['velocityNorth'] = data['north_velocity']
self.ps_dic['velocityEast'] = data['east_velocity']
self.ps_dic['velocityUp'] = data['up_velocity']
self.ps_dic['roll'] = data['roll']
self.ps_dic['pitch'] = data['pitch']
self.ps_dic['heading'] = data['heading']
self.ps_dic['latitude_std'] = data['latitude_std']
self.ps_dic['longitude_std'] = data['longitude_std']
self.ps_dic['height_std'] = data['height_std']
self.ps_dic['north_vel_std'] = data['north_velocity_std']
self.ps_dic['east_vel_std'] = data['east_velocity_std']
self.ps_dic['up_vel_std'] = data['up_velocity_std']
self.ps_dic['roll_std'] = data['roll_std']
self.ps_dic['pitch_std'] = data['pitch_std']
self.ps_dic['heading_std'] = data['heading_std']
self.add_output_packet('stream', 'pos', self.ps_dic)
elif packet_type == 'y1':
if self.sky_data:
if self.sky_data[0]['GPS_TimeOfWeek'] == data[0]['GPS_TimeOfWeek']:
self.sky_data.extend(data)
else:
self.add_output_packet('stream', 'skyview', self.sky_data)
self.add_output_packet('stream', 'snr', self.sky_data)
self.sky_data = []
self.sky_data.extend(data)
else:
self.sky_data.extend(data)
else:
output_packet_config = next(
(x for x in self.properties['userMessages']['outputPackets']
if x['name'] == packet_type), None)
if output_packet_config and output_packet_config.__contains__('active') \
and output_packet_config['active']:
timeOfWeek = int(data['GPS_TimeOfWeek']) % 60480000
data['GPS_TimeOfWeek'] = timeOfWeek / 1000
self.add_output_packet('stream', 'imu', data)
def do_write_firmware(self, firmware_content):
rules = [
InternalCombineAppParseRule('rtk', 'rtk_start:', 4),
InternalCombineAppParseRule('sdk', 'sdk_start:', 4),
]
parsed_content = firmware_content_parser(firmware_content, rules)
sdk_port = ''
if (self.properties["initial"]["useDefaultUart"]):
user_port_num, port_name = self.build_connected_serial_port_info()
sdk_port = port_name + str(int(user_port_num) + 3)
else:
for x in self.properties["initial"]["uart"]:
if x['enable'] == 1:
if x['name'] == 'DEBUG':
debug_port = x["value"]
elif x['name'] == 'GNSS':
rtcm_port = x["value"]
elif x['name'] == 'SDK':
sdk_port = x["value"]
sdk_uart = serial.Serial(sdk_port, 115200, timeout=0.1)
if not sdk_uart.isOpen():
raise Exception('Cannot open SDK upgrade port')
upgrade_center = UpgradeCenter()
upgrade_center.register(
FirmwareUpgradeWorker(self.communicator, parsed_content['rtk']))
upgrade_center.register(
SDKUpgradeWorker(sdk_uart, parsed_content['sdk']))
upgrade_center.on('progress', self.handle_upgrade_process)
upgrade_center.on('error', self.handle_upgrade_error)
upgrade_center.on('finish', self.handle_upgrade_complete)
upgrade_center.start()
return upgrade_center.total
def get_device_connection_info(self):
return {
'modelName': self.device_info['name'],
'deviceType': self.type,
'serialNumber': self.device_info['sn'],
'partNumber': self.device_info['pn'],
'firmware': self.device_info['firmware_version']
}
# command list
def server_status(self, *args): # pylint: disable=invalid-name
'''
Get server connection status
'''
return {
'packetType': 'ping',
'data': {'status': '1'}
}
def get_device_info(self, *args): # pylint: disable=invalid-name
'''
Get device information
'''
return {
'packetType': 'deviceInfo',
'data': [
{'name': 'Product Name', 'value': self.device_info['name']},
{'name': 'IMU', 'value': self.device_info['imu']},
{'name': 'PN', 'value': self.device_info['pn']},
{'name': 'Firmware Version',
'value': self.device_info['firmware_version']},
{'name': 'SN', 'value': self.device_info['sn']},
{'name': 'App Version', 'value': self.app_info['version']}
]
}
def get_log_info(self):
'''
Build information for log
'''
return {
"type": self.type,
"model": self.device_info['name'],
"logInfo": {
"pn": self.device_info['pn'],
"sn": self.device_info['sn'],
"rtkProperties": json.dumps(self.properties)
}
}
def get_conf(self, *args): # pylint: disable=unused-argument
'''
Get json configuration
'''
return {
'packetType': 'conf',
'data': {
'outputs': self.properties['userMessages']['outputPackets'],
'inputParams': self.properties['userConfiguration']
}
}
@with_device_message
def get_params(self, *args): # pylint: disable=unused-argument
'''
Get all parameters
'''
has_error = False
parameter_values = []
if self.app_info['app_name'] == 'INS':
conf_parameters = self.properties['userConfiguration']
conf_parameters_len = len(conf_parameters)-1
step = 10
for i in range(2, conf_parameters_len, step):
start_byte = i
end_byte = i+step-1 if i+step < conf_parameters_len else conf_parameters_len
time.sleep(0.1)
command_line = helper.build_packet(
'gB', [start_byte, end_byte])
result = yield self._message_center.build(command=command_line, timeout=10)
if result['error']:
has_error = True
break
parameter_values.extend(result['data'])
else:
command_line = helper.build_input_packet('gA')
result = yield self._message_center.build(command=command_line, timeout=3)
if result['error']:
has_error = True
parameter_values = result['data']
if not has_error:
self.parameters = parameter_values
yield {
'packetType': 'inputParams',
'data': parameter_values
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def get_param(self, params, *args): # pylint: disable=unused-argument
'''
Update paramter value
'''
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=params['paramId'])
# self.communicator.write(command_line)
# result = self.get_input_result('gP', timeout=1)
result = yield self._message_center.build(command=command_line)
data = result['data']
error = result['error']
if error:
yield {
'packetType': 'error',
'data': 'No Response'
}
if data:
self.parameters = data
yield {
'packetType': 'inputParam',
'data': data
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def set_params(self, params, *args): # pylint: disable=unused-argument
'''
Update paramters value
'''
input_parameters = self.properties['userConfiguration']
grouped_parameters = {}
for parameter in params:
exist_parameter = next(
(x for x in input_parameters if x['paramId'] == parameter['paramId']), None)
if exist_parameter:
has_group = grouped_parameters.__contains__(
exist_parameter['category'])
if not has_group:
grouped_parameters[exist_parameter['category']] = []
current_group = grouped_parameters[exist_parameter['category']]
current_group.append(
{'paramId': parameter['paramId'], 'value': parameter['value'], 'type': exist_parameter['type']})
for group in grouped_parameters.values():
message_bytes = []
for parameter in group:
message_bytes.extend(
encode_value('int8', parameter['paramId'])
)
message_bytes.extend(
encode_value(parameter['type'], parameter['value'])
)
# print('parameter type {0}, value {1}'.format(
# parameter['type'], parameter['value']))
# result = self.set_param(parameter)
command_line = helper.build_packet(
'uB', message_bytes)
# for s in command_line:
# print(hex(s))
result = yield self._message_center.build(command=command_line)
packet_type = result['packet_type']
data = result['data']
if packet_type == 'error':
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
if data > 0:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
yield {
'packetType': 'success',
'data': {
'error': 0
}
}
@with_device_message
def set_param(self, params, *args): # pylint: disable=unused-argument
'''
Update paramter value
'''
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=params['paramId'], value=params['value'])
# self.communicator.write(command_line)
# result = self.get_input_result('uP', timeout=1)
result = yield self._message_center.build(command=command_line)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
yield {
'packetType': 'success',
'data': {
'error': data
}
}
@with_device_message
def save_config(self, *args): # pylint: disable=unused-argument
'''
Save configuration
'''
command_line = helper.build_input_packet('sC')
# self.communicator.write(command_line)
# result = self.get_input_result('sC', timeout=2)
result = yield self._message_center.build(command=command_line, timeout=2)
data = result['data']
error = result['error']
if data:
yield {
'packetType': 'success',
'data': error
}
yield {
'packetType': 'success',
'data': error
}
@with_device_message
def reset_params(self, params, *args): # pylint: disable=unused-argument
'''
Reset params to default
'''
command_line = helper.build_input_packet('rD')
result = yield self._message_center.build(command=command_line, timeout=2)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': error
}
}
yield {
'packetType': 'success',
'data': data
}
def upgrade_framework(self, params, *args): # pylint: disable=unused-argument
'''
Upgrade framework
'''
file = ''
if isinstance(params, str):
file = params
if isinstance(params, dict):
file = params['file']
# start a thread to do upgrade
if not self.is_upgrading:
self.is_upgrading = True
self._message_center.pause()
if self._logger is not None:
self._logger.stop_user_log()
thread = threading.Thread(
target=self.thread_do_upgrade_framework, args=(file,))
thread.start()
print("Upgrade OpenRTK firmware started at:[{0}].".format(
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
return {
'packetType': 'success'
}
| 38.851453 | 131 | 0.506469 | 3,750 | 36,093 | 4.631467 | 0.1216 | 0.01831 | 0.023837 | 0.012034 | 0.419219 | 0.345636 | 0.278328 | 0.224551 | 0.184132 | 0.174286 | 0 | 0.015402 | 0.382983 | 36,093 | 928 | 132 | 38.893319 | 0.764481 | 0.052198 | 0 | 0.31479 | 0 | 0.002714 | 0.111492 | 0.008048 | 0 | 0 | 0.000355 | 0 | 0 | 1 | 0.037992 | false | 0.004071 | 0.032564 | 0.001357 | 0.097693 | 0.017639 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
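A standalone sketch of the NMEA checksum rule used by nmea_checksum() and the GGA builder above: XOR every byte between '$' and '*', rendered as two hex digits.

def nmea_cksum(body):
    # body is the sentence text between '$' and '*'
    c = 0
    for ch in body:
        c ^= ord(ch)
    return format(c, '02X')

print(nmea_cksum('GPGGA,080319.00,3130.4858508,N,12024.0998832,E,4,25,0.5,12.459,M,0.000,M,2.0,'))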
b4ab5336f243e3a61cf46c03fff4d5667a06f2b8 | 2,250 | py | Python | src/77. Combinations.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/77. Combinations.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | src/77. Combinations.py | wisesky/LeetCode-Practice | 65549f72c565d9f11641c86d6cef9c7988805817 | [
"MIT"
] | null | null | null | from typing import List
from itertools import product
import random
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
        # s1 imitate std practice
# nums = list(range(1,n+1))
# res = self.combinations(nums, k)
# return list(res)
# s2 DFS
# nums = list(range(1, n+1))
# marked = ['0']*n
# res = []
# flag = False
# if k > n//2:
# k = n-k
# flag = True
# self.myCombinations(nums, k, marked, res)
# result = []
# for r in res:
# tmp = []
# for i, mark in enumerate(r):
# if not flag:
# if mark == '1':
# tmp.append(nums[i])
# else:
# if mark == '0':
# tmp.append(nums[i])
# result.append(tmp)
# return result
# s3 optim DFS
nums = list(range(1,n+1))
res = []
self.myCombinations_1(nums, k, 0, [],res)
return res
def combinations(self, nums, k):
n = len(nums)
for indices in product(range(n), repeat=k):
if len(set(indices)) == len(indices) and sorted(indices) == list(indices):
yield [nums[i] for i in indices]
# DFS
def myCombinations(self, nums, k, marked, res):
if k == 0:
str_marked = ''.join(marked)
if str_marked not in res:
res.append(str_marked)
return
for i, num in enumerate(nums):
if marked[i] == '0':
marked[i] = '1'
self.myCombinations(nums, k-1, marked, res)
marked[i] = '0'
return
# optim DFS
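    # the loop bound len(nums) - (k - len(r)) + 1 below prunes branches that
    # cannot possibly collect k numbers from the remaining candidates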
def myCombinations_1(self, nums, k, start, r,res):
if len(r) == k :
r_copy = r.copy()
res.append(r_copy)
return
for i in range(start, len(nums) - (k-len(r)) + 1):
r.append(nums[i])
self.myCombinations_1(nums, k, i+1, r, res)
r.pop()
return
if __name__ == "__main__":
so = Solution()
n = 9
k = 8
res = so.combine(n, k)
for r in res:
print(r) | 28.125 | 86 | 0.450667 | 278 | 2,250 | 3.589928 | 0.241007 | 0.04509 | 0.039078 | 0.042084 | 0.112224 | 0.068136 | 0.068136 | 0.046092 | 0 | 0 | 0 | 0.019939 | 0.420444 | 2,250 | 80 | 87 | 28.125 | 0.745399 | 0.246667 | 0 | 0.093023 | 0 | 0 | 0.006599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.069767 | 0 | 0.302326 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4ae6154e286558196a78ebd191992f0143763b1 | 4,082 | py | Python | src/object_detector.py | jaswanthbjk/BCDC-Net | a83b7bc31e53ab89d9025fd6b7e3d45c9dbd4f4f | [
"MIT"
] | null | null | null | src/object_detector.py | jaswanthbjk/BCDC-Net | a83b7bc31e53ab89d9025fd6b7e3d45c9dbd4f4f | [
"MIT"
] | null | null | null | src/object_detector.py | jaswanthbjk/BCDC-Net | a83b7bc31e53ab89d9025fd6b7e3d45c9dbd4f4f | [
"MIT"
] | null | null | null | import cv2
import numpy as np
class Image_Detector:
""" Detector class for performing
1) resizing to required image size
2) Perform inference on a new image using the trained network
Args:
label_dict: Dictionary of class_id mapped to class_names
frozen_graph: Tensorflow frozen graph of trained detection model
        pbtxt: configuration file of the chosen model
Outputs:
result: list of lists, every list representing a bounding box for the
caps present in the image
final_image: Image with bounding boxes drawn on it """
def __init__(self, label_dict: dict, frozen_graph: str, pbtxt: str):
self.image = None
self.detector_path = None
self.save_output = True
        self.Threshold = 0.5  # detection scores from cv2.dnn are fractions in [0, 1], not percentages
self.label_dict = label_dict
self.Net = cv2.dnn.readNetFromTensorflow(model=frozen_graph,
config=pbtxt)
def img_resizer(self, image, op_size):
"""Resize the input Image to required size
Args:
op_size: size to which the input has to be resized
output:
resized Image: Image after resizing """
self.resize = op_size
self.resized_image = cv2.resize(image, self.resize,
interpolation=cv2.INTER_AREA)
return self.resized_image
def detect_from_image(self, image):
""" Perform Inferencing
Args:
image: Input to the detection model
outputs:
detection: All the bounding boxes inferenced by the model"""
self.image = image
        self.image_h, self.image_w = self.image.shape[:2]  # works for both grayscale and colour frames
self.Net.setInput(cv2.dnn.blobFromImage(self.resized_image,
size=self.resize, swapRB=True,
crop=True))
self.detections = self.Net.forward()
def provide_output(self):
"""Re-arrange the model outputs into understandable values"""
self.result_array = list()
for detection in self.detections[0, 0, :, :]:
score = float(detection[2])
if score > self.Threshold:
x_min = int(detection[3] * self.image_w)
y_min = int(detection[4] * self.image_h)
x_max = int(detection[5] * self.image_w)
y_max = int(detection[6] * self.image_h)
cls_label = self.label_dict[int(detection[1])]
single_result = [x_min, y_min, x_max, y_max, cls_label,
float(score)]
self.result_array.append(single_result)
return self.result_array
def show_save_image(self, save_output: bool, output_dir: str):
""" To display the image after bounding box marking
Args:
save_output: Flag for saving the generated image or not
output_dir: Path in which the output should be saved
"""
        final_img = self.image.copy()  # draw on a copy of the frame, not on the result list
        if not self.result_array:
            return final_img
else:
for image_id in range(len(self.result_array)):
x_min = self.result_array[image_id][0]
y_min = self.result_array[image_id][1]
x_max = self.result_array[image_id][2]
y_max = self.result_array[image_id][3]
cls = str(self.result_array[image_id][4])
score = str(np.round(self.result_array[image_id][-1], 2))
text = cls + ": " + score
cv2.rectangle(final_img, (x_min, y_min), (x_max, y_max),
(0, 255, 0), 1)
cv2.rectangle(final_img, (x_min, y_min - 20), (x_min, y_min),
(255, 255, 255), -1)
cv2.putText(final_img, text, (x_min + 5, y_min - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
if save_output:
cv2.imwrite(output_dir, final_img)
return final_img
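# Minimal usage sketch (the model/label files below are placeholders, not
# shipped with this module):
#   detector = Image_Detector({1: 'cap'}, 'frozen_inference_graph.pb',
#                             'graph.pbtxt')
#   frame = cv2.imread('sample.jpg')
#   detector.img_resizer(frame, (300, 300))
#   detector.detect_from_image(frame)
#   boxes = detector.provide_output()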
| 41.232323 | 78 | 0.566144 | 514 | 4,082 | 4.307393 | 0.29572 | 0.054201 | 0.081301 | 0.054201 | 0.102078 | 0.092141 | 0.036134 | 0.036134 | 0 | 0 | 0 | 0.021477 | 0.349829 | 4,082 | 98 | 79 | 41.653061 | 0.812735 | 0.246203 | 0 | 0.033898 | 0 | 0 | 0.000688 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0.033898 | 0 | 0.20339 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4b1e6c3082e6ef4193d783b860478fded91a496 | 1,548 | py | Python | src/test/rdflib/__init__.py | sffjunkie/mogul | 1634fd3e630dd27e7d875cbd2f053e97eaa1da6f | [
"Apache-2.0"
] | null | null | null | src/test/rdflib/__init__.py | sffjunkie/mogul | 1634fd3e630dd27e7d875cbd2f053e97eaa1da6f | [
"Apache-2.0"
] | null | null | null | src/test/rdflib/__init__.py | sffjunkie/mogul | 1634fd3e630dd27e7d875cbd2f053e97eaa1da6f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2009-2014 Simon Kennedy <sffjunkie+code@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from lxml import etree
import rdflib
from mogul.misc.rdfetree import ETreeInputSource
NSMAP = {
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
}
def filename(name):
return os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data', name)
def test_Empty_RDF():
tree = etree.parse(filename('empty.xmp'))
root = tree.xpath('rdf:RDF', namespaces=NSMAP)
g = rdflib.Graph()
g.parse(data=etree.tostring(root[0]), format='application/rdf+xml')
assert g is not None
def test_ETree_Plugin():
rdflib.plugin.register('etree', rdflib.parser.Parser, 'mogul.misc.rdfetree', 'RDFETreeParser')
tree = etree.parse(filename('empty.xmp'))
root = tree.xpath('rdf:RDF', namespaces=NSMAP)
source = ETreeInputSource(root)
g = rdflib.Graph()
g.parse(source=source, format='etree')
if __name__ == '__main__':
test_Empty_RDF()
test_ETree_Plugin()
| 30.96 | 98 | 0.709948 | 224 | 1,548 | 4.816964 | 0.53125 | 0.055607 | 0.024096 | 0.029657 | 0.151993 | 0.118628 | 0.118628 | 0.118628 | 0.118628 | 0.118628 | 0 | 0.017067 | 0.167313 | 1,548 | 50 | 99 | 30.96 | 0.820016 | 0.379199 | 0 | 0.24 | 0 | 0 | 0.160338 | 0 | 0 | 0 | 0 | 0 | 0.04 | 1 | 0.12 | false | 0 | 0.16 | 0.04 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4b219e46b871a781176af732787b6b6cfc80947 | 4,753 | py | Python | ChiEngProj.py | meson200/ChiEngProj | 9661f3d4ce66ddfc2b6e0c36a64bec67a31ef5aa | [
"MIT"
] | null | null | null | ChiEngProj.py | meson200/ChiEngProj | 9661f3d4ce66ddfc2b6e0c36a64bec67a31ef5aa | [
"MIT"
] | null | null | null | ChiEngProj.py | meson200/ChiEngProj | 9661f3d4ce66ddfc2b6e0c36a64bec67a31ef5aa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 29 23:25:47 2015
Data incubator challenge question
"predicting power usage for new home owners" version 1.0
based on 2010 usage and weather data in Chicago
@author: Sangkyu Lee
"""
import pandas as pd
import numpy as np
import requests
import json
import calendar
from ggplot import *
#matplotlib.style.use('ggplot')
##################subfunctions#######################################
# month name parsing
def IsItMonth(colname):
found = False
monthnames = [x.lower() for x in calendar.month_name[1:]]
found = any(s in colname for s in monthnames)
return found
# used as a key function to sort month columns
def MonthSorting(colname):
month_key = {m.lower(): i for i, m in enumerate(calendar.month_name[1:])}
for mon_name in month_key.keys():
if mon_name in colname:
ind_to_return = month_key[mon_name]
return ind_to_return
# returns the rows with extreme values (defined by deviation from mean)
def DetectOutlier(df,sigma):
from scipy import stats as stats
in_rows = (np.abs(stats.zscore(df)) < sigma).all(axis=1)
return in_rows
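# sigma=3 keeps rows within three standard deviations on every monthly column
# (about 99.7% of values under normality), so only extreme meters are dropped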
#####################################################################
# API data import
url = 'https://data.cityofchicago.org/resource/energy-usage-2010.json?'
# create a filter
filt = [
'$limit=50000',
#'&building_subtype=Multi+7%2B',
'&building_type=Residential',
#'&average_stories=2',
'&$where=occupied_units_percentage > 0.5']
token = 'TNukBspJMhzXso6cZ9guqb6w2'
r = requests.get(url+''.join(filt), headers={'X-App-Token':token})
print(r.status_code)
data_json = json.loads(r.text)
_data_raw = pd.DataFrame(data_json)
_data_raw = _data_raw.convert_objects(convert_numeric=True)
_data_raw.rename(columns={'term_april_2010': 'therm_april_2010'}, inplace=True)
data_raw = _data_raw[_data_raw.notnull().all(axis=1)] # remove NaN
#separate time series data for power and gas consumption
el_time_cols = [col for col in data_raw.columns if IsItMonth(col) & ('kwh' in col)]
gas_time_cols = [col for col in data_raw.columns if IsItMonth(col) & ('therm' in col)]
time_cols = [col for col in data_raw.columns if IsItMonth(col)]
# normalize the month-by-month consumption by square feet
data_raw.loc[:,el_time_cols] = data_raw[el_time_cols].div(data_raw['kwh_total_sqft'],axis='index')
data_raw.loc[:,gas_time_cols] = data_raw[gas_time_cols].div(data_raw['therms_total_sqft'],axis='index')
# factor into different occupancy factor
data_raw.loc[:,time_cols] = data_raw[time_cols].div(data_raw['occupied_units_percentage'],axis='index')
# remove outliers (defined here as deviation larger than 3sigma)
in_rows = DetectOutlier(data_raw[time_cols],3)
data_raw_2 = data_raw[in_rows]
# calculate monthly change in consumption
el_timeseries = pd.pivot_table(data_raw_2, values=el_time_cols, columns = 'building_subtype', aggfunc=np.average)
gas_timeseries = pd.pivot_table(data_raw_2, values=gas_time_cols, columns = 'building_subtype', aggfunc=np.average)
# sort rows
el_timeseries = el_timeseries.reindex(sorted(el_time_cols,key=MonthSorting))
gas_timeseries = gas_timeseries.reindex(sorted(gas_time_cols,key=MonthSorting))
# rearrange the data for plotting monthly data
el_timeseries.index = el_timeseries.index.map(lambda st: st.replace('kwh_',''))
el_timeseries.reset_index(level=0, inplace=True)
el_timeseries = el_timeseries.rename(columns = {'index':'month'})
el_timeseries['type'] = ['electricity']*12
el_timeseries_long = pd.melt(el_timeseries,id_vars = ['month','type'])
gas_timeseries.index = gas_timeseries.index.map(lambda st: st.replace('therm_',''))
gas_timeseries.reset_index(level=0, inplace=True)
gas_timeseries = gas_timeseries.rename(columns = {'index':'month'})
gas_timeseries['type'] = ['gas']*12
gas_timeseries_long = pd.melt(gas_timeseries,id_vars = ['month','type'])
frames = [gas_timeseries_long,el_timeseries_long]
timeseries_long = pd.concat(frames)
timeseries_long['month'] = timeseries_long['month'].map(lambda st: st.replace('_',' '))
timeseries_long['month'] = pd.to_datetime(timeseries_long['month'])
plot1 = ggplot(aes(x='month',y='value',colour='building_subtype'),timeseries_long) + \
geom_line() + \
facet_grid('type',scales='free_y') + \
ylab('average consumption per sqft') + \
scale_x_date(labels='%b %y',breaks=date_breaks('month'))
ggsave(plot1,'figure1.eps')
# scatterplot that shows the effect of building age on heat consumption
plot2 = ggplot(aes(x='kwh_july_2010', y='therm_january_2010',colour='average_age'), data=data_raw_2) + \
geom_point() + \
ylab('gas consumption per sqft, January 2010 (therm)') + \
xlab('electricity consumption per sqft, July 2010 (kwh)')
ggsave(plot2,'figure2.eps')
| 42.4375 | 115 | 0.723122 | 699 | 4,753 | 4.69671 | 0.341917 | 0.053305 | 0.01523 | 0.012793 | 0.194639 | 0.136765 | 0.136765 | 0.092903 | 0.042949 | 0.042949 | 0 | 0.019875 | 0.121397 | 4,753 | 111 | 116 | 42.81982 | 0.766284 | 0.184936 | 0 | 0 | 0 | 0 | 0.172803 | 0.029293 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.098592 | 0 | 0.183099 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4b498934bcc4f96ba6a32615a180933e174d2c4 | 1,697 | py | Python | setup.py | isabella232/resync | 6e9ddfa83087a0c122f72a6cc375c490f758b016 | [
"Apache-2.0"
] | 1 | 2016-11-30T18:08:02.000Z | 2016-11-30T18:08:02.000Z | setup.py | EHRI/resync | 6e9ddfa83087a0c122f72a6cc375c490f758b016 | [
"Apache-2.0"
] | 1 | 2021-06-22T08:24:40.000Z | 2021-06-22T08:24:40.000Z | setup.py | isabella232/resync | 6e9ddfa83087a0c122f72a6cc375c490f758b016 | [
"Apache-2.0"
] | 1 | 2021-06-22T08:22:25.000Z | 2021-06-22T08:22:25.000Z | from setuptools import setup
# setuptools used instead of distutils.core so that
# dependencies can be handled automatically
# Extract version number from resync/_version.py. Here we
# are very strict about the format of the version string
# as an extra sanity check. (Thanks for comments in
# http://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package )
import re
VERSIONFILE = "resync/_version.py"
verfilestr = open(VERSIONFILE, "rt").read()
match = re.search(r"^__version__ = '(\d\.\d.\d+(\.\d+)?)'", verfilestr, re.MULTILINE)
if match:
version = match.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE))
setup(
name='resync',
version=version,
packages=['resync'],
#scripts=['bin/resync', 'bin/resync-explorer'],
classifiers=["Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent", # is this true? know Linux & OS X ok
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment"],
author='Simeon Warner',
author_email='simeon.warner@cornell.edu',
description='ResourceSync library and client',
#long_description=open('README').read(),
url='http://github.com/resync/resync',
install_requires=[
"requests",
"python-dateutil>=1.5"
],
test_suite="resync.test",
)
| 39.465116 | 95 | 0.64231 | 193 | 1,697 | 5.595855 | 0.678756 | 0.036111 | 0.027778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008346 | 0.223335 | 1,697 | 42 | 96 | 40.404762 | 0.811077 | 0.274602 | 0 | 0 | 0 | 0 | 0.470106 | 0.038493 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4b57c305f668f559a512cca575b3c48c7b7230f | 2,966 | py | Python | tests/test_config.py | ARMmbed/snippet | e186338ceeca8727b1dc0843f22c5cc486c00045 | [
"Apache-2.0"
] | 4 | 2018-11-09T13:51:07.000Z | 2022-03-02T08:16:16.000Z | tests/test_config.py | ARMmbed/snippet | e186338ceeca8727b1dc0843f22c5cc486c00045 | [
"Apache-2.0"
] | 2 | 2020-04-09T07:38:44.000Z | 2020-04-09T07:49:32.000Z | tests/test_config.py | ARMmbed/snippet | e186338ceeca8727b1dc0843f22c5cc486c00045 | [
"Apache-2.0"
] | 15 | 2019-02-03T12:10:44.000Z | 2022-03-02T20:40:32.000Z | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
import os
import shutil
import filecmp
import subprocess
import sys
import textwrap
import unittest
from snippet import config as snippet_config
from tests import tmp_test_dir
from tests import sample_input_dir
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
        # use a plain directory (not the real tmp dir) to avoid cross-process permission issues on Windows
os.makedirs(tmp_test_dir)
cls.tmp_fp = os.path.join(tmp_test_dir, "config.toml")
with open(cls.tmp_fp, "w", encoding="utf8") as fh:
fh.write(
textwrap.dedent(
"""
[snippet]
# an example: this config is itself an example
input_glob = "does not match anything"
stop_on_first_failure = true
end_flag = "custom value"
foo = "bar"
fizz = "buzz"
"""
).lstrip()
)
cls.tmp_fp_2 = os.path.join(tmp_test_dir, "config2.toml")
with open(cls.tmp_fp_2, "w", encoding="utf8") as fh:
fh.write(
textwrap.dedent(
"""
[snippet]
input_glob = "config.toml"
foo = "baz"
"""
).lstrip()
)
@classmethod
def tearDownClass(cls):
shutil.rmtree(tmp_test_dir)
def test_config_from_file(self):
# explicitly load config from a file
config = snippet_config.get_config(config_paths=[self.tmp_fp])
self.assertEqual(config.end_flag, "custom value")
self.assertEqual(config.foo, "bar")
self.assertEqual(config.fizz, "buzz")
def test_config_from_multi_globs(self):
# explicitly load from two files
config = snippet_config.get_config(config_paths=[self.tmp_fp, self.tmp_fp_2])
self.assertEqual(config.foo, "baz")
self.assertEqual(config.fizz, "buzz")
def test_config_from_cli(self):
# load config when run as a module
subprocess.check_call(
[
sys.executable,
"-m",
"snippet",
str(tmp_test_dir),
"--config",
str(self.tmp_fp),
"--config",
str(self.tmp_fp_2),
],
stderr=subprocess.STDOUT,
)
self.assertTrue(
filecmp.cmp(
os.path.join(tmp_test_dir, "this_config_is_itself_an_example.md"),
os.path.join(sample_input_dir, "config_fixture.md"),
shallow=False,
)
)
def test_auto_config(self):
# load config, without explicitly setting the config path
config = snippet_config.get_config()
self.assertEqual(config.end_flag, "custom value")
self.assertEqual(config.fizz, "buzz")
| 29.366337 | 96 | 0.567094 | 345 | 2,966 | 4.681159 | 0.35942 | 0.027864 | 0.043344 | 0.024149 | 0.388854 | 0.344272 | 0.248916 | 0.248916 | 0.248916 | 0.19195 | 0 | 0.006602 | 0.336143 | 2,966 | 100 | 97 | 29.66 | 0.813611 | 0.110249 | 0 | 0.238095 | 0 | 0 | 0.067797 | 0.015611 | 0 | 0 | 0 | 0 | 0.126984 | 1 | 0.095238 | false | 0 | 0.15873 | 0 | 0.269841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4b6b4138ca66bccd63341d93918ed34f71c9d55 | 6,705 | py | Python | Train.py | lythings/FaceEmotionCamera | 430545201a2ea2d1423ed5509d882f00f9d7dba6 | [
"MIT"
] | null | null | null | Train.py | lythings/FaceEmotionCamera | 430545201a2ea2d1423ed5509d882f00f9d7dba6 | [
"MIT"
] | null | null | null | Train.py | lythings/FaceEmotionCamera | 430545201a2ea2d1423ed5509d882f00f9d7dba6 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from tqdm import tqdm
import matplotlib.pyplot as plt
class DataSet(torch.utils.data.Dataset):
def __init__(self, dataPath, transform=None):
self.dataset = pd.read_csv(dataPath)
        self.dataset = self.dataset[["emotion", "pixels"]]  # keep only the emotion and pixel columns
self.transform = transform
def __len__(self):
return len(self.dataset)
    def __getitem__(self, index):  # fetch one sample by index
emotion = self.dataset.loc[index, "emotion"]
pixels = self.dataset.loc[index, "pixels"] # 2304 = 48 * 48
pixels = pixels.split(" ")
pixels = list(map(float, pixels))
pixels = torch.Tensor(pixels).reshape(48, 48)
if self.transform:
pixels = self.transform(pixels)
# emotion_onehot = np.zeros(7,1)
# emotion_onehot[emotion][0] = 1
# emotion_onehot = torch.Tensor(emotion_onehot)
return pixels, emotion
TheTransform = torchvision.transforms.Compose([  # preprocessing pipeline
    torchvision.transforms.ToPILImage(),  # convert the tensor to a PIL Image
    # torchvision.transforms.Grayscale(),
    # torchvision.transforms.Resize(224),
    torchvision.transforms.RandomHorizontalFlip(),  # random horizontal flip
    torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5),  # randomly jitter brightness and contrast
    torchvision.transforms.ToTensor(),  # back to a tensor
])
def vgg_block(num_convs, in_channels, out_channels):  # build one VGG block
blk = []
for i in range(num_convs):
if i == 0:
blk.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
else:
blk.append(nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1))
blk.append(nn.ReLU())
    # halve the height and width
blk.append(nn.MaxPool2d(kernel_size=2, stride=2))
return nn.Sequential(*blk)
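# e.g. vgg_block(2, 1, 32) yields Conv(1->32) -> ReLU -> Conv(32->32) -> ReLU
# -> MaxPool(2, 2): the spatial size is halved while the channel width grows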
class FlattenLayer(nn.Module):  # flattens feature maps before the fully connected layers
def __init__(self):
super(FlattenLayer, self).__init__()
def forward(self, x):
x = x.view(x.size(0), -1)
        return x  # shape becomes (batch_size, channels * h * w)
def VGG(conv_arch, fc_features, fc_hidden_units = 4096):
net = nn.Sequential()
    for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):  # append the VGG blocks
net.add_module("vggBlock_" + str(i + 1), vgg_block(num_convs, in_channels, out_channels))
net.add_module("fc", nn.Sequential(FlattenLayer(),
nn.Linear(fc_features, fc_hidden_units),
nn.ReLU(), #使用LeajyReLu,虽然不确定到底好不好用,但是感觉比ReLu高级一些
nn.Dropout(0.5),
nn.Linear(fc_hidden_units, fc_hidden_units),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(fc_hidden_units, 7))) #输出成7个感情
return net
Trainloss = []
TrainAcc = []
ValAcc = []
def train(train_iter, test_iter, net, optimzer, device, num_epochs):
    axis_x = range(1, num_epochs + 1)  # x-axis values for the result plots
net = net.to(device)
loss = nn.CrossEntropyLoss()
batch_count = 0
for epoch in range(1, num_epochs + 1):
        train_loss_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()  # reset epoch stats, start the timer
for X, y in train_iter:
X = X.to(device)
y = y.to(device)
# print(X.shape)
y_hat = net(X)
l = loss(y_hat, y)
optimzer.zero_grad()
l.backward()
optimzer.step()
train_loss_sum += l.cpu().item()
train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
n += y.shape[0]
batch_count += 1
test_acc = evaluate_accuracy(test_iter, net)
ValAcc.append(test_acc)
TrainAcc.append(train_acc_sum / n)
Trainloss.append(train_loss_sum / batch_count)
        print('epoch {}, loss {:.4f}, train acc {:.4f}, test acc {:.4f}, time {:.2f} sec'
.format(epoch, train_loss_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
torch.save(model, '/content/drive/My Drive/Emotion/model' + str(epoch) +".pth")
plt.plot(axis_x,Trainloss,label = "TrainLoss",color = "r" )
plt.plot(axis_x,TrainAcc,label = "TrainAcc",color = "b" )
plt.plot(axis_x,ValAcc,label = "ValAcc",color = "g" )
plt.xlabel('Epoch')
plt.title('Result of my train')
plt.legend()
plt.show()
def evaluate_accuracy(data_iter, net, device=None):
if device is None and isinstance(net, torch.nn.Module):
        # fall back to the net's own device when none is given
device = list(net.parameters())[0].device
acc_sum, n = 0.0, 0
with torch.no_grad():
for X, y in data_iter:
if isinstance(net, torch.nn.Module):
                # evaluation mode: dropout is disabled
net.eval()
acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                # switch back to training mode
net.train()
else:
                if ('is_training' in net.__code__.co_varnames):  # the net takes an is_training flag
                    # evaluate with is_training=False
acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
else:
acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
return acc_sum / n
if __name__ == '__main__':  # guard needed for multi-process DataLoader workers
DEVICE = "cuda"
batch_size = 64
dataset_train = DataSet("/content/drive/My Drive/Emotion/data2/data/Train.csv", transform=TheTransform)
dataset_vali = DataSet("/content/drive/My Drive/Emotion/data2/data/Val.csv", transform=TheTransform)
DataLoader_train = torch.utils.data.DataLoader(dataset=dataset_train,
batch_size=batch_size,
shuffle=True,
num_workers=2)
DataLoader_vali = torch.utils.data.DataLoader(dataset=dataset_vali,
batch_size=batch_size,
shuffle=True,
num_workers=2)
conv_arch = ((2, 1, 32), (2, 32, 64), (1, 64, 128))
    # after 3 vgg_blocks the 48x48 input is halved three times: 48 / 8 = 6
    fc_features = 128 * 6 * 6  # c * h * w; 128 is the channel count after the VGG blocks
fc_hidden_units = 1024
num_epochs = 30
model = VGG(conv_arch, fc_features, fc_hidden_units).to(DEVICE)
lr = 0.001
optimizer = optim.Adam(model.parameters(), lr=lr)
train(DataLoader_train, DataLoader_vali, model, optimizer, DEVICE, num_epochs) | 43.258065 | 107 | 0.579717 | 822 | 6,705 | 4.542579 | 0.281022 | 0.014462 | 0.024371 | 0.022496 | 0.219336 | 0.159882 | 0.131762 | 0.119443 | 0.038565 | 0 | 0 | 0.024602 | 0.296793 | 6,705 | 155 | 108 | 43.258065 | 0.767338 | 0.085011 | 0 | 0.11194 | 0 | 0.007463 | 0.053244 | 0.010813 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067164 | false | 0 | 0.067164 | 0.007463 | 0.19403 | 0.007463 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4b8ba2e649969fc6bb6f38a691b8170998175d9 | 1,579 | py | Python | behaviour-analysis-pipeline.py | zhen-lab/behaviour-analysis | 14cddaa342ad04538428faba1f91a4f34ab7b1e2 | [
"MIT"
] | null | null | null | behaviour-analysis-pipeline.py | zhen-lab/behaviour-analysis | 14cddaa342ad04538428faba1f91a4f34ab7b1e2 | [
"MIT"
] | 7 | 2020-02-07T14:16:48.000Z | 2020-02-25T20:43:03.000Z | behaviour-analysis-pipeline.py | zhen-lab/behaviour-analysis | 14cddaa342ad04538428faba1f91a4f34ab7b1e2 | [
"MIT"
] | null | null | null | import cv2
import glob
import os
import shutil
from tierpsy.processing.processMultipleFilesFun import processMultipleFilesFun
from tierpsy.summary.collect import calculate_summaries
path = './data/jpeg-30s/'
img_extension = "*.jpg"
fps = 10
masked_video_dir = path + 'MaskedVideos'
results_dir = path + 'Results'
parameters_file = path + 'parameters.json'
try:
shutil.rmtree(masked_video_dir)
shutil.rmtree(results_dir)
except OSError as e:
print("error couldnt delete files")
def file_name_str_to_int(f_path):
f_name = os.path.basename(f_path)
str_key, _ = os.path.splitext(f_name)
return int(str_key)
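# numeric sort keeps the frames in capture order; a plain string sort would
# interleave them as 1.jpg, 10.jpg, 11.jpg, 2.jpg, ...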
img_files = glob.glob(path + img_extension)
img_files.sort(key=file_name_str_to_int)
images = [cv2.imread(file) for file in img_files]
width, height, layers = images[0].shape
video = cv2.VideoWriter(path + "test.avi", cv2.VideoWriter_fourcc(*'XVID'), fps, (height, width))
for img in images:
video.write(img)
video.release()
# code to call tierpsy batch processing
processMultipleFilesFun(path, masked_video_dir, results_dir,
'', parameters_file, '', '*.avi', '', 3, 10.0, False,
'COMPRESS', 'FEAT_TIERPSY', False,
['COMPRESS', 'TRAJ_CREATE', 'TRAJ_JOIN', 'SKE_INIT', 'BLOB_FEATS', 'SKE_CREATE', 'SKE_FILT', 'SKE_ORIENT', 'INT_PROFILE', 'INT_SKE_ORIENT', 'FEAT_INIT', 'FEAT_TIERPSY'],
False, False, True
)
# code to generate results csv
fold_args = dict(n_folds = 5, frac_worms_to_keep = 0.8, time_sample_seconds = 600.0)
calculate_summaries(
path,
'tierpsy',
'plate',
False,
True,
**fold_args
) | 26.762712 | 173 | 0.721343 | 224 | 1,579 | 4.839286 | 0.459821 | 0.030443 | 0.038745 | 0.023985 | 0.02952 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014903 | 0.150095 | 1,579 | 59 | 174 | 26.762712 | 0.792847 | 0.041799 | 0 | 0 | 0 | 0 | 0.165453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.136364 | 0 | 0.181818 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4c15794d223dfbfa08064e0cd66ab6569b6d28b | 3,353 | py | Python | tests/attr/core/strategies/test_margin_sampling.py | nocotan/orakl | b524bc311f008b7ac46f5c289e4cc86322f4c5e3 | [
"Apache-2.0"
] | null | null | null | tests/attr/core/strategies/test_margin_sampling.py | nocotan/orakl | b524bc311f008b7ac46f5c289e4cc86322f4c5e3 | [
"Apache-2.0"
] | null | null | null | tests/attr/core/strategies/test_margin_sampling.py | nocotan/orakl | b524bc311f008b7ac46f5c289e4cc86322f4c5e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import tensorflow as tf
from orakl.attr import MarginSampling
from ...helpers.utils import BaseTest
class Test(BaseTest):
def test_call_with_empty_data_pool(self):
ms = MarginSampling()
model = tf.keras.Model()
with self.assertRaises(AssertionError):
ms(model)
def test_call_with_random_data_pool(self):
n_samples = 10
n_classes = 3
ms = MarginSampling()
initializer = tf.initializers.he_normal(seed=0)
model = tf.keras.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
n_classes,
input_shape=(10,),
kernel_initializer=initializer),
])
loss_function = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.NONE)
data_pool = np.random.rand(100, 10)
indexes, samples = ms(model,
loss_function=loss_function,
n_classes=n_classes,
data_pool=data_pool,
n_samples=n_samples)
assert(len(indexes) == n_samples)
assert(len(samples) == n_samples)
def test_call_with_multi_dim_data(self):
n_samples = 10
n_classes = 3
ms = MarginSampling()
initializer = tf.initializers.he_normal(seed=0)
model = tf.keras.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
n_classes,
input_shape=(10, 10, ),
kernel_initializer=initializer),
])
loss_function = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.NONE)
data_pool = np.random.rand(100, 10, 10)
indexes, samples = ms(model,
loss_function=loss_function,
n_classes=n_classes,
data_pool=data_pool,
n_samples=n_samples)
assert(len(indexes) == n_samples)
assert(len(samples) == n_samples)
def test_repr(self):
state = {
"data_pool": np.random.rand(100, 10, 10),
"excluded_indexes": [0, 1, 2],
"loss_function": None,
"n_classes": 10,
"n_samples": 5,
"batch_size": 10,
}
s = "\n================================"
s += "\ndata_pool: {}".format(len(state["data_pool"]))
s += "\nexcluded_indexes: {}".format(state["excluded_indexes"])
s += "\nloss_function: {}".format(state["loss_function"])
s += "\nn_classes: {}".format(state["n_classes"])
s += "\nn_samples: {}".format(state["n_samples"])
s += "\nbatch_size: {}".format(state["batch_size"])
s += "\n================================"
ms = MarginSampling(
data_pool=state["data_pool"],
excluded_indexes=state["excluded_indexes"],
loss_function=state["loss_function"],
n_classes=state["n_classes"],
n_samples=state["n_samples"],
batch_size=state["batch_size"],
)
assert(repr(ms) == s)
| 31.632075 | 71 | 0.532061 | 350 | 3,353 | 4.857143 | 0.228571 | 0.065882 | 0.030588 | 0.04 | 0.560588 | 0.560588 | 0.560588 | 0.560588 | 0.544706 | 0.544706 | 0 | 0.018817 | 0.334327 | 3,353 | 105 | 72 | 31.933333 | 0.742832 | 0.006263 | 0 | 0.5 | 0 | 0 | 0.110477 | 0.020414 | 0 | 0 | 0 | 0 | 0.073171 | 1 | 0.04878 | false | 0 | 0.04878 | 0 | 0.109756 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4c5a0efbd3a143079b5efd4a1992af1f5fc5e31 | 13,196 | py | Python | openpnm/models/physics/diffusive_conductance.py | edgargmartinez/OpenPNM | c68745993b3e9895f53938164a9cf6305500748e | [
"MIT"
] | 3 | 2019-07-17T01:35:09.000Z | 2021-05-08T02:03:35.000Z | openpnm/models/physics/diffusive_conductance.py | edgargmartinez/OpenPNM | c68745993b3e9895f53938164a9cf6305500748e | [
"MIT"
] | null | null | null | openpnm/models/physics/diffusive_conductance.py | edgargmartinez/OpenPNM | c68745993b3e9895f53938164a9cf6305500748e | [
"MIT"
] | null | null | null | r"""
.. autofunction:: openpnm.models.physics.diffusive_conductance.ordinary_diffusion
.. autofunction:: openpnm.models.physics.diffusive_conductance.taylor_aris_diffusion
.. autofunction:: openpnm.models.physics.diffusive_conductance.generic_conductance
"""
import scipy as _sp
def ordinary_diffusion(target,
pore_area='pore.area',
throat_area='throat.area',
pore_diffusivity='pore.diffusivity',
throat_diffusivity='throat.diffusivity',
conduit_lengths='throat.conduit_lengths',
conduit_shape_factors='throat.poisson_shape_factors'):
r"""
Calculate the diffusive conductance of conduits in network, where a
conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
pore_diffusivity : string
Dictionary key of the pore diffusivity values
throat_diffusivity : string
Dictionary key of the throat diffusivity values
conduit_lengths : string
Dictionary key of the conduit length values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing diffusive conductance values for conduits in the
geometry attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper flow_shape_factor argument.
"""
return generic_conductance(target=target, transport_type='diffusion',
pore_area=pore_area,
throat_area=throat_area,
pore_diffusivity=pore_diffusivity,
throat_diffusivity=throat_diffusivity,
conduit_lengths=conduit_lengths,
conduit_shape_factors=conduit_shape_factors)
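# Worked sketch with illustrative (not library-supplied) numbers: taking
# D = 2.1e-9 m^2/s everywhere, pore areas A1 = A2 = 1e-12 m^2, throat area
# At = 2.5e-13 m^2, and lengths L1 = L2 = 5e-6 m, Lt = 1e-5 m, each element
# contributes g_i = D*A_i/L_i and the conduit conductance is the harmonic
# combination g = (1/g1 + 1/gt + 1/g2)**-1, dominated by the narrow throat.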
def taylor_aris_diffusion(
target,
pore_area='pore.area',
throat_area='throat.area',
pore_diffusivity='pore.diffusivity',
pore_pressure='pore.pressure',
throat_hydraulic_conductance='throat.hydraulic_conductance',
throat_diffusivity='throat.diffusivity',
conduit_lengths='throat.conduit_lengths',
conduit_shape_factors='throat.poisson_shape_factors'):
r"""
Calculate the diffusive conductance of conduits in network considering the
Taylor-Aris effect (effect of fluid flow on diffusion), where a
conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
pore_diffusivity : string
Dictionary key of the pore diffusivity values
pore_pressure : string
Dictionary key of the pore pressure values
throat_hydraulic_conductance : string
Dictionary key of the throat hydraulic_conductance values
throat_diffusivity : string
Dictionary key of the throat diffusivity values
conduit_lengths : string
Dictionary key of the conduit length values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing diffusive conductance values (with Taylor-Aris effect)
for conduits in the geometry attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties are
already calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper flow_shape_factor argument.
"""
return generic_conductance(
target=target,
transport_type='taylor_aris_diffusion',
pore_area=pore_area,
throat_area=throat_area,
pore_diffusivity=pore_diffusivity,
throat_diffusivity=throat_diffusivity,
conduit_lengths=conduit_lengths,
conduit_shape_factors=conduit_shape_factors,
pore_pressure=pore_pressure,
throat_hydraulic_conductance=throat_hydraulic_conductance)
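# Sketch of the Taylor-Aris correction applied below (assumed numbers): for
# u = 1e-3 m/s, A = 1e-12 m^2 and D = 2.1e-9 m^2/s, the Peclet number is
# Pe = u*(4*A/pi)**0.5/D ~ 0.54, so D*(1 + Pe**2/192) exceeds D by only
# ~0.15%; the flow-enhanced diffusion term only matters at much larger Pe.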
def generic_conductance(target, transport_type, pore_area, throat_area,
pore_diffusivity, throat_diffusivity,
conduit_lengths, conduit_shape_factors, **kwargs):
r"""
Calculate the generic conductance (could be mass, thermal, electrical,
    ionic, or hydraulic) of conduits in the network, where a conduit is
( 1/2 pore - full throat - 1/2 pore ).
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
transport_type : string
Dictionary key of the transport type
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
pore_diffusivity : string
Dictionary key of the pore diffusivity values
throat_diffusivity : string
Dictionary key of the throat diffusivity values
conduit_lengths : string
Dictionary key of the conduit length values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing conductance values for conduits in the geometry
attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper shape factor.
(4) shape_factor depends on the physics of the problem, i.e. diffusion-like
processes and fluid flow need different shape factors.
"""
network = target.project.network
throats = network.map_throats(throats=target.Ts, origin=target)
phase = target.project.find_phase(target)
cn = network['throat.conns'][throats]
# Getting equivalent areas
A1 = network[pore_area][cn[:, 0]]
At = network[throat_area][throats]
A2 = network[pore_area][cn[:, 1]]
# Getting conduit lengths
L1 = network[conduit_lengths + '.pore1'][throats]
Lt = network[conduit_lengths + '.throat'][throats]
L2 = network[conduit_lengths + '.pore2'][throats]
# Preallocating g
g1, g2, gt = _sp.zeros((3, len(Lt)))
# Setting g to inf when Li = 0 (ex. boundary pores)
# INFO: This is needed since area could also be zero, which confuses NumPy
m1, m2, mt = [Li != 0 for Li in [L1, L2, Lt]]
g1[~m1] = g2[~m2] = gt[~mt] = _sp.inf
# Getting shape factors
try:
SF1 = phase[conduit_shape_factors+'.pore1'][throats]
SFt = phase[conduit_shape_factors+'.throat'][throats]
SF2 = phase[conduit_shape_factors+'.pore2'][throats]
except KeyError:
SF1 = SF2 = SFt = 1.0
# Interpolate pore phase property values to throats
try:
Dt = phase[throat_diffusivity][throats]
except KeyError:
Dt = phase.interpolate_data(propname=pore_diffusivity)[throats]
try:
D1 = phase[pore_diffusivity][cn[:, 0]]
D2 = phase[pore_diffusivity][cn[:, 1]]
except KeyError:
D1 = phase.interpolate_data(propname=throat_diffusivity)[cn[:, 0]]
D2 = phase.interpolate_data(propname=throat_diffusivity)[cn[:, 1]]
# Find g for half of pore 1, throat, and half of pore 2
if transport_type == 'diffusion':
g1[m1] = (D1*A1)[m1] / L1[m1]
g2[m2] = (D2*A2)[m2] / L2[m2]
gt[mt] = (Dt*At)[mt] / Lt[mt]
elif transport_type == 'taylor_aris_diffusion':
for k, v in kwargs.items():
if k == 'pore_pressure':
pore_pressure = v
elif k == 'throat_hydraulic_conductance':
throat_hydraulic_conductance = v
P = phase[pore_pressure]
gh = phase[throat_hydraulic_conductance]
Qt = -gh*_sp.diff(P[cn], axis=1).squeeze()
u1 = Qt[m1]/A1[m1]
u2 = Qt[m2]/A2[m2]
ut = Qt[mt]/At[mt]
Pe1 = u1 * ((4*A1[m1]/_sp.pi)**0.5) / D1[m1]
Pe2 = u2 * ((4*A2[m2]/_sp.pi)**0.5) / D2[m2]
Pet = ut * ((4*At[mt]/_sp.pi)**0.5) / Dt[mt]
g1[m1] = D1[m1]*(1+(Pe1**2)/192)*A1[m1] / L1[m1]
g2[m2] = D2[m2]*(1+(Pe2**2)/192)*A2[m2] / L2[m2]
gt[mt] = Dt[mt]*(1+(Pet**2)/192)*At[mt] / Lt[mt]
else:
raise Exception('Unknown keyword for "transport_type", can only be' +
' "diffusion" or "taylor_aris_diffusion"')
# Apply shape factors and calculate the final conductance
return (1/gt/SFt + 1/g1/SF1 + 1/g2/SF2)**(-1)
def classic_ordinary_diffusion(target,
pore_molar_density='pore.molar_density',
pore_diffusivity='pore.diffusivity',
pore_area='pore.area',
pore_diameter='pore.diameter',
throat_area='throat.area',
throat_length='throat.length',
throat_diameter='throat.diameter',
shape_factor='throat.shape_factor',
**kwargs):
r"""
Calculate the diffusive conductance of conduits in network, where a
conduit is ( 1/2 pore - full throat - 1/2 pore ) based on the areas
Parameters
----------
network : OpenPNM Network Object
phase : OpenPNM Phase Object
The phase of interest
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
"""
network = target.project.network
throats = network.map_throats(throats=target.Ts, origin=target)
phase = target.project.find_phase(target)
# Get Nt-by-2 list of pores connected to each throat
Ps = network['throat.conns']
# Get properties in every pore in the network
parea = network[pore_area]
pdia = network[pore_diameter]
# Get the properties of every throat
tdia = network[throat_diameter]
tarea = _sp.pi * (tdia / 2) ** 2
tlen = network[throat_length]
# Interpolate pore phase property values to throats
DABt = phase.interpolate_data(propname=pore_diffusivity)[throats]
ct = phase.interpolate_data(propname=pore_molar_density)[throats]
# Get pore lengths
plen1 = (0.5 * pdia[Ps[:, 0]])
plen2 = (0.5 * pdia[Ps[:, 1]])
# Remove any non-positive lengths
plen1[plen1 <= 1e-12] = 1e-12
plen2[plen2 <= 1e-12] = 1e-12
# Find g for half of pore 1
gp1 = ct * DABt * parea[Ps[:, 0]] / plen1
gp1[_sp.isnan(gp1)] = _sp.inf
gp1[~(gp1 > 0)] = _sp.inf # Set 0 conductance pores (boundaries) to inf
# Find g for half of pore 2
gp2 = ct * DABt * parea[Ps[:, 1]] / plen2
gp2[_sp.isnan(gp2)] = _sp.inf
gp2[~(gp2 > 0)] = _sp.inf # Set 0 conductance pores (boundaries) to inf
# Find g for full throat, remove any non-positive lengths
tlen[tlen <= 0] = 1e-12
# Get shape factor
try:
sf = network[shape_factor]
except KeyError:
sf = _sp.ones(network.num_throats())
sf[_sp.isnan(sf)] = 1.0
gt = (1 / sf) * ct * DABt * tarea / tlen
# Set 0 conductance pores (boundaries) to inf
gt[~(gt > 0)] = _sp.inf
value = (1 / gt + 1 / gp1 + 1 / gp2) ** (-1)
return value
| 37.91954 | 84 | 0.646863 | 1,660 | 13,196 | 5.024096 | 0.151205 | 0.014988 | 0.047842 | 0.052878 | 0.691966 | 0.663189 | 0.636691 | 0.573022 | 0.573022 | 0.573022 | 0 | 0.022144 | 0.267657 | 13,196 | 347 | 85 | 38.028818 | 0.840853 | 0.479994 | 0 | 0.278571 | 0 | 0 | 0.097708 | 0.035169 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.007143 | 0 | 0.064286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4c5e19a5fb4217eaf5c3e579db4be69866e0425 | 2,281 | py | Python | app/caffeine.py | pknn1/radii | 29a55161e283e972545f2fa6ab86eb06162aeb8e | [
"MIT"
] | null | null | null | app/caffeine.py | pknn1/radii | 29a55161e283e972545f2fa6ab86eb06162aeb8e | [
"MIT"
] | 65 | 2018-10-17T09:13:21.000Z | 2019-05-12T15:27:28.000Z | app/caffeine.py | pknn1/radii | 29a55161e283e972545f2fa6ab86eb06162aeb8e | [
"MIT"
] | 2 | 2018-11-28T20:42:58.000Z | 2019-10-26T07:31:25.000Z | import logging
import os
import tqdm
import codecs
import h5py
from scipy.sparse import coo_matrix, csr_matrix
from implicit.als import AlternatingLeastSquares
import numpy as np
log = logging.getLogger("implicit")
def calculate_similar_event(path, output_filename):
model = AlternatingLeastSquares()
a, b = read_event_data(path)
    event, users = hdf5_from_dataframe(a, b, output_filename)
users.eliminate_zeros()
users.data = np.ones(len(users.data))
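    # binarise the interactions: every like becomes a confidence-1 observation
    # for the implicit-feedback ALS model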
log.info("Start fitting")
model.fit(users)
user_count = np.ediff1d(users.indptr)
to_generate = sorted(np.arange(len(event)), key=lambda x: -user_count[x])
with tqdm.tqdm(total=len(to_generate)) as progress:
with codecs.open(output_filename, "w", "utf-8") as o:
for eventid in to_generate:
if users.indptr[eventid] != users.indptr[eventid + 1]:
name = event[eventid]
for other, score in model.similar_items(
eventid, int(len(event) * 2 / 3)
):
o.write(f"{name},{event[other]},{score}\n")
progress.update(1)
def read_event_data(path):
import pandas
users = pandas.read_csv(os.path.join(path, "likes.csv"))
event = pandas.read_csv(os.path.join(path, "events.csv"))
print(users.columns.tolist())
return users, event
def hdf5_from_dataframe(users, event, output_filename):
print(users.columns.tolist())
m = coo_matrix(
((users["like"].astype(np.int32)), (users["eventID"], users["userID"]))
).tocsr()
with h5py.File(output_filename, "w") as f:
g = f.create_group("users")
g.create_dataset("data", data=m.data)
g.create_dataset("indptr", data=m.indptr)
g.create_dataset("indices", data=m.indices)
name = np.empty(shape=(event.eventID.max() + 1,), dtype=np.object)
name[event.eventID] = event.name
dt = h5py.special_dtype(vlen=str)
dset = f.create_dataset("event", (len(name),), dtype=dt)
dset[:] = name
plays = csr_matrix((g.get("data"), g.get("indices"), g.get("indptr")))
return np.array(f["event"]), plays
# return f
calculate_similar_event("./data", "similar-event.csv")
| 29.623377 | 79 | 0.624288 | 303 | 2,281 | 4.580858 | 0.369637 | 0.050432 | 0.030259 | 0.024496 | 0.038905 | 0.038905 | 0.038905 | 0 | 0 | 0 | 0 | 0.008005 | 0.233231 | 2,281 | 76 | 80 | 30.013158 | 0.785592 | 0.003507 | 0 | 0.037736 | 0 | 0 | 0.073536 | 0.01365 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056604 | false | 0 | 0.169811 | 0 | 0.264151 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4c923ce3d32af3eeec668c17548a734deee2fca | 5,563 | py | Python | .ipynb_checkpoints/quickr-checkpoint.py | victorfica/utils | b61935a860838a0e70afde7c9ecf2c68f51a2c4b | [
"MIT"
] | 5 | 2015-12-16T01:23:07.000Z | 2020-04-27T11:41:43.000Z | .ipynb_checkpoints/quickr-checkpoint.py | victorfica/utils | b61935a860838a0e70afde7c9ecf2c68f51a2c4b | [
"MIT"
] | 1 | 2021-05-06T23:47:20.000Z | 2021-05-06T23:48:33.000Z | .ipynb_checkpoints/quickr-checkpoint.py | victorfica/utils | b61935a860838a0e70afde7c9ecf2c68f51a2c4b | [
"MIT"
] | 6 | 2016-04-29T14:04:22.000Z | 2021-05-06T23:49:34.000Z | import subprocess
import pandas as pd
import tempfile
import os
__all__ = ['runRscript']
def runRscript(Rcmd, inDf=None, outputFiles=0, removeTempFiles=None):
"""Runs an R cmd with option to provide a DataFrame as input and file
as output.
Params
------
Rcmd : str
String containing the R-script to run.
inDf : pd.DataFrame or list of pd.DataFrame's
Data to be passed to the R script via a CSV file.
Object should be referenced in the script as "INPUTDF" or "INPUTDF0" etc. if list
outputFiles : int
Number of output CSV files available for writing by the R-script.
The contents of the file are returned as a pd.DataFrame.
File name should be referenced as "OUTPUTFNX" in the R-script
removeTempFiles : True, False or None
For debugging. If True then the temporary script and data files will
always be removed. If None then they will be removed if there is not an error.
If False they will not be removed.
Returns
-------
stdout : str
Output of the R-script at the terminal (including stderr)
output : pd.DataFrame or list of pd.DataFrames
Optionally, the contents of CSV file(s) written by the R-script as a pd.DataFrame"""
"""Write data to a tempfile if required"""
if not inDf is None:
if not type(inDf) is list:
inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput-', text=True)
readCmd = 'INPUTDF <- read.csv("%s")\n' % inputFn
Rcmd = readCmd + Rcmd
os.close(inputH)
inDf.to_csv(inputFn)
else:
inputFilenames = []
for i, idf in enumerate(inDf):
inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput%d-' % i, text=True)
readCmd = 'INPUTDF%d <- read.csv("%s")\n' % (i, inputFn)
Rcmd = readCmd + Rcmd
os.close(inputH)
idf.to_csv(inputFn)
inputFilenames.append(inputFn)
"""Set up an output file if required"""
outFn = []
for outi in range(outputFiles):
outputH, outputFn = tempfile.mkstemp(suffix='.txt', prefix='tmp-Routput-', text=True)
outCmd = 'OUTPUTFN%d <- "%s"\n' % (outi, outputFn)
Rcmd = outCmd + Rcmd
outFn.append(outputFn)
os.close(outputH)
"""Write script to tempfile"""
scriptH, scriptFn = tempfile.mkstemp(suffix='.R', prefix='tmp-Rscript-', text=True)
with open(scriptFn, 'w') as fh:
fh.write(Rcmd)
os.close(scriptH)
"""Run the R script and collect output"""
try:
cmdList = ['Rscript', '--vanilla', scriptFn]
res = subprocess.check_output(cmdList, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
res = bytes('STDOUT:\n%s\nSTDERR:\n%s' % (e.stdout, e.stderr), 'utf-8')
print('R process returned an error')
if removeTempFiles is None:
print('Leaving tempfiles for debugging.')
print(' '.join(cmdList))
if not inDf is None:
print(inputFn)
for outputFn in outFn:
print(outputFn)
removeTempFiles = False
"""Read the ouptfile if required"""
outDf = []
for outputFn in outFn:
try:
tmp = pd.read_csv(outputFn)
outDf.append(tmp)
except:
print('Cannot read output CSV: reading as text (%s)' % outputFn)
with open(outputFn, 'r') as fh:
tmp = fh.read()
if len(tmp) == 0:
print('Output file is empty! (%s)' % outputFn)
tmp = None
outDf.append(tmp)
# outDf = [pd.read_csv(outputFn) for outputFn in outFn]
if len(outDf) == 0:
outDf = None
elif len(outDf) == 1:
outDf = outDf[0]
"""Cleanup the temporary files"""
if removeTempFiles is None or removeTempFiles:
os.remove(scriptFn)
if not inDf is None:
if not type(inDf) is list:
os.remove(inputFn)
else:
for inputFn in inputFilenames:
os.remove(inputFn)
else:
print('Leaving tempfiles for debugging.')
print(' '.join(cmdList))
if not inDf is None:
print(inputFn)
for outputFn in outFn:
print(outputFn)
if outputFiles == 0:
return res.decode('utf-8')
else:
return res.decode('utf-8'), outDf
def _test_simple():
Rcmd = """ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
anova(lm.D9)
summary(lm.D90)"""
res = runRscript(Rcmd)
print(res)
def _test_io():
ctrl = [4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14]
trt = [4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69]
inDf = pd.DataFrame({'weight':ctrl + trt,
'group': ['Ctl']*len(ctrl) + ['Trt']*len(trt)})
Rcmd = """print(head(INPUTDF))
lm.D9 <- lm(weight ~ group, data=INPUTDF)
lm.D90 <- lm(weight ~ group - 1, data=INPUTDF) # omitting intercept
anova(lm.D9)
summary(lm.D90)
write.csv(data.frame(summary(lm.D90)$coefficients), OUTPUTFN)
"""
res, outputFile = runRscript(Rcmd, inDf=inDf, outputFiles=1)
print(res)
print(outputFile)
| 35.433121 | 104 | 0.577926 | 763 | 5,563 | 4.196592 | 0.266055 | 0.008745 | 0.021861 | 0.013741 | 0.252967 | 0.230481 | 0.205497 | 0.183635 | 0.1599 | 0.12742 | 0 | 0.038776 | 0.295344 | 5,563 | 156 | 105 | 35.660256 | 0.778061 | 0.190545 | 0 | 0.321101 | 0 | 0.018349 | 0.216519 | 0.042731 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027523 | false | 0 | 0.036697 | 0 | 0.082569 | 0.137615 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4c9ce487e10dba3071c2961a4b998890acc04b4 | 1,790 | py | Python | website/trafficlights/__init__.py | matthewrkitson/traffic-lights | 4a469fe9e2b78d140f79e411f57f73a10161608c | [
"MIT"
] | 1 | 2017-07-24T08:21:38.000Z | 2017-07-24T08:21:38.000Z | website/trafficlights/__init__.py | matthewrkitson/traffic-lights | 4a469fe9e2b78d140f79e411f57f73a10161608c | [
"MIT"
] | 1 | 2017-08-26T22:48:40.000Z | 2017-08-26T22:49:31.000Z | website/trafficlights/__init__.py | matthewrkitson/traffic-lights | 4a469fe9e2b78d140f79e411f57f73a10161608c | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
import trafficlights.controller as controller
import trafficlights.poller as poller
from trafficlights.updaters.teamcity_updater import TeamCityUpdater
from trafficlights.updaters.flash_updater import FlashUpdater
import os
import pwd
import logging
from logging.handlers import RotatingFileHandler
app = Flask(__name__)
def username():
return pwd.getpwuid(os.geteuid()).pw_name
def log_file_path():
    return '/var/tmp/' + username() + '-trafficlights.log'
log_file = log_file_path()
file_handler = RotatingFileHandler(log_file, maxBytes=100000, backupCount=3)
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.DEBUG)
app.logger.info('')
app.logger.info('-------------------------------------------------------')
app.logger.info('Starting trafficlights website')
app.logger.info('Running as user ' + username())
try:
def poweroff():
for i in range(lights.num_indicators):
lights.set_indicator(i, controller.Controller.BOTH)
os.system('sudo poweroff')
lights = controller.Controller(controller.FULLSIZE_V1, app.logger)
if lights.num_inputs > 0:
lights.add_input_response(0, poweroff)
app.logger.info('Creating updaters')
teamcity_updater = TeamCityUpdater(lights, app.logger)
flash_updater = FlashUpdater(lights, app.logger, enable_lights=False)
app.logger.debug('Starting poller')
poller = poller.Poller(lights, [teamcity_updater, flash_updater], app.logger)
poller.start()
import trafficlights.views.index
import trafficlights.views.admin
import trafficlights.views.logs
import trafficlights.views.teamcity
except Exception as ex:
app.logger.exception(ex)
raise
| 30.862069 | 81 | 0.732961 | 214 | 1,790 | 6 | 0.38785 | 0.091122 | 0.050623 | 0.035826 | 0.068536 | 0.030374 | 0 | 0 | 0 | 0 | 0 | 0.006506 | 0.141341 | 1,790 | 57 | 82 | 31.403509 | 0.828887 | 0 | 0 | 0 | 0 | 0 | 0.096143 | 0.030743 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.295455 | 0.045455 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4cbcf09ac9214d01520bb499e084a430a45cce9 | 6,855 | py | Python | models/backbones/psa.py | EmanuelNk/semantic-segmentation | 20ff16da49691fb407724909d9c7e84b47e2fee0 | [
"MIT"
] | null | null | null | models/backbones/psa.py | EmanuelNk/semantic-segmentation | 20ff16da49691fb407724909d9c7e84b47e2fee0 | [
"MIT"
] | null | null | null | models/backbones/psa.py | EmanuelNk/semantic-segmentation | 20ff16da49691fb407724909d9c7e84b47e2fee0 | [
"MIT"
] | null | null | null | import torch
from torch import nn, Tensor
from torch.nn import functional as F
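# PSAP and PSAS below are, respectively, parallel and sequential arrangements
# of polarized self-attention: spatial_pool pools over spatial positions to
# produce per-channel weights, channel_pool pools over channels to produce a
# per-pixel map, and the two re-weightings are combined by addition (PSAP)
# or by composition (PSAS).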
class PSAP(nn.Module):
def __init__(self, c1, c2):
super().__init__()
ch = c2 // 2
self.conv_q_right = nn.Conv2d(c1, 1, 1, bias=False)
self.conv_v_right = nn.Conv2d(c1, ch, 1, bias=False)
self.conv_up = nn.Conv2d(ch, c2, 1, bias=False)
self.conv_q_left = nn.Conv2d(c1, ch, 1, bias=False)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_v_left = nn.Conv2d(c1, ch, 1, bias=False)
def spatial_pool(self, x: Tensor) -> Tensor:
input_x = self.conv_v_right(x) # [B, C, H, W]
context_mask = self.conv_q_right(x) # [B, 1, H, W]
B, C, _, _ = input_x.shape
input_x = input_x.view(B, C, -1)
context_mask = context_mask.view(B, 1, -1).softmax(dim=2)
context = input_x @ context_mask.transpose(1, 2)
context = self.conv_up(context.unsqueeze(-1)).sigmoid()
        x = x * context  # out-of-place multiply; in-place would break autograd for the earlier convs
return x
def channel_pool(self, x: Tensor) -> Tensor:
g_x = self.conv_q_left(x)
B, C, H, W = g_x.shape
avg_x = self.avg_pool(g_x).view(B, C, -1).permute(0, 2, 1)
theta_x = self.conv_v_left(x).view(B, C, -1)
context = avg_x @ theta_x
context = context.softmax(dim=2).view(B, 1, H, W).sigmoid()
        x = x * context  # out-of-place: keep the input intact for the parallel branch and autograd
return x
def forward(self, x: Tensor) -> Tensor:
return self.spatial_pool(x) + self.channel_pool(x)
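# Reading of the two variants (comment added for clarity, not in the
# original): PSAP above applies the spatial and channel attention branches
# in parallel and sums their outputs, while PSAS below chains them
# sequentially (channel attention applied to the spatially re-weighted map).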
class PSAS(nn.Module):
def __init__(self, c1, c2):
super().__init__()
ch = c2 // 2
self.conv_q_right = nn.Conv2d(c1, 1, 1, bias=False)
self.conv_v_right = nn.Conv2d(c1, ch, 1, bias=False)
self.conv_up = nn.Sequential(
nn.Conv2d(ch, ch // 4, 1),
nn.LayerNorm([ch // 4, 1, 1]),
nn.ReLU(),
nn.Conv2d(ch // 4, c2, 1)
)
self.conv_q_left = nn.Conv2d(c1, ch, 1, bias=False)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_v_left = nn.Conv2d(c1, ch, 1, bias=False)
def spatial_pool(self, x: Tensor) -> Tensor:
input_x = self.conv_v_right(x) # [B, C, H, W]
context_mask = self.conv_q_right(x) # [B, 1, H, W]
B, C, _, _ = input_x.shape
input_x = input_x.view(B, C, -1)
context_mask = context_mask.view(B, 1, -1).softmax(dim=2)
context = input_x @ context_mask.transpose(1, 2)
context = self.conv_up(context.unsqueeze(-1)).sigmoid()
        x = x * context  # out-of-place multiply keeps autograd safe
return x
def channel_pool(self, x: Tensor) -> Tensor:
g_x = self.conv_q_left(x)
B, C, H, W = g_x.shape
avg_x = self.avg_pool(g_x).view(B, C, -1).permute(0, 2, 1)
theta_x = self.conv_v_left(x).view(B, C, -1).softmax(dim=2)
context = avg_x @ theta_x
context = context.view(B, 1, H, W).sigmoid()
        x = x * context  # out-of-place multiply keeps autograd safe
return x
def forward(self, x: Tensor) -> Tensor:
return self.channel_pool(self.spatial_pool(x))
class BasicBlock(nn.Module):
"""2 Layer No Expansion Block
"""
expansion: int = 1
    def __init__(self, c1, c2, s=1, downsample=None) -> None:
super().__init__()
self.conv1 = nn.Conv2d(c1, c2, 3, s, 1, bias=False)
self.bn1 = nn.BatchNorm2d(c2)
self.deattn = PSAS(c2, c2)
self.conv2 = nn.Conv2d(c2, c2, 3, 1, 1, bias=False)
self.bn2 = nn.BatchNorm2d(c2)
self.downsample = downsample
def forward(self, x: Tensor) -> Tensor:
identity = x
out = F.relu(self.bn1(self.conv1(x)))
out = self.deattn(out)
out = self.bn2(self.conv2(out))
if self.downsample is not None: identity = self.downsample(x)
out += identity
return F.relu(out)
class Bottleneck(nn.Module):
"""3 Layer 4x Expansion Block
"""
expansion: int = 4
def __init__(self, c1, c2, s=1, downsample=None) -> None:
super().__init__()
self.conv1 = nn.Conv2d(c1, c2, 1, 1, 0, bias=False)
self.bn1 = nn.BatchNorm2d(c2)
self.conv2 = nn.Conv2d(c2, c2, 3, s, 1, bias=False)
self.bn2 = nn.BatchNorm2d(c2)
self.deattn = PSAP(c2, c2)
self.conv3 = nn.Conv2d(c2, c2 * self.expansion, 1, 1, 0, bias=False)
self.bn3 = nn.BatchNorm2d(c2 * self.expansion)
self.downsample = downsample
def forward(self, x: Tensor) -> Tensor:
identity = x
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.deattn(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None: identity = self.downsample(x)
out += identity
return F.relu(out)
resnet_settings = {
'18': [BasicBlock, [2, 2, 2, 2]],
'34': [BasicBlock, [3, 4, 6, 3]],
'50': [Bottleneck, [3, 4, 6, 3]],
'101': [Bottleneck, [3, 4, 23, 3]],
'152': [Bottleneck, [3, 8, 36, 3]]
}
class ResNet(nn.Module):
def __init__(self, model_name: str = '50') -> None:
super().__init__()
assert model_name in resnet_settings.keys(), f"ResNet model name should be in {list(resnet_settings.keys())}"
block, depths = resnet_settings[model_name]
self.inplanes = 64
self.conv1 = nn.Conv2d(3, self.inplanes, 7, 2, 3, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.maxpool = nn.MaxPool2d(3, 2, 1)
self.layer1 = self._make_layer(block, 64, depths[0], s=1)
self.layer2 = self._make_layer(block, 128, depths[1], s=2)
self.layer3 = self._make_layer(block, 256, depths[2], s=2)
self.layer4 = self._make_layer(block, 512, depths[3], s=2)
def _make_layer(self, block, planes, depth, s=1) -> nn.Sequential:
downsample = None
if s != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, 1, s, bias=False),
nn.BatchNorm2d(planes * block.expansion)
)
layers = nn.Sequential(
block(self.inplanes, planes, s, downsample),
*[block(planes * block.expansion, planes) for _ in range(1, depth)]
)
self.inplanes = planes * block.expansion
return layers
def forward(self, x: Tensor) -> Tensor:
x = self.maxpool(F.relu(self.bn1(self.conv1(x)))) # [1, 64, H/4, W/4]
x1 = self.layer1(x) # [1, 64/256, H/4, W/4]
x2 = self.layer2(x1) # [1, 128/512, H/8, W/8]
x3 = self.layer3(x2) # [1, 256/1024, H/16, W/16]
x4 = self.layer4(x3) # [1, 512/2048, H/32, W/32]
return x1, x2, x3, x4
if __name__ == '__main__':
model = ResNet('18')
x = torch.zeros(2, 3, 224, 224)
outs = model(x)
for y in outs:
print(y.shape)
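# Expected output of the demo above, derived from the stage strides (comment
# added, not in the original). BasicBlock has expansion 1, so for a
# (2, 3, 224, 224) input the four stages print:
# torch.Size([2, 64, 56, 56])
# torch.Size([2, 128, 28, 28])
# torch.Size([2, 256, 14, 14])
# torch.Size([2, 512, 7, 7])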
| 34.104478 | 117 | 0.564989 | 1,029 | 6,855 | 3.618076 | 0.135083 | 0.042976 | 0.045394 | 0.037604 | 0.628794 | 0.579103 | 0.547408 | 0.523771 | 0.493688 | 0.475423 | 0 | 0.062398 | 0.28461 | 6,855 | 200 | 118 | 34.275 | 0.696778 | 0.034136 | 0 | 0.503226 | 0 | 0 | 0.012875 | 0.004544 | 0 | 0 | 0 | 0 | 0.006452 | 1 | 0.096774 | false | 0 | 0.019355 | 0.012903 | 0.225806 | 0.006452 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4cc740af35ab7b4d6f5b995924f2b85986c8d1a | 2,318 | py | Python | ruconlluconv/space/dataset.py | shkarupa-alex/ruconlluconv | 7b1c2c5af7724f407b56e412629921dc9b4f163b | [
"MIT"
] | 1 | 2019-10-24T10:23:53.000Z | 2019-10-24T10:23:53.000Z | ruconlluconv/space/dataset.py | shkarupa-alex/ruconlluconv | 7b1c2c5af7724f407b56e412629921dc9b4f163b | [
"MIT"
] | null | null | null | ruconlluconv/space/dataset.py | shkarupa-alex/ruconlluconv | 7b1c2c5af7724f407b56e412629921dc9b4f163b | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import os
import random
from conllu import parse
def create_dataset(src_files, dest_path):
data = []
for sf in src_files:
with open(sf, 'rb') as f:
sentences = parse(f.read().decode('utf-8'))
for s in sentences:
tokens = []
labels = []
for t in s:
if '.' in str(t['id']):
continue
tokens.append(t['form'].replace(' ', '_'))
labels.append('N' if t['misc'] is not None and 'SpaceAfter' in t['misc'] else 'Y')
if not len(tokens):
continue
data.append((
' '.join(tokens),
' '.join(labels)
))
random.shuffle(data)
test_size = len(data) // 100
test_data, train_data = data[:test_size], data[test_size:]
del data
with open(os.path.join(dest_path, 'test.txt'), 'w', newline='') as f:
csvwriter = csv.writer(f, quoting=csv.QUOTE_ALL)
for d in test_data:
csvwriter.writerow(d)
curr_id = 0
while len(train_data):
curr_data, train_data = train_data[:10000], train_data[10000:]
curr_id += 1
with open(os.path.join(dest_path, 'train-{}.txt'.format(curr_id)), 'w', newline='') as f:
csvwriter = csv.writer(f, quoting=csv.QUOTE_ALL)
for d in curr_data:
csvwriter.writerow(d)
def main():
parser = argparse.ArgumentParser(
description='Create dataset from files with CoNLL-U markup')
parser.add_argument(
'src_path',
type=str,
help='Directory with source CoNLL-U files')
parser.add_argument(
'dest_path',
type=str,
help='Directory to store dataset files')
argv, _ = parser.parse_known_args()
assert os.path.exists(argv.src_path) and os.path.isdir(argv.src_path)
assert not os.path.exists(argv.dest_path) or os.path.isdir(argv.dest_path)
source_files = []
for root, _, files in os.walk(argv.src_path):
source_files.extend([os.path.join(root, file) for file in files if file.endswith('.conllu')])
create_dataset(source_files, argv.dest_path)
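# Illustrative invocation (not part of the original module; paths are
# placeholders). There is no __main__ guard here, so main() is presumably
# wired up as a console entry point, e.g.:
# python -c "from ruconlluconv.space.dataset import main; main()" ./conllu_src ./dataset_out
# This writes test.txt (~1% of rows) plus train-1.txt, train-2.txt, ... in
# chunks of 10000 rows each.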
| 29.717949 | 101 | 0.592752 | 310 | 2,318 | 4.251613 | 0.33871 | 0.042489 | 0.036419 | 0.021244 | 0.157815 | 0.121396 | 0.121396 | 0.081942 | 0.081942 | 0.081942 | 0 | 0.009674 | 0.286454 | 2,318 | 77 | 102 | 30.103896 | 0.787183 | 0 | 0 | 0.163934 | 0 | 0 | 0.084556 | 0 | 0 | 0 | 0 | 0 | 0.032787 | 1 | 0.032787 | false | 0 | 0.131148 | 0 | 0.163934 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4cd052381d84e741e9aba129e01ded11236ccc7 | 5,471 | py | Python | code/head_and_flare_plot_rti_together.py | ryanvolz/thesis_defense | 0ada54d632c0c98edaf338390a56f85a8c29381f | [
"CC-BY-4.0",
"CC0-1.0",
"BSD-3-Clause"
] | 1 | 2022-03-24T22:52:14.000Z | 2022-03-24T22:52:14.000Z | code/head_and_flare_plot_rti_together.py | ryanvolz/thesis_defense | 0ada54d632c0c98edaf338390a56f85a8c29381f | [
"CC-BY-4.0",
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | code/head_and_flare_plot_rti_together.py | ryanvolz/thesis_defense | 0ada54d632c0c98edaf338390a56f85a8c29381f | [
"CC-BY-4.0",
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits import axes_grid1
import cPickle
import copy
import os
import echolect as el
params = {#'figure.subplot.left': 0.01,
#'figure.subplot.bottom': 0.01,
#'figure.subplot.right': .99,
#'figure.subplot.top': .99,
#'figure.subplot.wspace': .025,
#'figure.subplot.hspace': .025,
'font.size': 10,
'font.family': 'sans-serif',
'font.sans-serif': ['Linux Biolinum O', 'Arial', 'sans-serif'],
'pdf.fonttype': 42,
'ps.fonttype': 42,
#'ps.usedistiller': 'pdftk',
'axes.titlesize': 10,
'axes.labelsize': 10,
'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'lines.markersize': 1,
'lines.linewidth': 0.45,
'axes.linewidth': 0.45,
'xtick.major.size': 2,
'xtick.major.pad': 2,
'ytick.major.size': 2,
'ytick.major.pad': 3,
'text.usetex': False}
#'text.latex.preamble': ['\usepackage{amsmath}']}
plt.rcParams.update(params)
def plot_block(z, z_unc, t, r, dpi, pixelaspect=1, **kwargs):
tlen = len(t)
rlen = len(r)
xinches = float(tlen)/dpi
yinches = float(rlen)/dpi*pixelaspect
# approximate size for figure
# (doesn't matter if saving with tight bbox)
fig = plt.figure(figsize=(xinches + .225 + 1.2, yinches + 2.25))
# size for between upper and lower plots
# we will add sizes for labels and titles later
h = [axes_grid1.Size.Fixed(xinches/5)]*5
v = [axes_grid1.Size.Fixed(yinches)]
gs = matplotlib.gridspec.GridSpec(1, 1, left=0.085, bottom=0.0875, right=1, top=1)
div = axes_grid1.SubplotDivider(fig, gs[0],
horizontal=h, vertical=v)
loc0 = div.new_locator(nx=0, ny=0)
loc1 = div.new_locator(nx=1, ny=0)
loc2 = div.new_locator(nx=2, ny=0)
loc3 = div.new_locator(nx=3, ny=0)
loc4 = div.new_locator(nx=4, ny=0)
ax0 = fig.add_axes(loc0(None, None), label='ax0')
ax1 = fig.add_axes(loc1(None, None), label='ax1', sharey=ax0)
ax2 = fig.add_axes(loc2(None, None), label='ax2', sharey=ax0)
ax3 = fig.add_axes(loc3(None, None), label='ax3', sharey=ax0)
ax4 = fig.add_axes(loc4(None, None), label='ax4', sharey=ax0)
# turn off unwanted (duplicate) tick labels
plt.setp(ax1.get_yticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
plt.setp(ax4.get_yticklabels(), visible=False)
# locate the axes in the divider
ax0.set_axes_locator(loc0)
ax1.set_axes_locator(loc1)
ax2.set_axes_locator(loc2)
ax3.set_axes_locator(loc3)
ax4.set_axes_locator(loc4)
# also have to override get_subplotspec after setting locator
# so tight_layout works
ax0.get_subplotspec = loc0.get_subplotspec
ax1.get_subplotspec = loc1.get_subplotspec
ax2.get_subplotspec = loc2.get_subplotspec
ax3.get_subplotspec = loc3.get_subplotspec
ax4.get_subplotspec = loc4.get_subplotspec
# plot the images
img0 = el.rtiplot(z[0::5, :], t[0::5], r/1e3, title='Barker 13',
ylabel='Range (km)', ax=ax0, cbar=False, xbins=5,
exact_ticks=False, interpolation='none', **kwargs)
img1 = el.rtiplot(z[1::5, :], t[1::5], r/1e3, title='MSL',
ax=ax1, cbar=False, xbins=5,
exact_ticks=False, interpolation='none', **kwargs)
img2 = el.rtiplot(z_unc, t[2::5], r/1e3, title='Uncoded',
ax=ax2, cbar=False, xbins=5,
exact_ticks=False, interpolation='none', **kwargs)
img3 = el.rtiplot(z[3::5, :], t[3::5], r/1e3, title='LFM',
ax=ax3, cbar=False, xbins=5,
exact_ticks=False, interpolation='none', **kwargs)
img4 = el.rtiplot(z[4::5, :], t[4::5], r/1e3, title='PSRND',
clabel='SNR (dB)', ax=ax4, xbins=5,
exact_ticks=False, interpolation='none', **kwargs)
# erase all but one xlabel on plots for separate codes so they don't overlap
ax0.set_xlabel('')
ax1.set_xlabel('')
ax3.set_xlabel('')
ax4.set_xlabel('')
# tight layout
#gs.tight_layout(fig)
plt.draw()
return fig
basefilename = 'head_and_flare'
with open(basefilename + '.pkl', 'rb') as f:
data = cPickle.load(f)
with open(basefilename + '_mf.pkl', 'rb') as f:
mf = cPickle.load(f)
dpi = 75*4 # should be sized to match font size
savedpi = dpi*1 # should be a multiple of dpi
pixelaspect = 4
basedir = 'figures'
if not os.path.exists(basedir):
os.makedirs(basedir)
cmap = copy.copy(plt.cm.coolwarm)
cmap.set_bad(cmap(0))
rslc = el.slice_by_value(mf.r, 86000, 97000)
fig = plot_block(20*np.log10(np.abs(mf.vlt[:, rslc])/mf.noise_sigma),
20*np.log10(np.abs(data.vlt[2::5, rslc])/data.noise_sigma),
mf.t, mf.r[rslc],
dpi=dpi, pixelaspect=pixelaspect,
vmin=0, vmax=40,
csize=0.0625, cpad=0.05)
fpath = os.path.join(basedir, basefilename + '_mf_rti_block.pdf')
fig.savefig(fpath, dpi=savedpi, bbox_inches='tight', pad_inches=0, transparent=True)
plt.close(fig)
plt.show() | 36.718121 | 86 | 0.599708 | 770 | 5,471 | 4.172727 | 0.344156 | 0.04793 | 0.02023 | 0.023343 | 0.119203 | 0.110489 | 0.079676 | 0.079676 | 0.065982 | 0.065982 | 0 | 0.052979 | 0.251325 | 5,471 | 149 | 87 | 36.718121 | 0.731445 | 0.140742 | 0 | 0.045872 | 0 | 0 | 0.094251 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009174 | false | 0 | 0.073395 | 0 | 0.091743 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4ce3c157a11cf1e8eaa0e95bc2a27f12658839f | 1,161 | py | Python | second/utils/print_test.py | rogeriobonatti/wysiwyg | 04a26c6e9125f55222bd0b5d5b0cfbfaebbdbcdf | [
"MIT",
"BSD-3-Clause"
] | 92 | 2020-04-16T08:52:55.000Z | 2022-03-02T15:52:55.000Z | second/utils/print_test.py | rogeriobonatti/wysiwyg | 04a26c6e9125f55222bd0b5d5b0cfbfaebbdbcdf | [
"MIT",
"BSD-3-Clause"
] | 6 | 2020-08-07T03:18:41.000Z | 2022-03-09T04:49:07.000Z | second/utils/print_test.py | rogeriobonatti/wysiwyg | 04a26c6e9125f55222bd0b5d5b0cfbfaebbdbcdf | [
"MIT",
"BSD-3-Clause"
] | 18 | 2020-05-21T15:47:48.000Z | 2021-09-28T02:22:11.000Z | import os
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='nuscenes')
parser.add_argument('--step', type=int, default=-1)
parser.add_argument('--metric', type=str, default='mean_dist_aps')
parser.add_argument('--thresh', type=str, default="")
args = parser.parse_args()
classes = [
'car', 'pedestrian', 'barrier', 'traffic_cone', 'truck', 'bus', 'trailer', 'construction_vehicle', 'motorcycle', 'bicycle'
]
name = "freespace"
res_file = "utils/test_results.json"
if os.path.exists(res_file):
with open(res_file, 'r') as f:
summary = json.load(f)
print(summary)
# delim = '\t'
delim = ' & '
metric = args.metric
print('{:24}'.format(f'mAP[{args.thresh}]'), end=delim)
for cls in classes:
print('{:5}'.format(cls[:5]), end=delim)
print('{:5}'.format('avg'))
print('{:24}'.format(name), end=delim)
APs = []
for cls in classes:
n = summary[metric][cls]
if args.thresh in n:
AP = n[args.thresh]
else:
AP = sum(n.values())/len(n)
APs.append(AP)
print('{:.3f}'.format(AP), end=delim)
mAP = sum(APs)/len(APs)
print('{:.3f}'.format(mAP))
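# Example of the emitted table (illustrative values only, not real results;
# rows are ' & ' delimited so they can be pasted into a LaTeX table):
# mAP[]                    & car   & pedes & ...   & avg
# freespace                & 0.812 & 0.734 & ...   & 0.701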
| 25.8 | 126 | 0.64255 | 166 | 1,161 | 4.415663 | 0.439759 | 0.049113 | 0.092769 | 0.040928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010081 | 0.145564 | 1,161 | 44 | 127 | 26.386364 | 0.728831 | 0.010336 | 0 | 0.055556 | 0 | 0 | 0.196164 | 0.020052 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.194444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4ce7143a08fd78efc9047f19ca66cad8f2c7504 | 1,924 | py | Python | core/snake.py | LucienShui/SnakeAI | 9636d881f5d9647bf8f8a3f60ec890ccf7a6e245 | [
"Apache-2.0"
] | 1 | 2020-08-12T07:10:43.000Z | 2020-08-12T07:10:43.000Z | core/snake.py | LucienShui/SnakeAI | 9636d881f5d9647bf8f8a3f60ec890ccf7a6e245 | [
"Apache-2.0"
] | 1 | 2020-08-19T07:38:38.000Z | 2020-08-19T07:38:38.000Z | core/snake.py | LucienShui/SnakeAI | 9636d881f5d9647bf8f8a3f60ec890ccf7a6e245 | [
"Apache-2.0"
] | null | null | null | import typing
from .point import Point
from .action import Action
class Apple(object):
def __init__(self, point: Point):
self.position: Point = point
class Snake(object):
def __init__(self, x: int, y: int, length: int = 2):
self.length: int = length
self.initial_x: int = x
self.initial_y: int = y
self.points: typing.List[Point] = ...
self.delta: Point = ...
self.direction: int = ...
self.reset()
def reset(self):
self.points: typing.List[Point] = [Point(self.initial_x, self.initial_y, Point.Type.HEAD)]
for i in range(1, self.length - 1):
self.points.append(Point(self.initial_x, self.initial_y - i))
self.points.append(Point(self.initial_x, self.initial_y - self.length + 1, Point.Type.TAIL))
self.delta: Point = Point(0, 1, Point.Type.DIRECT)
self.direction: int = Action.RIGHT
def move(self, apple: Apple) -> bool:
self.points.insert(0, self.points[0] + self.delta)
self.points[1].type = Point.Type.BODY
if self.points[0] == apple.position:
return True
self.points.pop()
self.points[-1].type = Point.Type.TAIL
return False
def change_direction(self, direction: int) -> None:
"""
        Adjust the direction of the snake's head.
        :param direction: the new Action direction; a direction on the same axis as the current one is ignored
        :return: None
"""
if self.direction == direction:
return
if self.direction & 12 and direction & 12:
return
if self.direction & 3 and direction & 3:
return
self.direction = direction
if direction & 12:
self.delta.x = 1 if direction == Action.DOWN else -1
self.delta.y = 0
else:
self.delta.x = 0
self.delta.y = -1 if direction == Action.LEFT else 1
def __getitem__(self, index: int) -> Point:
return self.points[index]
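# Minimal usage sketch (comment only, not in the original module; assumes
# Point equality compares coordinates and the Action constants behave as
# referenced above):
# snake = Snake(x=5, y=5, length=3)
# snake.change_direction(Action.DOWN)     # head now moves along +x
# ate = snake.move(Apple(Point(6, 5)))    # True: the head lands on the apple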
| 26.722222 | 100 | 0.572765 | 248 | 1,924 | 4.358871 | 0.229839 | 0.101758 | 0.044403 | 0.048104 | 0.256244 | 0.154487 | 0.110083 | 0.083256 | 0.083256 | 0.083256 | 0 | 0.018783 | 0.308212 | 1,924 | 71 | 101 | 27.098592 | 0.793388 | 0.017672 | 0 | 0.065217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.065217 | 0.021739 | 0.369565 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4cf2c91db7b015aa8aa4991bdda7ef428e1f465 | 915 | py | Python | Python 3/8600TransientTrace.py | BKPrecisionCorp/8600DCLoad | 94ab102dca952acc19af9a5216a686546c73340a | [
"Apache-2.0"
] | null | null | null | Python 3/8600TransientTrace.py | BKPrecisionCorp/8600DCLoad | 94ab102dca952acc19af9a5216a686546c73340a | [
"Apache-2.0"
] | null | null | null | Python 3/8600TransientTrace.py | BKPrecisionCorp/8600DCLoad | 94ab102dca952acc19af9a5216a686546c73340a | [
"Apache-2.0"
] | null | null | null | import time
import visa
rm=visa.ResourceManager()
li=rm.list_resources()
for index in range(len(li)):
print(str(index)+" - "+li[index])
choice = input("Which device?: ")
vi=rm.open_resource(li[int(choice)])
print(vi.query("*idn?"))
vi.write("FUNC CURR")
vi.write("trace:clear")
vi.write("trace:feed two")
vi.write("trace:feed:control next")
#vi.write("trace:points 100")
vi.write("TRACE:TIMER 0.005")
#vi.write("TRAN ON")
#vi.write("CURR:TRAN:MODE TOGG")
#vi.write("CURR:SLEW MIN")
#vi.write("CURR:TRAN:ALEV 0")
#vi.write("CURR:TRAN:BLEV 1") #my power supply is small...
vi.write("source:input:state ON")
input("set the transient rate to slow, from fast: [enter] to continue")
vi.write("trig:imm")
time.sleep(3) # The trace data is a live buffer now
# so we need to wait till the transient is finished.
print(vi.query("TRACE:DATA?"))
vi.write("source:input:state off") # turn off the output
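# Illustrative post-processing sketch (not in the original script; assumes
# the load returns comma-separated readings, which is typical for SCPI
# TRACE:DATA? queries but should be verified against the 8600 manual):
# raw = vi.query("TRACE:DATA?")
# samples = [float(v) for v in raw.split(',') if v.strip()]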
| 28.59375 | 71 | 0.689617 | 157 | 915 | 4.006369 | 0.528662 | 0.155803 | 0.09539 | 0.071542 | 0.073132 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012469 | 0.123497 | 915 | 31 | 72 | 29.516129 | 0.77182 | 0.321311 | 0 | 0 | 0 | 0 | 0.361702 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4cf7e1c86e3205958291e7425a9a9f4f73be2ce | 9,057 | py | Python | tests/test_ssdp.py | esev/pywemo | 95ee8271c4253c4872112bdfd02f7e24d2ae4aa5 | [
"MIT"
] | null | null | null | tests/test_ssdp.py | esev/pywemo | 95ee8271c4253c4872112bdfd02f7e24d2ae4aa5 | [
"MIT"
] | null | null | null | tests/test_ssdp.py | esev/pywemo | 95ee8271c4253c4872112bdfd02f7e24d2ae4aa5 | [
"MIT"
] | null | null | null | """Tests for SSDP and discovery."""
import queue
import socket
import unittest.mock as mock
import pytest
import requests
from pywemo import ssdp
MOCK_CALLBACK_PORT = 8989
MOCK_IP_ADDRESS = "5.6.7.8"
@pytest.fixture()
def mock_interface_addresses():
"""Mock for util.interface_addresses."""
addresses = ["127.0.0.1"]
with mock.patch("pywemo.ssdp.interface_addresses", return_value=addresses):
yield addresses
@pytest.fixture()
def mock_get_ip_address():
"""Mock for util.get_ip_address."""
with mock.patch(
"pywemo.ssdp.get_ip_address", return_value=MOCK_IP_ADDRESS
):
yield MOCK_IP_ADDRESS
@pytest.fixture()
def mock_socket():
"""Mock socket instance returned from socket.socket."""
sock = mock.create_autospec(socket.socket, instance=True)
with mock.patch("socket.socket", return_value=sock) as mock_sock:
yield sock
assert mock_sock.call_count == 1
@pytest.fixture()
def mock_select():
"""Queue for delivering return values from select.select.
This will cause select.select to block until an item is put into the queue.
The return value from the mock select.select call will be the value that
was put into the queue.
"""
return_queue = queue.Queue()
def do_select(*_):
return return_queue.get()
with mock.patch("select.select", side_effect=do_select):
yield return_queue
@pytest.fixture()
def discovery_responder(
mock_select, mock_socket, mock_interface_addresses, mock_get_ip_address
):
"""Fixture for DiscoveryResponder instance.
Returns a callable(msg, addr). When called, (msg, addr) will be the
return value from the mock sock.recvfrom. If it is expected that mock
sock.sendto is called, the arguments to that mock will be returned from the
callable. Example:
sendto_msg, sendto_addr = discovery_responder(recvfrom_msg, recvfrom_addr)
Within the DiscoveryResponder instance, the mock recvfrom/sendto will map
to the values from the example callable above:
(recvfrom_msg, recvfrom_addr) = sock.recvfrom(1024)
sock.sendto(sendto_msg, sendto_addr)
"""
sendto_count = 0
def do_once(req, source, expect_sendto=True, sendto_exception=None):
nonlocal sendto_count
if sendto_exception:
sendto_count += 1
expect_sendto = False
mock_socket.sendto.side_effect = sendto_exception
if expect_sendto:
sendto_count += 1
send_queue = queue.Queue()
def sendto(msg, addr):
send_queue.put((msg, addr))
mock_socket.sendto.side_effect = sendto
mock_socket.recvfrom.return_value = (req.encode("UTF-8"), source)
# Unblock the select.select call with a socket, indicating data
# is ready.
mock_select.put(([mock_socket],))
if expect_sendto:
return send_queue.get()
resp = ssdp.DiscoveryResponder(callback_port=MOCK_CALLBACK_PORT)
resp._notify_enabled = False
resp.start()
try:
yield do_once
finally:
# Signal that the thread should exit, and unblock
# the select.select call
resp._exit.set()
mock_select.put(([],))
# Stop the discovery responder
resp.stop()
# Make sure the expected number of calls were made to sock.sendto.
assert mock_socket.sendto.call_count == sendto_count
def test_discovery_responder_notify(mock_socket, mock_interface_addresses):
resp = ssdp.DiscoveryResponder(callback_port=MOCK_CALLBACK_PORT)
resp.send_notify()
for addr in mock_interface_addresses:
mock_socket.sendto.assert_called_with(
(ssdp.SSDP_NOTIFY % (addr, MOCK_CALLBACK_PORT)).encode('utf-8'),
('239.255.255.250', 1900),
)
def test_discovery_responder_responds_to_wemo(discovery_responder):
"""The DiscoveryResponder responds to WeMo M-SEARCH messages."""
from_addr = ("1.2.3.4", 54321)
msg = """M-SEARCH * HTTP/1.1
ST: urn:Belkin:service:basicevent:1
MX: 1
MAN: "ssdp:discover"
HOST: 239.255.255.250:1900
"""
resp_msg, resp_to_addr = discovery_responder(msg, from_addr)
expected_response = ssdp.SSDP_REPLY % (MOCK_IP_ADDRESS, MOCK_CALLBACK_PORT)
assert resp_msg.decode("UTF-8") == expected_response
# The reply should go back to the source.
assert resp_to_addr == from_addr
def test_discovery_responder_ignores_notify(discovery_responder):
"""The DiscoveryResponder does not reply to NOTIFY messages."""
from_addr = ("1.2.3.4", 54321)
msg = (
"""NOTIFY * HTTP/1.1
HOST: 239.255.255.250:1900
CACHE-CONTROL: max-age=1800
LOCATION: http://%s:%d/setup.xml
SERVER: Unspecified, UPnP/1.0, Unspecified
NT: urn:Belkin:service:basicevent:1
NTS: ssdp:alive
USN: uuid:Socket-1_0-SERIALNUMBER::urn:Belkin:service:basicevent:1
"""
% from_addr
)
discovery_responder(msg, from_addr, expect_sendto=False)
def test_discovery_responder_ignores_non_wemo(discovery_responder):
"""The DiscoveryResponder does not reply to non-WeMo M-SEARCH requests."""
from_addr = ("1.2.3.4", 54321)
msg = """M-SEARCH * HTTP/1.1
ST: ssdp:all
MX: 2
MAN: "ssdp:discover"
HOST: 239.255.255.250:1900
"""
discovery_responder(msg, from_addr, expect_sendto=False)
def test_discovery_responder_ignores_sendto_exception(discovery_responder):
"""The DiscoveryResponder does not fail if sendto fails."""
from_addr = ("1.2.3.4", 54321)
msg = """M-SEARCH * HTTP/1.1
ST: urn:Belkin:service:basicevent:1
MX: 1
MAN: "ssdp:discover"
HOST: 239.255.255.250:1900
"""
discovery_responder(msg, from_addr, sendto_exception=OSError)
# Verify that the DiscoveryResponder is still working.
test_discovery_responder_responds_to_wemo(discovery_responder)
class TestScan:
"""Tests for the ssdp.scan method."""
_R1 = '\r\n'.join(
[
'HTTP/1.1 200 OK',
'HOST: 239.255.255.250:1900',
'CACHE-CONTROL: max-age=1800',
'LOCATION: http://192.168.1.100:49158/setup.xml',
'SERVER: Unspecified, UPnP/1.0, Unspecified',
'ST: urn:Belkin:service:basicevent:1',
'USN: uuid:Socket-1_0-SERIAL::urn:Belkin:service:basicevent:1',
'',
]
).encode()
_R2 = '\r\n'.join(
[
'HTTP/1.1 200 OK',
'HOST: 239.255.255.250:1900',
'CACHE-CONTROL: max-age=1800',
'LOCATION: http://192.168.1.100:49158/setup.xml',
'SERVER: Unspecified, UPnP/1.0, Unspecified',
'ST: upnp:rootdevice',
'USN: uuid:Socket-1_0-SERIAL2::upnp:rootdevice',
'',
]
).encode()
@pytest.mark.parametrize(
"kwargs,expected_count",
[
({'match_udn': 'no_match'}, 0),
({}, 2),
({'match_udn': 'uuid:Socket-1_0-SERIAL'}, 1),
({'match_udn': 'uuid:Socket-1_0-SERIAL2'}, 1),
],
)
def test_scan(
self,
mock_interface_addresses,
mock_socket,
mock_select,
kwargs,
expected_count,
):
mock_socket.recv.side_effect = [self._R1, self._R1, self._R2]
mock_select.put(([mock_socket],)) # _R1.
mock_select.put(([mock_socket],)) # _R1 is received twice.
mock_select.put(([mock_socket],)) # _R2.
mock_select.put(([],)) # Exit.
entries = ssdp.scan(st=ssdp.ST, timeout=0, **kwargs)
assert len(entries) == expected_count
def test_scan_no_setup_xml(
self, mock_interface_addresses, mock_socket, mock_select
):
mock_socket.recv.return_value = self._R1
mock_select.put(([mock_socket],))
mock_select.put(([],))
entries = ssdp.scan(st=ssdp.ST, timeout=0)
assert len(entries) == 1
entry = entries[0]
assert entry.udn == 'uuid:Socket-1_0-SERIAL'
assert entry.st == 'urn:Belkin:service:basicevent:1'
assert repr(entry) == (
'<UPNPEntry urn:Belkin:service:basicevent:1 - '
'http://192.168.1.100:49158/setup.xml - uuid:Socket-1_0-SERIAL>'
)
with mock.patch('requests.get', side_effect=requests.RequestException):
assert entry.description == {}
class TestUPNPEntry:
"""Tests for the UPNPEntry class."""
_R1 = TestScan._R1.decode()
_R2 = TestScan._R2.decode()
def test_properties(self):
r1 = ssdp.UPNPEntry.from_response(self._R1)
assert r1.st == "urn:Belkin:service:basicevent:1"
assert (
r1.usn == "uuid:Socket-1_0-SERIAL::urn:Belkin:service:basicevent:1"
)
assert r1.udn == "uuid:Socket-1_0-SERIAL"
assert r1.location == "http://192.168.1.100:49158/setup.xml"
assert r1.is_expired is False
r2 = ssdp.UPNPEntry.from_response(self._R2)
assert r1 != r2
r1_2 = ssdp.UPNPEntry.from_response(self._R1)
assert r1_2 == r1
items = set((r1, r2, r1_2))
assert len(items) == 2
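# To run these tests directly (illustrative; assumes pytest and pywemo are
# installed): pytest tests/test_ssdp.py -v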
| 30.701695 | 79 | 0.646572 | 1,199 | 9,057 | 4.69141 | 0.185154 | 0.032 | 0.028444 | 0.046222 | 0.449956 | 0.363733 | 0.3024 | 0.278578 | 0.187022 | 0.153778 | 0 | 0.049712 | 0.233742 | 9,057 | 294 | 80 | 30.806122 | 0.760807 | 0.178426 | 0 | 0.307292 | 0 | 0.005208 | 0.193718 | 0.074758 | 0 | 0 | 0 | 0 | 0.098958 | 1 | 0.083333 | false | 0 | 0.03125 | 0.005208 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4d0137f7fae0bad4c0708d52cc3a2d28862307e | 4,563 | py | Python | MCSH/logging.py | RealAllenDa/MinecraftServerHelper | 888217070443c0cc04823ebe4a41c7f24ff785ec | [
"MIT"
] | null | null | null | MCSH/logging.py | RealAllenDa/MinecraftServerHelper | 888217070443c0cc04823ebe4a41c7f24ff785ec | [
"MIT"
] | null | null | null | MCSH/logging.py | RealAllenDa/MinecraftServerHelper | 888217070443c0cc04823ebe4a41c7f24ff785ec | [
"MIT"
] | null | null | null | """
***************************************
MCSH - A Minecraft Server Helper.
Coded by AllenDa 2020.
Licensed under MIT.
***************************************
Module Name: MCSH.logging
Module Revision: 0.0.1-18
Module Description:
A module for all the shared functions.
Including Logging, Downloading, etc.
"""
import os
import tarfile
import time
from MCSH.consts import LOGGING_COLORS
from MCSH.crash_report import generate_crash_report
logging_file_name = ""
DEBUG = False
color_enabled = False
# The in-program logger.
def log(log_module, log_severity, log_text, override_color=False):
"""
The logging function for MCSH.
log_severity: FATAL, ERROR, WARNING, INFO, DEBUG
"""
try:
if color_enabled:
log_color = LOGGING_COLORS[log_severity]
else:
log_color = ""
except:
log_color = ""
# Color override (in case config isn't here)
if override_color:
log_color = ""
# Convert to string
log_text = str(log_text)
log_text_lines = log_text.split("\n")
for log_text in log_text_lines:
log_formatted_text = "[{time}-{process_time}] [{log_module}/{log_severity}]: {log}".format(**{
"time": time.strftime("%H:%M:%S", time.localtime()),
"process_time": time.process_time(),
"log_module": log_module,
"log_severity": log_severity,
"log": log_text
})
log_formatted_output = "{color}{log}\033[0m".format(**{
"color": log_color,
"log": log_formatted_text
})
if logging_file_name != "":
try:
with open(logging_file_name, "a+") as f:
f.write(log_formatted_text + "\n")
f.close()
except:
pass
# If DEBUG is False, don't output debug messages
if log_severity == "DEBUG":
if DEBUG:
print(log_formatted_output)
else:
print(log_formatted_output)
def crash(crash_info):
"""
Handle all crashing.
"""
log("crash_watchdog", "FATAL", "MCSH had crashed!\n"
"For detailed information, "
"see crash reports under ./MCSH/crash_report folder.")
try:
program_traceback = crash_info["program_traceback"]
except KeyError:
program_traceback = "MCSH program exception"
except:
program_traceback = "Unknown exception occurred in crash watchdog."
generate_crash_report(crash_info["description"],
crash_info["exception"],
crash_info["computer_info"],
program_traceback)
def initialize_logger():
"""
Initialize the logging file handler.
Default log output directory: ./MCSH/logs
Default log threshold: 10 logs
"""
global DEBUG, color_enabled
path = "./MCSH/logs"
from MCSH.debug import debugging_check
DEBUG = debugging_check(suppress_warning=True)
# First-time initialization
if not os.path.exists(path):
os.mkdir(path)
# Logging color detection
try:
from MCSH.consts import config_instance
with open("./MCSH/config/MCSH.json", "r") as f:
import json
temp_config = json.load(f)
f.close()
color_enabled = bool(temp_config["color_enabled"])
except:
color_enabled = False
# Auto-packing logs
if len([lists for lists in os.listdir(path) if os.path.isfile(os.path.join(path, lists))]) >= 10:
try:
tar = tarfile.open("./MCSH/logs/pack.tar.gz", "w:gz")
for root, directory, files in os.walk("./MCSH/logs"):
for file in files:
if file != "pack.tar.gz":
file_path = os.path.join(root, file)
tar.add(file_path, arcname=file)
os.remove(file_path)
tar.close()
except:
print("Failed to pack logs. Please delete logs manually under ./MCSH/logs.")
# Set the logging file name
global logging_file_name
logging_file_name = "{}/{}.log".format(path, time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()))
try:
with open(logging_file_name, "w+") as f:
f.write("Logger initialized -- Start logging...\n")
f.close()
except:
print("WARNING: Can't write a log to the file. Logging function will be disabled.")
logging_file_name = ""
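# Typical call sequence (comment sketch, not in the original module; the
# module name "core" is a placeholder):
# initialize_logger()
# log("core", "INFO", "Server starting...")
# log("core", "DEBUG", "Shown only when debugging is enabled")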
| 33.8 | 102 | 0.573526 | 537 | 4,563 | 4.698324 | 0.307263 | 0.039239 | 0.047562 | 0.023781 | 0.0761 | 0.042013 | 0 | 0 | 0 | 0 | 0 | 0.005338 | 0.301994 | 4,563 | 134 | 103 | 34.052239 | 0.786813 | 0.163708 | 0 | 0.294737 | 0 | 0 | 0.183449 | 0.026513 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031579 | false | 0.010526 | 0.084211 | 0 | 0.115789 | 0.042105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4d33929de3fd0bf625b63e39a95f8cfb6dfd254 | 6,366 | py | Python | train.py | firehose-dataset/congrad | 20792f43aa89beae75454e30b82b2e1280ed3106 | [
"MIT"
] | 9 | 2020-07-21T14:37:22.000Z | 2021-07-14T12:44:13.000Z | train.py | firehose-dataset/congrad | 20792f43aa89beae75454e30b82b2e1280ed3106 | [
"MIT"
] | 2 | 2020-09-22T18:05:03.000Z | 2020-11-19T09:42:21.000Z | train.py | firehose-dataset/congrad | 20792f43aa89beae75454e30b82b2e1280ed3106 | [
"MIT"
] | 2 | 2020-07-21T16:39:12.000Z | 2020-07-30T02:20:47.000Z | # coding: utf-8
import argparse
import json
import time
import math
import os, sys
import itertools
import numpy as np
import os.path as osp
import torch
import torch.nn as nn
import torch.optim as optim
from core.dataset.corpus import get_lm_corpus
from core.configs import get_basic_parser
from core.trainer import OnlineTrainer, batch_evaluate
# TODO: Dangerous line of code below; make sure to remove it if you don't know what is being ignored.
import warnings
warnings.filterwarnings("ignore")
def postprocess_args(args):
args.tie_weight = not args.not_tied
args.d_embed = args.d_model if args.d_embed < 0 else args.d_embed
args.d_user_embed = args.d_embed if args.d_user_embed < 0 else args.d_user_embed
assert args.batch_size % args.batch_chunk == 0
if args.snapshot_dir is not None:
with open(os.path.join(args.snapshot_dir, "configs.json")) as fd:
max_step = args.max_step
snapshot_dir = args.snapshot_dir
args_json = json.load(fd)
args = argparse.Namespace(**args_json)
args.max_step = max_step
args.snapshot_dir = snapshot_dir
else:
args.work_dir = "_".join([ _ for _ in [
args.work_dir,
args.dataset,
"cased" if args.cased else None,
] if _ is not None ])
args.work_dir = os.path.join( args.work_dir, "_".join([ _ for _ in [
args.learner,
"{}_online".format(args.online_buffer_strategy),
"{}_replay".format(args.replay_buffer_strategy),
"{:03g}databsz".format(args.batch_size),
"{:03g}obsz".format(args.online_batch_size),
"{:03g}rbsz".format(args.replay_batch_size),
"{:02g}opusize".format(args.online_per_user_rbsize),
"{:02g}rpusize".format(args.replay_per_user_rbsize),
"{:02g}maxk".format(args.max_k_steps),
"{}".format(args.mtl_type) if args.model_class.startswith("MTL") else None,
"allowZeroStep" if args.allow_zero_step else None,
"fromPretrained" if args.init_weights is not None else None,
args.postfix,
] if _ is not None ]),
)
args.work_dir = os.path.join( args.work_dir, "_".join([ _ for _ in [
args.model_class,
"{}".format(args.mtl_type) if args.model_class.startswith("MTL") else None,
"maxlen{:03d}".format(args.max_seqlen),
"lr{:.4g}".format(args.lr),
"time{}".format(time.strftime("%Y%m%d_%H%M%S"))
] if _ is not None ]),
)
return args
def _command_line_parser():
parser = argparse.ArgumentParser(parents=[get_basic_parser()])
parser.add_argument("--dataset_path", type=str, default="data/Firehose10M",
help="location of the data corpus")
parser.add_argument("--dataset", type=str, default="Firehose10M",
help="dataset name")
parser.add_argument("--cased", default=False, action="store_true",
help="use cased or uncased corpus")
parser.add_argument("--vocab_file", type=str, required=True,
help="vocabulary")
# replay buffer configs
parser.add_argument("--online_batch_size", type=int, default=128,
help="online batch size")
parser.add_argument("--online_per_user_rbsize", type=int, default=1,
help="per user online memory buffer size")
parser.add_argument("--replay_batch_size", type=int, default=128,
help="replay batch size")
parser.add_argument("--replay_per_user_rbsize", type=int, default=8,
help="per user replay memory buffer size")
parser.add_argument("--online_buffer_strategy", type=str, default="greedy",
help="online cache strategy (default: greedy)",
choices=["greedy", "reservoir", "stratified", "stratified-reservoir"])
parser.add_argument("--replay_buffer_strategy", type=str, default="greedy",
help="replay buffer strategy (default: greedy)",
choices=["greedy", "reservoir", "stratified", "stratified-reservoir"])
parser.add_argument("--allow_zero_step", action="store_false",
help="whether allow the minimum number of gradient steps to be zero in ConGraD.")
parser.add_argument("--max_k_steps", type=int, default=1,
help="the maximum number of gradient steps per online data chunk.")
parser.add_argument("--learner", type=str, default="OnlineOnly",
help="type of online learning algorithms",
choices=["AGEM", "OnlineOnly", "ReplayOnly", "MixedReplay",
"ConGraD_AGEM", "ConGraD_OnlineOnly", "ConGraD_ReplayOnly",
"ConGraD_MixedReplay"])
return parser
if __name__ == "__main__":
parser = _command_line_parser()
args = parser.parse_args()
args = postprocess_args(args)
corpus = get_lm_corpus(args.dataset_path, args.dataset, args.vocab_file, args.cased)
# Use the actual number of tokens from dictionary
ntokens = len(corpus.vocab)
args.n_token = ntokens
trainer = OnlineTrainer(
args,
corpus,
)
epoch = 0
done = trainer.train(epoch)
# remove epoch snapshot to save memory
trainer.save_snapshot(-1)
val_token_loss, val_word_loss = batch_evaluate(trainer.test_data, trainer.model, trainer.args)
print("* Final Model Ends at Epoch #{}".format(epoch+1))
print("| val token/word ppl {:9.3f} / {:9.3f} ".format(math.exp(val_token_loss), math.exp(val_word_loss)))
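# Illustrative invocation (not part of the script; the vocabulary path is a
# placeholder, other values mirror the argparse defaults above):
# python train.py --dataset_path data/Firehose10M --dataset Firehose10M \
#     --vocab_file data/vocab.txt --learner ConGraD_MixedReplay --max_k_steps 4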
| 47.864662 | 113 | 0.56409 | 721 | 6,366 | 4.768377 | 0.273232 | 0.034031 | 0.064282 | 0.024433 | 0.242292 | 0.197208 | 0.162304 | 0.115765 | 0.115765 | 0.115765 | 0 | 0.009144 | 0.330035 | 6,366 | 132 | 114 | 48.227273 | 0.796952 | 0.033302 | 0 | 0.072072 | 0 | 0 | 0.189818 | 0.015615 | 0 | 0 | 0 | 0.007576 | 0.009009 | 1 | 0.018018 | false | 0 | 0.135135 | 0 | 0.171171 | 0.018018 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4d9bf8f6d4100f5fbe1040cb0ba4a581a667576 | 2,589 | py | Python | nncf/common/quantization/quantizers.py | xiao1228/nncf | 307262119ee3f50eec2fa4022b2ef96693fd8448 | [
"Apache-2.0"
] | null | null | null | nncf/common/quantization/quantizers.py | xiao1228/nncf | 307262119ee3f50eec2fa4022b2ef96693fd8448 | [
"Apache-2.0"
] | null | null | null | nncf/common/quantization/quantizers.py | xiao1228/nncf | 307262119ee3f50eec2fa4022b2ef96693fd8448 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple
def calculate_symmetric_level_ranges(
num_bits: int,
signed: bool,
narrow_range: bool = False) -> Tuple[int, int, int]:
"""
Calculates the numbers of the low and high quant and the number of
quantization levels for the symmetric quantization scheme.
:param num_bits: The bitwidth of the quantization.
:param signed: The flag specifying type of the symmetric quantization scheme
if it is True then the symmetric quantization scheme is the signed and
the un-signed otherwise.
:param narrow_range: The flag specifying quantization range if it is True
then [1; 2^num_bits - 1] and [0; 2^num_bits - 1] otherwise.
:return: A Tuple
level_low - the low quant number
level_high - the high quant number
levels - the number of quantization levels
"""
levels = 2 ** num_bits
if signed:
level_high = (levels // 2) - 1
level_low = -(levels // 2)
else:
level_high = levels - 1
level_low = 0
if narrow_range:
level_low = level_low + 1
levels = levels - 1
return level_low, level_high, levels
def calculate_asymmetric_level_ranges(
num_bits: int,
narrow_range: bool = False) -> Tuple[int, int, int]:
"""
Calculates the numbers of the low and high quant and the number of
quantization levels for the asymmetric quantization scheme.
:param num_bits: The bitwidth of the quantization
:param narrow_range: The flag specifying quantization range if it is True
then [1; 2^num_bits - 1] and [0; 2^num_bits - 1] otherwise
:return: A Tuple
level_low - the low quant number
level_high - the high quant number
levels - the number of quantization levels
"""
levels = 2 ** num_bits
level_high = levels - 1
level_low = 0
if narrow_range:
level_low = level_low + 1
levels = levels - 1
return level_low, level_high, levels
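# Worked examples (comments added for clarity, not in the original module;
# values follow directly from the formulas above):
# calculate_symmetric_level_ranges(8, signed=True) -> (-128, 127, 256)
# calculate_symmetric_level_ranges(8, signed=True, narrow_range=True) -> (-127, 127, 255)
# calculate_asymmetric_level_ranges(8) -> (0, 255, 256)
# calculate_asymmetric_level_ranges(8, narrow_range=True) -> (1, 255, 255)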
| 34.065789 | 80 | 0.679413 | 373 | 2,589 | 4.608579 | 0.281501 | 0.051193 | 0.027923 | 0.053519 | 0.589296 | 0.556719 | 0.556719 | 0.556719 | 0.556719 | 0.556719 | 0 | 0.017214 | 0.25956 | 2,589 | 75 | 81 | 34.52 | 0.879499 | 0.637698 | 0 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.038462 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4e075d99493283f713d77ff2a9998ebc61bca27 | 6,905 | py | Python | crawler/bot.py | danhorsley/my_fx_bot | fc62a9c6c8a596546d028bedd0ada5769038ca93 | [
"MIT"
] | null | null | null | crawler/bot.py | danhorsley/my_fx_bot | fc62a9c6c8a596546d028bedd0ada5769038ca93 | [
"MIT"
] | 1 | 2021-06-02T00:37:56.000Z | 2021-06-02T00:37:56.000Z | crawler/bot.py | danhorsley/my_fx_bot | fc62a9c6c8a596546d028bedd0ada5769038ca93 | [
"MIT"
] | null | null | null | #import pandas as pd
import numpy as np
import random
from tqdm import tqdm
#from sklearn.linear_model import LinearRegression
#from pandas.core.common import SettingWithCopyWarning
#import warnings
#from .dbtonumpy import eurusd_prices
#warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
from datetime import datetime, timedelta
import datetime as dt
start_date = dt.date.today()
y = dt.timedelta(days=1*365)
end_date = start_date + y
nb_paths = 10
initial_price = 1.10
def r2_score_and_slope(y):
"""takes numpy array of prices and returns r2 score, slope and constant"""
y = np.array(y)
x = np.vstack([list(range(len(y))),np.ones(len(y))]).T
m, c = np.linalg.lstsq(x, y, rcond=None)[0]
y_hat = [(xx*m + c) for xx in list(range(len(y)))]
y_bar = np.sum(y)/len(y)
ssreg = np.sum((y_hat-y_bar)**2)
sstot = np.sum((y - y_bar)**2)
r_2 = ssreg / sstot
return r_2, m, c
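# Worked example (comment only, not in the original): a perfectly linear
# series gives R^2 = 1 with the fitted slope and intercept, up to
# floating-point error:
# r2_score_and_slope([1.0, 2.0, 3.0]) -> (1.0, 1.0, 1.0)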
def monte_carlo(arr, n_days=500, paths=100,detrend=True,starting_point = 1.1):
"""Monte carlo simulation for date range - start date and end date
n is number of simualations
detrend will take trend out of data - i.e. absolute all values and assign + or - to returns
with 50/50 probability"""
if detrend:
ss = np.absolute(arr.reshape(1,-1))
ones = np.random.choice([-1,1],len(arr))
ss = ss * ones
sampled_returns = np.random.choice(ss[0], size=(n_days, paths)) + 1
#print(sampled_returns)
else:
        sampled_returns = np.random.choice(arr.reshape(1, -1)[0], size=(n_days, paths)) + 1
date_list = [(datetime.today() + timedelta(days = i)) for i in range(n_days)]
cum_returns = np.cumprod(sampled_returns,axis=0) * starting_point
#df_price = pd.DataFrame(cum_returns, index = date_list)
return [date_list,cum_returns]
def p_and_l_np(arr, all_trades):
arr = np.array(arr)
trades = np.array(all_trades)
current_position = np.cumsum(trades)
pos_value = arr * current_position
cost = -arr*trades
p_and_l = (pos_value + np.cumsum(cost))/(arr)
return p_and_l, current_position
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
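# Worked example (comment added for clarity, not in the original):
# rolling_window(np.array([1, 2, 3, 4, 5]), 3) is the stride view
# [[1 2 3]
#  [2 3 4]
#  [3 4 5]]
# so np.mean(..., 1) / np.std(..., 1) compute trailing windows without copying.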
def mean_reversion_np(arr,pda=50,devs=1,window=20):
arr = np.array(arr)
max_window = max(pda,window)
std_rolling = np.std(rolling_window(arr, pda), 1)
mov_av = np.mean(rolling_window(arr, window), 1)
devs_away = np.where(abs(mov_av[max(0,pda-window):]-arr[max_window-1:])>=std_rolling[max(0,window-pda):]*devs,1,0)
b_or_s = np.where(mov_av[max(0,pda-window):]-arr[max_window-1:]>=0,1,-1)
action = b_or_s * devs_away
action_shift = action[1:]
mr_trade = np.append(action[0], action_shift - action[:-1])
#return np.append(np.zeros(pda-1),mr_trade)
return mr_trade
class trading_rules:
"""class to hold trading rules for bot"""
    def __init__(self, portfolio_size=1000000, trade_increment=100000,
                 stop_loss=-5, stop_profit=10,
                 trend_follow1=10, trend_follow2=30, trend_follow3=50, tlev=1,
                 mean_revert=False, mean_revert_inc=0.5, trend_score=0.8):
self.ps = portfolio_size
self.ti = trade_increment
self.sl = stop_loss
self.sp = stop_profit
self.tf1 = trend_follow1
self.tf2 = trend_follow2
self.tf3 = trend_follow3
self.tlev = tlev
self.mr = mean_revert
self.mri = mean_revert_inc
self.ts = trend_score
self.rsl = 0 #rolling stop loss
def trend_finder(self,rg):
"""rg is slice of the close prices"""
#col_name = rg.columns[0]
slices = []
for period in [self.tf1,self.tf2,self.tf3]:
if period != 0:
slices.append(rg[-period:])
correl = []
coeff = []
for sl in slices:
y = np.array(sl)
scr, m, c = r2_score_and_slope(y)
correl.append(scr)
coeff.append(m)
#print(correl, coeff)
return correl,coeff
def trade_generator(self,test_monte_so_far,t_so_far):#,t_rules = trading_rules()):
"""generates trades given rules for bot"""
#frame = p_and_l(test_monte_so_far,t_so_far)
p_and_l, cur_pos = p_and_l_np(test_monte_so_far,t_so_far)
new_trade = 0
#finding trend conditions
trend_scores = self.trend_finder(test_monte_so_far)#, s = t_rules.tf1, m = t_rules.tf2, l = t_rules.tf3)
is_trend = np.where(np.array(trend_scores[0])>self.ts,1,0)
r2_condition = is_trend.sum()
#print(r2_condition)
coeff_dot = np.dot(np.array(trend_scores[1]), is_trend)
direction = np.sign(np.dot(np.array(trend_scores[1]), is_trend))
#stop loss or stop profit
if p_and_l[-1] > self.ps * self.sp * 0.01 + self.rsl\
or p_and_l[-1] < self.ps * self.sl * 0.01 + self.rsl:
new_trade = -cur_pos[-1]
self.rsl = self.rsl + p_and_l[-1]
#trend trades - check to see that you don't exceed portfolio size
elif r2_condition == 1 and abs(cur_pos[-1] + direction*self.ti)<self.ps*self.tlev:
new_trade = np.sign(np.dot(np.array(trend_scores[1]), is_trend))*self.ti
elif r2_condition >= 2 and abs(cur_pos[-1] + direction*self.ti)<self.ps*self.tlev:
if abs(cur_pos[-1] + 2*direction*self.ti) <= self.ps:
new_trade = 2*direction*self.ti
else:
new_trade = direction*self.ti
return new_trade
def run_bot_over_montes(self, monte_group, pda = 50):#, tr = trading_rules()):
"""generates positions and p&ls for bot over different scenarios
pda is the initial data before you start runnign the scenario"""
trade_histories = []
for j in tqdm(range(len(monte_group[1][0]))):
self.rsl = 0
#monte = monte_group[[monte_group.columns[j]]].copy()
monte = monte_group[1][:,j]
#print(monte.shape)
#monte = make_reversion_columns(monte)
mr_trade = mean_reversion_np(monte,pda=pda) * self.mri*self.ps
no_trades = [0 for x in range(pda)]
for i in range(len(monte)-pda):
new_trade = self.trade_generator(monte[:pda+i],no_trades)
#adding mean reversion here to try and speed up
#mr_trade = monte['mr_trade'][pda+i] * self.mri*self.ps
new_trade = new_trade + mr_trade[i]
no_trades.append(new_trade)
trade_history = p_and_l_np(monte,no_trades)
trade_histories.append(trade_history)
return trade_histories | 39.232955 | 116 | 0.623172 | 1,058 | 6,905 | 3.873346 | 0.236295 | 0.009761 | 0.012201 | 0.013665 | 0.118106 | 0.091508 | 0.083699 | 0.061249 | 0.061249 | 0.053685 | 0 | 0.028106 | 0.25286 | 6,905 | 176 | 117 | 39.232955 | 0.766234 | 0.204634 | 0 | 0.067227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07563 | false | 0 | 0.05042 | 0 | 0.201681 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4e1de52882ddfbf3559c816d3bb41dcee39e97e | 3,824 | py | Python | test/test_stream.py | pkch/executors | 326677ab98de374314bfa76e75624a705c34bdda | [
"MIT"
] | 1 | 2017-07-17T14:11:18.000Z | 2017-07-17T14:11:18.000Z | test/test_stream.py | pkch/executors | 326677ab98de374314bfa76e75624a705c34bdda | [
"MIT"
] | 3 | 2017-05-29T10:24:36.000Z | 2017-05-30T09:20:11.000Z | test/test_stream.py | pkch/executors | 326677ab98de374314bfa76e75624a705c34bdda | [
"MIT"
] | 1 | 2020-11-21T18:53:52.000Z | 2020-11-21T18:53:52.000Z | from itertools import islice, count
from functools import partial
import time
import os
import pytest
from streamexecutors import StreamThreadPoolExecutor, StreamProcessPoolExecutor
approx = partial(pytest.approx, abs=0.5)
test_classes = [StreamThreadPoolExecutor, StreamProcessPoolExecutor]
# pytest bug with skipif(sys.platform != 'win32'): https://github.com/pytest-dev/pytest/issues/1296
test_classes_timing = [StreamThreadPoolExecutor]
class Timer:
def __enter__(self):
self.start = time.perf_counter()
return self
def elapsed(self):
return time.perf_counter() - self.start
def print(self):
print('{:.2f} sec'.format(self.elapsed()))
def __exit__(self, *args):
self.print()
def produce(n=None, error=None):
for i in count():
if i == n:
break
if i == error:
raise ValueError
time.sleep(0.2)
yield i
def process(i):
s = time.perf_counter()
time.sleep(0.1)
return i + 1
@pytest.mark.parametrize("test_class", test_classes)
def test_unused_generator(test_class):
# Testing for deadlocks observed earlier
executor = test_class(max_workers=2)
gen = produce()
executor.map(process, gen, buffer_size=10)
# Delay to reproduce deadlock observed earlier
# and to allow gc to collect result of map
time.sleep(0.2)
last_processed = None
gen = produce()
executor.map(process, gen, buffer_size=10)
last_processed = None
gen = produce()
executor.map(process, gen, buffer_size=1)
last_processed = None
gen = produce()
with test_class(max_workers=2) as executor:
executor.map(process, gen, buffer_size=10)
@pytest.mark.parametrize("test_class", test_classes)
def test_error(test_class):
with test_class(max_workers=2) as executor:
g = executor.map(process, produce(error=2))
with pytest.raises(ValueError):
list(g)
input_size = 10
is_odd = lambda x: x%2
@pytest.mark.parametrize("test_class", test_classes_timing)
def test_timing_2_workers(test_class):
with Timer() as t:
# test_class.map takes 0.1 * 20 / 2 = 1 sec
# starts processing here, without waiting for iteration
executor = test_class(max_workers=2)
m = executor.map(process, count())
g = islice(filter(is_odd, m), input_size)
assert t.elapsed() == approx(0)
time.sleep(0.5)
assert list(g) == list(range(1, 2*input_size, 2))
assert t.elapsed() == approx(1)
@pytest.mark.parametrize("test_class", test_classes_timing)
def test_timing_10_workers(test_class):
executor = test_class(max_workers=10)
with Timer() as t:
print(list(islice(filter(None, executor.map(process, count())), input_size)))
if test_class == StreamThreadPoolExecutor:
assert t.elapsed() == approx(0.1)
with Timer() as t:
it = islice(filter(None, executor.map(process, produce())), input_size)
for x in it:
if test_class == StreamThreadPoolExecutor:
                assert t.elapsed() == approx(0.3)
break
for x in it:
pass
assert t.elapsed() == approx(2.2)
with Timer() as t:
it = islice(filter(None, executor.map(process, produce())), input_size)
time.sleep(3)
for x in it:
break
for x in it:
pass
assert t.elapsed() == approx(3)
# Imitate abnormal main thread exit
@pytest.mark.xfail
@pytest.mark.parametrize("test_class", test_classes)
def test_abnormal_termination(test_class):
executor = test_class(max_workers=2)
m = executor.map(process, count())
raise RuntimeError()
| 30.592 | 100 | 0.631799 | 493 | 3,824 | 4.754564 | 0.25355 | 0.072952 | 0.076792 | 0.048635 | 0.430887 | 0.401451 | 0.375 | 0.341724 | 0.31186 | 0.231229 | 0 | 0.020509 | 0.26046 | 3,824 | 124 | 101 | 30.83871 | 0.808345 | 0.09205 | 0 | 0.431579 | 0 | 0 | 0.017964 | 0 | 0 | 0 | 0 | 0 | 0.063158 | 1 | 0.115789 | false | 0.021053 | 0.063158 | 0.010526 | 0.221053 | 0.042105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4e324cf93a35e2065c6896d6b9be38a85fb0fe0 | 4,463 | py | Python | src/remote_pdb.py | MatthewWilkes/python-remote-pdb | ce1477a565b1b3cb42a018900c36eb17891b8a53 | [
"BSD-2-Clause"
] | null | null | null | src/remote_pdb.py | MatthewWilkes/python-remote-pdb | ce1477a565b1b3cb42a018900c36eb17891b8a53 | [
"BSD-2-Clause"
] | null | null | null | src/remote_pdb.py | MatthewWilkes/python-remote-pdb | ce1477a565b1b3cb42a018900c36eb17891b8a53 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import errno
import logging
import re
import socket
import sys
from pdb import Pdb
__version__ = "1.2.0"
PY3 = sys.version_info[0] == 3
def cry(message, stderr=sys.__stderr__):
logging.critical(message)
print(message, file=stderr)
stderr.flush()
class LF2CRLF_FileWrapper(object):
def __init__(self, fh, write_override=None):
self.stream = fh
self.read = fh.read
self.readline = fh.readline
self.readlines = fh.readlines
self.close = fh.close
self.flush = fh.flush
self.fileno = fh.fileno
self.write_override = write_override
@property
def encoding(self):
return self.stream.encoding
def __iter__(self):
return self.stream.__iter__()
def write(self, data, nl_rex=re.compile("\r?\n")):
data = nl_rex.sub("\r\n", data)
if self.write_override:
self.write_override(data)
else:
            self.stream.write(data)
# we have to explicitly flush, and unfortunately we cannot just disable buffering because on Python 3 text
# streams line buffering seems the minimum and on Windows line buffering doesn't work properly because we
# write unix-style line endings
self.stream.flush()
def writelines(self, lines, nl_rex=re.compile("\r?\n")):
for line in lines:
self.write(line, nl_rex)
class RemotePdb(Pdb):
"""
This will run pdb as a ephemeral telnet service. Once you connect no one
else can connect. On construction this object will block execution till a
client has connected.
Based on https://github.com/tamentis/rpdb I think ...
To use this::
RemotePdb(host='0.0.0.0', port=4444).set_trace()
Then run: telnet 127.0.0.1 4444
"""
active_instance = None
def __init__(self, host, port, patch_stdstreams=False):
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
listen_socket.bind((host, port))
cry("RemotePdb session open at %s:%s, waiting for connection ..." % listen_socket.getsockname())
listen_socket.listen(1)
connection, address = listen_socket.accept()
cry("RemotePdb accepted connection from %s." % repr(address))
if PY3:
# Some versions of Python 3.6, 3.7 and 3.8 have errors with makefile in rw mode
# This redirects the write calls to the underlying socket as a workaround
# See https://bugs.python.org/issue35928 for tracking of the fix
filelike = connection.makefile('r')
def write_override(data):
data = data.encode(filelike.encoding)
connection.send(data)
self.handle = LF2CRLF_FileWrapper(filelike, write_override=write_override)
else:
self.handle = LF2CRLF_FileWrapper(connection.makefile())
Pdb.__init__(self, completekey='tab', stdin=self.handle, stdout=self.handle)
self.backup = []
if patch_stdstreams:
for name in (
'stderr',
'stdout',
'__stderr__',
'__stdout__',
'stdin',
'__stdin__',
):
self.backup.append((name, getattr(sys, name)))
setattr(sys, name, self.handle)
RemotePdb.active_instance = self
def __restore(self):
if self.backup:
cry('Restoring streams: %s ...' % self.backup)
for name, fh in self.backup:
setattr(sys, name, fh)
self.handle.close()
RemotePdb.active_instance = None
def do_quit(self, arg):
self.__restore()
self.set_quit()
return 1
do_q = do_exit = do_quit
def set_trace(self, frame=None):
if frame is None:
frame = sys._getframe().f_back
try:
Pdb.set_trace(self, frame)
except IOError as exc:
if exc.errno != errno.ECONNRESET:
raise
def set_quit(self):
sys.settrace(None)
def set_trace(host='127.0.0.1', port=0, patch_stdstreams=False):
"""
Opens a remote PDB on first available port.
"""
rdb = RemotePdb(host=host, port=port, patch_stdstreams=patch_stdstreams)
rdb.set_trace(frame=sys._getframe().f_back)
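# A minimal usage sketch, not part of the original module; it assumes port
# 4444 is free and that a client (e.g. `telnet 127.0.0.1 4444`) will attach.
if __name__ == "__main__":
    def buggy(x):
        y = x * 2
        RemotePdb(host="127.0.0.1", port=4444).set_trace()  # blocks until a client connects
        return y
    buggy(21)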
| 32.107914 | 118 | 0.611696 | 561 | 4,463 | 4.68984 | 0.368984 | 0.039529 | 0.019384 | 0.019764 | 0.028126 | 0.012163 | 0 | 0 | 0 | 0 | 0 | 0.015462 | 0.28994 | 4,463 | 138 | 119 | 32.34058 | 0.814768 | 0.184181 | 0 | 0.021053 | 0 | 0 | 0.05585 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136842 | false | 0 | 0.073684 | 0.021053 | 0.294737 | 0.021053 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4e410c12154b75ec6c1f9e175b28611de1ac1b9 | 9,258 | py | Python | dataproxy/__init__.py | peerplays-network/bos-dataproxy | ff19ce97981a10d8ff8d6ad3ed6afe7b4cdd42fc | [
"MIT"
] | 6 | 2019-12-05T18:37:33.000Z | 2019-12-20T17:58:32.000Z | dataproxy/__init__.py | peerplays-network/bos-dataproxy | ff19ce97981a10d8ff8d6ad3ed6afe7b4cdd42fc | [
"MIT"
] | 2 | 2019-08-06T10:40:45.000Z | 2020-02-21T14:14:12.000Z | dataproxy/__init__.py | peerplays-network/bos-dataproxy | ff19ce97981a10d8ff8d6ad3ed6afe7b4cdd42fc | [
"MIT"
] | 1 | 2019-07-01T13:25:15.000Z | 2019-07-01T13:25:15.000Z | import os
import yaml
import io
from copy import deepcopy
import logging
import collections.abc
from logging.handlers import TimedRotatingFileHandler
from bookiesports.normalize import IncidentsNormalizer
def get_version():
try:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'VERSION')) as version_file:
return version_file.read().strip()
except FileNotFoundError:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", 'VERSION')) as version_file:
return version_file.read().strip()
__VERSION__ = get_version()
class Config(dict):
""" This class allows us to load the configuration from a YAML encoded
configuration file.
"""
ERRORS = {
}
data = None
source = None
@staticmethod
def load(config_files=[], relative_location=False):
""" Load config from a file
:param str file_name: (defaults to 'config.yaml') File name and
path to load config from
"""
if not Config.data:
Config.data = {}
if not config_files:
raise Exception("Trying to load config without target files")
if type(config_files) == str:
config_files = [config_files]
for config_file in config_files:
if relative_location:
file_path = config_file
else:
file_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
config_file
)
stream = io.open(file_path, 'r', encoding='utf-8')
with stream:
                Config.data = Config._nested_update(Config.data, yaml.safe_load(stream))
Config.source = ";".join(config_files)
@staticmethod
def get_config(config_name=None):
""" Static method that returns the configuration as dictionary.
Usage:
.. code-block:: python
Config.get_config()
"""
if not config_name:
if not Config.data:
raise Exception("Either preload the configuration or specify config_name!")
else:
if not Config.data:
Config.data = {}
Config.load(config_name)
return deepcopy(Config.data)
@staticmethod
def get(*args, **kwargs):
"""
        This config getter allows sophisticated and encapsulated access to the config file, while
        making it possible to define defaults in code where necessary. A short demo follows this
        class.
        :param args: keys to retrieve from the config, nested in order. If the last element is not
                     a string it is treated as the default value; passing the default keyword
                     argument as well is then forbidden
        :type args: tuple of str (the last element may be any object)
        :param message: message to be displayed when not found; defaults to the entry in the
                        ERRORS dict keyed by the desired config keys joined with dots (e.g.
                        "key1.key2"). For example Config.get("foo", "bar") will attempt to
                        retrieve config["foo"]["bar"], and if not found raise an exception with
                        the ERRORS["foo.bar"] message
        :type message: str
        :param default: default value if not found in config
        :type default: object
"""
default_given = "default" in kwargs
default = kwargs.pop("default", None)
message = kwargs.pop("message", None)
# check if last in args is default value
if type(args[len(args) - 1]) != str:
if default_given:
raise KeyError("There can only be one default set. Either use default=value or add non-string values as last positioned argument!")
default = args[len(args) - 1]
default_given = True
args = args[0:len(args) - 1]
try:
nested = Config.data
for key in args:
if type(key) == str:
nested = nested[key]
else:
raise KeyError("The given key " + str(key) + " is not valid.")
if nested is None:
raise KeyError()
except KeyError:
lookup_key = '.'.join(str(i) for i in args)
if not message:
if Config.ERRORS.get(lookup_key):
message = Config.ERRORS[lookup_key]
else:
message = "Configuration key {0} not found in {1}!"
message = message.format(lookup_key, Config.source)
if default_given:
logging.getLogger(__name__).debug(message + " Using given default value.")
return default
else:
raise KeyError(message)
        # filter out lists whose only entry is None (i.e. empty YAML lists)
if type(nested) == list and len(nested) == 1 and nested[0] is None:
nested = None
return nested
@staticmethod
def reset():
""" Static method to reset the configuration storage
"""
Config.data = None
Config.source = None
@staticmethod
def _nested_update(d, u):
for k, v in u.items():
            if isinstance(v, collections.abc.Mapping):
d[k] = Config._nested_update(d.get(k, {}), v)
            else:
                if not d:
                    d = {}
                d[k] = v
return d
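# A minimal sketch (not part of the original module) of the Config.get
# semantics documented above; the "providers"/"acme" keys are invented purely
# for illustration.
def _config_get_demo():
    Config.reset()
    Config.data = {"providers": {"acme": {"timeout": 30}}}
    assert Config.get("providers", "acme", "timeout") == 30
    # a missing key falls back to the in-code default instead of raising
    assert Config.get("providers", "acme", "retries", default=3) == 3
    # a non-string last positional argument is treated as the default, too
    assert Config.get("providers", "acme", "retries", 5) == 5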
def set_global_logger(existing_loggers=None, config_file_name=None):
print("Setting up logger handling for dataproxy...")
# setup logging
# ... log to file system
log_folder = os.path.join(Config.get("dump_folder", default="dump"), Config.get("logs", "folder", default="logs"))
log_level = logging.getLevelName(Config.get("logs", "level", default="INFO"))
os.makedirs(log_folder, exist_ok=True)
log_format = (Config.get("logs", "format", default="%(asctime)s %(levelname) -10s %(name)s: %(message)s"))
if config_file_name is None:
config_file_name = Config.get("logs", "file", default="dataproxy.log")
trfh = TimedRotatingFileHandler(
os.path.join(log_folder, config_file_name),
"midnight",
1
)
trfh.suffix = "%Y-%m-%d"
trfh.setFormatter(logging.Formatter(log_format))
trfh.setLevel(log_level)
# ... and to console
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(log_format))
sh.setLevel(log_level)
# global config (e.g. for werkzeug)
logging.basicConfig(level=log_level,
format=log_format,
handlers=[trfh, sh])
use_handlers = [trfh, sh]
if existing_loggers is not None:
if not type(existing_loggers) == list:
existing_loggers = [existing_loggers]
for logger in existing_loggers:
logger.setLevel(log_level)
while len(logger.handlers) > 0:
logger.removeHandler(logger.handlers[0])
for handler in use_handlers:
logger.addHandler(handler)
print("... done")
return use_handlers
def on_startup():
if Config.data and Config.data.get("subscribed_witnesses", None) is not None:
raise Exception("Please update your config.yaml to match the new format, subscribed_witnesses is outdated")
Config.get("subscriptions", "mask_providers")
try:
IncidentsNormalizer.use_chain(Config.get("bookiesports_chain", default="beatrice"),
not_found_file=os.path.join(Config.get("dump_folder"), "missing_bookiesports_entries.txt"))
except AttributeError:
IncidentsNormalizer.DEFAULT_CHAIN = Config.get("bookiesports_chain", default="beatrice")
IncidentsNormalizer.NOT_FOUND_FILE = os.path.join(Config.get("dump_folder"), "missing_bookiesports_entries.txt")
logging.getLogger(__name__).debug("Incidents normalizer set for chain " + IncidentsNormalizer.DEFAULT_CHAIN + ", using " + str(IncidentsNormalizer.NOT_FOUND_FILE) + " for missing entries")
providers = list(Config.get("providers", default={}).keys())
for key in providers:
# check and load optional provider configs
_config_file = Config.get("providers", key).get("config_file", None)
if _config_file is None:
_config_file = "config-" + key + ".yaml"
else:
_config_file = _config_file + ".yaml"
if os.path.isfile(_config_file):
Config.load(_config_file, True)
if not Config.data:
Config.load("config-defaults.yaml")
notify = False
try:
# overwrites defaults
Config.load("config-dataproxy.yaml", True)
notify = True
except FileNotFoundError:
pass
try:
# overwrites defaults
Config.load("../config-dataproxy.yaml", True)
notify = True
except FileNotFoundError:
pass
set_global_logger()
on_startup()
if notify:
# don't use utils here due to import loop
logging.getLogger(__name__).info("Custom config has been loaded from working directory: " + Config.source)
else:
raise Exception("No custom config has been found in working directory (filename should be config-dataproxy.yaml)")
| 36.023346 | 196 | 0.600562 | 1,085 | 9,258 | 4.993548 | 0.242396 | 0.029531 | 0.01292 | 0.011074 | 0.155962 | 0.133629 | 0.1244 | 0.102067 | 0.102067 | 0.102067 | 0 | 0.002633 | 0.302549 | 9,258 | 256 | 197 | 36.164063 | 0.836457 | 0.16159 | 0 | 0.215909 | 0 | 0.005682 | 0.150916 | 0.017388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.011364 | 0.045455 | 0 | 0.153409 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4e764ff085936858ce0218d82e10f1f4328559c | 1,125 | py | Python | python_4/sun.py | SPbAU-ProgrammingParadigms/materials | 447bc7639c218cf5ee869d461e35998e1a0e02e5 | [
"Unlicense"
] | null | null | null | python_4/sun.py | SPbAU-ProgrammingParadigms/materials | 447bc7639c218cf5ee869d461e35998e1a0e02e5 | [
"Unlicense"
] | null | null | null | python_4/sun.py | SPbAU-ProgrammingParadigms/materials | 447bc7639c218cf5ee869d461e35998e1a0e02e5 | [
"Unlicense"
] | 7 | 2017-09-02T17:09:46.000Z | 2021-01-10T09:53:56.000Z | #!/usr/bin/env python3
class Runtime:
pass
class Singleton(Runtime):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
            cls._instance = super().__new__(cls)  # object.__new__() takes no extra arguments
return cls._instance
class StringBuilder:
def __init__(self, encoding='utf-8'):
self.buf = bytearray()
self.encoding = encoding
def add(self, s):
self.buf += bytes(s.encode(self.encoding))
return self
def whitespace(self):
return self.add(' ')
def newline(self):
return self.add('\n')
def build(self):
return self.buf.decode(self.encoding)
if __name__ == '__main__':
# runtime
s1 = Singleton()
s2 = Singleton()
if id(s1) == id(s2):
print("Same")
else:
print("Different")
# summary
# https://www.python.org/doc/essays/list2str/
sb = StringBuilder()
result = (sb.add('hello,')
.whitespace()
.add('world')
.add('!')
.newline()
.build())
sb.build()
print(result)
| 20.454545 | 65 | 0.535111 | 122 | 1,125 | 4.737705 | 0.467213 | 0.083045 | 0.072664 | 0.055363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009162 | 0.320889 | 1,125 | 54 | 66 | 20.833333 | 0.747382 | 0.072 | 0 | 0 | 0 | 0 | 0.039423 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162162 | false | 0.027027 | 0 | 0.081081 | 0.405405 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4ea4abbb674908b308a29f2c1088bdba4b29c37 | 1,587 | py | Python | tests/test_zip.py | shengqh/bamsnap | 4815c618011b2a1f2ab0d9e6418e39dbd292239b | [
"MIT"
] | 84 | 2020-01-09T11:12:52.000Z | 2022-03-05T00:15:55.000Z | tests/test_zip.py | shengqh/bamsnap | 4815c618011b2a1f2ab0d9e6418e39dbd292239b | [
"MIT"
] | 23 | 2020-08-24T14:28:06.000Z | 2021-11-27T16:42:40.000Z | tests/test_zip.py | shengqh/bamsnap | 4815c618011b2a1f2ab0d9e6418e39dbd292239b | [
"MIT"
] | 14 | 2020-08-28T16:55:21.000Z | 2021-12-01T20:20:50.000Z | import sys
import shlex
sys.path.append('..')
bamsnap_prog = "src/bamsnap.py"
from src import bamsnap
# import bamsnap
# bamsnap_prog = "bamsnap"
cmdlist = []
cmdlist.append("""
-bam ./data/test_SV1_softclipped_1.bam \
-title "Clipped read" \
-pos chr1:37775740 chr1:37775780 chr1:37775783 chr1:37775785 chr1:37775789 \
-out ./out/test_SV1-7_proc1 \
-bamplot coverage read \
-margin 100 \
-no_target_line \
-show_soft_clipped \
-read_color_by interchrom \
-zipout \
-save_image_only
""")
cmdlist.append("""
-bam ./data/test_SV1_softclipped_1.bam \
-title "Clipped read" \
-pos chr1:37775740 chr1:37775780 chr1:37775783 chr1:37775785 chr1:37775789 \
-out ./out/test_SV1-7_proc2 \
-bamplot coverage read \
-margin 100 \
-no_target_line \
-show_soft_clipped \
-read_color_by interchrom \
-zipout \
-process 2 \
-save_image_only
""")
def getopt(target_option):
flag = False
value = ""
for opt1 in sys.argv:
if flag:
if opt1[0] == '-':
break
else:
value += ' ' + opt1
if opt1 == target_option:
flag = True
return value.strip()
def test_run():
for cmd in cmdlist:
# cmd = cmdlist[-1]
cmd = bamsnap_prog + " " + cmd.strip()
sys.argv = shlex.split(cmd)
print(' '.join(sys.argv))
# print(cmd)
bamsnap.cli()
out = getopt('-out')
        assert bamsnap.util.is_exist(out + '.zip')
if __name__ == "__main__":
test_run()
| 21.16 | 80 | 0.587902 | 194 | 1,587 | 4.592784 | 0.407216 | 0.031425 | 0.035915 | 0.044893 | 0.487093 | 0.487093 | 0.487093 | 0.487093 | 0.487093 | 0.487093 | 0 | 0.099472 | 0.284184 | 1,587 | 74 | 81 | 21.445946 | 0.684859 | 0.042848 | 0 | 0.436364 | 0 | 0 | 0.497358 | 0.072655 | 0 | 0 | 0 | 0 | 0.018182 | 1 | 0.036364 | false | 0 | 0.054545 | 0 | 0.109091 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4ebe3d8d5c147beec7d63a578314a1710325450 | 3,751 | py | Python | doc/source/rstprocess.py | jefalon/WindSE | bf7e0dbad85552b32327bda2b5c29a0fac5286bb | [
"Apache-2.0"
] | 35 | 2019-07-14T17:08:10.000Z | 2022-03-15T11:09:44.000Z | doc/source/rstprocess.py | jefalon/WindSE | bf7e0dbad85552b32327bda2b5c29a0fac5286bb | [
"Apache-2.0"
] | 45 | 2020-11-16T16:40:12.000Z | 2022-03-30T20:04:37.000Z | doc/source/rstprocess.py | jefalon/WindSE | bf7e0dbad85552b32327bda2b5c29a0fac5286bb | [
"Apache-2.0"
] | 21 | 2020-02-11T12:01:36.000Z | 2022-03-18T19:07:14.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2017 Garth N. Wells
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import shutil
# sys.path.append('../../../utils/pylit/')
try:
import pylit
except ImportError:
raise ImportError("Unable to import pylit module")
def process():
"""Copy demo rst files (C++ and Python) from the DOLFIN source tree
into the demo source tree, and process file with pylit
"""
# Check that we can find pylint.py for converting foo.py.rst to
# foo.py
pylit_parser = "pylit.py"
if os.path.isfile(pylit_parser):
pass
else:
raise RuntimeError("Cannot find pylit.py")
# Directories to scan
subdirs = ["../../demo/documented"]
# Iterate over subdirectories containing demos
for subdir in subdirs:
# Get list of demos (demo name , subdirectory)
demos = [(dI, os.path.join(subdir, dI)) for dI in os.listdir(subdir) if os.path.isdir(os.path.join(subdir, dI))]
# Iterate over demos
for demo, path in demos:
# Make demo doc directory
demo_dir = os.path.join('./demos/', demo)
if not os.path.exists(demo_dir):
os.makedirs(demo_dir)
#for f in rst_files_common:
# shutil.copy(os.path.join(path, f), demo_dir)
# Build list of rst and png files in demo source directory
rst_files = [f for f in os.listdir(path) if os.path.splitext(f)[1] == ".rst" ]
other_files = [f for f in os.listdir(path) if os.path.splitext(f)[1] in (".png", ".pdf", ".gif", ".py", ".gz", ".yaml", ".zip")]
# Create directory in documentation tree for demo
demo_dir = os.path.join('./demos/', demo)
if not os.path.exists(demo_dir):
os.makedirs(demo_dir)
# Copy .png and .py files into documentation demo directory
for f in other_files:
shutil.copy(os.path.join(path, f), demo_dir)
# # Copy input folders
# if "Input_Data" in os.listdir(path):
# input_path = os.path.join(path, "Input_Data")
# demo_input_dir = os.path.join(demo_dir, "Input_Data/")
# if not os.path.exists(demo_input_dir):
# os.makedirs(demo_input_dir)
# for f in os.listdir(input_path):
# shutil.copy(os.path.join(input_path, f), demo_input_dir)
# Copy rst files into documentation demo directory
for f in rst_files:
shutil.copy(os.path.join(path, f), demo_dir)
# Copy rst files into documentation demo directory and
# process with Pylit
for f in rst_files:
shutil.copy(os.path.join(path, f), demo_dir)
# Run pylit on py.rst files (files with 'double
# extensions')
if os.path.splitext(os.path.splitext(f)[0])[1] == ".py":
rst_file = os.path.join(demo_dir, f)
pylit.main([rst_file])
if __name__ == "__main__":
process()
| 36.417476 | 140 | 0.604639 | 528 | 3,751 | 4.210227 | 0.306818 | 0.05668 | 0.053981 | 0.035987 | 0.338731 | 0.2852 | 0.261359 | 0.230769 | 0.174989 | 0.160594 | 0 | 0.00375 | 0.28899 | 3,751 | 102 | 141 | 36.77451 | 0.829771 | 0.491336 | 0 | 0.305556 | 0 | 0 | 0.073553 | 0.011357 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0.027778 | 0.166667 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4ecc6c166b9223c2444d0ba856f875609a0d0b7 | 7,084 | py | Python | homeassistant/components/thermostat/homematic.py | magas0/home-assistant | 3c9e4934946ce99f5193ca550296034e86337997 | [
"MIT"
] | 1 | 2016-07-14T05:20:54.000Z | 2016-07-14T05:20:54.000Z | app/bower_components/home-assistant-dev/homeassistant/components/thermostat/homematic.py | EkoHub/CustomizableWalkThroughTourElement | 0a4ae793a1e031c9bd042b0e8ffef3be96b7c1b0 | [
"BSD-3-Clause"
] | null | null | null | app/bower_components/home-assistant-dev/homeassistant/components/thermostat/homematic.py | EkoHub/CustomizableWalkThroughTourElement | 0a4ae793a1e031c9bd042b0e8ffef3be96b7c1b0 | [
"BSD-3-Clause"
] | 1 | 2018-11-22T13:55:23.000Z | 2018-11-22T13:55:23.000Z | """
Support for Homematic (HM-TC-IT-WM-W-EU, HM-CC-RT-DN) thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/thermostat.homematic/
"""
import logging
import socket
from xmlrpc.client import ServerProxy
from xmlrpc.client import Error
from collections import namedtuple
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.temperature import convert
REQUIREMENTS = []
_LOGGER = logging.getLogger(__name__)
CONF_ADDRESS = 'address'
CONF_DEVICES = 'devices'
CONF_ID = 'id'
PROPERTY_SET_TEMPERATURE = 'SET_TEMPERATURE'
PROPERTY_VALVE_STATE = 'VALVE_STATE'
PROPERTY_ACTUAL_TEMPERATURE = 'ACTUAL_TEMPERATURE'
PROPERTY_BATTERY_STATE = 'BATTERY_STATE'
PROPERTY_LOWBAT = 'LOWBAT'
PROPERTY_CONTROL_MODE = 'CONTROL_MODE'
PROPERTY_BURST_MODE = 'BURST_RX'
TYPE_HM_THERMOSTAT = 'HOMEMATIC_THERMOSTAT'
TYPE_HM_WALLTHERMOSTAT = 'HOMEMATIC_WALLTHERMOSTAT'
TYPE_MAX_THERMOSTAT = 'MAX_THERMOSTAT'
HomematicConfig = namedtuple('HomematicConfig',
['device_type',
'platform_type',
'channel',
'maint_channel'])
HM_TYPE_MAPPING = {
'HM-CC-RT-DN': HomematicConfig('HM-CC-RT-DN',
TYPE_HM_THERMOSTAT,
4, 4),
'HM-CC-RT-DN-BoM': HomematicConfig('HM-CC-RT-DN-BoM',
TYPE_HM_THERMOSTAT,
4, 4),
'HM-TC-IT-WM-W-EU': HomematicConfig('HM-TC-IT-WM-W-EU',
TYPE_HM_WALLTHERMOSTAT,
2, 2),
'BC-RT-TRX-CyG': HomematicConfig('BC-RT-TRX-CyG',
TYPE_MAX_THERMOSTAT,
1, 0),
'BC-RT-TRX-CyG-2': HomematicConfig('BC-RT-TRX-CyG-2',
TYPE_MAX_THERMOSTAT,
1, 0),
'BC-RT-TRX-CyG-3': HomematicConfig('BC-RT-TRX-CyG-3',
TYPE_MAX_THERMOSTAT,
1, 0)
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Homematic thermostat."""
devices = []
try:
address = config[CONF_ADDRESS]
homegear = ServerProxy(address)
for name, device_cfg in config[CONF_DEVICES].items():
# get device description to detect the type
device_type = homegear.getDeviceDescription(
device_cfg[CONF_ID] + ':-1')['TYPE']
if device_type in HM_TYPE_MAPPING.keys():
devices.append(HomematicThermostat(
HM_TYPE_MAPPING[device_type],
address,
device_cfg[CONF_ID],
name))
else:
raise ValueError(
"Device Type '{}' currently not supported".format(
device_type))
except socket.error:
_LOGGER.exception("Connection error to homematic web service")
return False
add_devices(devices)
return True
# pylint: disable=too-many-instance-attributes
class HomematicThermostat(ThermostatDevice):
"""Representation of a Homematic thermostat."""
def __init__(self, hm_config, address, _id, name):
"""Initialize the thermostat."""
self._hm_config = hm_config
self.address = address
self._id = _id
self._name = name
self._full_device_name = '{}:{}'.format(self._id,
self._hm_config.channel)
self._maint_device_name = '{}:{}'.format(self._id,
self._hm_config.maint_channel)
self._current_temperature = None
self._target_temperature = None
self._valve = None
self._battery = None
self._mode = None
self.update()
@property
def name(self):
"""Return the name of the Homematic device."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
def set_temperature(self, temperature):
"""Set new target temperature."""
device = ServerProxy(self.address)
device.setValue(self._full_device_name,
PROPERTY_SET_TEMPERATURE,
temperature)
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {"valve": self._valve,
"battery": self._battery,
"mode": self._mode}
@property
def min_temp(self):
"""Return the minimum temperature - 4.5 means off."""
return convert(4.5, TEMP_CELSIUS, self.unit_of_measurement)
@property
def max_temp(self):
"""Return the maximum temperature - 30.5 means on."""
return convert(30.5, TEMP_CELSIUS, self.unit_of_measurement)
def update(self):
"""Update the data from the thermostat."""
try:
device = ServerProxy(self.address)
self._current_temperature = device.getValue(
self._full_device_name,
PROPERTY_ACTUAL_TEMPERATURE)
self._target_temperature = device.getValue(
self._full_device_name,
PROPERTY_SET_TEMPERATURE)
self._valve = device.getValue(
self._full_device_name,
PROPERTY_VALVE_STATE)
self._mode = device.getValue(
self._full_device_name,
PROPERTY_CONTROL_MODE)
if self._hm_config.platform_type in [TYPE_HM_THERMOSTAT,
TYPE_HM_WALLTHERMOSTAT]:
self._battery = device.getValue(self._maint_device_name,
PROPERTY_BATTERY_STATE)
elif self._hm_config.platform_type == TYPE_MAX_THERMOSTAT:
# emulate homematic battery voltage,
# max reports lowbat if voltage < 2.2V
# while homematic battery_state should
# be between 1.5V and 4.6V
lowbat = device.getValue(self._maint_device_name,
PROPERTY_LOWBAT)
if lowbat:
self._battery = 1.5
else:
self._battery = 4.6
except Error:
_LOGGER.exception("Did not receive any temperature data from the "
"homematic API.")
| 36.328205 | 79 | 0.56917 | 720 | 7,084 | 5.340278 | 0.241667 | 0.023407 | 0.023667 | 0.015605 | 0.204161 | 0.151105 | 0.13264 | 0.059298 | 0.015085 | 0 | 0 | 0.007976 | 0.345144 | 7,084 | 194 | 80 | 36.515464 | 0.820867 | 0.125353 | 0 | 0.180556 | 0 | 0 | 0.091458 | 0.00392 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076389 | false | 0 | 0.055556 | 0 | 0.201389 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4eebbcaac2662f45a776502923fe86d751ae5dc | 1,658 | py | Python | src/main/python/bridge_access.py | afichet/candela | eafa61fb6054b3beedbb63d9b9ca3c1f5a15f562 | [
"MIT"
] | 1 | 2020-07-27T21:37:28.000Z | 2020-07-27T21:37:28.000Z | src/main/python/bridge_access.py | afichet/candela | eafa61fb6054b3beedbb63d9b9ca3c1f5a15f562 | [
"MIT"
] | 1 | 2020-06-22T13:36:37.000Z | 2020-10-04T19:26:52.000Z | src/main/python/bridge_access.py | afichet/candela | eafa61fb6054b3beedbb63d9b9ca3c1f5a15f562 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
import phue
from PySide2.QtCore import QObject, Signal, Slot, Property
class BridgeAccess(QObject):
    def __init__(self, ip=None, user=None, parent=None):
        super(BridgeAccess, self).__init__(parent)
        self.ip_val = ip
        self.user_val = user
        if ip is not None and user is not None:
            try:
                self.bridge = phue.Bridge(self.ip_val, self.user_val)
            except phue.PhueRegistrationException:
                print("Error connecting to the bridge")
@Slot()
def init_connection(self):
try:
self.bridge = phue.Bridge(self.ip_val)
self.bridge.connect()
self.user = self.bridge.username
self.connection_established.emit(self.user_val)
except phue.PhueRegistrationException:
print("Error connecting to the bridge")
connection_established = Signal(str, name="connection_established")
def _ip(self):
return self.ip_val
def _set_ip(self, v):
self.ip_val = v
@Signal
def ip_changed(self):
pass
def _user(self):
return self.user_val
def _set_user(self, v):
self.user_val = v
if self.ip_val is not None:
print(self.ip_val, self.user_val)
self.bridge = phue.Bridge(self.ip, self.user)
self.bridge.connect()
@Signal
def user_changed(self):
pass
ip = Property(str, _ip, _set_ip, notify=ip_changed)
user = Property(str, _user, _set_user, notify=user_changed)
| 26.741935 | 71 | 0.626659 | 211 | 1,658 | 4.682464 | 0.251185 | 0.060729 | 0.072874 | 0.060729 | 0.37247 | 0.37247 | 0.325911 | 0.325911 | 0.325911 | 0.257085 | 0 | 0.001672 | 0.278649 | 1,658 | 61 | 72 | 27.180328 | 0.824415 | 0.03076 | 0 | 0.311111 | 0 | 0 | 0.05109 | 0.013707 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.044444 | 0.044444 | 0.044444 | 0.377778 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4f154a32dafa26aff0166449879bde375421d24 | 3,779 | py | Python | toqnets/nn/gnntc.py | C-SUNSHINE/TOQ-Nets-PyTorch-Release | 05e06bf633fb3c6b610dda9a5126ecd7af1db02f | [
"MIT"
] | 6 | 2021-08-24T21:46:01.000Z | 2022-03-09T14:34:05.000Z | toqnets/nn/gnntc.py | vacancy/TOQ-Nets-PyTorch-Release | 53a712be28e2ecf8d2e04a9f71a2d7e8db5430e1 | [
"MIT"
] | null | null | null | toqnets/nn/gnntc.py | vacancy/TOQ-Nets-PyTorch-Release | 53a712be28e2ecf8d2e04a9f71a2d7e8db5430e1 | [
"MIT"
] | 2 | 2021-08-23T03:06:20.000Z | 2021-09-30T14:17:14.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : gnntc.py
# Author : Zhezheng Luo
# Email : luozhezheng@gmail.com
# Date : 08/02/2021
#
# This file is part of TOQ-Nets-PyTorch.
# Distributed under terms of the MIT license.
import torch
from torch import nn
from toqnets.nn.propnet import AgentEncoder, RelationEncoder, Propagator
class GNNTC(nn.Module):
"""
Graph Neural Network + Temporal Conv
"""
def __init__(self, n_agents, state_dim=3, type_dim=3, h_dim=256, n_features=256,
layers=[(32, 9, 3), (32, 7, 2), (32, 5, 2)], dropout=0.5):
super().__init__()
self.n_agents = n_agents
self.state_dim = state_dim
self.type_dim = type_dim
self.h_dim = h_dim
self.n_features = n_features
self.agent_encoder = AgentEncoder(type_dim, h_dim, h_dim)
self.state_encoder = AgentEncoder(state_dim, h_dim, h_dim)
self.relation_encoder = RelationEncoder(h_dim + h_dim, h_dim, h_dim)
self.relation_propagator = Propagator(h_dim + h_dim + h_dim, h_dim)
last_channel = h_dim
conv_layers = []
for (channel, kernel_size, stride) in layers:
conv_layers.append(
nn.Conv1d(last_channel, channel, kernel_size=kernel_size, padding=kernel_size // 2, stride=stride))
conv_layers.append(nn.ReLU())
conv_layers.append(nn.Dropout(dropout))
last_channel = channel
conv_layers.append(nn.Conv1d(last_channel, n_features, kernel_size=1, padding=0, stride=1))
self.conv = nn.Sequential(*conv_layers)
def forward(self, states, types, playerid):
"""
:param states: [batch, length, n_agents, state_dim]
:param types: [batch, n_agents, type_dim]
:param playerid: [batch]
"""
batch, length, n_agents, state_dim = states.size()
type_dim = types.size(2)
assert n_agents == self.n_agents
assert state_dim == self.state_dim
assert type_dim == self.type_dim
assert types.size() == torch.Size((batch, n_agents, type_dim))
assert playerid.size() == torch.Size((batch,))
h_dim = self.h_dim
n_features = self.n_features
agent_encode = self.agent_encoder(types)
agent_encode_r = agent_encode.repeat(1, n_agents, 1)
agent_encode_s = agent_encode.repeat(1, 1, n_agents).view(batch, n_agents * n_agents, h_dim)
relation_encode = self.relation_encoder(torch.cat([agent_encode_r, agent_encode_s], dim=2))
state_encode = self.state_encoder(states)
state_encode_r = state_encode.repeat(1, 1, n_agents, 1)
state_encode_s = state_encode.repeat(1, 1, 1, n_agents).view(batch, length, n_agents * n_agents, h_dim)
relation_effect = self.relation_propagator(
torch.cat([state_encode_s, state_encode_r, relation_encode.unsqueeze(1).repeat(1, length, 1, 1)], dim=3))
agg_effect = relation_effect.view(batch, length, n_agents, n_agents, h_dim).sum(dim=3)
# agg_effect:[batch, length, n_agents, h_dim]
assert agg_effect.size() == torch.Size([batch, length, n_agents, h_dim])
agg_effect = agg_effect.gather(2, playerid.view(-1, 1, 1, 1).repeat(1, length, 1, h_dim))[:, :, 0, :]
# agg_effect:[batch, length, h_dim]
assert agg_effect.size() == torch.Size([batch, length, h_dim])
# NB(Jiayuan Mao @ 04/14): add contiguous() to avoid back-propagation error in PyTorch 1.4.
output = self.conv(agg_effect.transpose(1, 2).contiguous())
# print(output.size(), torch.Size([batch * n_agents, n_features, length]))
assert output.size(0) == batch
assert output.size(1) == n_features
return output[:, :, output.size(2) // 2]
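# A minimal smoke-test sketch (not part of the original file); the sizes are
# invented and only exercise the shape contract documented in forward().
if __name__ == '__main__':
    net = GNNTC(n_agents=6, state_dim=3, type_dim=3, h_dim=32, n_features=16)
    states = torch.randn(2, 60, 6, 3)  # [batch, length, n_agents, state_dim]
    types = torch.randn(2, 6, 3)  # [batch, n_agents, type_dim]
    playerid = torch.zeros(2, dtype=torch.long)
    print(net(states, types, playerid).shape)  # torch.Size([2, 16])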
| 41.527473 | 117 | 0.645144 | 542 | 3,779 | 4.243542 | 0.226937 | 0.045217 | 0.033478 | 0.031304 | 0.304783 | 0.23 | 0.140435 | 0.08 | 0.066087 | 0.037391 | 0 | 0.026135 | 0.230484 | 3,779 | 90 | 118 | 41.988889 | 0.764787 | 0.162741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169811 | 1 | 0.037736 | false | 0 | 0.056604 | 0 | 0.132075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4f83beaa12a9866f44ed0f21189ccc8f4c1bf21 | 1,187 | py | Python | evaluation/hand_calculation/agari.py | VictorZXY/meowjong | ec71171f7dc2369c55f5e3bd3302cbaa76346561 | [
"MIT"
] | null | null | null | evaluation/hand_calculation/agari.py | VictorZXY/meowjong | ec71171f7dc2369c55f5e3bd3302cbaa76346561 | [
"MIT"
] | null | null | null | evaluation/hand_calculation/agari.py | VictorZXY/meowjong | ec71171f7dc2369c55f5e3bd3302cbaa76346561 | [
"MIT"
] | null | null | null | from evaluation.hand_calculation.hand_divider import HandDivider
from evaluation.hand_calculation.yaku_list.yakuman import KokushiMusou
class Agari:
@staticmethod
def is_agari(private_tiles, win_tile=None, melds=None):
"""
Determine whether a given hand is complete. Yaku are not counted.
:param private_tiles: Private tiles (winning tile may be included or
not), represented by a 34-array
:param win_tile: Integer index, only specified when it is not included
in private_tiles
:param melds: Melds represented by a list of Meld objects
:return: Boolean
"""
divisions = HandDivider.divide_hand(
private_tiles, win_tile=win_tile, melds=melds)
# case of kokushi musou
if isinstance(divisions[0], int):
kokushi_musou = KokushiMusou()
if kokushi_musou.is_condition_met(divisions):
return True
else:
return False
else:
# as long as divisions is a list of lists and is not empty, there is
# at least a valid division, which means the hand is complete
return True
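# A minimal sketch (not part of the original file) of calling the checker with
# a complete standard hand, 123m 456m 789m 111p 99p, encoded as a 34-array.
def _agari_demo():
    hand = [0] * 34
    for i in range(9):  # 123m 456m 789m: one of each man tile
        hand[i] = 1
    hand[9] = 3   # 111p triplet
    hand[17] = 2  # 99p pair
    return Agari.is_agari(hand)  # expected: True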
| 38.290323 | 80 | 0.648694 | 150 | 1,187 | 5.013333 | 0.526667 | 0.079787 | 0.047872 | 0.077128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003601 | 0.298231 | 1,187 | 30 | 81 | 39.566667 | 0.89916 | 0.403538 | 0 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4f9ccdefea2153e5041d2860cc16cc5a2038328 | 570 | py | Python | CreaFiguraConSimbolos.py | brown9804/Python_DiversosAlgortimos | e9ff0fbe761f24a49a30a513d50824ca56cafaa3 | [
"Apache-2.0"
] | 3 | 2018-06-28T21:06:53.000Z | 2018-07-01T20:39:30.000Z | CreaFiguraConSimbolos.py | brown9804/Python_DiversosAlgortimos | e9ff0fbe761f24a49a30a513d50824ca56cafaa3 | [
"Apache-2.0"
] | null | null | null | CreaFiguraConSimbolos.py | brown9804/Python_DiversosAlgortimos | e9ff0fbe761f24a49a30a513d50824ca56cafaa3 | [
"Apache-2.0"
] | null | null | null | #Python3
#Creates a figure with a user-supplied symbol
###### DEFINITIONS ######
def impr(anch):
    print("*" * anch)
def anchofig(anc, sym):
    print(sym * anc)
###### IMPLEMENTATION ######
ancho = int(input("Enter the width for the block of asterisks "))
for indice in range(1, ancho + 1):
    impr(ancho)
ancho = int(input("Digite el ancho que desea para la figura "))
alto = int(input("Digite el alto que desea para la figura "))
symbol = input("Digite el símbolo con el que desea contruir la figura ")
for indice in range (1, alto+1):
anchofig(ancho,symbol)
| 23.75 | 72 | 0.659649 | 85 | 570 | 4.423529 | 0.435294 | 0.117021 | 0.138298 | 0.12766 | 0.335106 | 0.138298 | 0 | 0 | 0 | 0 | 0 | 0.010893 | 0.194737 | 570 | 23 | 73 | 24.782609 | 0.808279 | 0.149123 | 0 | 0 | 0 | 0 | 0.38326 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4fb690e180278fffd33e59c9c5b194dc52708ab | 2,039 | py | Python | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/mcl/lp/cmdparser/XmlCommandArgument.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 9 | 2019-11-22T04:58:40.000Z | 2022-02-26T16:47:28.000Z | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/mcl/lp/cmdparser/XmlCommandArgument.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | null | null | null | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/mcl/lp/cmdparser/XmlCommandArgument.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 8 | 2017-09-27T10:31:18.000Z | 2022-01-08T10:30:46.000Z | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: XmlCommandArgument.py
from XmlCommandBase import XmlCommandBase
class XmlCommandArgument(XmlCommandBase):
def __init__(self):
XmlCommandBase.__init__(self)
self.m_group = ''
self.m_validValues = {}
def AddValidValueData(self, value, dataName, dataValue):
if len(value) == 0 or len(dataName) == 0:
raise RuntimeError('Invalid value/dataName')
if not self.m_validValues.has_key(value):
dataMap = {dataName: dataValue}
paramMap = {}
self.m_validValues[value] = (paramMap, dataMap)
else:
if self.m_validValues[value][1].has_key(dataName):
raise RuntimeError("Duplicate data name (%s) found for value '%s'" % (dataName, value))
self.m_validValues[value][1][dataName] = dataValue
def AddValidValueParam(self, value, paramName, paramValue):
if len(value) == 0 or len(paramValue) == 0:
raise RuntimeError('Invalid value/paramValue')
if not self.m_validValues.has_key(value):
dataMap = {}
paramMap = {paramName: paramValue}
self.m_validValues[value] = (paramMap, dataMap)
else:
if self.m_validValues[value][0].has_key(paramName):
raise RuntimeError("Duplicate param name (%s) found for value '%s'" % (paramName, value))
self.m_validValues[value][0][paramName] = paramValue
def GetGroupName(self):
return self.m_group
def GetValidValues(self):
return self.m_validValues
def HasGroup(self):
if len(self.m_group) > 0:
return True
else:
return False
def HasValidValues(self):
if len(self.m_validValues) > 0:
return True
else:
return False
def SetGroupName(self, name):
self.m_group = name | 35.155172 | 105 | 0.615498 | 234 | 2,039 | 5.247863 | 0.303419 | 0.061075 | 0.143322 | 0.102606 | 0.387622 | 0.270358 | 0.213355 | 0.166124 | 0.166124 | 0.102606 | 0 | 0.033311 | 0.278568 | 2,039 | 58 | 106 | 35.155172 | 0.801496 | 0.089259 | 0 | 0.272727 | 0 | 0 | 0.073974 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.022727 | 0.045455 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4fc46a1bb23ac5c4deeb4b6415184c741b08da2 | 10,390 | py | Python | userPayWin.py | Salalami/SmartSupermarket4Win | 0442de2c821361ecdb88cf1ce9523ac78c48d6d1 | [
"Apache-2.0"
] | null | null | null | userPayWin.py | Salalami/SmartSupermarket4Win | 0442de2c821361ecdb88cf1ce9523ac78c48d6d1 | [
"Apache-2.0"
] | null | null | null | userPayWin.py | Salalami/SmartSupermarket4Win | 0442de2c821361ecdb88cf1ce9523ac78c48d6d1 | [
"Apache-2.0"
] | null | null | null | from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QWidget, QMessageBox
from PyQt5 import QtWidgets
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSignal, QThread, QCoreApplication, Qt
from Ui_userPayWin import Ui_Dialog
import sys, utils
from alipay import AliPay
import qrcode, time, threading, sched
alipay_public_key_string = open("alipay_public_key_string.txt").read()
app_private_key_string = open("app_private_key_string.txt").read()
APP_ID = '2016093000628067'
PRECREATE_ORDER_FAIL = 10
PRECREATE_ORDER_SUCCESS = 20
PERCHASE_COMPLETEED = 30
PERCHASE_CANCELED = 40
PERCHASE_CANCELED_BY_BTN = 50
exit_flag = 1
class UserPayWin(QDialog, Ui_Dialog):
sinOutExit = pyqtSignal()
def __init__(self, item_info_dict, parentWidget=None):
super(UserPayWin, self).__init__(parentWidget)
self.setupUi(parentWidget)
self.dialog.setWindowFlags(Qt.CustomizeWindowHint)
self.cancelPaybtn.clicked.connect(self.queryDialog)
self.qrcode.setScaledContents(True)
self.sinOutExit.connect(self.disposeExitSignal)
toPrice = 0.0
for value in item_info_dict.values():
if value != None:
print(value.getTotalInfo())
self.displayInfo(value)
toPrice = toPrice + value.getToPrice()
toPrice = float('%.2f' % toPrice)
print('toPrice', toPrice)
self.ToPrice.setText('总计: ' +str(toPrice)+ '元')
self.myAlipay = AliPayUtil(app_private_key_string, alipay_public_key_string, self.helpInfo)
subject = '购买水果付款'
self.out_trade_no = int(time.time())
        result1 = self.myAlipay.preCreateOrder(subject, self.out_trade_no, toPrice)  # pre-create the payment order
if result1 == PRECREATE_ORDER_SUCCESS:
self.qrcode.setPixmap(QPixmap('qr_ali.png'))
            self.alipayThread = QueryPaymentInfoThread(self.myAlipay, self.out_trade_no, 120)  # polls the order's payment status
            #self.checkFlagThread = CheckFlagThread() # checks the flag signal
            # wire up the slot functions
self.alipayThread.sinOut.connect(self.show_help_info)
#self.alipayThread.sinOutExit.connect(self.disposeExitSignal)
#self.checkFlagThread.sinOut.connect(self.disposeExitSignal)
self.alipayThread.finished.connect(self.disposeExitSignal)
self.alipayThread.start()
#self.checkFlagThread.start()
            #time.sleep(2)  # block briefly so the UI can finish loading
elif result1 == PRECREATE_ORDER_FAIL:
self.cancelPaybtn.setEnabled(False)
            s = threading.Timer(5, self.disposeExitSignal)  # delay the close until the UI has finished initializing
s.start()
def disposeExitSignal(self):
print('关闭付款界面')
self.dialog.close()
def show_help_info(self, info):
self.helpInfo.clear()
self.helpInfo.setText(str(info))
def queryDialog(self):
reply = QMessageBox.warning(self, "警告", '确定放弃购买?', QMessageBox.Yes | QMessageBox.Cancel, QMessageBox.Cancel)
if reply == QMessageBox.Yes:
#global exit_flag
#global exit_check_thread_flag
#exit_check_thread_flag = 0
global exit_flag
exit_flag = 0
#time.sleep(2)
self.close()
def displayInfo(self, itemInfo):
        itemName, itemSimprice, toPrice, weight_num, category = itemInfo.getTotalInfo()
        # The six info group boxes differ only by a widget-name suffix, so fill
        # the first still-hidden one via getattr instead of six duplicated branches.
        for suffix in ('', '_2', '_3', '_4', '_5', '_6'):
            groupbox = getattr(self, 'itemInfoGroupbox1' + suffix)
            if groupbox.isHidden():
                groupbox.setHidden(False)
                getattr(self, 'item_name1' + suffix).setText('名称:' + itemName)
                getattr(self, 'item_simprice1' + suffix).setText('单价:' + str(itemSimprice) + '元')
                getattr(self, 'item_toprice1' + suffix).setText('总价:' + str(toPrice) + '元')
                getattr(self, 'item_weight1' + suffix).setText('重量:' + str(weight_num) + 'kg')
                groupbox.setTitle(utils.category_confirm(category))  # set the displayed item category
                break
class AliPayUtil:  # Alipay helper class
def __init__(self, app_private_key_string_, alipay_public_key_string_, helpInfo):
self.alipay = AliPay(
appid=APP_ID,
app_notify_url=None,
app_private_key_string=app_private_key_string_,
alipay_public_key_string=alipay_public_key_string_,
sign_type='RSA2',
debug=True
)
self.helpInfo = helpInfo
    def preCreateOrder(self, subject: 'order_desc', out_trade_no: int, total_amount: (float, 'eg:0.01')):
        ''' Create a pre-payment order.
        :return: PRECREATE_ORDER_FAIL if the pre-order could not be created,
            PRECREATE_ORDER_SUCCESS once the QR code for its code_url is saved
        '''
result = self.alipay.api_alipay_trade_precreate(
            subject=subject,  # item description
            out_trade_no=out_trade_no,  # merchant trade number, must be unique
total_amount=total_amount)
print('返回值:',result)
msg = result.get('msg')
if msg == 'Business Failed':
print('预创建订单失败')
self.helpInfo.clear()
self.helpInfo.setText("订单创建失败,5s后窗口关闭")
return PRECREATE_ORDER_FAIL
elif msg == 'Success':
code_url = result.get('qr_code')
self.get_qr_code(code_url)
return PRECREATE_ORDER_SUCCESS
def get_qr_code(self, code_url):
'''
        Generate the QR code image for the given URL.
        :return: None
'''
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=10,
border=4
)
        qr.add_data(code_url)  # the information encoded in the QR code
img = qr.make_image()
img.save('qr_ali.png')
print('二维码保存成功')
def query_order(self, out_trade_no_:int, cancel_time:int and 'secs'):
'''
        :param out_trade_no_: merchant trade number
        :return: a PERCHASE_* status constant
'''
print('预付订单已创建,请在%s秒内扫码支付,过期订单将被取消!'% cancel_time)
        # poll the order status
_time = 0
global exit_flag
#print("check flag:", exit_check_thread_flag)
for i in range(int(cancel_time / 2) + 10):
            if exit_flag == 0:  # stop polling when the cancel button was pressed
exit_flag = 1
return self.cancel_order(out_trade_no_, btnControl=True)
            # check the order every 2 seconds
time.sleep(2)
result = self.alipay.api_alipay_trade_query(out_trade_no=out_trade_no_)
if result.get('trade_status', '') == "TRADE_SUCCESS":
print('订单已支付')
print('订单查询返回值:',result)
return PERCHASE_COMPLETEED
_time +=2
print('accumulate time:', _time)
if _time >= cancel_time:
print('取消订单')
return self.cancel_order(out_trade_no_, cancel_time)
    def cancel_order(self, out_trade_no_: int, cancel_time=None, btnControl=None):  # the two optional parameters distinguish the call paths
'''
        Cancel the order.
        :param cancel_time: how long was waited before cancelling (if unpaid); after
            cancellation the trade shows as "closed" in the merchant center
        :return: a PERCHASE_* status constant, or 0 otherwise
'''
result = self.alipay.api_alipay_trade_cancel(out_trade_no=out_trade_no_)
print("取消订单result:", result)
resp_state = result.get('msg')
if resp_state == 'Success':
if cancel_time:
print("%s秒内未支付订单,订单已被取消!" % cancel_time)
return PERCHASE_CANCELED
elif btnControl:
return PERCHASE_CANCELED_BY_BTN
else:
return 0
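# A minimal sketch (not part of the original file; the key strings above and a
# real helpInfo widget are assumed) of driving AliPayUtil without the dialog.
def _alipay_flow_demo(help_info_widget):
    pay = AliPayUtil(app_private_key_string, alipay_public_key_string, help_info_widget)
    order_no = int(time.time())  # trade numbers must be unique
    if pay.preCreateOrder('test order', order_no, 0.01) == PRECREATE_ORDER_SUCCESS:
        # qr_ali.png now holds the QR code; poll for up to 120s, then auto-cancel
        return pay.query_order(order_no, 120)
    return PRECREATE_ORDER_FAIL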
class QueryPaymentInfoThread(QThread):
sinOut = pyqtSignal(str)
#sinOutExit = pyqtSignal()
def __init__(self, myAlipay, out_trade_no, query_time):
super(QueryPaymentInfoThread, self).__init__()
self.myalipay = myAlipay
self.out_trade_no = out_trade_no
self.query_time = query_time
def run(self):
print('开始查询订单状态')
#global exit_flag
result = self.myalipay.query_order(self.out_trade_no, self.query_time)
if result == PERCHASE_CANCELED:
self.sinOut.emit('购买超时,订单已取消!')
time.sleep(3)
#exit_flag = 0
#self.sinOutExit.emit()
elif result == PERCHASE_COMPLETEED:
self.sinOut.emit('购买成功!')
time.sleep(3)
#self.sinOutExit.emit()
#exit_flag = 0
elif result == PERCHASE_CANCELED_BY_BTN:
self.sinOut.emit('取消成功!')
time.sleep(1)
#self.sinOutExit.emit()
| 39.505703 | 116 | 0.626083 | 1,162 | 10,390 | 5.344234 | 0.222031 | 0.032206 | 0.030596 | 0.016908 | 0.375201 | 0.303221 | 0.211916 | 0.201932 | 0.056361 | 0 | 0 | 0.019521 | 0.265351 | 10,390 | 262 | 117 | 39.656489 | 0.794052 | 0.081809 | 0 | 0.041451 | 0 | 0 | 0.049621 | 0.00875 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062176 | false | 0 | 0.041451 | 0 | 0.170984 | 0.072539 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4fc5de33e8b1aa1ab1e0d787882387f1e99ef8d | 6,125 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/tests/unit/modules/network/nxos/test_nxos_bgp_af.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/tests/unit/modules/network/nxos/test_nxos_bgp_af.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/tests/unit/modules/network/nxos/test_nxos_bgp_af.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.cisco.nxos.tests.unit.compat.mock import patch
from ansible_collections.cisco.nxos.plugins.modules import nxos_bgp_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpAfModule(TestNxosModule):
module = nxos_bgp_af
def setUp(self):
super(TestNxosBgpAfModule, self).setUp()
self.mock_load_config = patch(
"ansible_collections.cisco.nxos.plugins.modules.nxos_bgp_af.load_config"
)
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch(
"ansible_collections.cisco.nxos.plugins.modules.nxos_bgp_af.get_config"
)
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosBgpAfModule, self).tearDown()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=""):
self.get_config.return_value = load_fixture("nxos_bgp", "config.cfg")
self.load_config.return_value = None
def test_nxos_bgp_af(self):
set_module_args(dict(asn=65535, afi="ipv4", safi="unicast"))
self.execute_module(
changed=True,
sort=False,
commands=["router bgp 65535", "address-family ipv4 unicast"],
)
def test_nxos_bgp_af_vrf(self):
set_module_args(
dict(asn=65535, vrf="test", afi="ipv4", safi="unicast")
)
self.execute_module(
changed=True,
sort=False,
commands=[
"router bgp 65535",
"vrf test",
"address-family ipv4 unicast",
],
)
def test_nxos_bgp_af_vrf_exists(self):
set_module_args(
dict(asn=65535, vrf="test2", afi="ipv4", safi="unicast")
)
self.execute_module(changed=False, commands=[])
def test_nxos_bgp_af_dampening_routemap(self):
set_module_args(
dict(
asn=65535,
afi="ipv4",
safi="unicast",
dampening_routemap="route-map-a",
)
)
self.execute_module(
changed=True,
commands=[
"router bgp 65535",
"address-family ipv4 unicast",
"dampening route-map route-map-a",
],
)
def test_nxos_bgp_af_dampening_manual(self):
set_module_args(
dict(
asn=65535,
afi="ipv4",
safi="unicast",
dampening_half_time=5,
dampening_suppress_time=2000,
dampening_reuse_time=1900,
dampening_max_suppress_time=10,
)
)
self.execute_module(
changed=True,
commands=[
"router bgp 65535",
"address-family ipv4 unicast",
"dampening 5 1900 2000 10",
],
)
def test_nxos_bgp_af_dampening_mix(self):
set_module_args(
dict(
asn=65535,
afi="ipv4",
safi="unicast",
dampening_routemap="route-map-a",
dampening_half_time=5,
dampening_suppress_time=2000,
dampening_reuse_time=1900,
dampening_max_suppress_time=10,
)
)
result = self.execute_module(failed=True)
self.assertEqual(
result["msg"],
"parameters are mutually exclusive: dampening_routemap|dampening_half_time, "
"dampening_routemap|dampening_suppress_time, dampening_routemap|dampening_reuse_time, "
"dampening_routemap|dampening_max_suppress_time",
)
def test_nxos_bgp_af_client(self):
set_module_args(
dict(asn=65535, afi="ipv4", safi="unicast", client_to_client=False)
)
self.execute_module(
changed=True,
commands=[
"router bgp 65535",
"address-family ipv4 unicast",
"no client-to-client reflection",
],
)
def test_nxos_bgp_af_retain_route_target(self):
set_module_args(
dict(
asn=65535, afi="l2vpn", safi="evpn", retain_route_target="abc"
)
)
self.execute_module(
changed=True,
commands=[
"router bgp 65535",
"address-family l2vpn evpn",
"retain route-target route-map abc",
],
)
def test_nxos_bgp_af_retain_route_target_all(self):
set_module_args(
dict(
asn=65535, afi="l2vpn", safi="evpn", retain_route_target="all"
)
)
self.execute_module(
changed=True,
commands=[
"router bgp 65535",
"address-family l2vpn evpn",
"retain route-target all",
],
)
def test_nxos_bgp_af_retain_route_target_exists(self):
set_module_args(
dict(
asn=65535, afi="l2vpn", safi="evpn", retain_route_target="xyz"
)
)
self.execute_module(changed=False, commands=[])
| 31.735751 | 99 | 0.577306 | 677 | 6,125 | 4.970458 | 0.257016 | 0.031204 | 0.037444 | 0.041605 | 0.581872 | 0.55156 | 0.485587 | 0.485587 | 0.4211 | 0.4211 | 0 | 0.034778 | 0.333388 | 6,125 | 192 | 100 | 31.901042 | 0.789371 | 0.110041 | 0 | 0.496732 | 0 | 0 | 0.175962 | 0.056507 | 0 | 0 | 0 | 0 | 0.006536 | 1 | 0.084967 | false | 0 | 0.026144 | 0 | 0.124183 | 0.006536 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b4fdebbc09fb6614f7cb781e964938b2e7e78dab | 7,253 | py | Python | devilry/apps/core/admin.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/apps/core/admin.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/apps/core/admin.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z | import json
from django.contrib import admin
from django.utils.html import format_html
from devilry.apps.core.models import AssignmentGroup, Subject, Period, Assignment, PeriodTag, \
CandidateAssignmentGroupHistory, ExaminerAssignmentGroupHistory, Examiner, RelatedStudent, RelatedExaminer, \
AssignmentGroupHistory, GroupInvite
from django.utils.translation import gettext_lazy
class ExaminerAdmin(admin.ModelAdmin):
pass
admin.site.register(Examiner, ExaminerAdmin)
class RelatedExaminerAdmin(admin.ModelAdmin):
pass
admin.site.register(RelatedExaminer, RelatedExaminerAdmin)
class RelatedStudentAdmin(admin.ModelAdmin):
pass
admin.site.register(RelatedStudent, RelatedStudentAdmin)
class BaseNodeAdmin(admin.ModelAdmin):
filter_horizontal = ['admins']
raw_id_fields = [
'parentnode',
]
# Added between id,name and admins in :meth:`.get_list_display`.
list_display_middle = []
# Added to search_fields in :meth:`.get_search_fields`.
extra_search_fields = []
def get_search_fields(self, request):
return [
'id',
'short_name',
'long_name',
'admins__shortname',
'admins__fullname',
] + self.extra_search_fields
def get_list_display(self, request):
return [
'id',
'short_name',
'long_name',
] + self.list_display_middle + [
'admins_as_string',
]
def admins_as_string(self, obj):
return ', '.join([user.shortname for user in obj.admins.all()])
admins_as_string.short_description = gettext_lazy("Admins")
def get_queryset(self, request):
return super(BaseNodeAdmin, self).get_queryset(request) \
.prefetch_related('admins')
class SubjectAdmin(BaseNodeAdmin):
raw_id_fields = []
admin.site.register(Subject, SubjectAdmin)
class PeriodAdmin(BaseNodeAdmin):
extra_search_fields = [
'parentnode__long_name',
'parentnode__short_name',
]
list_display_middle = [
'get_subject',
'start_time',
'end_time',
]
list_filter = [
'start_time',
'end_time',
]
def get_subject(self, obj):
return obj.subject.short_name
get_subject.short_description = gettext_lazy('Subject')
get_subject.admin_order_field = 'parentnode__short_name'
admin.site.register(Period, PeriodAdmin)
class AssignmentAdmin(BaseNodeAdmin):
extra_search_fields = [
'parentnode__long_name',
'parentnode__short_name',
'parentnode__parentnode__long_name',
'parentnode__parentnode__short_name',
]
list_display_middle = [
'get_subject',
'get_period',
'publishing_time',
'first_deadline',
]
list_filter = [
'anonymizationmode',
'publishing_time',
'first_deadline',
]
def get_subject(self, obj):
return obj.subject.short_name
get_subject.short_description = gettext_lazy('Subject')
get_subject.admin_order_field = 'parentnode__parentnode__short_name'
def get_period(self, obj):
return obj.period.short_name
get_period.short_description = gettext_lazy('Period')
get_period.admin_order_field = 'parentnode__short_name'
admin.site.register(Assignment, AssignmentAdmin)
class AssignmentGroupHistoryInline(admin.StackedInline):
model = AssignmentGroupHistory
extra = 0
exclude = ['merge_history_json']
readonly_fields = [
'get_merge_history_json_pretty',
]
def get_merge_history_json_pretty(self, obj):
return format_html(
'<pre>{}</pre>',
json.dumps(obj.merge_history, indent=2, sort_keys=True)
)
class AssignmentGroupAdmin(admin.ModelAdmin):
list_display = [
'id',
'get_subject',
'get_period',
'get_assignment',
'short_displayname',
'created_datetime',
]
search_fields = [
'id',
'parentnode__long_name',
'parentnode__short_name',
'parentnode__parentnode__long_name',
'parentnode__parentnode__short_name',
'parentnode__parentnode__parentnode__long_name',
'parentnode__parentnode__parentnode__short_name',
]
readonly_fields = [
'parentnode',
'feedback',
]
list_filter = [
'created_datetime',
]
raw_id_fields = [
'last_deadline',
'batchoperation',
'copied_from'
]
inlines = [
AssignmentGroupHistoryInline
]
def get_subject(self, obj):
return obj.subject.short_name
get_subject.short_description = gettext_lazy('Subject')
get_subject.admin_order_field = 'parentnode__parentnode__parentnode__short_name'
def get_period(self, obj):
return obj.period.short_name
get_period.short_description = gettext_lazy('Period')
get_period.admin_order_field = 'parentnode__parentnode__short_name'
def get_assignment(self, obj):
return obj.assignment.short_name
get_assignment.short_description = gettext_lazy('Assignment')
get_assignment.admin_order_field = 'parentnode__short_name'
def get_queryset(self, request):
return super(AssignmentGroupAdmin, self).get_queryset(request) \
.select_related('parentnode',
'parentnode__parentnode',
'parentnode__parentnode__parentnode')
admin.site.register(AssignmentGroup, AssignmentGroupAdmin)
class PeriodTagAdmin(admin.ModelAdmin):
raw_id_fields = ['period']
list_display = [
'id',
'prefix',
'tag',
'is_hidden',
]
filter_horizontal = [
'relatedstudents',
'relatedexaminers',
]
list_filter = [
'prefix'
]
admin.site.register(PeriodTag, PeriodTagAdmin)
class GroupInviteAdmin(admin.ModelAdmin):
raw_id_fields = [
'group',
'sent_by',
'sent_to'
]
list_display = [
'group',
'sent_by',
'sent_to',
'accepted',
'responded_datetime'
]
readonly_fields = [
'group',
'sent_by',
'sent_to',
'accepted',
'responded_datetime'
]
admin.site.register(GroupInvite, GroupInviteAdmin)
class CandidateAssignmentGroupHistoryAdmin(admin.ModelAdmin):
raw_id_fields = [
'assignment_group',
'user'
]
list_display = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
readonly_fields = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
admin.site.register(CandidateAssignmentGroupHistory, CandidateAssignmentGroupHistoryAdmin)
class ExaminerAssignmentGroupHistoryAdmin(admin.ModelAdmin):
raw_id_fields = [
'assignment_group',
'user'
]
list_display = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
readonly_fields = [
'assignment_group',
'user',
'is_add',
'created_datetime'
]
admin.site.register(ExaminerAssignmentGroupHistory, ExaminerAssignmentGroupHistoryAdmin)
| 23.548701 | 113 | 0.64718 | 683 | 7,253 | 6.481698 | 0.191801 | 0.04066 | 0.051502 | 0.042693 | 0.470296 | 0.418116 | 0.368647 | 0.352383 | 0.304947 | 0.287554 | 0 | 0.000371 | 0.256721 | 7,253 | 307 | 114 | 23.625407 | 0.820812 | 0.015993 | 0 | 0.491304 | 0 | 0 | 0.208999 | 0.086768 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052174 | false | 0.013043 | 0.021739 | 0.052174 | 0.330435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3702fd553a25de275c7515e72b1efc8cf1822fe0 | 293 | py | Python | Ex008.py | GabrielSilva2y3d/Curso-em-video-python-exercicios | 1098ccb3f8c21b411e6b6e6dc1c9bb339e80b785 | [
"MIT"
] | null | null | null | Ex008.py | GabrielSilva2y3d/Curso-em-video-python-exercicios | 1098ccb3f8c21b411e6b6e6dc1c9bb339e80b785 | [
"MIT"
] | null | null | null | Ex008.py | GabrielSilva2y3d/Curso-em-video-python-exercicios | 1098ccb3f8c21b411e6b6e6dc1c9bb339e80b785 | [
"MIT"
] | null | null | null | print('Units of Measurement')
m = float(input('Enter a distance in meters: '))
print(f'The distance of {m}m corresponds to:')
km = m/1000
hm = m/100
dam = m/10
m = m  # unchanged: the input is already in meters
dm = m * 10
cm = m * 100
mm = m * 1000
print(f"({km}km - {hm}hm - {dam}dam - {m}m - {dm:.0f}dm - {cm:.0f}cm - {mm:.0f}mm)")
| 20.928571 | 84 | 0.576792 | 60 | 293 | 2.816667 | 0.433333 | 0.035503 | 0.047337 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090129 | 0.204778 | 293 | 13 | 85 | 22.538462 | 0.635193 | 0 | 0 | 0 | 0 | 0.090909 | 0.544521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
37096124c4cc930bde39de01b5eb32a7bab3bf08 | 5,285 | py | Python | models/quantization.py | aliyun/alibabacloud-quantization-networks | 05522aabebf5188df5a92b26f96f5ebded806ca9 | [
"Apache-2.0"
] | 102 | 2019-11-08T08:45:56.000Z | 2022-03-03T05:22:14.000Z | models/quantization.py | DefTruth/alibabacloud-quantization-networks | 05522aabebf5188df5a92b26f96f5ebded806ca9 | [
"Apache-2.0"
] | 8 | 2019-12-02T08:44:36.000Z | 2021-08-12T13:35:03.000Z | models/quantization.py | DefTruth/alibabacloud-quantization-networks | 05522aabebf5188df5a92b26f96f5ebded806ca9 | [
"Apache-2.0"
] | 30 | 2019-11-22T05:16:05.000Z | 2021-08-04T07:18:56.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# quantization.py quantizes the activations of a model.
from __future__ import print_function, absolute_import
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import numpy as np
class SigmoidT(torch.autograd.Function):
    """ Sigmoid with temperature T, used during training.
    We implement the gradient for the input ourselves; the other arguments
    receive no gradient. For customizing autograd Functions, see
    https://pytorch.org/docs/stable/notes/extending.html
    """
    @staticmethod
    def forward(ctx, input, scales, n, b, T):
        ctx.save_for_backward(input)
        ctx.T = T
        ctx.b = b
        ctx.scales = scales
        ctx.n = n
        # The output is a sum of n scaled, shifted sigmoids; clamping the
        # pre-activation keeps torch.exp() numerically stable.
        buf = torch.clamp(ctx.T * (input - ctx.b[0]), min=-10.0, max=10.0)
        output = ctx.scales[0] / (1.0 + torch.exp(-buf))
        for k in range(1, ctx.n):
            buf = torch.clamp(ctx.T * (input - ctx.b[k]), min=-10.0, max=10.0)
            output += ctx.scales[k] / (1.0 + torch.exp(-buf))
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # Set T = 1 here when training a binary model.
        # ctx.T = 1
        input, = ctx.saved_tensors
        # Accumulate the gradient contribution of each sigmoid term.
        b_buf = torch.clamp(ctx.T * (input - ctx.b[0]), min=-10.0, max=10.0)
        b_output = ctx.scales[0] / (1.0 + torch.exp(-b_buf))
        temp = b_output * (1 - b_output) * ctx.T
        for j in range(1, ctx.n):
            b_buf = torch.clamp(ctx.T * (input - ctx.b[j]), min=-10.0, max=10.0)
            b_output = ctx.scales[j] / (1.0 + torch.exp(-b_buf))
            temp += b_output * (1 - b_output) * ctx.T
        grad_input = temp * grad_output
        # One return value per forward() argument; only `input` gets a gradient.
        return grad_input, None, None, None, None
sigmoidT = SigmoidT.apply
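# Minimal sketch of calling the soft quantizer directly (the values below are
# illustrative, not from the original). Function.apply takes positional args:
#
#   x = torch.linspace(-2.0, 2.0, steps=5, requires_grad=True)
#   scales = torch.tensor([1.0, 1.0])        # step heights
#   biases = torch.tensor([-0.5, 0.5])       # step positions
#   y = sigmoidT(x, scales, 2, biases, 10)   # larger T -> sharper steps
#   y.sum().backward()                       # gradients flow through backward()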
def step(x, b):
    """
    Hard step function used by the ideal quantization function at test time.
    Returns 1.0 where x > b and 0.0 elsewhere.
    """
    y = torch.zeros_like(x)
    mask = torch.gt(x - b, 0.0)
    y[mask] = 1.0
    return y
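# e.g. step(torch.tensor([-1.0, 0.5, 2.0]), b=0.0) -> tensor([0., 1., 1.])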
class Quantization(nn.Module):
    """ Quantization activation.
    Args:
        quant_values: the target quantized values, e.g. [-4, -2, -1, 0, 1, 2, 4]
        quan_bias and init_beta: the data for initializing the quantization
            parameters (biases, beta)
            - for activations, biases are shaped `N x 1` and beta `1 x 1`;
              the initialization values must be obtained offline
    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    Usage:
        - for activations, simply append this module after the activations
          when building the graph
    """
    def __init__(self, quant_values=[-1, 0, 1], quan_bias=[0], init_beta=0.0):
        super(Quantization, self).__init__()
        # register_parameter: params with grad that need to be learned;
        # register_buffer: params without grad that do not need to be learned.
        # Example: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
self.values = quant_values
# number of sigmoids
self.n = len(self.values) - 1
self.alpha = Parameter(torch.Tensor([1]))
self.beta = Parameter(torch.Tensor([1]))
self.register_buffer('biases', torch.zeros(self.n))
self.register_buffer('scales', torch.zeros(self.n))
boundary = np.array(quan_bias)
self.init_scale_and_offset()
self.bias_inited = False
self.alpha_beta_inited = False
self.init_biases(boundary)
self.init_alpha_and_beta(init_beta)
    def init_scale_and_offset(self):
        """
        Initialize the scales of the quantization function: each scale is the
        gap between two consecutive quantized values.
        """
        for i in range(self.n):
            gap = self.values[i + 1] - self.values[i]
            self.scales[i] = gap
    def init_biases(self, init_data):
        """
        Initialize the biases of the quantization function.
        init_data is a numpy array, obtained offline for activations.
        """
        assert init_data.size == self.n
        self.biases.copy_(torch.from_numpy(init_data))
        self.bias_inited = True
    def init_alpha_and_beta(self, init_beta):
        """
        Initialize the alpha and beta of the quantization function.
        init_beta is a scalar obtained offline; alpha is its reciprocal.
        """
        # NOTE: assumes a CUDA device is available.
        self.beta.data = torch.Tensor([init_beta]).cuda()
        self.alpha.data = torch.reciprocal(self.beta.data)
        self.alpha_beta_inited = True
    def forward(self, input, T=1):
        assert self.bias_inited
        input = input.mul(self.beta)
        if self.training:
            assert self.alpha_beta_inited
            output = sigmoidT(input, self.scales, self.n, self.biases, T)
        else:
            # At test time, use the ideal (hard) step quantizer instead.
            output = step(input, b=self.biases[0]) * self.scales[0]
            for i in range(1, self.n):
                output += step(input, b=self.biases[i]) * self.scales[i]
        output = output.mul(self.alpha)
        return output
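# Minimal usage sketch (illustrative values, not from the original; assumes a
# CUDA device because init_alpha_and_beta() calls .cuda()):
#
#   quant = Quantization(quant_values=[-1, 0, 1],
#                        quan_bias=[-0.5, 0.5],  # n = len(quant_values) - 1 biases
#                        init_beta=1.0).cuda()
#   quant.train()
#   y_soft = quant(torch.randn(2, 3, 4, 4).cuda(), T=5)  # soft, differentiable
#   quant.eval()
#   y_hard = quant(torch.randn(2, 3, 4, 4).cuda())       # hard step quantization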
| 36.19863 | 106 | 0.60246 | 735 | 5,285 | 4.22585 | 0.242177 | 0.016098 | 0.016742 | 0.021893 | 0.226014 | 0.173213 | 0.156471 | 0.156471 | 0.152608 | 0.119124 | 0 | 0.018724 | 0.282498 | 5,285 | 145 | 107 | 36.448276 | 0.800369 | 0.255629 | 0 | 0.049383 | 0 | 0 | 0.003437 | 0 | 0 | 0 | 0 | 0 | 0.037037 | 1 | 0.098765 | false | 0 | 0.123457 | 0 | 0.296296 | 0.012346 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |