max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
eval.py | andrew-kulikov/belarussian_word_segmenter | 0 | 6616451 | <filename>eval.py
# -*- coding: utf-8 -*-
########################################################################################################
# eval.py
#
# Description: this is an evaluation script to measure the quality of spaces restoration in text
# Usage: python eval.py golden.txt predicted.txt
# Outputs F1-score, precision and recall metrics
# Project: IHS Markit Internship Test
# Creation date: January 12, 2018
# Copyright (c) 2017 by IHS Markit
########################################################################################################
from __future__ import print_function
import re
import sys
import codecs
if len(sys.argv) != 3:
print("Two arguments expected: GOLDEN_FILE and RESULT_FILE" , file=sys.stderr)
exit(1)
def unspace(l):
return re.sub(' ','',l)
def trim_line(l):
l = re.sub('[ \t]+',' ',l)
l = l.strip(' ')
return l
# returns the quantity of non-space characters and list space_arr[] with non-space character indices, followed by space
def line2space_arr(line):
space_arr = []
size = 0
for c in line:
if c == " ":
space_arr.append(size)
else:
size +=1
print(line)
print(space_arr)
return size, space_arr
file1 = sys.argv[1]
file2 = sys.argv[2]
file1_content = codecs.open(file1, 'r+', encoding='utf-8').readlines()
file2_content = codecs.open(file2, 'r+', encoding='utf-8').readlines()
#check that file1 and file2 has equal number of lines
if len(file1_content) < len(file2_content):
print("File %s has fewer lines than %s." % (file1, file2), file=sys.stderr)
exit(1)
true_positive = 0
false_negative = 0
false_positive = 0
for idx, pair in enumerate(zip(file1_content, file2_content)):
line1 = trim_line(pair[0].rstrip())
line2 = trim_line(pair[1].rstrip())
size1, space_arr1 = line2space_arr(line1)
size2, space_arr2 = line2space_arr(line2)
#if size1 != size2 or unspace(line1) != unspace(line2):
# print("Files are not aligned at line %i" % (idx+1), file=sys.stderr)
# exit(1)
for s in space_arr1:
if s in space_arr2:
true_positive +=1
else:
false_negative +=1
for s in space_arr2:
if s not in space_arr1:
false_positive +=1
if true_positive + false_positive > 0 and true_positive + false_negative > 0:
precision = true_positive / float( true_positive + false_positive )
recall = true_positive / float( true_positive + false_negative )
f1 = 2 * precision * recall / float( precision + recall )
print("F1:\t%s\nPrecision:\t%s\nRecall:\t%s\n\nTrue_positive:\t%d\nFalse_positive:\t%d\nFalse_negative:\t%d\n" % (round(f1,5), round(precision,5), round(recall,5), true_positive+0, false_positive+0, false_negative+0))
| <filename>eval.py
# -*- coding: utf-8 -*-
########################################################################################################
# eval.py
#
# Description: this is an evaluation script to measure the quality of spaces restoration in text
# Usage: python eval.py golden.txt predicted.txt
# Outputs F1-score, precision and recall metrics
# Project: IHS Markit Internship Test
# Creation date: January 12, 2018
# Copyright (c) 2017 by IHS Markit
########################################################################################################
from __future__ import print_function
import re
import sys
import codecs
if len(sys.argv) != 3:
print("Two arguments expected: GOLDEN_FILE and RESULT_FILE" , file=sys.stderr)
exit(1)
def unspace(l):
return re.sub(' ','',l)
def trim_line(l):
l = re.sub('[ \t]+',' ',l)
l = l.strip(' ')
return l
# returns the quantity of non-space characters and list space_arr[] with non-space character indices, followed by space
def line2space_arr(line):
space_arr = []
size = 0
for c in line:
if c == " ":
space_arr.append(size)
else:
size +=1
print(line)
print(space_arr)
return size, space_arr
file1 = sys.argv[1]
file2 = sys.argv[2]
file1_content = codecs.open(file1, 'r+', encoding='utf-8').readlines()
file2_content = codecs.open(file2, 'r+', encoding='utf-8').readlines()
#check that file1 and file2 has equal number of lines
if len(file1_content) < len(file2_content):
print("File %s has fewer lines than %s." % (file1, file2), file=sys.stderr)
exit(1)
true_positive = 0
false_negative = 0
false_positive = 0
for idx, pair in enumerate(zip(file1_content, file2_content)):
line1 = trim_line(pair[0].rstrip())
line2 = trim_line(pair[1].rstrip())
size1, space_arr1 = line2space_arr(line1)
size2, space_arr2 = line2space_arr(line2)
#if size1 != size2 or unspace(line1) != unspace(line2):
# print("Files are not aligned at line %i" % (idx+1), file=sys.stderr)
# exit(1)
for s in space_arr1:
if s in space_arr2:
true_positive +=1
else:
false_negative +=1
for s in space_arr2:
if s not in space_arr1:
false_positive +=1
if true_positive + false_positive > 0 and true_positive + false_negative > 0:
precision = true_positive / float( true_positive + false_positive )
recall = true_positive / float( true_positive + false_negative )
f1 = 2 * precision * recall / float( precision + recall )
print("F1:\t%s\nPrecision:\t%s\nRecall:\t%s\n\nTrue_positive:\t%d\nFalse_positive:\t%d\nFalse_negative:\t%d\n" % (round(f1,5), round(precision,5), round(recall,5), true_positive+0, false_positive+0, false_negative+0))
| en | 0.487329 | # -*- coding: utf-8 -*- ######################################################################################################## # eval.py # # Description: this is an evaluation script to measure the quality of spaces restoration in text # Usage: python eval.py golden.txt predicted.txt # Outputs F1-score, precision and recall metrics # Project: IHS Markit Internship Test # Creation date: January 12, 2018 # Copyright (c) 2017 by IHS Markit ######################################################################################################## # returns the quantity of non-space characters and list space_arr[] with non-space character indices, followed by space #check that file1 and file2 has equal number of lines #if size1 != size2 or unspace(line1) != unspace(line2): # print("Files are not aligned at line %i" % (idx+1), file=sys.stderr) # exit(1) | 3.548395 | 4 |
cabina/_computed.py | nikitanovosibirsk/cabina | 1 | 6616452 | from typing import Any, Callable
from ._core import MetaBase
def _required(*args: Any) -> Any:
pass # pragma: nocover
class computed:
def __init__(self, fn: Callable[[Any], Any] = _required) -> None:
if fn is _required:
raise TypeError("Use @computed instead of @computed()")
self._fn = fn
def __get__(self, _: None, owner: MetaBase) -> Any:
return self._fn(owner)
| from typing import Any, Callable
from ._core import MetaBase
def _required(*args: Any) -> Any:
pass # pragma: nocover
class computed:
def __init__(self, fn: Callable[[Any], Any] = _required) -> None:
if fn is _required:
raise TypeError("Use @computed instead of @computed()")
self._fn = fn
def __get__(self, _: None, owner: MetaBase) -> Any:
return self._fn(owner)
| pl | 0.287295 | # pragma: nocover | 2.322898 | 2 |
3.4_ Словарь_dict.py | HeyArtem/python_lesson_3 | 1 | 6616453 |
# Тип данных Словарь dict
dict_temp = {}
print(type(dict_temp), dict_temp)
dict_temp = {'dict': 1, 'dict2': 2.1, 'dict3': 'name', 'dict4': [1, 2, 3]}
print(type(dict_temp), dict_temp)
print(' * ')
dict_temp = dict.fromkeys(['a', 'b']) # fromkeys создает словарь с КЛЮЧАМИ без ЗНАЧЕНИЙ
print(type(dict_temp), dict_temp)
print(' * * ')
dict_temp = dict.fromkeys(['a', 'b'], [12, '2020']) # наполняем значениями
print(type(dict_temp), dict_temp)
print(' * * * ')
dict_temp = dict(brend = 'Volvo', volume_engine = 1.5) # dict метод наполнения
print(type(dict_temp), dict_temp)
print(' * * * * ')
# Генератор словаря
dict_temp = {a: a**2 for a in range(10)}
print(dict_temp)
# Обращене к содержимому словаря
print(' Обращене к содержимому словаря ')
print(dict_temp[8])
print(' выыод всех ключей СЛОВАРЯ ')
print(dict_temp.keys())
print()
# но обычно приводят к листу
print(' но обычно приводят к листу ')
print(list(dict_temp.keys()))
print(' - - ')
# Получение значений
print(' Получение значений ')
print(list(dict_temp.values()))
print()
# также можно рабоать с парами (ключ:знач) = items, он возвращает КОРТЕЖИ
print(' также можно рабоать с парами (ключ:знач) = items ')
print(list(dict_temp.items()))
print()
# Работа с элементами
print(' Работа с элементами ')
print(type(dict_temp), dict_temp)
dict_temp[0] = 100 # к опред ключу присвоим значение
print(type(dict_temp), dict_temp)
print(' - - - - - -')
# Добавляем новую ПАРУ
print(' Добавляем новую ПАРУ ')
dict_temp['name'] = 'Dima'
print(type(dict_temp), dict_temp)
print(' / / / / / ')
# Методы
# Удаляем значение по ключу pop
print(' Удаляем значение по ключу ')
dict_temp.pop('name')
print(type(dict_temp), dict_temp)
# или вывести значение которое удалил
print('вывести значение которое удалил')
temp = dict_temp.pop(0) # почему оно не удалено!?!?!
print(temp)
print(dict_temp)
print(' *-*-*-*-*-*-*')
# Итерирование по словарю
print(' Итерирование по словарю ')
for pair in dict_temp.items():
print(pair)
# Итерирование по словарю II способ
print(' * Итерирование по словарю II способ * ')
for key, value in dict_temp.items():
print(key, value)
print(' + + + + + + + ')
# Итерирование по ключам
print(' Итерирование по ключам ')
for key in dict_temp.keys():
print(key)
print('&&&&&&&&&&&&&')
# Итерирование по ЗНАЧЕНИЯМ
print(' Итерирование по ЗНАЧЕНИЯМ ')
for value in dict_temp.values():
print(value)
|
# Тип данных Словарь dict
dict_temp = {}
print(type(dict_temp), dict_temp)
dict_temp = {'dict': 1, 'dict2': 2.1, 'dict3': 'name', 'dict4': [1, 2, 3]}
print(type(dict_temp), dict_temp)
print(' * ')
dict_temp = dict.fromkeys(['a', 'b']) # fromkeys создает словарь с КЛЮЧАМИ без ЗНАЧЕНИЙ
print(type(dict_temp), dict_temp)
print(' * * ')
dict_temp = dict.fromkeys(['a', 'b'], [12, '2020']) # наполняем значениями
print(type(dict_temp), dict_temp)
print(' * * * ')
dict_temp = dict(brend = 'Volvo', volume_engine = 1.5) # dict метод наполнения
print(type(dict_temp), dict_temp)
print(' * * * * ')
# Генератор словаря
dict_temp = {a: a**2 for a in range(10)}
print(dict_temp)
# Обращене к содержимому словаря
print(' Обращене к содержимому словаря ')
print(dict_temp[8])
print(' выыод всех ключей СЛОВАРЯ ')
print(dict_temp.keys())
print()
# но обычно приводят к листу
print(' но обычно приводят к листу ')
print(list(dict_temp.keys()))
print(' - - ')
# Получение значений
print(' Получение значений ')
print(list(dict_temp.values()))
print()
# также можно рабоать с парами (ключ:знач) = items, он возвращает КОРТЕЖИ
print(' также можно рабоать с парами (ключ:знач) = items ')
print(list(dict_temp.items()))
print()
# Работа с элементами
print(' Работа с элементами ')
print(type(dict_temp), dict_temp)
dict_temp[0] = 100 # к опред ключу присвоим значение
print(type(dict_temp), dict_temp)
print(' - - - - - -')
# Добавляем новую ПАРУ
print(' Добавляем новую ПАРУ ')
dict_temp['name'] = 'Dima'
print(type(dict_temp), dict_temp)
print(' / / / / / ')
# Методы
# Удаляем значение по ключу pop
print(' Удаляем значение по ключу ')
dict_temp.pop('name')
print(type(dict_temp), dict_temp)
# или вывести значение которое удалил
print('вывести значение которое удалил')
temp = dict_temp.pop(0) # почему оно не удалено!?!?!
print(temp)
print(dict_temp)
print(' *-*-*-*-*-*-*')
# Итерирование по словарю
print(' Итерирование по словарю ')
for pair in dict_temp.items():
print(pair)
# Итерирование по словарю II способ
print(' * Итерирование по словарю II способ * ')
for key, value in dict_temp.items():
print(key, value)
print(' + + + + + + + ')
# Итерирование по ключам
print(' Итерирование по ключам ')
for key in dict_temp.keys():
print(key)
print('&&&&&&&&&&&&&')
# Итерирование по ЗНАЧЕНИЯМ
print(' Итерирование по ЗНАЧЕНИЯМ ')
for value in dict_temp.values():
print(value)
| ru | 0.991012 | # Тип данных Словарь dict # fromkeys создает словарь с КЛЮЧАМИ без ЗНАЧЕНИЙ # наполняем значениями # dict метод наполнения # Генератор словаря # Обращене к содержимому словаря # но обычно приводят к листу # Получение значений # также можно рабоать с парами (ключ:знач) = items, он возвращает КОРТЕЖИ # Работа с элементами # к опред ключу присвоим значение # Добавляем новую ПАРУ # Методы # Удаляем значение по ключу pop # или вывести значение которое удалил # почему оно не удалено!?!?! # Итерирование по словарю # Итерирование по словарю II способ # Итерирование по ключам # Итерирование по ЗНАЧЕНИЯМ | 3.556313 | 4 |
object_graph/builder.py | eshta/django-pinject | 0 | 6616454 | from typing import List
import pinject
from pinject.object_graph import ObjectGraph
class ObjectGraphBuilder(object):
def __init__(self):
self.classes: List = []
self.modules: List = []
self.binding_specs: List = []
self.tainted: bool = True
self.object_graph: ObjectGraph = None
def get_object_graph(self) -> ObjectGraph:
if self.tainted:
self.object_graph = pinject.\
new_object_graph(classes=self.classes, modules=self.modules, binding_specs=self.binding_specs)
self.clean()
return self.object_graph
else:
return self.object_graph
def add_class(self, class_definition) -> None:
self.classes.append(class_definition)
self.taint()
def add_classes(self, class_definitions: List):
self.classes += class_definitions
self.taint()
def add_module(self, module) -> None:
self.modules.append(module)
self.taint()
def add_modules(self, modules: List) -> None:
self.modules += modules
self.taint()
def add_binding_spec(self, binding_spec: pinject.BindingSpec) -> None:
self.binding_specs.append(binding_spec)
self.taint()
def add_binding_specs(self, binding_specs: List[pinject.BindingSpec]) -> None:
self.binding_specs += binding_specs
self.taint()
def taint(self) -> None:
self.tainted = True
def clean(self) -> None:
self.tainted = False
| from typing import List
import pinject
from pinject.object_graph import ObjectGraph
class ObjectGraphBuilder(object):
def __init__(self):
self.classes: List = []
self.modules: List = []
self.binding_specs: List = []
self.tainted: bool = True
self.object_graph: ObjectGraph = None
def get_object_graph(self) -> ObjectGraph:
if self.tainted:
self.object_graph = pinject.\
new_object_graph(classes=self.classes, modules=self.modules, binding_specs=self.binding_specs)
self.clean()
return self.object_graph
else:
return self.object_graph
def add_class(self, class_definition) -> None:
self.classes.append(class_definition)
self.taint()
def add_classes(self, class_definitions: List):
self.classes += class_definitions
self.taint()
def add_module(self, module) -> None:
self.modules.append(module)
self.taint()
def add_modules(self, modules: List) -> None:
self.modules += modules
self.taint()
def add_binding_spec(self, binding_spec: pinject.BindingSpec) -> None:
self.binding_specs.append(binding_spec)
self.taint()
def add_binding_specs(self, binding_specs: List[pinject.BindingSpec]) -> None:
self.binding_specs += binding_specs
self.taint()
def taint(self) -> None:
self.tainted = True
def clean(self) -> None:
self.tainted = False
| none | 1 | 2.392414 | 2 | |
reVX/config/wind_dirs.py | NREL/reVX | 7 | 6616455 | # -*- coding: utf-8 -*-
"""
reVX Wind Directions sub-package Configurations
"""
from reV.config.base_analysis_config import AnalysisConfig
class MeanWindDirsConfig(AnalysisConfig):
"""Config framework for mean wind direction calculation"""
NAME = 'MeanWindDirs'
REQUIREMENTS = ('res_h5_fpath', 'excl_fpath', 'wdir_dsets')
def __init__(self, config):
"""
Parameters
----------
config : str | dict
Path to config .json or pre-extracted config input dictionary.
"""
super().__init__(config)
self._default_tm_dset = 'techmap_wtk'
self._default_resolution = 128
self._default_chunk_point_len = 1000
self._default_area_filter_kernel = 'queen'
@property
def res_h5_fpath(self):
"""Get the resource .h5 file path (required)."""
return self['res_h5_fpath']
@property
def excl_fpath(self):
"""Get the exclusions .h5 file path (required)."""
return self['excl_fpath']
@property
def wdir_dsets(self):
"""Get the dataset name."""
return self['wdir_dsets']
@property
def tm_dset(self):
"""Get the techmap dataset name."""
return self.get('tm_dset', self._default_tm_dset)
@property
def excl_dict(self):
"""Get the exclusions dictionary"""
return self.get('excl_dict', None)
@property
def resolution(self):
"""Get the supply curve resolution."""
return self.get('resolution', self._default_resolution)
@property
def excl_area(self):
"""Get the exclusion pixel area in km2"""
return self.get('excl_area', None)
@property
def area_filter_kernel(self):
"""Get the minimum area filter kernel name ('queen' or 'rook')."""
return self.get('area_filter_kernel', self._default_area_filter_kernel)
@property
def min_area(self):
"""Get the minimum area filter minimum area in km2."""
return self.get('min_area', None)
class ProminentWindDirsConfig(AnalysisConfig):
"""Config framework for prominent wind direction calculation"""
NAME = 'ProminentWindDirs'
REQUIREMENTS = ('powerrose_h5_fpath', 'excl_fpath')
def __init__(self, config):
"""
Parameters
----------
config : str | dict
Path to config .json or pre-extracted config input dictionary.
"""
super().__init__(config)
self._default_agg_dset = 'powerrose_100m'
self._default_tm_dset = 'techmap_wtk'
self._default_resolution = 128
self._default_chunk_point_len = 1000
@property
def powerrose_h5_fpath(self):
"""Get the powerrose .h5 file path (required)."""
return self['powerrose_h5_fpath']
@property
def excl_fpath(self):
"""Get the exclusions .h5 file path (required)."""
return self['excl_fpath']
@property
def agg_dset(self):
"""Get the aggregation dataset name."""
return self.get('agg_dset', self._default_agg_dset)
@property
def tm_dset(self):
"""Get the techmap dataset name."""
return self.get('tm_dset', self._default_tm_dset)
@property
def resolution(self):
"""Get the supply curve resolution."""
return self.get('resolution', self._default_resolution)
@property
def excl_area(self):
"""Get the exclusion pixel area in km2"""
return self.get('excl_area', None)
| # -*- coding: utf-8 -*-
"""
reVX Wind Directions sub-package Configurations
"""
from reV.config.base_analysis_config import AnalysisConfig
class MeanWindDirsConfig(AnalysisConfig):
"""Config framework for mean wind direction calculation"""
NAME = 'MeanWindDirs'
REQUIREMENTS = ('res_h5_fpath', 'excl_fpath', 'wdir_dsets')
def __init__(self, config):
"""
Parameters
----------
config : str | dict
Path to config .json or pre-extracted config input dictionary.
"""
super().__init__(config)
self._default_tm_dset = 'techmap_wtk'
self._default_resolution = 128
self._default_chunk_point_len = 1000
self._default_area_filter_kernel = 'queen'
@property
def res_h5_fpath(self):
"""Get the resource .h5 file path (required)."""
return self['res_h5_fpath']
@property
def excl_fpath(self):
"""Get the exclusions .h5 file path (required)."""
return self['excl_fpath']
@property
def wdir_dsets(self):
"""Get the dataset name."""
return self['wdir_dsets']
@property
def tm_dset(self):
"""Get the techmap dataset name."""
return self.get('tm_dset', self._default_tm_dset)
@property
def excl_dict(self):
"""Get the exclusions dictionary"""
return self.get('excl_dict', None)
@property
def resolution(self):
"""Get the supply curve resolution."""
return self.get('resolution', self._default_resolution)
@property
def excl_area(self):
"""Get the exclusion pixel area in km2"""
return self.get('excl_area', None)
@property
def area_filter_kernel(self):
"""Get the minimum area filter kernel name ('queen' or 'rook')."""
return self.get('area_filter_kernel', self._default_area_filter_kernel)
@property
def min_area(self):
"""Get the minimum area filter minimum area in km2."""
return self.get('min_area', None)
class ProminentWindDirsConfig(AnalysisConfig):
"""Config framework for prominent wind direction calculation"""
NAME = 'ProminentWindDirs'
REQUIREMENTS = ('powerrose_h5_fpath', 'excl_fpath')
def __init__(self, config):
"""
Parameters
----------
config : str | dict
Path to config .json or pre-extracted config input dictionary.
"""
super().__init__(config)
self._default_agg_dset = 'powerrose_100m'
self._default_tm_dset = 'techmap_wtk'
self._default_resolution = 128
self._default_chunk_point_len = 1000
@property
def powerrose_h5_fpath(self):
"""Get the powerrose .h5 file path (required)."""
return self['powerrose_h5_fpath']
@property
def excl_fpath(self):
"""Get the exclusions .h5 file path (required)."""
return self['excl_fpath']
@property
def agg_dset(self):
"""Get the aggregation dataset name."""
return self.get('agg_dset', self._default_agg_dset)
@property
def tm_dset(self):
"""Get the techmap dataset name."""
return self.get('tm_dset', self._default_tm_dset)
@property
def resolution(self):
"""Get the supply curve resolution."""
return self.get('resolution', self._default_resolution)
@property
def excl_area(self):
"""Get the exclusion pixel area in km2"""
return self.get('excl_area', None)
| en | 0.581259 | # -*- coding: utf-8 -*- reVX Wind Directions sub-package Configurations Config framework for mean wind direction calculation Parameters ---------- config : str | dict Path to config .json or pre-extracted config input dictionary. Get the resource .h5 file path (required). Get the exclusions .h5 file path (required). Get the dataset name. Get the techmap dataset name. Get the exclusions dictionary Get the supply curve resolution. Get the exclusion pixel area in km2 Get the minimum area filter kernel name ('queen' or 'rook'). Get the minimum area filter minimum area in km2. Config framework for prominent wind direction calculation Parameters ---------- config : str | dict Path to config .json or pre-extracted config input dictionary. Get the powerrose .h5 file path (required). Get the exclusions .h5 file path (required). Get the aggregation dataset name. Get the techmap dataset name. Get the supply curve resolution. Get the exclusion pixel area in km2 | 2.188479 | 2 |
update-environment.py | OompahLoompah/CCI-environment-updater | 1 | 6616456 | import yaml
import sys
import time
from lib.circleAPI import circleAPI
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-k", "--key", action="store", help="Environment variable name to check for")
parser.add_option("-v", "--value", action="store", help="New value to assign to variable")
options, args = parser.parse_args()
if not options.key: # if filename is not given
parser.error('Envvar key not given')
if not options.value: # if filename is not given
parser.error('New value not given')
f = open ('.config')
config = yaml.load(f)
key = config['api_key']
base_url = config['base_url']
circle = circleAPI(key, base_url)
projects = circle.getProjects()
var = options.key
value = options.value
tup = {"name": var, "value": value}
for project in projects:
envvars = circle.getEnvironmentVariables(project['username'], project['reponame'])
for envvar in envvars:
if envvar['name'] == var:
print project['username'] + "/" + project['reponame']
circle.deleteEnvironmentVariable(project['username'], project['reponame'], var)
circle.addEnvironmentVariable(project['username'], project['reponame'], tup)
time.sleep(2) #To prevent us from hammering the API too hard
| import yaml
import sys
import time
from lib.circleAPI import circleAPI
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-k", "--key", action="store", help="Environment variable name to check for")
parser.add_option("-v", "--value", action="store", help="New value to assign to variable")
options, args = parser.parse_args()
if not options.key: # if filename is not given
parser.error('Envvar key not given')
if not options.value: # if filename is not given
parser.error('New value not given')
f = open ('.config')
config = yaml.load(f)
key = config['api_key']
base_url = config['base_url']
circle = circleAPI(key, base_url)
projects = circle.getProjects()
var = options.key
value = options.value
tup = {"name": var, "value": value}
for project in projects:
envvars = circle.getEnvironmentVariables(project['username'], project['reponame'])
for envvar in envvars:
if envvar['name'] == var:
print project['username'] + "/" + project['reponame']
circle.deleteEnvironmentVariable(project['username'], project['reponame'], var)
circle.addEnvironmentVariable(project['username'], project['reponame'], tup)
time.sleep(2) #To prevent us from hammering the API too hard
| en | 0.747414 | # if filename is not given # if filename is not given #To prevent us from hammering the API too hard | 2.499451 | 2 |
CodeHS/Basic Python and Console Interaction/Rectangle.py | Kev-in123/ICS2O7 | 2 | 6616457 | <gh_stars>1-10
length=10
width=5
area=length*width
perimeter=2*(length+width)
print(area)
print(perimeter) | length=10
width=5
area=length*width
perimeter=2*(length+width)
print(area)
print(perimeter) | none | 1 | 3.601448 | 4 | |
modules/signatures/windows/bot_madness.py | Yuanmessi/Bold-Falcon | 24 | 6616458 | <gh_stars>10-100
# Copyright (C) 2014 thedude13
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Madness(Signature):
name = "bot_madness"
description = "Recognized to be an Madness bot"
severity = 3
categories = ["bot", "ddos"]
families = ["madness"]
authors = ["thedude13", "nex"]
minimum = "2.0"
indicator = "\?uid\x3d[0-9]{8}&ver\x3d[0-9].[0-9]{2}&mk\x3d[0-9a-f]{6}&os\x3d[A-Za-z0-9]+&rs\x3d[a-z]+&c\x3d[0-1]&rq\x3d[0-1]"
def on_complete(self):
for url in self.check_url(pattern=self.indicator, regex=True, all=True):
self.mark_ioc("url", url)
return self.has_marks()
| # Copyright (C) 2014 thedude13
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Madness(Signature):
name = "bot_madness"
description = "Recognized to be an Madness bot"
severity = 3
categories = ["bot", "ddos"]
families = ["madness"]
authors = ["thedude13", "nex"]
minimum = "2.0"
indicator = "\?uid\x3d[0-9]{8}&ver\x3d[0-9].[0-9]{2}&mk\x3d[0-9a-f]{6}&os\x3d[A-Za-z0-9]+&rs\x3d[a-z]+&c\x3d[0-1]&rq\x3d[0-1]"
def on_complete(self):
for url in self.check_url(pattern=self.indicator, regex=True, all=True):
self.mark_ioc("url", url)
return self.has_marks() | en | 0.884004 | # Copyright (C) 2014 thedude13 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. | 2.130123 | 2 |
rgb/step5/tests/tests.py | breki/pbt-demo | 0 | 6616459 | <reponame>breki/pbt-demo<gh_stars>0
from unittest import TestCase
from hypothesis import given, assume
from hypothesis.strategies import integers, SearchStrategy
from rgb.step5.rgb import RgbColor
# introducing custom strategy function
def color_components() -> SearchStrategy[int]:
return integers(min_value=0, max_value=255)
class RgbStep5Tests(TestCase):
def test_hex_triplet_representation_is_correct(self):
color = RgbColor(33, 52, 165)
self.assertEqual("#2134a5", color.to_hex_triplet())
@given(color_components(), color_components(), color_components())
def test_hex_triplet_is_7_characters_long(
self, r: int, g: int, b: int):
color = RgbColor(r, g, b)
triplet = color.to_hex_triplet()
self.assertEqual(7, len(triplet))
# new property
@given(integers(), integers(), integers())
def test_constructor_raises_value_error_on_invalid_component(
self, r: int, g: int, b: int):
# filtering out cases when the property does not hold
assume(not _is_valid_component(r)
or not _is_valid_component(g)
or not _is_valid_component(b))
with self.assertRaises(ValueError):
RgbColor(r, g, b)
def _is_valid_component(value: int) -> bool:
return 0 <= value <= 255
| from unittest import TestCase
from hypothesis import given, assume
from hypothesis.strategies import integers, SearchStrategy
from rgb.step5.rgb import RgbColor
# introducing custom strategy function
def color_components() -> SearchStrategy[int]:
return integers(min_value=0, max_value=255)
class RgbStep5Tests(TestCase):
def test_hex_triplet_representation_is_correct(self):
color = RgbColor(33, 52, 165)
self.assertEqual("#2134a5", color.to_hex_triplet())
@given(color_components(), color_components(), color_components())
def test_hex_triplet_is_7_characters_long(
self, r: int, g: int, b: int):
color = RgbColor(r, g, b)
triplet = color.to_hex_triplet()
self.assertEqual(7, len(triplet))
# new property
@given(integers(), integers(), integers())
def test_constructor_raises_value_error_on_invalid_component(
self, r: int, g: int, b: int):
# filtering out cases when the property does not hold
assume(not _is_valid_component(r)
or not _is_valid_component(g)
or not _is_valid_component(b))
with self.assertRaises(ValueError):
RgbColor(r, g, b)
def _is_valid_component(value: int) -> bool:
return 0 <= value <= 255 | en | 0.617383 | # introducing custom strategy function # new property # filtering out cases when the property does not hold | 3.135289 | 3 |
savecode/threeyears/idownclient/scout/scouter/scouteremail.py | Octoberr/swm0920 | 2 | 6616460 | """
scouter email
2019/07/10
"""
import traceback
import tld
from datacontract import EObjectType, IscoutTask
from .scouterbase import ScouterBase
from ..plugin import SonarApi, MXQuery, SearchApi, FBSearchEmail
from ...clientdatafeedback.scoutdatafeedback import (
Email,
ScoutFeedBackBase,
Whoisr,
MailServer,
SearchEngine,
ScreenshotSE,
SearchFile,
Phone,
NetworkProfile,
NetworkId,
NetworkProfiles,
)
from outputmanagement import OutputManagement
class ScouterEMail(ScouterBase):
    """Scouter that collects intelligence about a target e-mail address.

    For one e-mail it gathers reverse-whois records, mail-server (MX)
    information, social-network landings, search-engine results and phone
    numbers, flushing the aggregated data in bounded segments through the
    standard output pipeline.
    """

    TARGET_OBJ_TYPE = EObjectType.EMail

    def __init__(self, task: IscoutTask):
        ScouterBase.__init__(self, task)

    def __segment_output(self, root: Email, level, email) -> Email:
        """Flush *root* once it reaches the segment-output threshold.

        If a flush happened a brand-new root is returned, otherwise the
        old root is handed back unchanged.

        :param root: current Email aggregation root
        :param level: scout recursion level
        :param email: target e-mail address
        :return: the same root, or a fresh one after a flush
        """
        # flush as soon as max_output items have accumulated, then
        # restart aggregation with a new root node
        if root._subitem_count() >= self.max_output:
            self.outputdata(root.get_outputdict(), root._suffix)
            root = Email(self.task, level, email)
        return root

    def __segment_output_profiles(self, profiles: NetworkProfiles) -> NetworkProfiles:
        """Flush iscout_networkid_profile data in bounded segments."""
        if not isinstance(profiles, NetworkProfiles):
            raise Exception("Invalid NetworkProfiles for segment output")
        if len(profiles) >= self.max_output:
            self.outputdata(profiles.get_outputdict(), profiles._suffix)
            profiles = NetworkProfiles(self.task)
        return profiles

    def __output_getdata(self, root: Email, level, email: str) -> Email:
        """Flush whatever a single plugin produced.

        A single plugin may return a lot of data, so the root is flushed
        after every plugin run to keep individual outputs small enough
        for storage.

        :param root: current Email aggregation root
        :param level: scout recursion level
        :param email: target e-mail address
        :return: the same root, or a fresh one after a flush
        """
        if root._subitem_count() > 0:
            self.outputdata(root.get_outputdict(), root._suffix)
            root = Email(self.task, level, email)
        return root

    def __set_value(self, root: Email, data):
        """Attach *data* to *root* according to its concrete type.

        :param root: Email aggregation root
        :param data: a Whoisr / MailServer / SearchEngine item
        """
        if isinstance(data, Whoisr):
            root.set_whoisr(data)
        elif isinstance(data, MailServer):
            root.set_mailserver(data)
        elif isinstance(data, SearchEngine):
            root.set_searchengine(data)

    def _scoutsub(self, level: int, obj: ScoutFeedBackBase) -> iter:
        """Run every enabled e-mail plugin for *obj* and yield the results."""
        root: Email = Email(self.task, level, obj.value)
        try:
            # whois reverse lookup
            for whoisr in self._get_whoisr(
                root, self.task, level, obj, reason=self.dtools.whois_reverse
            ):
                yield whoisr
            # flush once per finished plugin so a single output never
            # grows too big for storage
            root = self.__output_getdata(root, level, email=obj.value)
            # mail server
            for mailserver in self._get_mailserver(
                root, self.task, level, obj, reason=self.dtools.mail_server
            ):
                yield mailserver
            root = self.__output_getdata(root, level, email=obj.value)
            # landing: the handling is plugin specific; the reason passed
            # here is only a hint, plugins pick their own value
            for data in self._landing(
                root, self.task, level, obj, reason=self.dtools.landing_messenger
            ):
                yield data
            root = self.__output_getdata(root, level, email=obj.value)
            # search engines
            for data in self._get_searchengine(
                root, self.task, level, obj, reason=self.dtools.urlInfo
            ):
                yield data
            root = self.__output_getdata(root, level, email=obj.value)
            # phone numbers
            for data in self._get_phone(
                root, self.task, level, obj, reason=self.dtools.phone
            ):
                yield data
            root = self.__output_getdata(root, level, email=obj.value)
            # public opinion data
            self._public(obj, reason=self.dtools.public_twitter)
        except Exception:
            self._logger.error(f"Scouter mail error, err:{traceback.format_exc()}")
        finally:
            # always flush whatever is left at the very end
            if root._subitem_count() > 0:
                self.outputdata(root.get_outputdict(), root._suffix)

    # -------------------------------------- whoisr

    def _get_whoisr(
        self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
    ) -> iter:
        """Reverse-whois lookup by e-mail account.

        :return: iterator of Whoisr items
        """
        if not task.cmd.stratagyscout.cmdemail.enabled_whois_reverse:
            return
        self._logger.debug("EMAIL:Start getting whoisr")
        email: str = obj.value
        log = f"开始收集目标{email} {self.dtools.whois_reverse}信息"
        self._outprglog(log)
        count_dict = {}
        try:
            for ew in self.__get_sonar_whoisr(root, task, level, email):
                count_dict[ew._domain] = 1
                yield ew
        except Exception:
            self._logger.error(f"Get whoisr data error, err:{traceback.format_exc()}")
        finally:
            log = (
                f"获取到目标{email}未经处理的{len(count_dict)}条{self.dtools.whois_reverse}数据"
            )
            self._outprglog(log)

    def __get_sonar_whoisr(self, root: Email, task: IscoutTask, level, email):
        """Query the Sonar API for reverse-whois data.

        :param root: Email aggregation root
        :param task: current scout task
        :param level: scout recursion level
        :param email: target e-mail address
        :return: iterator of Whoisr items
        """
        try:
            for ew in SonarApi.email_whoisr(task, level, email):
                self.__set_value(root, ew)
                root = self.__segment_output(root, level, email)
                yield ew
                task.success_count()
        except Exception:
            task.fail_count()
            self._logger.error(
                f"Get Sonar email whoisr error, err:{traceback.format_exc()}"
            )

    # ------------------------- email server

    def _get_mailserver(
        self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
    ):
        """Look up mail-server addresses for the e-mail's domain.

        :return: iterator of MailServer items
        """
        if not task.cmd.stratagyscout.cmdemail.enabled_mailserver:
            return
        self._logger.debug("EMAIL:Start getting mail server")
        res = tld.get_tld(
            obj.value, fail_silently=True, as_object=True, fix_protocol=True
        )
        if res is None:
            # get_tld() returns None on failure when fail_silently is set;
            # without this guard the .fld access below raises AttributeError
            self._logger.error(f"Cannot extract domain from: {obj.value}")
            return
        email: str = res.fld
        log = f"开始收集目标{email} {self.dtools.mail_server}信息"
        self._outprglog(log)
        count_dict = {}
        try:
            for ms in self.__get_mx_email_server(root, task, level, email):
                count_dict[ms._host] = 1
                yield ms
        except Exception:
            self._logger.error(f"Get mx mailserver error, err:{traceback.format_exc()}")
        finally:
            log = f"获取到目标{email}未经处理的{len(count_dict)}条{self.dtools.mail_server}数据"
            self._outprglog(log)

    def __get_mx_email_server(self, root: Email, task: IscoutTask, level, email):
        """Resolve mail-server hosts from the domain's MX records.

        :param root: Email aggregation root
        :param task: current scout task
        :param level: scout recursion level
        :param email: domain extracted from the target e-mail
        :return: iterator of MailServer items
        """
        try:
            mxq = MXQuery(task)
            for ms in mxq.get_mail_server(level, email):
                self.__set_value(root, ms)
                root = self.__segment_output(root, level, email)
                yield ms
                task.success_count()
        except Exception:
            task.fail_count()
            self._logger.error(f"Get mx mailserver error, err:{traceback.format_exc()}")

    def _landing(
        self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
    ):
        """Search mainstream social networks for accounts tied to the e-mail.

        :return: iterator of landing results (NetworkProfile and others)
        """
        if not task.cmd.stratagyscout.cmdemail.enabled_landing_facebook:
            return
        self._logger.debug("EMAIL:Start landing")
        log = f"开始收集目标{obj.value} {self.dtools.landing_facebook}信息"
        self._outprglog(log)
        networkprofiles: NetworkProfiles = NetworkProfiles(self.task)
        count = 0
        try:
            fb = FBSearchEmail(self.task)
            for res in fb._search_email(root._email, level):
                count += 1
                if isinstance(res, NetworkProfile):
                    networkprofiles.set_profile(res)
                    networkprofiles = self.__segment_output_profiles(networkprofiles)
                yield res
        except Exception:
            # best effort: log the failure instead of silently swallowing it
            self._logger.error(f"Landing search error, err:{traceback.format_exc()}")
        finally:
            log = f"获取到目标{obj.value}未经处理的{count}条{self.dtools.landing_facebook}数据"
            self._outprglog(log)
            # flush whatever is left, no matter how few profiles we got
            if len(networkprofiles) > 0:
                self.outputdata(
                    networkprofiles.get_outputdict(), networkprofiles._suffix
                )

    def _public(self, obj: ScoutFeedBackBase, reason=None):
        """Placeholder for public-opinion collection via network ids.

        The logic was changed to fetch related information from the
        concrete results instead; public data never joins the root, so
        nothing is done here yet.
        """
        if not self.task.cmd.stratagyscout.cmdemail:
            return
        self._logger.debug("EMAIL:Start public")
        return

    def _get_searchengine(
        self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
    ):
        """Full-word search for the e-mail on google, bing and baidu."""
        email = obj.value
        try:
            for data in self._get_google_searchengine(root, task, level, email, reason):
                yield data
            for data in self._get_bing_searchengine(root, task, level, email, reason):
                yield data
            for data in self._get_baidu_searchengine(root, task, level, email, reason):
                yield data
        except Exception:
            self._logger.error(f"Get search engine error, err:{traceback.format_exc()}")

    def __text_search_engine(
        self, root: Email, task: IscoutTask, level, email, engine: str, reason
    ):
        """Shared implementation for the three text search engines.

        :param engine: short engine name ("google" / "bing" / "baidu"),
            used to pick the config section and the SearchApi method
        :param reason: engine label used in progress logs and passed on
        :return: iterator of search-engine results
        """
        log = f"开始收集目标{email} {reason}信息"
        self._outprglog(log)
        self._logger.debug(f"EMAIL:Start getting {engine} search result")
        cfg = getattr(task.cmd.stratagyscout.cmdemail.searchengine, f"search_{engine}")
        keywords: list = cfg.keywords
        filetypes: list = cfg.filetypes
        count = 0
        try:
            sapi = SearchApi(task)
            search = getattr(sapi, f"text_{engine}_search_engine")
            for data in search(keywords, filetypes, email, level, reason):
                # screenshots and files are emitted directly; everything
                # else is aggregated on the root
                if isinstance(data, (ScreenshotSE, SearchFile)):
                    OutputManagement.output(data)
                else:
                    self.__set_value(root, data)
                count += 1
                root = self.__segment_output(root, level, email)
                yield data
        except Exception:
            self._logger.error(
                f"Get {engine} search error, err:{traceback.format_exc()}"
            )
        finally:
            log = f"获取到目标{email}未经处理的{count}条{reason}数据"
            self._outprglog(log)

    def _get_google_searchengine(
        self, root: Email, task: IscoutTask, level, email, reason=None
    ):
        """Google full-text search for the target e-mail."""
        if not task.cmd.stratagyscout.cmdemail.enabled_searchgoogle:
            return
        yield from self.__text_search_engine(
            root, task, level, email, "google", self.dtools.google
        )

    def _get_bing_searchengine(
        self, root: Email, task: IscoutTask, level, email, reason=None
    ):
        """Bing full-text search for the target e-mail."""
        if not task.cmd.stratagyscout.cmdemail.enabled_searchbing:
            return
        yield from self.__text_search_engine(
            root, task, level, email, "bing", self.dtools.bing
        )

    def _get_baidu_searchengine(
        self, root: Email, task: IscoutTask, level, email, reason=None
    ):
        """Baidu full-text search for the target e-mail."""
        if not task.cmd.stratagyscout.cmdemail.enabled_searchbaidu:
            return
        yield from self.__text_search_engine(
            root, task, level, email, "baidu", self.dtools.baidu
        )

    # -------------------------------------- phone

    def _get_phone(
        self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
    ):
        """Collect phone numbers related to the e-mail via plugins.

        :return: iterator of Phone items
        """
        if not task.cmd.stratagyscout.cmdemail.enabled_phone:
            return
        self._logger.debug("EMAIL:Start getting phone")
        email = obj.value
        log = f"开始收集目标{email} {self.dtools.phone}信息"
        self._outprglog(log)
        count_dict = {}
        try:
            # search engine results
            for data in self._google_search_phone(root, task, level, email, reason):
                count_dict[data.value] = 1
                yield data
            # whois records
            for data in self._sonarapi_get_phone(root, task, level, email, reason):
                count_dict[data.value] = 1
                yield data
        except Exception:
            self._logger.error(f"Get phone info error, err:{traceback.format_exc()}")
        finally:
            log = f"获取到目标{email}未经处理的{len(count_dict)}条{self.dtools.phone}数据"
            self._outprglog(log)

    def _google_search_phone(self, root: Email, task: IscoutTask, level, email, reason):
        """Extract phone numbers from google search results."""
        try:
            keywords: list = (
                task.cmd.stratagyscout.cmdemail.searchengine.search_google.keywords
            )
            # file-type search is intentionally disabled for phone lookup
            filetypes: list = []
            sapi = SearchApi(task)
            for data in sapi.text_google_search_engine(
                keywords, filetypes, email, level, reason
            ):
                if isinstance(data, Phone):
                    root.set_phone(data)
                    root = self.__segment_output(root, level, email)
                    yield data
        except Exception:
            self._logger.error(
                f"Get phone from google search engine error, err:{traceback.format_exc()}"
            )

    def _sonarapi_get_phone(self, root: Email, task: IscoutTask, level, email, reason):
        """Get phones via Sonar: reverse-whois first, then domain whois.

        For every domain found by the e-mail reverse-whois lookup, the
        domain-whois data is queried and any Phone items are collected.
        """
        try:
            for ew in SonarApi.email_whoisr(task, level, email):
                domain = ew._domain
                self._logger.debug(f"Sonar search a domain:{domain}")
                for data in SonarApi.domain_whois(task, level, domain, reason):
                    if isinstance(data, Phone):
                        root.set_phone(data)
                        root = self.__segment_output(root, level, domain)
                        yield data
                task.success_count()
        except Exception:
            task.fail_count()
            self._logger.error(
                f"Get phone from sonar api error, err:{traceback.format_exc()}"
            )
| """
scouter email
2019/07/10
"""
import traceback
import tld
from datacontract import EObjectType, IscoutTask
from .scouterbase import ScouterBase
from ..plugin import SonarApi, MXQuery, SearchApi, FBSearchEmail
from ...clientdatafeedback.scoutdatafeedback import (
Email,
ScoutFeedBackBase,
Whoisr,
MailServer,
SearchEngine,
ScreenshotSE,
SearchFile,
Phone,
NetworkProfile,
NetworkId,
NetworkProfiles,
)
from outputmanagement import OutputManagement
class ScouterEMail(ScouterBase):
"""
scouter email
"""
TARGET_OBJ_TYPE = EObjectType.EMail
def __init__(self, task: IscoutTask):
ScouterBase.__init__(self, task)
def __segment_output(self, root: Email, level, email) -> Email:
"""
分段输出数据,达到分段输出的标准后给新的root
没有达到那么就给旧的root
:param root:
:return:
"""
# 加载到max output就输出
# 如果输出了那么就返回新的根节点
if root._subitem_count() >= self.max_output:
self.outputdata(root.get_outputdict(), root._suffix)
root: Email = Email(self.task, level, email)
return root
def __segment_output_profiles(self, profiles: NetworkProfiles) -> NetworkProfiles:
"""输出 iscout_networkid_profile 数据"""
if not isinstance(profiles, NetworkProfiles):
raise Exception("Invalid NetworkProfiles for segment output")
if len(profiles) >= self.max_output:
self.outputdata(profiles.get_outputdict(), profiles._suffix)
profiles = NetworkProfiles(self.task)
return profiles
def __output_getdata(self, root: Email, level, email: str) -> Email:
"""
单个插件拿到的数据太大了,每个插件执行完成后都输出下
:param root:
:param level:
:param email:
:return:
"""
if root._subitem_count() > 0:
self.outputdata(root.get_outputdict(), root._suffix)
root: Email = Email(self.task, level, email)
return root
def __set_value(self, root: Email, data):
"""
插入数据
:param root:
:param data:
:return:
"""
if isinstance(data, Whoisr):
root.set_whoisr(data)
elif isinstance(data, MailServer):
root.set_mailserver(data)
elif isinstance(data, SearchEngine):
root.set_searchengine(data)
def _scoutsub(self, level: int, obj: ScoutFeedBackBase) -> iter:
root: Email = Email(self.task, level, obj.value)
# whoisr
try:
# whoisr
for whoisr in self._get_whoisr(
root, self.task, level, obj, reason=self.dtools.whois_reverse
):
yield whoisr
# 每完成一个插件输出一次,避免数据过大导致入库的问题
root = self.__output_getdata(root, level, email=obj.value)
# mailserver
for mailserver in self._get_mailserver(
root, self.task, level, obj, reason=self.dtools.mail_server
):
yield mailserver
root = self.__output_getdata(root, level, email=obj.value)
# ---------------------------------新增
# landing,public,这里的处理有点复杂,可能到时候需要自定义怎么实现
# 这里的reason是随便给的,插件具体使用的时候自己指定
for data in self._landing(
root, self.task, level, obj, reason=self.dtools.landing_messenger
):
yield data
root = self.__output_getdata(root, level, email=obj.value)
# searchengine
for data in self._get_searchengine(
root, self.task, level, obj, reason=self.dtools.urlInfo
):
yield data
root = self.__output_getdata(root, level, email=obj.value)
for data in self._get_phone(
root, self.task, level, obj, reason=self.dtools.phone
):
yield data
root = self.__output_getdata(root, level, email=obj.value)
# public
self._public(obj, reason=self.dtools.public_twitter)
except:
self._logger.error(f"Scouter mail error, err:{traceback.format_exc()}")
finally:
# 最后结束完成也要输出
if root._subitem_count() > 0:
self.outputdata(root.get_outputdict(), root._suffix)
# --------------------------------------whoisr
def _get_whoisr(
self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
) -> iter:
"""
根据邮箱账号来反查
:return:
"""
if not task.cmd.stratagyscout.cmdemail.enabled_whois_reverse:
return
self._logger.debug("EMIAL:Start getting whoisr")
email: str = obj.value
log = f"开始收集目标{email} {self.dtools.whois_reverse}信息"
self._outprglog(log)
count_dict = {}
try:
for ew in self.__get_sonar_whoisr(root, task, level, email):
count_dict[ew._domain] = 1
yield ew
except:
self._logger.error(f"Get whoisr data error, err:{traceback.format_exc()}")
finally:
log = (
f"获取到目标{email}未经处理的{count_dict.__len__()}条{self.dtools.whois_reverse}数据"
)
self._outprglog(log)
def __get_sonar_whoisr(self, root: Email, task: IscoutTask, level, email):
"""
sonar的api
:param root:
:param task:
:param level:
:param email:
:return:
"""
try:
for ew in SonarApi.email_whoisr(task, level, email):
self.__set_value(root, ew)
root = self.__segment_output(root, level, email)
yield ew
task.success_count()
except:
task.fail_count()
self._logger.error(
f"Get Sonar email whoisr error, err:{traceback.format_exc()}"
)
# -------------------------email server
def _get_mailserver(
self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
):
"""
根据邮箱后缀查询邮件服务地址
:param root:
:param task:
:param level:
:param obj:
:return:
"""
if not task.cmd.stratagyscout.cmdemail.enabled_mailserver:
return
self._logger.debug("EMIAL:Start getting mail server")
email: str = tld.get_tld(
obj.value, fail_silently=True, as_object=True, fix_protocol=True
).fld
log = f"开始收集目标{email} {self.dtools.mail_server}信息"
self._outprglog(log)
count_dict = {}
try:
for ms in self.__get_mx_email_server(root, task, level, email):
count_dict[ms._host] = 1
yield ms
except:
self._logger.error(f"Get mx mailserver error, err:{traceback.format_exc()}")
finally:
log = f"获取到目标{email}未经处理的{count_dict.__len__()}条{self.dtools.mail_server}数据"
self._outprglog(log)
def __get_mx_email_server(self, root: Email, task: IscoutTask, level, email):
"""
根据mx记录查询邮服地址
:param root:
:param task:
:param level:
:param email:
:return:
"""
try:
mxq = MXQuery(task)
for ms in mxq.get_mail_server(level, email):
self.__set_value(root, ms)
root = self.__segment_output(root, level, email)
yield ms
task.success_count()
except:
task.fail_count()
self._logger.error(f"Get mx maiserver error, err:{traceback.format_exc()}")
def _landing(
self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
):
"""
目标账号落地到各大主流网站的网络id 信息
:param root:
:param task:
:param level:
:param obj:
:return:
"""
if not task.cmd.stratagyscout.cmdemail.enabled_landing_facebook:
return
self._logger.debug("EMIAL:Start landing")
log = f"开始收集目标{obj.value} {self.dtools.landing_facebook}信息"
self._outprglog(log)
networkprofiles: NetworkProfiles = NetworkProfiles(self.task)
count = 0
try:
fb = FBSearchEmail(self.task)
for res in fb._search_email(root._email, level):
count += 1
if isinstance(res, NetworkProfile):
networkprofiles.set_profile(res)
networkprofiles = self.__segment_output_profiles(networkprofiles)
yield res
except:
pass
finally:
log = f"获取到目标{obj.value}未经处理的{count}条{self.dtools.landing_facebook}数据"
self._outprglog(log)
# 最后输出,最后剩下没有输出的,一定要输出,不管拿到多少个
if len(networkprofiles) > 0:
self.outputdata(
networkprofiles.get_outputdict(), networkprofiles._suffix
)
def _public(self, obj: ScoutFeedBackBase, reason=None):
"""
这里是用网络id去拿舆情信息
修改逻辑后是根据拿到的具体信息后再去拿相关的信息
而且public的数据是不会加入root的,所以这样就好
:param obj:
:return:
"""
if not self.task.cmd.stratagyscout.cmdemail:
return
self._logger.debug("EMIAL:Start public")
return
def _get_searchengine(
self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
):
"""
搜索引擎全词匹配目标邮箱账号相关信息
:param root:
:param task:
:param level:
:param obj:
:return:
"""
email = obj.value
try:
for data in self._get_google_searchengine(root, task, level, email, reason):
yield data
for data1 in self._get_bing_searchengine(root, task, level, email, reason):
yield data1
for data2 in self._get_baidu_searchengine(root, task, level, email, reason):
yield data2
except:
self._logger.error(f"Get search engine error, err:{traceback.format_exc()}")
def _get_google_searchengine(
self, root: Email, task: IscoutTask, level, email, reason=None
):
"""
google search engine
:param root:
:param task:
:param level:
:param email:
:param reason:
:return:
"""
if not task.cmd.stratagyscout.cmdemail.enabled_searchgoogle:
return
log = f"开始收集目标{email} {self.dtools.google}信息"
self._outprglog(log)
self._logger.debug("EMIAL:Start getting google search result")
keywords: list = (
task.cmd.stratagyscout.cmdemail.searchengine.search_google.keywords
)
filetypes: list = (
task.cmd.stratagyscout.cmdemail.searchengine.search_google.filetypes
)
count = 0
try:
sapi = SearchApi(task)
for data in sapi.text_google_search_engine(
keywords, filetypes, email, level, self.dtools.google
):
# 输出截图数据
if isinstance(data, ScreenshotSE):
OutputManagement.output(data)
elif isinstance(data, SearchFile):
OutputManagement.output(data)
else:
self.__set_value(root, data)
count += 1
root = self.__segment_output(root, level, email)
yield data
except:
self._logger.error(f"Get google search error, err:{traceback.format_exc()}")
finally:
log = f"获取到目标{email}未经处理的{count}条{self.dtools.google}数据"
self._outprglog(log)
def _get_bing_searchengine(
self, root: Email, task: IscoutTask, level, email, reason=None
):
"""
bing search engine
:param root:
:param task:
:param level:
:param email:
:param reason:
:return:
"""
if not task.cmd.stratagyscout.cmdemail.enabled_searchbing:
return
log = f"开始收集目标{email} {self.dtools.bing}信息"
self._outprglog(log)
self._logger.debug("EMIAL:Start getting bing search result")
keywords: list = (
task.cmd.stratagyscout.cmdemail.searchengine.search_bing.keywords
)
filetypes: list = (
task.cmd.stratagyscout.cmdemail.searchengine.search_bing.filetypes
)
count = 0
try:
sapi = SearchApi(task)
for data in sapi.text_bing_search_engine(
keywords, filetypes, email, level, self.dtools.bing
):
# 输出截图数据
if isinstance(data, ScreenshotSE):
OutputManagement.output(data)
elif isinstance(data, SearchFile):
OutputManagement.output(data)
else:
self.__set_value(root, data)
count += 1
root = self.__segment_output(root, level, email)
yield data
except:
self._logger.error(f"Get bing search error, err:{traceback.format_exc()}")
finally:
log = f"获取到目标{email}未经处理的{count}条{self.dtools.bing}数据"
self._outprglog(log)
def _get_baidu_searchengine(
self, root: Email, task: IscoutTask, level, email, reason=None
):
"""
baidu search engine
:param root:
:param task:
:param level:
:param email:
:param reason:
:return:
"""
if not task.cmd.stratagyscout.cmdemail.enabled_searchbaidu:
return
log = f"开始收集目标{email} {self.dtools.baidu}信息"
self._outprglog(log)
self._logger.debug("EMIAL:Start getting baidu search result")
keywords: list = (
task.cmd.stratagyscout.cmdemail.searchengine.search_baidu.keywords
)
filetypes: list = (
task.cmd.stratagyscout.cmdemail.searchengine.search_baidu.filetypes
)
count = 0
try:
sapi = SearchApi(task)
for data in sapi.text_baidu_search_engine(
keywords, filetypes, email, level, self.dtools.baidu
):
# 输出截图数据
if isinstance(data, ScreenshotSE):
OutputManagement.output(data)
elif isinstance(data, SearchFile):
OutputManagement.output(data)
else:
self.__set_value(root, data)
count += 1
root = self.__segment_output(root, level, email)
yield data
except:
self._logger.error(f"Get baidu search error, err:{traceback.format_exc()}")
finally:
log = f"获取到目标{email}未经处理的{count}条{self.dtools.baidu}数据"
self._outprglog(log)
# phone
def _get_phone(
self, root: Email, task: IscoutTask, level, obj: ScoutFeedBackBase, reason=None
):
"""
根据插件信息去查phone
:param root:
:param task:
:param level:
:param obj:
:return:
"""
if not task.cmd.stratagyscout.cmdemail.enabled_phone:
return
self._logger.debug("EMIAL:Start getting phone")
email = obj.value
log = f"开始收集目标{email} {self.dtools.phone}信息"
self._outprglog(log)
# 搜索引擎
count_dict = {}
try:
for data in self._google_search_phone(root, task, level, email, reason):
count_dict[data.value] = 1
yield data
# whois里面的信息
for data1 in self._sonarapi_get_phone(root, task, level, email, reason):
count_dict[data1.value] = 1
yield data1
except:
self._logger.error(f"Get phone info error, err:{traceback.format_exc()}")
finally:
log = f"获取到目标{email}未经处理的{count_dict.__len__()}条{self.dtools.phone}数据"
self._outprglog(log)
def _google_search_phone(self, root: Email, task: IscoutTask, level, email, reason):
"""
google浏览器去提取phone信息
:param root:
:param task:
:param level:
:param email:
:param reason:
:return:
"""
try:
keywords: list = (
task.cmd.stratagyscout.cmdemail.searchengine.search_google.keywords
)
# filetypes: list = task.cmd.stratagyscout.cmdemail.searchengine.search_google.filetypes
filetypes: list = []
sapi = SearchApi(task)
for data in sapi.text_google_search_engine(
keywords, filetypes, email, level, reason
):
if isinstance(data, Phone):
root.set_phone(data)
root = self.__segment_output(root, level, email)
yield data
except:
self._logger.error(
f"Get phone from google search engine error, err:{traceback.format_exc()}"
)
def _sonarapi_get_phone(self, root: Email, task: IscoutTask, level, email, reason):
"""
sonar api 先去查whoisr,然后使用查到的domain,再去domain whois那边拿phone
:param root:
:param task:
:param level:
:param email:
:param reason:
:return:
"""
try:
for ew in SonarApi.email_whoisr(task, level, email):
domain = ew._domain
self._logger.debug(f"Sonar search a domain:{domain}")
for data in SonarApi.domain_whois(task, level, domain, reason):
if isinstance(data, Phone):
root.set_phone(data)
root = self.__segment_output(root, level, domain)
yield data
task.success_count()
except:
task.fail_count()
self._logger.error(
f"Get phone from sonar api error, err:{traceback.format_exc()}"
)
| zh | 0.452813 | scouter email 2019/07/10 scouter email 分段输出数据,达到分段输出的标准后给新的root 没有达到那么就给旧的root :param root: :return: # 加载到max output就输出 # 如果输出了那么就返回新的根节点 输出 iscout_networkid_profile 数据 单个插件拿到的数据太大了,每个插件执行完成后都输出下 :param root: :param level: :param email: :return: 插入数据 :param root: :param data: :return: # whoisr # whoisr # 每完成一个插件输出一次,避免数据过大导致入库的问题 # mailserver # ---------------------------------新增 # landing,public,这里的处理有点复杂,可能到时候需要自定义怎么实现 # 这里的reason是随便给的,插件具体使用的时候自己指定 # searchengine # public # 最后结束完成也要输出 # --------------------------------------whoisr 根据邮箱账号来反查 :return: sonar的api :param root: :param task: :param level: :param email: :return: # -------------------------email server 根据邮箱后缀查询邮件服务地址 :param root: :param task: :param level: :param obj: :return: 根据mx记录查询邮服地址 :param root: :param task: :param level: :param email: :return: 目标账号落地到各大主流网站的网络id 信息 :param root: :param task: :param level: :param obj: :return: # 最后输出,最后剩下没有输出的,一定要输出,不管拿到多少个 这里是用网络id去拿舆情信息 修改逻辑后是根据拿到的具体信息后再去拿相关的信息 而且public的数据是不会加入root的,所以这样就好 :param obj: :return: 搜索引擎全词匹配目标邮箱账号相关信息 :param root: :param task: :param level: :param obj: :return: google search engine :param root: :param task: :param level: :param email: :param reason: :return: # 输出截图数据 bing search engine :param root: :param task: :param level: :param email: :param reason: :return: # 输出截图数据 baidu search engine :param root: :param task: :param level: :param email: :param reason: :return: # 输出截图数据 # phone 根据插件信息去查phone :param root: :param task: :param level: :param obj: :return: # 搜索引擎 # whois里面的信息 google浏览器去提取phone信息 :param root: :param task: :param level: :param email: :param reason: :return: # filetypes: list = task.cmd.stratagyscout.cmdemail.searchengine.search_google.filetypes sonar api 先去查whoisr,然后使用查到的domain,再去domain whois那边拿phone :param root: :param task: :param level: :param email: :param reason: :return: | 1.978007 | 2 |
tests/test_regression.py | StuartSul/Homemade_Neural_Network | 0 | 6616461 | <reponame>StuartSul/Homemade_Neural_Network
from inspect import signature
from random import random
from tests.test_global import *
# default relation used by generate(): the identity mapping y = x
def sample_relation(x):
    """Return *x* unchanged (identity relation, y = x)."""
    return x
def generate(size, relation=sample_relation, filename=dafault_filename_regression, save=True):
    """Generate random regression examples and serialize them.

    Each example is one random value per parameter of *relation*,
    followed by the relation's output for those inputs. Examples are
    joined by ``primary_separator`` and values within an example by
    ``secondary_separator``.

    :param size: number of examples to generate
    :param relation: callable mapping the random inputs to the target
    :param filename: file the serialized data is written to when saving
    :param save: when True, also write the data string to *filename*
    :return: the serialized data string
    """
    input_count = len(signature(relation).parameters)
    data = []
    for _ in range(size):
        example = [random() for _ in range(input_count)]
        example.append(relation(*example))
        data.append(example)
    # build with str.join instead of repeated concatenation followed by
    # trimming the trailing separators (quadratic and error prone)
    data_str = primary_separator.join(
        secondary_separator.join(str(value) for value in example)
        for example in data
    )
    if save:
        with open(filename, "w") as data_file:
            data_file.write(data_str)
    return data_str
from random import random
from tests.test_global import *
# y = x
def sample_relation(x):
return x
def generate(size, relation=sample_relation, filename=dafault_filename_regression, save=True):
input_count = len(signature(relation).parameters)
data = []
for i in range(size):
data.append([])
for j in range(input_count):
data[i].append(random())
data[i].append(relation(*data[i]))
data_str = ''
for example in data:
for value in example:
data_str += str(value) + secondary_separator
data_str = data_str[:len(data_str) - len(secondary_separator)]
data_str += primary_separator
data_str = data_str[:len(data_str) - len(primary_separator)]
if save:
with open(filename, "w") as data_file:
data_file.write(data_str)
return data_str | none | 1 | 2.912471 | 3 | |
PyCTBN/tests/structure_graph/test_structure.py | pietroepis/PyCTBN | 1 | 6616462 |
# License: MIT License
import unittest
import numpy as np
from ...PyCTBN.structure_graph.structure import Structure
class TestStructure(unittest.TestCase):
    """Unit tests for the Structure graph container."""

    @classmethod
    def setUpClass(cls):
        # shared three-node fixture: X -> Z, Y -> Z, Z -> Y
        cls.labels = ['X', 'Y', 'Z']
        cls.indxs = np.array([0, 1, 2])
        cls.vals = np.array([3, 3, 3])
        cls.edges = [('X', 'Z'), ('Y', 'Z'), ('Z', 'Y')]
        cls.vars_numb = len(cls.labels)

    @classmethod
    def _reduced_fixture(cls):
        """Return a copy of the fixture with node 'Y' removed.

        np.delete returns a new array rather than mutating in place, so
        its result must be assigned (the original code discarded it,
        leaving the index/value arrays unshortened).
        """
        labels = cls.labels[:]
        labels.remove('Y')
        indxs = np.delete(cls.indxs.copy(), 1)
        vals = np.delete(cls.vals.copy(), 1)
        edges = [('X', 'Z')]
        return labels, indxs, vals, edges, cls.vars_numb - 1

    def test_init(self):
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        self.assertListEqual(self.labels, s1.nodes_labels)
        self.assertIsInstance(s1.nodes_indexes, np.ndarray)
        self.assertTrue(np.array_equal(self.indxs, s1.nodes_indexes))
        self.assertIsInstance(s1.nodes_values, np.ndarray)
        self.assertTrue(np.array_equal(self.vals, s1.nodes_values))
        self.assertListEqual(self.edges, s1.edges)
        self.assertEqual(self.vars_numb, s1.total_variables_number)

    def test_get_node_id(self):
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        for indx, var in enumerate(self.labels):
            self.assertEqual(var, s1.get_node_id(indx))

    def test_edges_operations(self):
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        self.assertTrue(s1.contains_edge(('X', 'Z')))
        s1.add_edge(('Z', 'X'))
        self.assertTrue(s1.contains_edge(('Z', 'X')))
        s1.remove_edge(('Z', 'X'))
        self.assertFalse(s1.contains_edge(('Z', 'X')))

    def test_get_node_indx(self):
        l2, i2, v2, e2, n2 = self._reduced_fixture()
        s1 = Structure(l2, i2, v2, e2, n2)
        for indx, var in zip(i2, l2):
            self.assertEqual(indx, s1.get_node_indx(var))

    def test_get_positional_node_indx(self):
        l2, i2, v2, e2, n2 = self._reduced_fixture()
        s1 = Structure(l2, i2, v2, e2, n2)
        for indx, var in enumerate(s1.nodes_labels):
            self.assertEqual(indx, s1.get_positional_node_indx(var))

    def test_get_states_number(self):
        l2, i2, v2, e2, n2 = self._reduced_fixture()
        s1 = Structure(l2, i2, v2, e2, n2)
        for val, node in zip(v2, l2):
            self.assertEqual(val, s1.get_states_number(node))

    def test_equality(self):
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        s2 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        self.assertEqual(s1, s2)
        self.assertNotEqual(s1, 4)

    def test_repr(self):
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        print(s1)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
# License: MIT License
import unittest
import numpy as np
from ...PyCTBN.structure_graph.structure import Structure
class TestStructure(unittest.TestCase):
    """Unit tests for the ``Structure`` graph-description object."""

    @classmethod
    def setUpClass(cls):
        # Shared fixture: three nodes X, Y, Z, each with 3 states, and a
        # fixed edge set in which Z participates in every edge.
        cls.labels = ['X','Y','Z']
        cls.indxs = np.array([0,1,2])
        cls.vals = np.array([3,3,3])
        cls.edges = [('X','Z'),('Y','Z'), ('Z','Y')]
        cls.vars_numb = len(cls.labels)

    def _structure_without_y(self):
        """Build a reduced fixture with node 'Y' removed.

        Returns ``(labels, indexes, values, structure)``.  ``np.delete``
        returns a *new* array rather than mutating its input, so its result
        is re-assigned here; the original tests discarded the return value
        and therefore passed the full-length arrays to ``Structure``.
        """
        labels = self.labels[:]
        labels.remove('Y')
        indexes = np.delete(self.indxs, 1)  # drop the entry for 'Y'
        values = np.delete(self.vals, 1)
        structure = Structure(labels, indexes, values, [('X', 'Z')],
                              self.vars_numb - 1)
        return labels, indexes, values, structure

    def test_init(self):
        """Every constructor argument must be exposed through a property."""
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        self.assertListEqual(self.labels, s1.nodes_labels)
        self.assertIsInstance(s1.nodes_indexes, np.ndarray)
        self.assertTrue(np.array_equal(self.indxs, s1.nodes_indexes))
        self.assertIsInstance(s1.nodes_values, np.ndarray)
        self.assertTrue(np.array_equal(self.vals, s1.nodes_values))
        self.assertListEqual(self.edges, s1.edges)
        self.assertEqual(self.vars_numb, s1.total_variables_number)

    def test_get_node_id(self):
        """``get_node_id`` maps a positional index back to its label."""
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        for indx, var in enumerate(self.labels):
            self.assertEqual(var, s1.get_node_id(indx))

    def test_edges_operations(self):
        """Edges can be queried, added and removed."""
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        self.assertTrue(s1.contains_edge(('X','Z')))
        s1.add_edge(('Z','X'))
        self.assertTrue(s1.contains_edge(('Z','X')))
        s1.remove_edge(('Z','X'))
        self.assertFalse(s1.contains_edge(('Z','X')))

    def test_get_node_indx(self):
        """``get_node_indx`` returns the stored index for each label."""
        labels, indexes, _, s1 = self._structure_without_y()
        for indx, var in zip(indexes, labels):
            self.assertEqual(indx, s1.get_node_indx(var))

    def test_get_positional_node_indx(self):
        """Positional index must equal the label's position in ``nodes_labels``."""
        _, _, _, s1 = self._structure_without_y()
        for indx, var in enumerate(s1.nodes_labels):
            self.assertEqual(indx, s1.get_positional_node_indx(var))

    def test_get_states_number(self):
        """``get_states_number`` returns the cardinality stored per node."""
        labels, _, values, s1 = self._structure_without_y()
        for val, node in zip(values, labels):
            self.assertEqual(val, s1.get_states_number(node))

    def test_equality(self):
        """Structures built from identical data compare equal; != a non-Structure."""
        args = (self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        self.assertEqual(Structure(*args), Structure(*args))
        self.assertNotEqual(Structure(*args), 4)

    def test_repr(self):
        """Smoke test: rendering the structure as text must not raise."""
        s1 = Structure(self.labels, self.indxs, self.vals, self.edges, self.vars_numb)
        print(s1)
# Allow running this test module directly (e.g. ``python test_structure.py``).
if __name__ == '__main__':
    unittest.main()
| en | 0.39821 | # License: MIT License | 2.66147 | 3 |
exporter/SynthesisFusionGltfExporter/apper/apper/__init__.py | Autodesk/synthesis | 136 | 6616463 | """
Apper Fusion 360 API Wrapper
=========================================================
Apper a simple wrapper for the Fusion 360 API,
written in Python, for human beings.
Full documentation is at <https://apper.readthedocs.io>.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2019 by <NAME>.
:license: Apache 2.0, see LICENSE for more details.
"""
from importlib import reload
from .FusionApp import FusionApp
from .Fusion360AppEvents import Fusion360CustomEvent
from .Fusion360AppEvents import Fusion360CustomThread
from .Fusion360AppEvents import Fusion360NewThread
from .Fusion360AppEvents import Fusion360DocumentEvent
from .Fusion360AppEvents import Fusion360WorkspaceEvent
from .Fusion360AppEvents import Fusion360WebRequestEvent
from .Fusion360AppEvents import Fusion360CommandEvent
from .Fusion360CommandBase import Fusion360CommandBase
from .PaletteCommandBase import PaletteCommandBase
from .Fusion360Utilities import AppObjects
from .Fusion360Utilities import *
| """
Apper Fusion 360 API Wrapper
=========================================================
Apper a simple wrapper for the Fusion 360 API,
written in Python, for human beings.
Full documentation is at <https://apper.readthedocs.io>.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2019 by <NAME>.
:license: Apache 2.0, see LICENSE for more details.
"""
from importlib import reload
from .FusionApp import FusionApp
from .Fusion360AppEvents import Fusion360CustomEvent
from .Fusion360AppEvents import Fusion360CustomThread
from .Fusion360AppEvents import Fusion360NewThread
from .Fusion360AppEvents import Fusion360DocumentEvent
from .Fusion360AppEvents import Fusion360WorkspaceEvent
from .Fusion360AppEvents import Fusion360WebRequestEvent
from .Fusion360AppEvents import Fusion360CommandEvent
from .Fusion360CommandBase import Fusion360CommandBase
from .PaletteCommandBase import PaletteCommandBase
from .Fusion360Utilities import AppObjects
from .Fusion360Utilities import *
| en | 0.557682 | Apper Fusion 360 API Wrapper ========================================================= Apper a simple wrapper for the Fusion 360 API, written in Python, for human beings. Full documentation is at <https://apper.readthedocs.io>. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2019 by <NAME>. :license: Apache 2.0, see LICENSE for more details. | 1.184755 | 1 |
samples/physics/src/physics.py | sarosenb/pyfrc | 0 | 6616464 | #
# See the documentation for more details on how this works
#
# The idea here is you provide a simulation object that overrides specific
# pieces of WPILib, and modifies motors/sensors accordingly depending on the
# state of the simulation. An example of this would be measuring a motor
# moving for a set period of time, and then changing a limit switch to turn
# on after that period of time. This can help you do more complex simulations
# of your robot code without too much extra effort.
#
# NOTE: THIS API IS ALPHA AND WILL MOST LIKELY CHANGE!
# ... if you have better ideas on how to implement, submit a patch!
#
from pyfrc import wpilib
from pyfrc.physics import drivetrains
class PhysicsEngine(object):
'''
Simulates a motor moving something that strikes two limit switches,
one on each end of the track. Obviously, this is not particularly
realistic, but it's good enough to illustrate the point
TODO: a better way to implement this is have something track all of
the input values, and have that in a data structure, while also
providing the override capability.
'''
#: Width of robot, specified in feet
ROBOT_WIDTH = 2
ROBOT_HEIGHT = 3
ROBOT_STARTING_X = 18.5
ROBOT_STARTING_Y = 12
# In degrees, 0 is east, 90 is south
STARTING_ANGLE = 180
def __init__(self, physics_controller):
'''
:param physics_controller: `pyfrc.physics.core.Physics` object
to communicate simulation effects to
'''
self.physics_controller = physics_controller
self.jag_value = None
self.position = 0
self.last_tm = None
def update_sim(self, now, tm_diff):
'''
Called when the simulation parameters for the program need to be
updated. This is mostly when wpilib.Wait is called.
:param now: The current time as a float
:param tm_diff: The amount of time that has passed since the last
time that this function was called
'''
# Simulate the drivetrain
l_motor = wpilib.DigitalModule._pwm[0].Get()
r_motor = wpilib.DigitalModule._pwm[1].Get()
speed, rotation = drivetrains.two_motor_drivetrain(l_motor, r_motor)
self.physics_controller.drive(speed, rotation, tm_diff)
if self.jag_value is None:
return
# update position (use tm_diff so the rate is constant)
self.position += self.jag_value * tm_diff * 3
# update limit switches based on position
if self.position <= 0:
switch1 = True
switch2 = False
elif self.position > 10:
switch1 = False
switch2 = True
else:
switch1 = False
switch2 = False
# set values here
try:
wpilib.DigitalModule._io[0].value = switch1
except:
pass
try:
wpilib.DigitalModule._io[1].value = switch2
except:
pass
try:
wpilib.AnalogModule._channels[1].voltage = self.position
except:
pass
# always reset variables in case the input values aren't updated
# by the robot
self.jag_value = None
def sim_Jaguar_Set(self, obj, fn, value):
'''
Called when Jaguar.Set() is called. This function should
call fn() with the passed in value.
:param obj: Jaguar object
:param fn: Wrapped Jaguar.Set function
:param value: Value passed to Jaguar.Set
'''
if obj.channel == 4:
self.jag_value = value
fn(value)
| #
# See the documentation for more details on how this works
#
# The idea here is you provide a simulation object that overrides specific
# pieces of WPILib, and modifies motors/sensors accordingly depending on the
# state of the simulation. An example of this would be measuring a motor
# moving for a set period of time, and then changing a limit switch to turn
# on after that period of time. This can help you do more complex simulations
# of your robot code without too much extra effort.
#
# NOTE: THIS API IS ALPHA AND WILL MOST LIKELY CHANGE!
# ... if you have better ideas on how to implement, submit a patch!
#
from pyfrc import wpilib
from pyfrc.physics import drivetrains
class PhysicsEngine(object):
'''
Simulates a motor moving something that strikes two limit switches,
one on each end of the track. Obviously, this is not particularly
realistic, but it's good enough to illustrate the point
TODO: a better way to implement this is have something track all of
the input values, and have that in a data structure, while also
providing the override capability.
'''
#: Width of robot, specified in feet
ROBOT_WIDTH = 2
ROBOT_HEIGHT = 3
ROBOT_STARTING_X = 18.5
ROBOT_STARTING_Y = 12
# In degrees, 0 is east, 90 is south
STARTING_ANGLE = 180
def __init__(self, physics_controller):
'''
:param physics_controller: `pyfrc.physics.core.Physics` object
to communicate simulation effects to
'''
self.physics_controller = physics_controller
self.jag_value = None
self.position = 0
self.last_tm = None
def update_sim(self, now, tm_diff):
'''
Called when the simulation parameters for the program need to be
updated. This is mostly when wpilib.Wait is called.
:param now: The current time as a float
:param tm_diff: The amount of time that has passed since the last
time that this function was called
'''
# Simulate the drivetrain
l_motor = wpilib.DigitalModule._pwm[0].Get()
r_motor = wpilib.DigitalModule._pwm[1].Get()
speed, rotation = drivetrains.two_motor_drivetrain(l_motor, r_motor)
self.physics_controller.drive(speed, rotation, tm_diff)
if self.jag_value is None:
return
# update position (use tm_diff so the rate is constant)
self.position += self.jag_value * tm_diff * 3
# update limit switches based on position
if self.position <= 0:
switch1 = True
switch2 = False
elif self.position > 10:
switch1 = False
switch2 = True
else:
switch1 = False
switch2 = False
# set values here
try:
wpilib.DigitalModule._io[0].value = switch1
except:
pass
try:
wpilib.DigitalModule._io[1].value = switch2
except:
pass
try:
wpilib.AnalogModule._channels[1].voltage = self.position
except:
pass
# always reset variables in case the input values aren't updated
# by the robot
self.jag_value = None
def sim_Jaguar_Set(self, obj, fn, value):
'''
Called when Jaguar.Set() is called. This function should
call fn() with the passed in value.
:param obj: Jaguar object
:param fn: Wrapped Jaguar.Set function
:param value: Value passed to Jaguar.Set
'''
if obj.channel == 4:
self.jag_value = value
fn(value)
| en | 0.886528 | # # See the documentation for more details on how this works # # The idea here is you provide a simulation object that overrides specific # pieces of WPILib, and modifies motors/sensors accordingly depending on the # state of the simulation. An example of this would be measuring a motor # moving for a set period of time, and then changing a limit switch to turn # on after that period of time. This can help you do more complex simulations # of your robot code without too much extra effort. # # NOTE: THIS API IS ALPHA AND WILL MOST LIKELY CHANGE! # ... if you have better ideas on how to implement, submit a patch! # Simulates a motor moving something that strikes two limit switches, one on each end of the track. Obviously, this is not particularly realistic, but it's good enough to illustrate the point TODO: a better way to implement this is have something track all of the input values, and have that in a data structure, while also providing the override capability. #: Width of robot, specified in feet # In degrees, 0 is east, 90 is south :param physics_controller: `pyfrc.physics.core.Physics` object to communicate simulation effects to Called when the simulation parameters for the program need to be updated. This is mostly when wpilib.Wait is called. :param now: The current time as a float :param tm_diff: The amount of time that has passed since the last time that this function was called # Simulate the drivetrain # update position (use tm_diff so the rate is constant) # update limit switches based on position # set values here # always reset variables in case the input values aren't updated # by the robot Called when Jaguar.Set() is called. This function should call fn() with the passed in value. :param obj: Jaguar object :param fn: Wrapped Jaguar.Set function :param value: Value passed to Jaguar.Set | 3.373182 | 3 |
heat/utils/__init__.py | sebimarkgraf/heat | 0 | 6616465 | from . import matrixgallery
| from . import matrixgallery
| none | 1 | 0.976083 | 1 | |
app/main/forms.py | dhhagan/emfact-2 | 0 | 6616466 | <gh_stars>0
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, FloatField, IntegerField
from wtforms.validators import Required, Length, Email, Optional
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from ..models import NAICS_data
class ReactorForm(Form):
name = StringField('Name', validators = [Required()])
power = FloatField('Power (kW)')
efficiency = FloatField('Efficiency (%)')
loadingrate = FloatField('Loading Rate (kg/h)', validators = [Optional()])
releasefrac = FloatField('Fraction to Air (mol %)', validators = [Optional()])
submit = SubmitField('Add')
class HeatXForm(Form):
name = StringField('Name')
flowrate = FloatField('FlowRate')
heatcapacity = FloatField('HeatCapacity')
tempIn = FloatField('TempIn')
tempOut = FloatField('TempOut')
efficiency = FloatField('Efficiency')
submit = SubmitField('Add')
def choices():
return NAICS_data.query
class PlantInfoForm(Form):
title = StringField('title')
description = StringField('description')
location = StringField('location')
revenue = FloatField('revenue', validators = [Optional()])
naics = QuerySelectField('naics', query_factory = choices, validators = [Optional()])
submit = SubmitField('Update')
| from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, FloatField, IntegerField
from wtforms.validators import Required, Length, Email, Optional
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from ..models import NAICS_data
class ReactorForm(Form):
name = StringField('Name', validators = [Required()])
power = FloatField('Power (kW)')
efficiency = FloatField('Efficiency (%)')
loadingrate = FloatField('Loading Rate (kg/h)', validators = [Optional()])
releasefrac = FloatField('Fraction to Air (mol %)', validators = [Optional()])
submit = SubmitField('Add')
class HeatXForm(Form):
name = StringField('Name')
flowrate = FloatField('FlowRate')
heatcapacity = FloatField('HeatCapacity')
tempIn = FloatField('TempIn')
tempOut = FloatField('TempOut')
efficiency = FloatField('Efficiency')
submit = SubmitField('Add')
def choices():
return NAICS_data.query
class PlantInfoForm(Form):
title = StringField('title')
description = StringField('description')
location = StringField('location')
revenue = FloatField('revenue', validators = [Optional()])
naics = QuerySelectField('naics', query_factory = choices, validators = [Optional()])
submit = SubmitField('Update') | none | 1 | 2.523894 | 3 | |
lib/yaki/plugins/_legacy/SeeAlso.py | rcarmo/yaki-tng | 2 | 6616467 | <gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
"""
SeeAlso.py
Created by <NAME> on 2007-01-11.
Published under the MIT license.
"""
import yaki.Engine, yaki.Store
from BeautifulSoup import *
try:
import cPickle as pickle
except ImportError:
import pickle # fall back on Python version
class SeeAlsoWikiPlugin(yaki.Plugins.WikiPlugin):
def __init__(self, registry, webapp):
registry.register('markup',self, 'plugin','seealso')
self.context = webapp.getContext()
self.cache = self.context.cache
def run(self, serial, tag, tagname, pagename, soup, request, response):
# fetch the cached backlinks for this page
try:
backlinks = pickle.loads(self.cache['backlinks:' + pagename])
# fail silently if no backlinks are found
except:
return True
buffer = []
for href in backlinks:
buffer.append('<a class="seealso" href="%s">%s</a> ' % (href,backlinks[href]))
tag.replaceWith(''.join(buffer))
| #!/usr/bin/env python
# encoding: utf-8
"""
SeeAlso.py
Created by <NAME> on 2007-01-11.
Published under the MIT license.
"""
import yaki.Engine, yaki.Store
from BeautifulSoup import *
try:
import cPickle as pickle
except ImportError:
import pickle # fall back on Python version
class SeeAlsoWikiPlugin(yaki.Plugins.WikiPlugin):
def __init__(self, registry, webapp):
registry.register('markup',self, 'plugin','seealso')
self.context = webapp.getContext()
self.cache = self.context.cache
def run(self, serial, tag, tagname, pagename, soup, request, response):
# fetch the cached backlinks for this page
try:
backlinks = pickle.loads(self.cache['backlinks:' + pagename])
# fail silently if no backlinks are found
except:
return True
buffer = []
for href in backlinks:
buffer.append('<a class="seealso" href="%s">%s</a> ' % (href,backlinks[href]))
tag.replaceWith(''.join(buffer)) | en | 0.741247 | #!/usr/bin/env python # encoding: utf-8 SeeAlso.py Created by <NAME> on 2007-01-11. Published under the MIT license. # fall back on Python version # fetch the cached backlinks for this page # fail silently if no backlinks are found | 2.542581 | 3 |
data/txt_to_csv.py | agbrothers/exchange | 0 | 6616468 | import os
import pandas as pd
from glob import glob
if __name__ == "__main__":
files = glob("data/nasdaq100_1min_lcn14h/*.txt")
for file in files:
df = pd.read_csv(file)
new_path = "data/csv/" + os.path.basename(file).replace(".txt",".csv")
df.to_csv(new_path, index=False)
| import os
import pandas as pd
from glob import glob
if __name__ == "__main__":
files = glob("data/nasdaq100_1min_lcn14h/*.txt")
for file in files:
df = pd.read_csv(file)
new_path = "data/csv/" + os.path.basename(file).replace(".txt",".csv")
df.to_csv(new_path, index=False)
| none | 1 | 2.799955 | 3 | |
tests/templatetags/coltrane/test_parent.py | adamghill/coltrane | 28 | 6616469 | <filename>tests/templatetags/coltrane/test_parent.py
import pytest
from coltrane.templatetags.coltrane_tags import NoParentError, parent
class WSGIRequest:
@property
def path(self):
return "/test2/test2"
def test_parent():
expected = "/test1"
actual = parent("/test1/test2")
assert actual == expected
def test_parent_wsgi_request():
expected = "/test2"
actual = parent(WSGIRequest())
assert actual == expected
def test_parent_ends_with_slash():
expected = "/test1"
actual = parent("/test1/test2/")
assert actual == expected
def test_parent_get_root():
expected = ""
actual = parent("/test1")
assert actual == expected
def test_parent_no_parent_exception():
with pytest.raises(NoParentError):
parent("/")
def test_parent_no_parent_exception2():
with pytest.raises(NoParentError):
parent("")
| <filename>tests/templatetags/coltrane/test_parent.py
import pytest
from coltrane.templatetags.coltrane_tags import NoParentError, parent
class WSGIRequest:
@property
def path(self):
return "/test2/test2"
def test_parent():
expected = "/test1"
actual = parent("/test1/test2")
assert actual == expected
def test_parent_wsgi_request():
expected = "/test2"
actual = parent(WSGIRequest())
assert actual == expected
def test_parent_ends_with_slash():
expected = "/test1"
actual = parent("/test1/test2/")
assert actual == expected
def test_parent_get_root():
expected = ""
actual = parent("/test1")
assert actual == expected
def test_parent_no_parent_exception():
with pytest.raises(NoParentError):
parent("/")
def test_parent_no_parent_exception2():
with pytest.raises(NoParentError):
parent("")
| none | 1 | 2.38227 | 2 | |
app.py | tejasmorkar/housing_price_prediction_aws | 3 | 6616470 | <reponame>tejasmorkar/housing_price_prediction_aws<gh_stars>1-10
import streamlit as st
import altair as alt
import pydeck as pdk
train_area = st.empty()
"""
# California Housing Prices
This is the California Housing Prices dataset which contains data drawn from the 1990 U.S. Census. The following table provides descriptions, data ranges, and data types for each feature in the data set.
## Let's first take a look at imports
"""
with st.echo():
import tensorflow as tf
import numpy as np
import pandas as pd
"""
## Loading the Dataset
We will use the scikit-learn's dataset module to lead data which is already cleaned for us and only has the numerical feautures.
"""
with st.echo():
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
"""
This will load the entire data in the `housing` variable as you can see below
"""
st.subheader('Input Features')
housing.data
st.subheader('Output Lables')
housing.target
"""
## Splitting the data into Train, Test and Dev sets
This is one of the most important thing in beginning of any Machine Learning solution as the result of any model can highly depend on how well you have distributed the data into these sets.
Fourtunately for us, we have scikit-learn to the rescue where it has become as easy as 2 lines of code.
"""
with st.echo():
from sklearn.model_selection import train_test_split
X_train_full, X_test, y_train_full, y_test = train_test_split(
housing.data, housing.target
)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_full, y_train_full
)
"""
The `train_test_split()` function splits the data into 2 sets where the test set is 25% of the total dataset. We have used the same function again on the train_full to split it into train and validation set. 25% is a default parameter and you can tweak is as per your needs. Take a look at it from the [Scikit-Learn's Documentation](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html).
## Taking a look at the train data
The colums represent the following data:
"""
st.write(housing.feature_names)
"""
Now let's look at the location of the houses by plotting it on the map using Latitude and Longitude values:
"""
with st.echo():
map_data = pd.DataFrame(
X_train,
columns=[
'MedInc',
'HouseAge',
'AveRooms',
'AveBedrms',
'Population',
'AveOccup',
'latitude',
'longitude'
])
midpoint = (np.average(map_data["latitude"]), np.average(map_data["longitude"]))
st.write(pdk.Deck(
map_style="mapbox://styles/mapbox/light-v9",
initial_view_state={
"latitude": midpoint[0],
"longitude": midpoint[1],
"zoom": 6,
"pitch": 75,
},
layers=[
pdk.Layer(
"HexagonLayer",
data=map_data,
get_position=["longitude", "latitude"],
radius=1000,
elevation_scale=4,
elevation_range=[0, 10000],
pickable=True,
extruded=True,
),
],
))
"""
**Feel free to zoom in or drag while pressing ALT key to change the 3D viewing angle of the map, as required.**
## Preprocessing
As pointed out earlier, this dataset is already well preprocessed by scikit-learn for us to use directly without worrying about any NaN values and other stuff.
Although, we are going to scale the values in specific range by using `StandardScaler` to help our model work effeciently.
"""
with st.echo():
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
"""
## Creating a model
We will be creating a simple Sequential Model with first layer containing 30 neurons and the activation function of RELU.
The next layer will be single neuron layer with no activation function as we want the model to predict a range of values and not just binary or multiclass results like classification problems.
"""
st.sidebar.title('Hyperparameters')
n_neurons = st.sidebar.slider('Neurons', 1, 128, 30)
l_rate = st.sidebar.selectbox('Learning Rate', (0.0001, 0.001, 0.01), 1)
n_epochs = st.sidebar.number_input('Number of Epochs', 1, 50, 20)
with st.echo():
import tensorflow as tf
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(n_neurons, activation='relu', input_shape=X_train.shape[1:]),
tf.keras.layers.Dense(1)
])
"""
## Compiling the model
Tensorflow keras API provides us with the `model.compile()` function to assign the optimizers, loss function and a few other details for the model.
"""
with st.echo():
model.compile(
loss='mean_squared_error',
optimizer=tf.keras.optimizers.SGD(l_rate)
)
"""
## Training the model
In order to train the model you simply have to call the `fit()` function on the model with training and validation set and number of epochs you want the model to train for.
**Try playing with the hyperparameters from the sidebar on the left side and click on the `Train Model` button given below to start the training.**
"""
train = st.button('Train Model')
if train:
with st.spinner('Training Model...'):
with st.echo():
model.summary(print_fn=lambda x: st.write("{}".format(x)))
history = model.fit(
X_train,
y_train,
epochs=n_epochs,
validation_data=(X_valid, y_valid)
)
st.success('Model Training Complete!')
"""
## Model Performance
"""
with st.echo():
st.line_chart(pd.DataFrame(history.history))
"""
## Evalutating the model on Test set
Again another imortant but easy step to do is to evaluate your model on the test data which it has never seen before. Remember that you should only do this after you are sure enough about the model you'vr built and you should resist making any hyperparameter tuning after evaluating the model on the test set as it would just make it better for test set and again there will be a generalization problem when the model will see new data in production phase.
"""
with st.echo():
evaluation = model.evaluate(X_test, y_test)
evaluation
"""
> This loss on the test set is a little worse than that on the vakidation set, which is as expected, as the model has never seen the images from test set.
"""
"""
## Predictions using the Model
"""
with st.echo():
X_new = X_test[:3]
predictions = model.predict(X_new)
"""
### Predictions
"""
predictions
"""
### Ground Truth
"""
y_test[:3] | import streamlit as st
import altair as alt
import pydeck as pdk
train_area = st.empty()
"""
# California Housing Prices
This is the California Housing Prices dataset which contains data drawn from the 1990 U.S. Census. The following table provides descriptions, data ranges, and data types for each feature in the data set.
## Let's first take a look at imports
"""
with st.echo():
import tensorflow as tf
import numpy as np
import pandas as pd
"""
## Loading the Dataset
We will use the scikit-learn's dataset module to lead data which is already cleaned for us and only has the numerical feautures.
"""
with st.echo():
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
"""
This will load the entire data in the `housing` variable as you can see below
"""
st.subheader('Input Features')
housing.data
st.subheader('Output Lables')
housing.target
"""
## Splitting the data into Train, Test and Dev sets
This is one of the most important thing in beginning of any Machine Learning solution as the result of any model can highly depend on how well you have distributed the data into these sets.
Fourtunately for us, we have scikit-learn to the rescue where it has become as easy as 2 lines of code.
"""
with st.echo():
from sklearn.model_selection import train_test_split
X_train_full, X_test, y_train_full, y_test = train_test_split(
housing.data, housing.target
)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_full, y_train_full
)
"""
The `train_test_split()` function splits the data into 2 sets where the test set is 25% of the total dataset. We have used the same function again on the train_full to split it into train and validation set. 25% is a default parameter and you can tweak is as per your needs. Take a look at it from the [Scikit-Learn's Documentation](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html).
## Taking a look at the train data
The colums represent the following data:
"""
st.write(housing.feature_names)
"""
Now let's look at the location of the houses by plotting it on the map using Latitude and Longitude values:
"""
with st.echo():
map_data = pd.DataFrame(
X_train,
columns=[
'MedInc',
'HouseAge',
'AveRooms',
'AveBedrms',
'Population',
'AveOccup',
'latitude',
'longitude'
])
midpoint = (np.average(map_data["latitude"]), np.average(map_data["longitude"]))
st.write(pdk.Deck(
map_style="mapbox://styles/mapbox/light-v9",
initial_view_state={
"latitude": midpoint[0],
"longitude": midpoint[1],
"zoom": 6,
"pitch": 75,
},
layers=[
pdk.Layer(
"HexagonLayer",
data=map_data,
get_position=["longitude", "latitude"],
radius=1000,
elevation_scale=4,
elevation_range=[0, 10000],
pickable=True,
extruded=True,
),
],
))
"""
**Feel free to zoom in or drag while pressing ALT key to change the 3D viewing angle of the map, as required.**
## Preprocessing
As pointed out earlier, this dataset is already well preprocessed by scikit-learn for us to use directly without worrying about any NaN values and other stuff.
Although, we are going to scale the values in specific range by using `StandardScaler` to help our model work effeciently.
"""
with st.echo():
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
"""
## Creating a model
We will be creating a simple Sequential Model with first layer containing 30 neurons and the activation function of RELU.
The next layer will be single neuron layer with no activation function as we want the model to predict a range of values and not just binary or multiclass results like classification problems.
"""
st.sidebar.title('Hyperparameters')
n_neurons = st.sidebar.slider('Neurons', 1, 128, 30)
l_rate = st.sidebar.selectbox('Learning Rate', (0.0001, 0.001, 0.01), 1)
n_epochs = st.sidebar.number_input('Number of Epochs', 1, 50, 20)
with st.echo():
import tensorflow as tf
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(n_neurons, activation='relu', input_shape=X_train.shape[1:]),
tf.keras.layers.Dense(1)
])
"""
## Compiling the model
Tensorflow keras API provides us with the `model.compile()` function to assign the optimizers, loss function and a few other details for the model.
"""
with st.echo():
model.compile(
loss='mean_squared_error',
optimizer=tf.keras.optimizers.SGD(l_rate)
)
"""
## Training the model
In order to train the model you simply have to call the `fit()` function on the model with training and validation set and number of epochs you want the model to train for.
**Try playing with the hyperparameters from the sidebar on the left side and click on the `Train Model` button given below to start the training.**
"""
train = st.button('Train Model')
if train:
with st.spinner('Training Model...'):
with st.echo():
model.summary(print_fn=lambda x: st.write("{}".format(x)))
history = model.fit(
X_train,
y_train,
epochs=n_epochs,
validation_data=(X_valid, y_valid)
)
st.success('Model Training Complete!')
"""
## Model Performance
"""
with st.echo():
st.line_chart(pd.DataFrame(history.history))
"""
## Evalutating the model on Test set
Again another imortant but easy step to do is to evaluate your model on the test data which it has never seen before. Remember that you should only do this after you are sure enough about the model you'vr built and you should resist making any hyperparameter tuning after evaluating the model on the test set as it would just make it better for test set and again there will be a generalization problem when the model will see new data in production phase.
"""
with st.echo():
evaluation = model.evaluate(X_test, y_test)
evaluation
"""
> This loss on the test set is a little worse than that on the vakidation set, which is as expected, as the model has never seen the images from test set.
"""
"""
## Predictions using the Model
"""
with st.echo():
X_new = X_test[:3]
predictions = model.predict(X_new)
"""
### Predictions
"""
predictions
"""
### Ground Truth
"""
y_test[:3] | en | 0.904255 | # California Housing Prices
This is the California Housing Prices dataset which contains data drawn from the 1990 U.S. Census. The following table provides descriptions, data ranges, and data types for each feature in the data set.
## Let's first take a look at imports ## Loading the Dataset
We will use the scikit-learn's dataset module to lead data which is already cleaned for us and only has the numerical feautures. This will load the entire data in the `housing` variable as you can see below ## Splitting the data into Train, Test and Dev sets
This is one of the most important thing in beginning of any Machine Learning solution as the result of any model can highly depend on how well you have distributed the data into these sets.
Fourtunately for us, we have scikit-learn to the rescue where it has become as easy as 2 lines of code. The `train_test_split()` function splits the data into 2 sets where the test set is 25% of the total dataset. We have used the same function again on the train_full to split it into train and validation set. 25% is a default parameter and you can tweak is as per your needs. Take a look at it from the [Scikit-Learn's Documentation](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html).
## Taking a look at the train data
The colums represent the following data: Now let's look at the location of the houses by plotting it on the map using Latitude and Longitude values: **Feel free to zoom in or drag while pressing ALT key to change the 3D viewing angle of the map, as required.**
## Preprocessing
As pointed out earlier, this dataset is already well preprocessed by scikit-learn for us to use directly without worrying about any NaN values and other stuff.
Although, we are going to scale the values in specific range by using `StandardScaler` to help our model work effeciently. ## Creating a model
We will be creating a simple Sequential Model with first layer containing 30 neurons and the activation function of RELU.
The next layer will be single neuron layer with no activation function as we want the model to predict a range of values and not just binary or multiclass results like classification problems. ## Compiling the model
Tensorflow keras API provides us with the `model.compile()` function to assign the optimizers, loss function and a few other details for the model. ## Training the model
In order to train the model you simply have to call the `fit()` function on the model with training and validation set and number of epochs you want the model to train for.
**Try playing with the hyperparameters from the sidebar on the left side and click on the `Train Model` button given below to start the training.** ## Model Performance ## Evalutating the model on Test set
Again another imortant but easy step to do is to evaluate your model on the test data which it has never seen before. Remember that you should only do this after you are sure enough about the model you'vr built and you should resist making any hyperparameter tuning after evaluating the model on the test set as it would just make it better for test set and again there will be a generalization problem when the model will see new data in production phase. > This loss on the test set is a little worse than that on the vakidation set, which is as expected, as the model has never seen the images from test set. ## Predictions using the Model ### Predictions ### Ground Truth | 3.613313 | 4 |
Exercicios Modulo 1/ex4.py | Katakhan/TrabalhosPython2 | 0 | 6616471 | #--- Exercício 4 - Impressão de dados com a função Print
#--- Imprima textos que simulem a tela inicial de um sistema de compra de bebidas
#--- Nesta tela deve ter um cabeçalho, um menu e um rodapé
#--- O menu deve ter as opções:
#--- 1 - Listar bebidas alcoolicas
#--- 2 - Listar bebidas não alcoolicas
#--- 3 - Visualizar Pedido
#--- 4 - Sair
print ('=='*50)
print (' ', 'Bem vindo' ' ')
print('Informe a opção desejada \n1-Listar bebidas alcoolicas \n2- Listar bebidas Não alcoolicas \n3- Visualizar Pedido \n4- Sair')
print ('=='*50) | #--- Exercício 4 - Impressão de dados com a função Print
#--- Imprima textos que simulem a tela inicial de um sistema de compra de bebidas
#--- Nesta tela deve ter um cabeçalho, um menu e um rodapé
#--- O menu deve ter as opções:
#--- 1 - Listar bebidas alcoolicas
#--- 2 - Listar bebidas não alcoolicas
#--- 3 - Visualizar Pedido
#--- 4 - Sair
print ('=='*50)
print (' ', 'Bem vindo' ' ')
print('Informe a opção desejada \n1-Listar bebidas alcoolicas \n2- Listar bebidas Não alcoolicas \n3- Visualizar Pedido \n4- Sair')
print ('=='*50) | pt | 0.976857 | #--- Exercício 4 - Impressão de dados com a função Print #--- Imprima textos que simulem a tela inicial de um sistema de compra de bebidas #--- Nesta tela deve ter um cabeçalho, um menu e um rodapé #--- O menu deve ter as opções: #--- 1 - Listar bebidas alcoolicas #--- 2 - Listar bebidas não alcoolicas #--- 3 - Visualizar Pedido #--- 4 - Sair | 3.489726 | 3 |
Day5/high_score.py | CodePuzzler/100-Days-Of-Code-Python | 0 | 6616472 | # Day5 of my 100DaysOfCode Challenge
# You are going to write a program that calculates the highest score from a List of scores.
# 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆
#Write your code below this row 👇
highest_score = 0
for n in range(0, len(student_scores)):
if (highest_score < student_scores[n]):
highest_score = student_scores[n]
else:
highest_score = highest_score
print(f"The highest score in the class is: {highest_score}") | # Day5 of my 100DaysOfCode Challenge
# You are going to write a program that calculates the highest score from a List of scores.
# 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆
#Write your code below this row 👇
highest_score = 0
for n in range(0, len(student_scores)):
if (highest_score < student_scores[n]):
highest_score = student_scores[n]
else:
highest_score = highest_score
print(f"The highest score in the class is: {highest_score}") | en | 0.851597 | # Day5 of my 100DaysOfCode Challenge # You are going to write a program that calculates the highest score from a List of scores. # 🚨 Don't change the code below 👇 # 🚨 Don't change the code above 👆 #Write your code below this row 👇 | 4.154795 | 4 |
init.py | jedybg/resolve-advanced-importer | 7 | 6616473 | import base64
import tkinter as tk
from PIL import ImageTk, Image
from icon import icon
mainWindow = tk.Tk()
def InitializeTkWindow():
mainWindow.title("DaVinci Resolve Advanced Importer")
mainWindow.resizable(False, False)
mainWindow.call('wm', 'iconphoto', mainWindow._w, ImageTk.PhotoImage(data=base64.b64decode(icon)))
| import base64
import tkinter as tk
from PIL import ImageTk, Image
from icon import icon
mainWindow = tk.Tk()
def InitializeTkWindow():
mainWindow.title("DaVinci Resolve Advanced Importer")
mainWindow.resizable(False, False)
mainWindow.call('wm', 'iconphoto', mainWindow._w, ImageTk.PhotoImage(data=base64.b64decode(icon)))
| none | 1 | 2.343765 | 2 | |
rosys/pathplanning/binary_renderer.py | zauberzeug/rosys | 1 | 6616474 | import numpy as np
from matplotlib.path import Path
class BinaryRenderer:
def __init__(self, size):
self.map = np.zeros(size, dtype=bool)
self.xx, self.yy = np.meshgrid(range(size[1]), range(size[0]))
self.xy = np.vstack((self.xx.flatten(), self.yy.flatten())).T
def circle(self, x, y, radius, value=True):
x0 = max(int(x - radius), 0)
y0 = max(int(y - radius), 0)
x1 = min(int(x + radius) + 2, self.map.shape[1] - 1)
y1 = min(int(y + radius) + 2, self.map.shape[0] - 1)
roi = self.map[y0:y1, x0:x1]
sqr_dist = (self.xx[y0:y1, x0:x1] - x)**2 + (self.yy[y0:y1, x0:x1] - y)**2
roi[sqr_dist <= radius**2] = value
self.map[y0:y1, x0:x1] = roi
def polygon(self, points, value=True):
x0 = max(int(points[:, 0].min()), 0)
y0 = max(int(points[:, 1].min()), 0)
x1 = min(int(points[:, 0].max()) + 2, self.map.shape[1] - 1)
y1 = min(int(points[:, 1].max()) + 2, self.map.shape[0] - 1)
xy = np.vstack((self.xx[y0:y1, x0:x1].flatten(), self.yy[y0:y1, x0:x1].flatten())).T
roi = self.map[y0:y1, x0:x1]
roi[Path(points).contains_points(xy).reshape(roi.shape)] = value
self.map[y0:y1, x0:x1] = roi
| import numpy as np
from matplotlib.path import Path
class BinaryRenderer:
def __init__(self, size):
self.map = np.zeros(size, dtype=bool)
self.xx, self.yy = np.meshgrid(range(size[1]), range(size[0]))
self.xy = np.vstack((self.xx.flatten(), self.yy.flatten())).T
def circle(self, x, y, radius, value=True):
x0 = max(int(x - radius), 0)
y0 = max(int(y - radius), 0)
x1 = min(int(x + radius) + 2, self.map.shape[1] - 1)
y1 = min(int(y + radius) + 2, self.map.shape[0] - 1)
roi = self.map[y0:y1, x0:x1]
sqr_dist = (self.xx[y0:y1, x0:x1] - x)**2 + (self.yy[y0:y1, x0:x1] - y)**2
roi[sqr_dist <= radius**2] = value
self.map[y0:y1, x0:x1] = roi
def polygon(self, points, value=True):
x0 = max(int(points[:, 0].min()), 0)
y0 = max(int(points[:, 1].min()), 0)
x1 = min(int(points[:, 0].max()) + 2, self.map.shape[1] - 1)
y1 = min(int(points[:, 1].max()) + 2, self.map.shape[0] - 1)
xy = np.vstack((self.xx[y0:y1, x0:x1].flatten(), self.yy[y0:y1, x0:x1].flatten())).T
roi = self.map[y0:y1, x0:x1]
roi[Path(points).contains_points(xy).reshape(roi.shape)] = value
self.map[y0:y1, x0:x1] = roi
| none | 1 | 2.80261 | 3 | |
LeetCode/Python/Easy/PathSum3.py | alui07/Technical-Interview | 0 | 6616475 | <reponame>alui07/Technical-Interview<gh_stars>0
""" 437. Path Sum III
You are given a binary tree in which each node contains an integer value.
Find the number of paths that sum to a given value.
The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).
The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.
Example:
root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
10
/ \
5 -3
/ \ \
3 2 11
/ \ \
3 -2 1
Return 3. The paths that sum to 8 are:
1. 5 -> 3
2. 5 -> 2 -> 1
3. -3 -> 11
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def pathSum(self, root: TreeNode, sum: int) -> int:
if root is None: return 0
# go left include root
leftRoot = Solution.sumPaths(self, root.left, sum - root.val)
# go left not include root
left = Solution.pathSum(self, root.left, sum)
# go right include root
rightRoot = Solution.sumPaths(self, root.right, sum - root.val)
# go right not include root
right = Solution.pathSum(self, root.right, sum)
# check val
return leftRoot + left + rightRoot + right + 1 if root.val == sum else leftRoot + left + rightRoot + right
# return
def sumPaths(self, root: TreeNode, sum: int) -> int:
if root is None: return 0
#go left
left = Solution.sumPaths(self, root.left, sum - root.val)
#go right
right = Solution.sumPaths(self, root.right, sum - root.val)
#check self
return left + right + 1 if root.val == sum else left + right | """ 437. Path Sum III
You are given a binary tree in which each node contains an integer value.
Find the number of paths that sum to a given value.
The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes).
The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000.
Example:
root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
10
/ \
5 -3
/ \ \
3 2 11
/ \ \
3 -2 1
Return 3. The paths that sum to 8 are:
1. 5 -> 3
2. 5 -> 2 -> 1
3. -3 -> 11
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def pathSum(self, root: TreeNode, sum: int) -> int:
if root is None: return 0
# go left include root
leftRoot = Solution.sumPaths(self, root.left, sum - root.val)
# go left not include root
left = Solution.pathSum(self, root.left, sum)
# go right include root
rightRoot = Solution.sumPaths(self, root.right, sum - root.val)
# go right not include root
right = Solution.pathSum(self, root.right, sum)
# check val
return leftRoot + left + rightRoot + right + 1 if root.val == sum else leftRoot + left + rightRoot + right
# return
def sumPaths(self, root: TreeNode, sum: int) -> int:
if root is None: return 0
#go left
left = Solution.sumPaths(self, root.left, sum - root.val)
#go right
right = Solution.sumPaths(self, root.right, sum - root.val)
#check self
return left + right + 1 if root.val == sum else left + right | en | 0.814894 | 437. Path Sum III You are given a binary tree in which each node contains an integer value. Find the number of paths that sum to a given value. The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes). The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000. Example: root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8 10 / \ 5 -3 / \ \ 3 2 11 / \ \ 3 -2 1 Return 3. The paths that sum to 8 are: 1. 5 -> 3 2. 5 -> 2 -> 1 3. -3 -> 11 # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None # go left include root # go left not include root # go right include root # go right not include root # check val # return #go left #go right #check self | 3.818749 | 4 |
Maya/api.py | peerke88/SkinningTools | 7 | 6616476 | """
Maya stub of imports used in the UI library.
The idea is to make this file as short as possible
while leaving room for other packages to implement features.
"""
import functools, os, sys, platform
from SkinningTools.Maya.tools import shared, joints
from SkinningTools.Maya.tools import weightPaintUtils
from SkinningTools.UI.qt_util import QObject, QApplication
from maya import cmds, mel, OpenMayaUI
from maya.api import OpenMaya
from SkinningTools.UI.utils import *
_DEBUG = getDebugState()
def get_maya_window():
    """ get the current maya window as a qt widget
    :return: the maya main window widget (or a wrapped pointer to it)
    :rtype: QWidget
    """
    for widget in QApplication.allWidgets():
        if widget.objectName() == "MayaWindow":
            return widget
    # fall back to wrapping the raw C++ pointer; int() replaces the old long()
    # call, which no longer exists under Python 3 (Maya 2022+)
    return wrapinstance(int(OpenMayaUI.MQtUtil.mainWindow()))
def selectedObjectVertexList(includeObjects=False):
    """ get the current object/component selection
    :param includeObjects: if `True` each vertex is paired with the mesh it belongs to, if `False` only the vertex components are returned
    :type includeObjects: bool
    :return: list of vertices, or list of (mesh, vertex) pairs
    :rtype: list
    """
    selection = cmds.ls(sl=True, l=True)
    if not selection:
        return []
    vertices = shared.convertToVertexList(selection) or []
    if not includeObjects:
        return vertices
    return [(vertex.split('.', 1)[0], vertex) for vertex in vertices]
# thin aliases forwarding maya/tool helpers, so UI code only has to import this api module
skinPercent = cmds.skinPercent
meshVertexList = shared.convertToVertexList
addCleanJoint = joints.addCleanJoint
skinClusterForObject = shared.skinCluster
# same skincluster lookup, pre-bound with silent=True (headless / no prompts)
skinClusterForObjectHeadless = functools.partial(shared.skinCluster, silent=True)
dec_undo = shared.dec_undo
def selectedSkinnedShapes():
    """ get the shapes of skinned objects
    :return: list of shapes
    :rtype: list
    """
    shapes = set(cmds.ls(sl=True, l=True, o=True, type='shape') or [])
    transforms = cmds.ls(sl=True, l=True, o=True, type='transform')
    if transforms:
        # also consider shapes parented under selected transforms
        shapes.update(cmds.listRelatives(transforms, c=True, f=True, type='shape') or [])
    skinnedShapes = []
    for cluster in cmds.ls(type='skinCluster'):
        geometry = cmds.skinCluster(cluster, q=True, g=True) or []
        for shape in cmds.ls(geometry, l=True) or []:
            if shape in shapes:
                skinnedShapes.append(shape)
    return skinnedShapes
def loadPlugin(plugin):
    """ load the given maya plugin when it is not already loaded and registered
    :param plugin: name or path of the plugin to load
    :type plugin: string
    """
    loaded = cmds.pluginInfo(plugin, q=True, loaded=True)
    registered = cmds.pluginInfo(plugin, q=True, registered=True)
    if not registered or not loaded:
        try:
            cmds.loadPlugin(plugin)
        except Exception as e:
            # best effort: report the failure but keep the tool running
            print(e)
def getMayaVersion():
    """ get the current general maya version in which this tool is launched
    :return: maya version number as string
    :rtype: string
    """
    onWindows = platform.system() == "Windows"
    if onWindows and "maya" in sys.executable:
        # derive the version from the install path, e.g. ".../Maya2020/bin/maya.exe" -> "2020"
        return sys.executable.split("Maya")[-1].split(os.sep)[0]
    if not onWindows:
        return cmds.about(version=1)
    # fallback: api version with the last two digits stripped off
    return str(cmds.about(apiVersion=True))[:-2]
def getPluginSuffix():
    """ get the current plugin suffix based on the os that we are running
    :return: suffix for plugin files specific to a particular os
    :rtype: string
    """
    system = platform.system()
    if system == "Darwin":
        return ".bundle"
    if system == "Linux":
        return ".so"
    # windows (and anything else) uses maya's default plugin extension
    return ".mll"
def getPlugin():
    """ get the smoothbrush plugin based on information gathered on how maya is run
    :return: the path of the plugin to load
    :rtype: string
    """
    relativePath = "plugin/skinToolWeightsCpp/comp/Maya%s/plug-ins/SkinCommands%s" % (getMayaVersion(), getPluginSuffix())
    return os.path.join(os.path.dirname(__file__), relativePath)
def connectSelectionChangedCallback(callback):
    """ connect a callback to a selection changed event
    :param callback: the callback to connect
    :type callback: function
    :return: scriptjob id holding the callback (pass it to disconnectCallback to remove)
    :rtype: int
    """
    return cmds.scriptJob(e=('SelectionChanged', callback))
def disconnectCallback(handle):
    """ disconnect a callback present in the scene
    :param handle: the scriptjob id returned by connectSelectionChangedCallback
    :type handle: int
    """
    if isinstance(handle, int):  # in the future we can also handle MCallbackId from API callbacks here
        cmds.scriptJob(kill=handle, force=True)
    else:
        print("Unrecognized handle")
def getApiDir():
    """ get the path to the current file
    :return: absolute path of the directory containing this api file
    :rtype: string
    """
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def dec_loadPlugin(input):
    """ forwarded decorator function to load plugins
    :note: maybe remove this? too many similar functions? combine them all together
    :param input: file name of the (python)plugin to load, relative to this package's plugin folder
    :type input: string
    :return: the decorator produced by shared.dec_loadPlugin
    """
    return shared.dec_loadPlugin(os.path.join(getApiDir(), "plugin/%s" % input))
def skinClusterInfluences(skinCluster):
    """ forwarded function to get joint information from skincluster
    :param skinCluster: skincluster to gather data from
    :type skinCluster: string
    :return: list of all joints(fullpath) connected to the skincluster
    :rtype: list
    """
    # the influences drive the .matrix array, so its incoming connections are the joints
    return cmds.ls(cmds.listConnections("%s.matrix" % skinCluster, source=True), l=1)
def getSkinWeights(geometry):
    """ forwarded function to get the skinning data of a mesh (delegates to shared.getWeights)
    :param geometry: mesh to get data from
    :type geometry: string
    :return: list of all weights on the mesh
    :rtype: list
    """
    return shared.getWeights(geometry)
def setSkinWeights(geometry, skinCluster, weights, influenceIndices=None):
    """ forwarded function to set the skinning data on a mesh
    :param geometry: mesh to set data to
    :type geometry: string
    :param skinCluster: skincluster attached to the current geometry
    :type skinCluster: string
    :param weights: list of weights to set
    :type weights: list
    :param influenceIndices: list of joints to pair with the weights; when omitted
        the weights are applied through the SkinWeights plugin command instead
    :type influenceIndices: list
    """
    if influenceIndices:
        # list() is required: under Python 3 zip() yields an iterator, which the
        # tv flag of skinPercent does not accept
        cmds.skinPercent(skinCluster, geometry, tv=list(zip(influenceIndices, weights)))
    else:
        cmds.SkinWeights(geometry, skinCluster, nwt=weights)
def getSingleVertexWeight(skinClusterHandle, vertexHandle, influenceHandle):
    """given a skin, a vertex and a joint, return the weight
    skin cluster can be obtained with skinClusterForObject
    vertex can be obtained with selectedObjectVertexList(True), joint can be obtained with skinClusterInfluences
    :param skinClusterHandle: name of the current skincluster
    :type skinClusterHandle: string
    :param vertexHandle: name of current vertex
    :type vertexHandle: string
    :param influenceHandle: name of bone to get data from
    :type influenceHandle: string
    :return: the weight of the given influence on the given vertex
    :rtype: float
    """
    return cmds.skinPercent(skinClusterHandle, vertexHandle, q=True, t=influenceHandle)
def getSingleVertexWeights(skinClusterHandle, vertexHandle):
    """given a skin and a vertex, return the weights of all influences on that vertex
    skin cluster can be obtained with skinClusterForObject
    vertex can be obtained with selectedObjectVertexList(True)
    :param skinClusterHandle: name of the current skincluster
    :type skinClusterHandle: string
    :param vertexHandle: name of current vertex
    :type vertexHandle: string
    :return: list of weight values
    :rtype: list
    """
    return cmds.skinPercent(skinClusterHandle, vertexHandle, q=True, v=True)
def selectVertices(meshVertexPairs):
    """ select vertices based on given vertex pairs
    :param meshVertexPairs: list of (mesh, vertex) pairs of which the vertex part gets selected
    :type meshVertexPairs: list
    """
    cmds.select([vertex for _, vertex in meshVertexPairs])
    # make sure the dag menu procedures exist before switching component modes
    mel.eval('if( !`exists doMenuComponentSelection` ) eval( "source dagMenuProc" );')
    skinMesh = meshVertexPairs[0][0]
    objType = cmds.objectType(skinMesh)
    if objType == "transform":
        # resolve the transform to its shape to decide on the component mode
        shape = cmds.listRelatives(skinMesh, c=1, s=1, fullPath=1)[0]
        objType = cmds.objectType(shape)
    if objType in ("nurbsSurface", "nurbsCurve"):
        mel.eval('doMenuNURBComponentSelection("%s", "controlVertex");' % skinMesh)
    elif objType == "lattice":
        mel.eval('doMenuLatticeComponentSelection("%s", "latticePoint");' % skinMesh)
    elif objType == "mesh":
        mel.eval('doMenuComponentSelection("%s", "vertex");' % skinMesh)
def _eventFilterTargets():
    """We must return all widgets that receive strong focus that we want to tap into
    such as the main window or 3D viewports that are not simple Qt widgets.
    :return: the maya window and the active 3d viewport
    :rtype: tuple
    """
    from SkinningTools.UI.qt_util import wrapinstance, QMainWindow, QWidget
    from maya.OpenMayaUI import MQtUtil, M3dView
    mainWin = wrapinstance(int(MQtUtil.mainWindow()), QMainWindow)
    active_view = M3dView.active3dView()
    # wrap the raw C++ pointer of the viewport widget so Qt can use it
    active_view_ptr = active_view.widget()
    qt_active_view = wrapinstance(int(active_view_ptr), QWidget)
    return mainWin, qt_active_view
def convertlistToOpenMayaArray(inList, arrayType):
    """convert given list to an openmaya arraytype
    :param inList: list of objects to be added to arraytype
    :type inList: list
    :param arrayType: any openmaya array type
    :type arrayType: OpenMaya.<array>
    :return: the array filled with data
    :rtype: OpenMaya.<array>
    """
    outArray = arrayType()
    for entry in inList:
        outArray.append(entry)
    return outArray
class _EventFilter(QObject):
    """ event filter that forwards up/down arrow key presses to
    weightPaintUtils.pickWalkSkinClusterInfluenceList
    """
    _singleton = None

    @staticmethod
    def singleton():
        """ lazily created single shared instance of this filter
        :return: the current object singleton
        :rtype: _EventFilter
        """
        if _EventFilter._singleton is None:
            _EventFilter._singleton = _EventFilter()
        return _EventFilter._singleton

    def eventFilter(self, obj, event):
        from SkinningTools.UI.qt_util import Qt, QEvent
        directions = {Qt.Key_Up: "up", Qt.Key_Down: "down"}
        if event.type() != QEvent.KeyPress:
            return False
        key = event.key()
        if key not in directions:
            return False
        return weightPaintUtils.pickWalkSkinClusterInfluenceList(directions[key])
def dccInstallEventFilter():
    """ install the eventfilter on the current dcc
    :return: `True` if successful
    :rtype: bool
    """
    # the docstring must be the first statement of the function; it previously sat
    # below the import, where it was a plain no-op string expression
    from SkinningTools.UI.markingMenu import MarkingMenuFilter
    eventFilterTargets = _cleanEventFilter()
    for eventFilterTarget in eventFilterTargets:
        eventFilterTarget.installEventFilter(_EventFilter.singleton())
        eventFilterTarget.installEventFilter(MarkingMenuFilter.singleton())
    return True
def _cleanEventFilter():
    """ remove the eventfilter on the current dcc
    :return: list of widgets from which the eventfilters were removed
    :rtype: list
    """
    # the docstring must be the first statement of the function; it previously sat
    # below the import, where it was a plain no-op string expression
    from SkinningTools.UI.markingMenu import MarkingMenuFilter
    widgets = _eventFilterTargets()
    for widget in widgets:
        try:
            widget.removeEventFilter(_EventFilter.singleton())
            widget.removeEventFilter(MarkingMenuFilter.singleton())
        except Exception as e:
            # removal is best effort; only report failures when debugging
            if _DEBUG:
                print(e)
    return widgets
def getIds(inList):
    """ forwarded function to convert components to indices
    :param inList: list of maya components
    :type inList: list
    :return: list of indices
    :rtype: list
    """
    return shared.convertToIndexList(inList)
def textProgressBar(progress, message=''):
    """ set the current progress of a function using text only
    :param progress: percentage of progress (expected in the 0 - 100 range, clamped otherwise)
    :type progress: float
    :param message: the message to be displayed next to the bar
    :type message: string
    """
    barLength = 10
    # clamp the percentage and normalize it into the 0.0 - 1.0 range
    fraction = min(max(progress, 0) / 100.0, 1)
    filled = int(round(barLength * fraction))
    bar = "#" * filled + "-" * (barLength - filled)
    text = "[%s] %.1f%%, %s" % (bar, fraction * 100, message)
    OpenMaya.MGlobal.displayInfo(text)
def displayToolTips():
    """force display tool tips in maya as these are turned off by default"""
    # popupMode enables popup help (hover tooltips) for maya controls
    cmds.help(popupMode=True)
| """
Maya stub of imports used in the UI library.
The idea is to make this file as short as possible
while leaving room for other packages to implement features.
"""
import functools, os, sys, platform
from SkinningTools.Maya.tools import shared, joints
from SkinningTools.Maya.tools import weightPaintUtils
from SkinningTools.UI.qt_util import QObject, QApplication
from maya import cmds, mel, OpenMayaUI
from maya.api import OpenMaya
from SkinningTools.UI.utils import *
_DEBUG = getDebugState()
def get_maya_window():
    """ get the current maya window as a qt widget
    :return: the maya main window widget (or a wrapped pointer to it)
    :rtype: QWidget
    """
    for widget in QApplication.allWidgets():
        if widget.objectName() == "MayaWindow":
            return widget
    # fall back to wrapping the raw C++ pointer; int() replaces the old long()
    # call, which no longer exists under Python 3 (Maya 2022+)
    return wrapinstance(int(OpenMayaUI.MQtUtil.mainWindow()))
def selectedObjectVertexList(includeObjects=False):
    """ get the current object/component selection
    :param includeObjects: if `True` each vertex is paired with the mesh it belongs to, if `False` only the vertex components are returned
    :type includeObjects: bool
    :return: list of vertices, or list of (mesh, vertex) pairs
    :rtype: list
    """
    selection = cmds.ls(sl=True, l=True)
    if not selection:
        return []
    vertices = shared.convertToVertexList(selection) or []
    if not includeObjects:
        return vertices
    return [(vertex.split('.', 1)[0], vertex) for vertex in vertices]
# thin aliases forwarding maya/tool helpers, so UI code only has to import this api module
skinPercent = cmds.skinPercent
meshVertexList = shared.convertToVertexList
addCleanJoint = joints.addCleanJoint
skinClusterForObject = shared.skinCluster
# same skincluster lookup, pre-bound with silent=True (headless / no prompts)
skinClusterForObjectHeadless = functools.partial(shared.skinCluster, silent=True)
dec_undo = shared.dec_undo
def selectedSkinnedShapes():
    """ get the shapes of skinned objects
    :return: list of shapes
    :rtype: list
    """
    shapes = set(cmds.ls(sl=True, l=True, o=True, type='shape') or [])
    transforms = cmds.ls(sl=True, l=True, o=True, type='transform')
    if transforms:
        # also consider shapes parented under selected transforms
        shapes.update(cmds.listRelatives(transforms, c=True, f=True, type='shape') or [])
    skinnedShapes = []
    for cluster in cmds.ls(type='skinCluster'):
        geometry = cmds.skinCluster(cluster, q=True, g=True) or []
        for shape in cmds.ls(geometry, l=True) or []:
            if shape in shapes:
                skinnedShapes.append(shape)
    return skinnedShapes
def loadPlugin(plugin):
    """ load the given maya plugin when it is not already loaded and registered
    :param plugin: name or path of the plugin to load
    :type plugin: string
    """
    loaded = cmds.pluginInfo(plugin, q=True, loaded=True)
    registered = cmds.pluginInfo(plugin, q=True, registered=True)
    if not registered or not loaded:
        try:
            cmds.loadPlugin(plugin)
        except Exception as e:
            # best effort: report the failure but keep the tool running
            print(e)
def getMayaVersion():
    """ get the current general maya version in which this tool is launched
    :return: maya version number as string
    :rtype: string
    """
    onWindows = platform.system() == "Windows"
    if onWindows and "maya" in sys.executable:
        # derive the version from the install path, e.g. ".../Maya2020/bin/maya.exe" -> "2020"
        return sys.executable.split("Maya")[-1].split(os.sep)[0]
    if not onWindows:
        return cmds.about(version=1)
    # fallback: api version with the last two digits stripped off
    return str(cmds.about(apiVersion=True))[:-2]
def getPluginSuffix():
    """ get the current plugin suffix based on the os that we are running
    :return: suffix for plugin files specific to a particular os
    :rtype: string
    """
    system = platform.system()
    if system == "Darwin":
        return ".bundle"
    if system == "Linux":
        return ".so"
    # windows (and anything else) uses maya's default plugin extension
    return ".mll"
def getPlugin():
    """ get the smoothbrush plugin based on information gathered on how maya is run
    :return: the path of the plugin to load
    :rtype: string
    """
    relativePath = "plugin/skinToolWeightsCpp/comp/Maya%s/plug-ins/SkinCommands%s" % (getMayaVersion(), getPluginSuffix())
    return os.path.join(os.path.dirname(__file__), relativePath)
def connectSelectionChangedCallback(callback):
    """ connect a callback to a selection changed event
    :param callback: the callback to connect
    :type callback: function
    :return: scriptjob id holding the callback (pass it to disconnectCallback to remove)
    :rtype: int
    """
    return cmds.scriptJob(e=('SelectionChanged', callback))
def disconnectCallback(handle):
    """ disconnect a callback present in the scene
    :param handle: the scriptjob id returned by connectSelectionChangedCallback
    :type handle: int
    """
    if isinstance(handle, int):  # in the future we can also handle MCallbackId from API callbacks here
        cmds.scriptJob(kill=handle, force=True)
    else:
        print("Unrecognized handle")
def getApiDir():
    """ get the path to the current file
    :return: absolute path of the directory containing this api file
    :rtype: string
    """
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def dec_loadPlugin(input):
    """ forwarded decorator function to load plugins
    :note: maybe remove this? too many similar functions? combine them all together
    :param input: file name of the (python)plugin to load, relative to this package's plugin folder
    :type input: string
    :return: the decorator produced by shared.dec_loadPlugin
    """
    return shared.dec_loadPlugin(os.path.join(getApiDir(), "plugin/%s" % input))
def skinClusterInfluences(skinCluster):
    """ forwarded function to get joint information from skincluster
    :param skinCluster: skincluster to gather data from
    :type skinCluster: string
    :return: list of all joints(fullpath) connected to the skincluster
    :rtype: list
    """
    # the influences drive the .matrix array, so its incoming connections are the joints
    return cmds.ls(cmds.listConnections("%s.matrix" % skinCluster, source=True), l=1)
def getSkinWeights(geometry):
    """ forwarded function to get the skinning data of a mesh (delegates to shared.getWeights)
    :param geometry: mesh to get data from
    :type geometry: string
    :return: list of all weights on the mesh
    :rtype: list
    """
    return shared.getWeights(geometry)
def setSkinWeights(geometry, skinCluster, weights, influenceIndices=None):
    """ forwarded function to set the skinning data on a mesh
    :param geometry: mesh to set data to
    :type geometry: string
    :param skinCluster: skincluster attached to the current geometry
    :type skinCluster: string
    :param weights: list of weights to set
    :type weights: list
    :param influenceIndices: list of joints to pair with the weights; when omitted
        the weights are applied through the SkinWeights plugin command instead
    :type influenceIndices: list
    """
    if influenceIndices:
        # list() is required: under Python 3 zip() yields an iterator, which the
        # tv flag of skinPercent does not accept
        cmds.skinPercent(skinCluster, geometry, tv=list(zip(influenceIndices, weights)))
    else:
        cmds.SkinWeights(geometry, skinCluster, nwt=weights)
def getSingleVertexWeight(skinClusterHandle, vertexHandle, influenceHandle):
    """given a skin, a vertex and a joint, return the weight
    skin cluster can be obtained with skinClusterForObject
    vertex can be obtained with selectedObjectVertexList(True), joint can be obtained with skinClusterInfluences
    :param skinClusterHandle: name of the current skincluster
    :type skinClusterHandle: string
    :param vertexHandle: name of current vertex
    :type vertexHandle: string
    :param influenceHandle: name of bone to get data from
    :type influenceHandle: string
    :return: the weight of the given influence on the given vertex
    :rtype: float
    """
    return cmds.skinPercent(skinClusterHandle, vertexHandle, q=True, t=influenceHandle)
def getSingleVertexWeights(skinClusterHandle, vertexHandle):
    """given a skin and a vertex, return the weights of all influences on that vertex
    skin cluster can be obtained with skinClusterForObject
    vertex can be obtained with selectedObjectVertexList(True)
    :param skinClusterHandle: name of the current skincluster
    :type skinClusterHandle: string
    :param vertexHandle: name of current vertex
    :type vertexHandle: string
    :return: list of weight values
    :rtype: list
    """
    return cmds.skinPercent(skinClusterHandle, vertexHandle, q=True, v=True)
def selectVertices(meshVertexPairs):
    """ select vertices based on given vertex pairs
    :param meshVertexPairs: list of (mesh, vertex) pairs of which the vertex part gets selected
    :type meshVertexPairs: list
    """
    cmds.select([vertex for _, vertex in meshVertexPairs])
    # make sure the dag menu procedures exist before switching component modes
    mel.eval('if( !`exists doMenuComponentSelection` ) eval( "source dagMenuProc" );')
    skinMesh = meshVertexPairs[0][0]
    objType = cmds.objectType(skinMesh)
    if objType == "transform":
        # resolve the transform to its shape to decide on the component mode
        shape = cmds.listRelatives(skinMesh, c=1, s=1, fullPath=1)[0]
        objType = cmds.objectType(shape)
    if objType in ("nurbsSurface", "nurbsCurve"):
        mel.eval('doMenuNURBComponentSelection("%s", "controlVertex");' % skinMesh)
    elif objType == "lattice":
        mel.eval('doMenuLatticeComponentSelection("%s", "latticePoint");' % skinMesh)
    elif objType == "mesh":
        mel.eval('doMenuComponentSelection("%s", "vertex");' % skinMesh)
def _eventFilterTargets():
"""We must return all widgets that receive strong focus that we want to tap into
such as the main window or 3D viewports that are not simple Qt widgets.
:return: the maya window and the active 3d viewport
:rtype: list
"""
from SkinningTools.UI.qt_util import wrapinstance, QMainWindow, QWidget
from maya.OpenMayaUI import MQtUtil, M3dView
mainWin = wrapinstance(int(MQtUtil.mainWindow()), QMainWindow)
active_view = M3dView.active3dView()
active_view_ptr = active_view.widget()
qt_active_view = wrapinstance(int(active_view_ptr), QWidget)
return mainWin, qt_active_view
def convertlistToOpenMayaArray(inList, arrayType):
"""convert given list to an openmaya arraytype
:param inList: list of objects to be added to arraytype
:type inList: list
:param arrayType: any openmaya array type
:type arrayType: OpenMaya.<array>
:return: the array filled with data
:rtype: OpenMaya.<array>
"""
array = arrayType()
for elem in inList:
array.append(elem)
return array
class _EventFilter(QObject):
""" eventfilter class to allow extra functionality to be added to the current maya qt eventfilters
"""
_singleton = None
@staticmethod
def singleton():
""" singleton of the current class for ease of identifying
:return: the current object singleton
:rtype: cls
"""
if _EventFilter._singleton is None:
_EventFilter._singleton = _EventFilter()
return _EventFilter._singleton
def eventFilter(self, obj, event):
from SkinningTools.UI.qt_util import Qt, QEvent
_arrows = {Qt.Key_Up: "up", Qt.Key_Down: "down"}
if event.type() == QEvent.KeyPress and event.key() in _arrows.keys():
return weightPaintUtils.pickWalkSkinClusterInfluenceList(_arrows[event.key()])
return False
def dccInstallEventFilter():
from SkinningTools.UI.markingMenu import MarkingMenuFilter
""" install the eventfilter on the current dcc
:return: `True` if succesfull
:rtype: bool
"""
eventFilterTargets = _cleanEventFilter()
for eventFilterTarget in eventFilterTargets:
eventFilterTarget.installEventFilter(_EventFilter.singleton())
eventFilterTarget.installEventFilter(MarkingMenuFilter.singleton())
return True
def _cleanEventFilter():
from SkinningTools.UI.markingMenu import MarkingMenuFilter
""" remove the eventfilter on the current dcc
:return: list of widgets from which the eventfilters where removed
:rtype: list
"""
widgets = _eventFilterTargets()
for widget in widgets:
try:
widget.removeEventFilter(_EventFilter.singleton())
widget.removeEventFilter(MarkingMenuFilter.singleton())
except Exception as e:
if _DEBUG:
print(e)
return widgets
def getIds(inList):
""" forwarded function to conevert components to indices
:param inlist: list of maya components
:type inlist: list
:return: list of indices
:rtype: list
"""
return shared.convertToIndexList(inList)
def textProgressBar(progress, message=''):
""" set the current progress of a function using test only
:param progress: percentage of progress
:type progress: float
:param message: the message to be displayed
:type message: string
"""
barLength = 10
status = ""
if progress <= 0:
progress = 0
progress = progress / 100.0
if progress >= 1:
progress = 1
block = int(round(barLength * progress))
text = "[%s] %.1f%%, %s" % ("#" * block + "-" * (barLength - block), progress * 100, message)
OpenMaya.MGlobal.displayInfo(text)
def displayToolTips():
"""force display tool tips in maya as these are turned off by default"""
cmds.help(popupMode=True)
| en | 0.756651 | Maya stub of imports used in the UI library.
The idea is to make this file as short as possible
while leaving room for other packages to implement features. get the current maya window as a qt widget
:return: the widget or none
:rtype: QWidget get the current object/component selection
:param includeObjects: if `True` will return the name of the object from which the vertex comes from, if `False` will only return the vertices
:type includeObjects: bool
:return: list of vertices
:rtype: list get the shapes of skinned objects
:return: list of shapes
:rtype: list get the current general mayaversion in which this tool is launched
:return: maya version number as string
:rtype: string get the current plugin suffix based on the os that we are running
:return: suffix for plugin files specific to a particular os
:rtype: string get the smoothbrush plugin based on information gathered on how maya is run
:return: the path of the plugin to load
:rtype: string connect a callback to a selection changed event
:param callback: the callback to connect
:type callback: function
:return: scriptjob that holds the callback
:rtype: string disconnect a callback present in the scene
:param handle: the name of the scriptjob to remove
:type handle: string # in the future we can also handle MCallbackId from API callbacks here get the path to the current file
:return: path of the api file
:rtype: string forwarded decorator function to load plugins
:note: maybe remove this? too many similar functions? combine them all together
:param input: name of the (python)plugin to load
:type input: string forwarded function to get joint information from skincluster
:param skinCluster: skincluster to gather data from
:type skinCluster: string
:return: list of all joints(fullpath) connected to the skincluster
:rtype: list forwarded function to get the skinning data of a mesh
:param geometry: mesh to get data from
:type geometry: string
:return: list of all weights on the mesh
:rtype: list forwarded function to set the skinning data on a mesh
:param geometry: mesh to set data to
:type geometry: string
:param skinCluster: skincluster attached to the current geometry
:type skinCluster: string
:param weights: list of weights to set
:type weights: list
:param influenceIndices: list of joints
:type influenceIndices: list given a skin, a vertex and a joint, return the weight
skin cluster can be obtained with skinClusterForObject
mvertex can be obtained with selectedObjectVertexList(True), joint can be obtained with skinClusterInfluences
:param skinClusterHandle: name of the current skincluster
:type skinClusterHandle: string
:param vertexHandle: name of current vertex
:type vertexHandle: string
:param influenceHandle: name of bone to get data from
:type influenceHandle: string
:return: list of influences
:rtype: list given a skin and a vertex, return the weight
skin cluster can be obtained with skinClusterForObject
vertex can be obtained with selectedObjectVertexList(True)
:param skinClusterHandle: name of the current skincluster
:type skinClusterHandle: string
:param vertexHandle: name of current vertex
:type vertexHandle: string
:return: list of influences
:rtype: list select vertices based on given vertex pairs
:param meshVertexPairs: list of objects that hold a list of which the second element is a vertex
:type meshVertexPairs: list We must return all widgets that receive strong focus that we want to tap into
such as the main window or 3D viewports that are not simple Qt widgets.
:return: the maya window and the active 3d viewport
:rtype: list convert given list to an openmaya arraytype
:param inList: list of objects to be added to arraytype
:type inList: list
:param arrayType: any openmaya array type
:type arrayType: OpenMaya.<array>
:return: the array filled with data
:rtype: OpenMaya.<array> eventfilter class to allow extra functionality to be added to the current maya qt eventfilters singleton of the current class for ease of identifying
:return: the current object singleton
:rtype: cls install the eventfilter on the current dcc
:return: `True` if succesfull
:rtype: bool remove the eventfilter on the current dcc
:return: list of widgets from which the eventfilters where removed
:rtype: list forwarded function to conevert components to indices
:param inlist: list of maya components
:type inlist: list
:return: list of indices
:rtype: list set the current progress of a function using test only
:param progress: percentage of progress
:type progress: float
:param message: the message to be displayed
:type message: string force display tool tips in maya as these are turned off by default | 2.127361 | 2 |
setup.py | adaamko/surface_realization | 0 | 6616477 | from setuptools import find_packages, setup
setup(
name='surface_realization',
version='0.1',
description='UD surface realization using IRTGs',
url='http://github.com/adaamko/surface_realization',
author='<NAME>,<NAME>',
author_email='<EMAIL>,<EMAIL>',
license='MIT',
install_requires=[
'flask',
'requests',
'stanza',
'tqdm'
],
packages=find_packages(),
zip_safe=False)
| from setuptools import find_packages, setup
setup(
name='surface_realization',
version='0.1',
description='UD surface realization using IRTGs',
url='http://github.com/adaamko/surface_realization',
author='<NAME>,<NAME>',
author_email='<EMAIL>,<EMAIL>',
license='MIT',
install_requires=[
'flask',
'requests',
'stanza',
'tqdm'
],
packages=find_packages(),
zip_safe=False)
| none | 1 | 1.00321 | 1 | |
qtpyvcp/core.py | awigen/qtpyvcp | 0 | 6616478 | <filename>qtpyvcp/core.py
from qtpyvcp.utilities.prefs import Prefs
from qtpyvcp.utilities.info import Info
| <filename>qtpyvcp/core.py
from qtpyvcp.utilities.prefs import Prefs
from qtpyvcp.utilities.info import Info
| none | 1 | 1.029974 | 1 | |
src/eeyore_nlp/utils/__init__.py | dpasse/eeyore | 0 | 6616479 | <filename>src/eeyore_nlp/utils/__init__.py
from .merger import Merger
from .spell_checker import SpellChecker
from .relationships import RelationshipBuilder
| <filename>src/eeyore_nlp/utils/__init__.py
from .merger import Merger
from .spell_checker import SpellChecker
from .relationships import RelationshipBuilder
| none | 1 | 1.025118 | 1 | |
test02.py | yulian/raspberrypi | 0 | 6616480 | <reponame>yulian/raspberrypi<filename>test02.py
# 기상 관측 이래, 서울의 최저 기온이 가장 낮았던 날은 언제였고, 몇 도였을까?
import csv
min_temp = 100
mix_date = ''
f = open('seoul.csv', 'r')
data = csv.reader(f)
header = next(data)
for row in data:
if len(row) != 5:
continue
if row[-2] == '':
row[-2] = 100
row[-2] = float(row[-2])
if min_temp > row[-2]:
min_date = row[0]
min_temp = row[-2]
f.close()
print('Date:'+str(min_date)+', Temp:'+str(min_temp))
| # 기상 관측 이래, 서울의 최저 기온이 가장 낮았던 날은 언제였고, 몇 도였을까?
import csv
min_temp = 100
mix_date = ''
f = open('seoul.csv', 'r')
data = csv.reader(f)
header = next(data)
for row in data:
if len(row) != 5:
continue
if row[-2] == '':
row[-2] = 100
row[-2] = float(row[-2])
if min_temp > row[-2]:
min_date = row[0]
min_temp = row[-2]
f.close()
print('Date:'+str(min_date)+', Temp:'+str(min_temp)) | ko | 1.00007 | # 기상 관측 이래, 서울의 최저 기온이 가장 낮았던 날은 언제였고, 몇 도였을까? | 3.593757 | 4 |
project/vis_label.py | hhaAndroid/u2net | 1 | 6616481 | import os.path as osp
import cv2
import mmcv
import numpy as np
if __name__ == '__main__':
out_dir = '/home/SENSETIME/huanghaian/dataset/project/unet'
paths = mmcv.scandir(out_dir, '.json')
for i, path in enumerate(paths):
img_path = osp.join(out_dir, path[:-4] + 'jpg')
image = mmcv.imread(img_path)
json_path = osp.join(out_dir, path)
json_data = mmcv.load(json_path)
points = np.array(json_data['points']).reshape(-1, 2)
print(points)
for point in points:
cv2.circle(image, tuple(point), 5, (255, 0, 0), -1)
cv2.namedWindow('img', 0)
mmcv.imshow(image, 'img')
| import os.path as osp
import cv2
import mmcv
import numpy as np
if __name__ == '__main__':
out_dir = '/home/SENSETIME/huanghaian/dataset/project/unet'
paths = mmcv.scandir(out_dir, '.json')
for i, path in enumerate(paths):
img_path = osp.join(out_dir, path[:-4] + 'jpg')
image = mmcv.imread(img_path)
json_path = osp.join(out_dir, path)
json_data = mmcv.load(json_path)
points = np.array(json_data['points']).reshape(-1, 2)
print(points)
for point in points:
cv2.circle(image, tuple(point), 5, (255, 0, 0), -1)
cv2.namedWindow('img', 0)
mmcv.imshow(image, 'img')
| none | 1 | 2.764923 | 3 | |
src/aoc/day8.py | miguellobato84/aoc2021 | 0 | 6616482 | import argparse
import logging
import sys
from numpy.lib.arraysetops import union1d, unique
from numpy.lib.index_tricks import c_
from aoc import __version__
import numpy as np
import collections
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "MIT"
_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
from collections import defaultdict
def state_to_code(state, word):
return "".join([str(state[letter]) for letter in word])
def code_to_number(code):
code = list(map(lambda x:int(x), list(code)))
valid_values = {
1:[3,6],
2:[1,3,4,5,7],
3:[1,3,4,6,7],
4:[2,3,4,6],
5:[1,2,4,6,7],
6:[1,2,4,5,6,7],
7:[1,3,6],
8:[1,2,3,4,5,6,7],
9:[1,2,3,4,6,7],
0:[1,2,3,5,6,7],
}
for n in valid_values:
valid_value = len(code) == len(valid_values[n]) and len(code) == len(np.intersect1d(code, valid_values[n]))
if valid_value: return n
return -1
def is_valid_state(numbers, state):
codes = [state_to_code(state, word) for word in numbers]
decoded = [code_to_number(code) for code in codes]
#logging.debug(f"Codes: {codes}, numbers: {numbers}, decoded: {decoded}, state: {state}")
return all([x >= 0 for x in decoded])
def _brute_force(numbers, state, pending_steps = "abcdefg"):
result = []
if pending_steps[0] == "g":
for combi in state["g"]:
c_state = {**state, **{"g":combi}}
result.append(c_state)
else:
for combi in state[pending_steps[0]]:
c_state = {**state, **{pending_steps[0]:combi}}
result = result + _brute_force(numbers, c_state, pending_steps[1:])
return result
def brute_force(numbers, state):
combis = _brute_force(numbers, state)
combis = [x for x in combis if len(np.unique(list(x.values()))) == 7 and is_valid_state(numbers, x)]
logging.debug(f"Found {len(combis)} combination")
assert len(combis) == 1
return combis[0]
def ex1(lines):
lines = [line[1] for line in lines]
lengths = [len(item) for sublist in lines for item in sublist]
numbers = [1 if x in (2,4,3,7) else 0 for x in lengths]
return sum(numbers)
def ex2(lines):
n = []
for a,b in lines:
n.append(_ex2(a,b))
return sum(n)
def _ex2(numbers, output):
logging.debug(numbers)
all = list(range(1,8))
state = {}
#step 1: only valid values
for n in [x for x in numbers if len(x) in [2,3,4]]:
if len(n) == 4:
possibilities = {x:[2,4,3,6] for x in n}
elif len(n) == 3:
possibilities = {x:[1,3,6] for x in n}
elif len(n) == 2:
possibilities = {x:[3,6] for x in n}
state = {**state, **{x:np.intersect1d(state[x] if x in state else all, possibilities[x]) for x in n}}
logging.debug(f"Step 1: {state}")
#step 2: isolate unique
unique = np.bincount([x for sublist in state.values() for x in sublist])
unique = [i for x,i in enumerate(unique) if x == 1]
state = {**state, **{x:unique for x in state if len(np.intersect1d(unique, state[x])) == 1}}
logging.debug(f"Step 2: {state}")
#step 3: isolate pairs
#TODO
#step 4: generate others combinations
used_values = [x for sublist in state.values() for x in sublist]
not_used_values = [i for i in all if i not in used_values]
state = {**{x:not_used_values for x in "abcdefg"}, **state}
logging.debug(f"Step 4: {state}")
#step 5: brute force
state = brute_force(numbers, state)
logging.debug(f"Step 5: {state}")
#step 6: translate output
logging.debug(output)
codes = [state_to_code(state, word) for word in output]
n = int("".join([str(code_to_number(code)) for code in codes]))
logging.debug(n)
return n | import argparse
import logging
import sys
from numpy.lib.arraysetops import union1d, unique
from numpy.lib.index_tricks import c_
from aoc import __version__
import numpy as np
import collections
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "MIT"
_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
from collections import defaultdict
def state_to_code(state, word):
return "".join([str(state[letter]) for letter in word])
def code_to_number(code):
code = list(map(lambda x:int(x), list(code)))
valid_values = {
1:[3,6],
2:[1,3,4,5,7],
3:[1,3,4,6,7],
4:[2,3,4,6],
5:[1,2,4,6,7],
6:[1,2,4,5,6,7],
7:[1,3,6],
8:[1,2,3,4,5,6,7],
9:[1,2,3,4,6,7],
0:[1,2,3,5,6,7],
}
for n in valid_values:
valid_value = len(code) == len(valid_values[n]) and len(code) == len(np.intersect1d(code, valid_values[n]))
if valid_value: return n
return -1
def is_valid_state(numbers, state):
codes = [state_to_code(state, word) for word in numbers]
decoded = [code_to_number(code) for code in codes]
#logging.debug(f"Codes: {codes}, numbers: {numbers}, decoded: {decoded}, state: {state}")
return all([x >= 0 for x in decoded])
def _brute_force(numbers, state, pending_steps = "abcdefg"):
result = []
if pending_steps[0] == "g":
for combi in state["g"]:
c_state = {**state, **{"g":combi}}
result.append(c_state)
else:
for combi in state[pending_steps[0]]:
c_state = {**state, **{pending_steps[0]:combi}}
result = result + _brute_force(numbers, c_state, pending_steps[1:])
return result
def brute_force(numbers, state):
combis = _brute_force(numbers, state)
combis = [x for x in combis if len(np.unique(list(x.values()))) == 7 and is_valid_state(numbers, x)]
logging.debug(f"Found {len(combis)} combination")
assert len(combis) == 1
return combis[0]
def ex1(lines):
lines = [line[1] for line in lines]
lengths = [len(item) for sublist in lines for item in sublist]
numbers = [1 if x in (2,4,3,7) else 0 for x in lengths]
return sum(numbers)
def ex2(lines):
n = []
for a,b in lines:
n.append(_ex2(a,b))
return sum(n)
def _ex2(numbers, output):
logging.debug(numbers)
all = list(range(1,8))
state = {}
#step 1: only valid values
for n in [x for x in numbers if len(x) in [2,3,4]]:
if len(n) == 4:
possibilities = {x:[2,4,3,6] for x in n}
elif len(n) == 3:
possibilities = {x:[1,3,6] for x in n}
elif len(n) == 2:
possibilities = {x:[3,6] for x in n}
state = {**state, **{x:np.intersect1d(state[x] if x in state else all, possibilities[x]) for x in n}}
logging.debug(f"Step 1: {state}")
#step 2: isolate unique
unique = np.bincount([x for sublist in state.values() for x in sublist])
unique = [i for x,i in enumerate(unique) if x == 1]
state = {**state, **{x:unique for x in state if len(np.intersect1d(unique, state[x])) == 1}}
logging.debug(f"Step 2: {state}")
#step 3: isolate pairs
#TODO
#step 4: generate others combinations
used_values = [x for sublist in state.values() for x in sublist]
not_used_values = [i for i in all if i not in used_values]
state = {**{x:not_used_values for x in "abcdefg"}, **state}
logging.debug(f"Step 4: {state}")
#step 5: brute force
state = brute_force(numbers, state)
logging.debug(f"Step 5: {state}")
#step 6: translate output
logging.debug(output)
codes = [state_to_code(state, word) for word in output]
n = int("".join([str(code_to_number(code)) for code in codes]))
logging.debug(n)
return n | en | 0.314879 | #logging.debug(f"Codes: {codes}, numbers: {numbers}, decoded: {decoded}, state: {state}") #step 1: only valid values #step 2: isolate unique #step 3: isolate pairs #TODO #step 4: generate others combinations #step 5: brute force #step 6: translate output | 2.498659 | 2 |
python_code/params.py | mmaleck/chondrocyte | 1 | 6616483 | """
This file is part of the chondrocyte modelling project at Simula
Research Laboratory, Norway. Refer to the files README and LICENSE for
more information about the project as well as terms of distribution.
Author : <NAME>, <NAME>, M.M.Malacker
Data created : July, 2021
Python version : 3.8.2
"""
from math import sqrt
params_dict = dict(
clamp_conc = False, # Toggle clamping of the internal concentrations (for debugging)
apply_Vm = False, # Toggle setting of the membrane voltage
clamp_Vm = True, # If apply_Vm is true, then one of clamp_Vm, ramp_Vm and step_Vm must be true to define what voltage is to be applied
step_Vm = False,
ramp_Vm = False,
# Clamp intracellular concentrations to simulate experimental conditions
clamp_Na_i = True,
clamp_K_i = True,
calmp_Ca_i = False,
clamp_H_i = False,
clamp_Cl_i = False,
V_final = 100.0, # Final value of membrane voltage when ramped (mV)
# Time-stepping information
t_final = 50000.0, # Final time (s)
dt = 1e0,
# External concentrations
Na_o = 140.0, # Clamped external sodium concentration (mM/l), 130 for cardiac; 140 for synovial fluid, 240-250 for chondrocyte matrix
K_o_0 = 5.4, # # Clamped external potassium concentration (mM/l), 5.4 for cardiac; 5 for synovial fluid, 7-12 for chondrocyte matrix
step_K_o = False,
Ca_o = 13, # Clamped external calcium concentration (mM/l), 1.8-2.0 for cardiac; 1.5 for synovial fluid, 6-15 for chondrocyte matrix
H_o = 10**(-7.4), # Clamped external hydrogen concentration (mM/l)
Cl_o = 140.0, # Clamped external chloride concentration (mM/l)
# Initial conditions
V_0 = -66.725, # Initial membrane potential (mV)
Na_i_0 = 25.0, # Initial internal sodium concentration (mM/l), 8.5 for cardiac; 40??? for chondrocyte - using either 12.0 or 20.0
Na_i_clamp = 25.0,
K_i_0 = 180.0, # Initial internal potassium concentration (mM/l), 120-140 for cardiac; 120-140 for chondrocyte
Ca_i_0 = 0.00001, # Initial internal calcium concentration (mM/l), 0.000067 mM/l for cardiac, 0.00001 - 0.00005 for chondrocyte
H_i_0 = 3.47426156721507e-10, # Initial internal hydrogen concentration (mM/l)
Cl_i_0 = 60.0, # Initial internal chloride concentration (mM/l)
# Initial value of Kv activation
a_ur_0 = 1.95403736678201e-04,
i_ur_0 = 9.99896050539933e-01,
cal_0 = 5.08345961310772e-05, # From Nygren, et al 1998
# Universal constants
R = 8314.472, # Universal gas constant (mJ K^-1 mol^-1)
T = 310.15, # Normal body temperature (K)
F = 96485.34, # Faradays constant (C mol^-1)
# Charges on each of the ions
z_Na = 1, # Charge on the sodium ion
z_K = 1, # Charge on the potassium ion
z_Ca = 2, # Charge on the calcium ion
z_H = 1, # Charge on the calcium ion
z_Cl = 1, # Charge on the chloride ion
# Cell parameters
C_m = 6.3, # Membrane capacitance, (pF)
vol_i_0 = 0.005884, # Internal volume (mL)
C_myo = 50.0, # (pF); myocyte capacitance from Nygren, et al, 1998, for scaling
# Constants related to external stimulation
t_cycle = 5.0, # Total cycle time (s)
t_stim = 1.0, # Stimulation time/cycle (s)
I_stim_bar = 0.0, # Stimulation current magnitude (pA)
# Background conductances
g_Na_b_bar = 0.1, # Background sodium leakage conductance (pS)
I_Na_b_scale = 1.0,
g_K_b_bar = 0.07, # Background potassium leakage conductance (pS), No background K in Nygren1998
g_Cl_b_bar = 0.05, # Background chloride leakage conductance (pS)
g_leak = 0.0, # Background seal leakage - set to 0.5
I_NaK_scale = 1.625,
K_NaK_K = 2.1, # (mmol/L) Nygren, et al, 1998
K_NaK_Na = 17.5, # (mmol/L) Nygren, et al, 1998
K_NaCa = 0.0374842, # pA/(mmol/L)4 (Nygren1998) - 0.04 - default
gamma_Na = 0.45, # same, dimensionless
d_NaCa = 0.0003, # same, (mmol/L)^4
# Constants related to the sodium-hydrogen exchanger
n_H = 1,
m_H = 3,
K_H_i_mod = 3.07e-5,
K_H_o_mod = 4.8e-7,
k1_p = 10.5,
k1_m = 0.201,
k2_p = 15.8,
k2_m = 183,
K_Na_i = 16.2,
K_Na_o = 195,
K_H_i = 6.05e-4,
K_H_o = 1.62e-3,
N_NaH_channel = 4899,
I_NaH_scale = 1.00,
# Constants related to the calcium pump
I_Ca_ATP_bar = 6.0, # ICaP = 4.0 (pA), Nygren1998
I_Ca_ATP_scale = 1.0,
k_Ca_ATP = 0.0002, # kCaP = 0.0002 (mmol/L)
# Constants related to the ultra-rapidly rectifying potassium channel
g_K_ur = 1.4623,
# Constants related to I_K_DR
i_K_DR = 1.0,
act_DR_shift = 10.0,
# Constants related to the two-pore potassium channel
P_K = 3.1e-6*sqrt(5/140),
Q = 0.1, # Based on experimental data from Bob; slope of IV in isopotential recording conditions
I_K_2pore_0 = 0.0,
I_K_2pore_scale = 1.35,
# Constants related to the calcium-activated potassium channel
Zj = 0.70,
Vhj = 250,
ZL = 0.1,
L0 = 12e-6,
KDc = 3e-6,
C = 8,
D = 25,
E = 2.4,
Gmax = 3.8*2.4,
N_channel = 1.0,
E_K_Ca_act = 42,
gBK = 2.50,
# Constants related to the TRP1 channel
g_TRP1 = 1.e-4*0.75*10/4,
a_TRP1 = 80,
b_TRP1 = -1000,
# Constants related to the TRPV4 channel
g_TRPV4 = 0.00046875*0.05, # (nS)
a_TRPV4 = 80,
b_TRPV4 = -1000,
E_Na = 55.0,
# Contants related to I_K_ATP
temp = 23,
# Constants related to I_Cl_b
E_Cl = -65.0,
# Constants related to I_K_ur_ref
G_K = 28.9, # pS/pF
V_h = -26.7, # mV
S_h = 4.1, # mV
# Constants related to I_K_ATP
sigma = 0.6,
f_M = 1.0,
Mg_i = 1.0,
delta_Mg = 0.32,
Q_10 = 1.3,
K_h_Na_0 = 25.9, #mM/L
delta_Na = 0.35,
p_0 = 0.91,
H_K_ATP = -0.001,
K_m_ATP = 0.56,
C_A = 8.0, # total concentration
ATP_i = 7.7,
# parameter for voltage clamp
V_step_size = 2501,
V_start = -150,
V_end = 100
)
params_dict["K_o"] = params_dict["K_o_0"]
# Constants related to the sodium-potassium pump, pA, from Nygren et al, 1998
params_dict["I_NaK_bar"] = params_dict["I_NaK_scale"]*70.8253*params_dict["C_m"]/params_dict["C_myo"]
# Constants related to the sodium-calcium exchanger
params_dict["NCX_scale"] = params_dict["C_m"] / params_dict["C_myo"]
# Constants related to I_K_DR, given in nS/pF per <NAME> communication 3/13/16, assuming C_m as above
params_dict["g_K_DR"] = 0.0289*params_dict["C_m"]
| """
This file is part of the chondrocyte modelling project at Simula
Research Laboratory, Norway. Refer to the files README and LICENSE for
more information about the project as well as terms of distribution.
Author : <NAME>, <NAME>, M.M.Malacker
Data created : July, 2021
Python version : 3.8.2
"""
from math import sqrt
params_dict = dict(
clamp_conc = False, # Toggle clamping of the internal concentrations (for debugging)
apply_Vm = False, # Toggle setting of the membrane voltage
clamp_Vm = True, # If apply_Vm is true, then one of clamp_Vm, ramp_Vm and step_Vm must be true to define what voltage is to be applied
step_Vm = False,
ramp_Vm = False,
# Clamp intracellular concentrations to simulate experimental conditions
clamp_Na_i = True,
clamp_K_i = True,
calmp_Ca_i = False,
clamp_H_i = False,
clamp_Cl_i = False,
V_final = 100.0, # Final value of membrane voltage when ramped (mV)
# Time-stepping information
t_final = 50000.0, # Final time (s)
dt = 1e0,
# External concentrations
Na_o = 140.0, # Clamped external sodium concentration (mM/l), 130 for cardiac; 140 for synovial fluid, 240-250 for chondrocyte matrix
K_o_0 = 5.4, # # Clamped external potassium concentration (mM/l), 5.4 for cardiac; 5 for synovial fluid, 7-12 for chondrocyte matrix
step_K_o = False,
Ca_o = 13, # Clamped external calcium concentration (mM/l), 1.8-2.0 for cardiac; 1.5 for synovial fluid, 6-15 for chondrocyte matrix
H_o = 10**(-7.4), # Clamped external hydrogen concentration (mM/l)
Cl_o = 140.0, # Clamped external chloride concentration (mM/l)
# Initial conditions
V_0 = -66.725, # Initial membrane potential (mV)
Na_i_0 = 25.0, # Initial internal sodium concentration (mM/l), 8.5 for cardiac; 40??? for chondrocyte - using either 12.0 or 20.0
Na_i_clamp = 25.0,
K_i_0 = 180.0, # Initial internal potassium concentration (mM/l), 120-140 for cardiac; 120-140 for chondrocyte
Ca_i_0 = 0.00001, # Initial internal calcium concentration (mM/l), 0.000067 mM/l for cardiac, 0.00001 - 0.00005 for chondrocyte
H_i_0 = 3.47426156721507e-10, # Initial internal hydrogen concentration (mM/l)
Cl_i_0 = 60.0, # Initial internal chloride concentration (mM/l)
# Initial value of Kv activation
a_ur_0 = 1.95403736678201e-04,
i_ur_0 = 9.99896050539933e-01,
cal_0 = 5.08345961310772e-05, # From Nygren, et al 1998
# Universal constants
R = 8314.472, # Universal gas constant (mJ K^-1 mol^-1)
T = 310.15, # Normal body temperature (K)
F = 96485.34, # Faradays constant (C mol^-1)
# Charges on each of the ions
z_Na = 1, # Charge on the sodium ion
z_K = 1, # Charge on the potassium ion
z_Ca = 2, # Charge on the calcium ion
z_H = 1, # Charge on the calcium ion
z_Cl = 1, # Charge on the chloride ion
# Cell parameters
C_m = 6.3, # Membrane capacitance, (pF)
vol_i_0 = 0.005884, # Internal volume (mL)
C_myo = 50.0, # (pF); myocyte capacitance from Nygren, et al, 1998, for scaling
# Constants related to external stimulation
t_cycle = 5.0, # Total cycle time (s)
t_stim = 1.0, # Stimulation time/cycle (s)
I_stim_bar = 0.0, # Stimulation current magnitude (pA)
# Background conductances
g_Na_b_bar = 0.1, # Background sodium leakage conductance (pS)
I_Na_b_scale = 1.0,
g_K_b_bar = 0.07, # Background potassium leakage conductance (pS), No background K in Nygren1998
g_Cl_b_bar = 0.05, # Background chloride leakage conductance (pS)
g_leak = 0.0, # Background seal leakage - set to 0.5
I_NaK_scale = 1.625,
K_NaK_K = 2.1, # (mmol/L) Nygren, et al, 1998
K_NaK_Na = 17.5, # (mmol/L) Nygren, et al, 1998
K_NaCa = 0.0374842, # pA/(mmol/L)4 (Nygren1998) - 0.04 - default
gamma_Na = 0.45, # same, dimensionless
d_NaCa = 0.0003, # same, (mmol/L)^4
# Constants related to the sodium-hydrogen exchanger
n_H = 1,
m_H = 3,
K_H_i_mod = 3.07e-5,
K_H_o_mod = 4.8e-7,
k1_p = 10.5,
k1_m = 0.201,
k2_p = 15.8,
k2_m = 183,
K_Na_i = 16.2,
K_Na_o = 195,
K_H_i = 6.05e-4,
K_H_o = 1.62e-3,
N_NaH_channel = 4899,
I_NaH_scale = 1.00,
# Constants related to the calcium pump
I_Ca_ATP_bar = 6.0, # ICaP = 4.0 (pA), Nygren1998
I_Ca_ATP_scale = 1.0,
k_Ca_ATP = 0.0002, # kCaP = 0.0002 (mmol/L)
# Constants related to the ultra-rapidly rectifying potassium channel
g_K_ur = 1.4623,
# Constants related to I_K_DR
i_K_DR = 1.0,
act_DR_shift = 10.0,
# Constants related to the two-pore potassium channel
P_K = 3.1e-6*sqrt(5/140),
Q = 0.1, # Based on experimental data from Bob; slope of IV in isopotential recording conditions
I_K_2pore_0 = 0.0,
I_K_2pore_scale = 1.35,
# Constants related to the calcium-activated potassium channel
Zj = 0.70,
Vhj = 250,
ZL = 0.1,
L0 = 12e-6,
KDc = 3e-6,
C = 8,
D = 25,
E = 2.4,
Gmax = 3.8*2.4,
N_channel = 1.0,
E_K_Ca_act = 42,
gBK = 2.50,
# Constants related to the TRP1 channel
g_TRP1 = 1.e-4*0.75*10/4,
a_TRP1 = 80,
b_TRP1 = -1000,
# Constants related to the TRPV4 channel
g_TRPV4 = 0.00046875*0.05, # (nS)
a_TRPV4 = 80,
b_TRPV4 = -1000,
E_Na = 55.0,
# Contants related to I_K_ATP
temp = 23,
# Constants related to I_Cl_b
E_Cl = -65.0,
# Constants related to I_K_ur_ref
G_K = 28.9, # pS/pF
V_h = -26.7, # mV
S_h = 4.1, # mV
# Constants related to I_K_ATP
sigma = 0.6,
f_M = 1.0,
Mg_i = 1.0,
delta_Mg = 0.32,
Q_10 = 1.3,
K_h_Na_0 = 25.9, #mM/L
delta_Na = 0.35,
p_0 = 0.91,
H_K_ATP = -0.001,
K_m_ATP = 0.56,
C_A = 8.0, # total concentration
ATP_i = 7.7,
# parameter for voltage clamp
V_step_size = 2501,
V_start = -150,
V_end = 100
)
params_dict["K_o"] = params_dict["K_o_0"]
# Constants related to the sodium-potassium pump, pA, from Nygren et al, 1998
params_dict["I_NaK_bar"] = params_dict["I_NaK_scale"]*70.8253*params_dict["C_m"]/params_dict["C_myo"]
# Constants related to the sodium-calcium exchanger
params_dict["NCX_scale"] = params_dict["C_m"] / params_dict["C_myo"]
# Constants related to I_K_DR, given in nS/pF per <NAME> communication 3/13/16, assuming C_m as above
params_dict["g_K_DR"] = 0.0289*params_dict["C_m"]
| en | 0.693344 | This file is part of the chondrocyte modelling project at Simula Research Laboratory, Norway. Refer to the files README and LICENSE for more information about the project as well as terms of distribution. Author : <NAME>, <NAME>, M.M.Malacker Data created : July, 2021 Python version : 3.8.2 # Toggle clamping of the internal concentrations (for debugging) # Toggle setting of the membrane voltage # If apply_Vm is true, then one of clamp_Vm, ramp_Vm and step_Vm must be true to define what voltage is to be applied # Clamp intracellular concentrations to simulate experimental conditions # Final value of membrane voltage when ramped (mV) # Time-stepping information # Final time (s) # External concentrations # Clamped external sodium concentration (mM/l), 130 for cardiac; 140 for synovial fluid, 240-250 for chondrocyte matrix # # Clamped external potassium concentration (mM/l), 5.4 for cardiac; 5 for synovial fluid, 7-12 for chondrocyte matrix # Clamped external calcium concentration (mM/l), 1.8-2.0 for cardiac; 1.5 for synovial fluid, 6-15 for chondrocyte matrix # Clamped external hydrogen concentration (mM/l) # Clamped external chloride concentration (mM/l) # Initial conditions # Initial membrane potential (mV) # Initial internal sodium concentration (mM/l), 8.5 for cardiac; 40??? 
for chondrocyte - using either 12.0 or 20.0 # Initial internal potassium concentration (mM/l), 120-140 for cardiac; 120-140 for chondrocyte # Initial internal calcium concentration (mM/l), 0.000067 mM/l for cardiac, 0.00001 - 0.00005 for chondrocyte # Initial internal hydrogen concentration (mM/l) # Initial internal chloride concentration (mM/l) # Initial value of Kv activation # From Nygren, et al 1998 # Universal constants # Universal gas constant (mJ K^-1 mol^-1) # Normal body temperature (K) # Faradays constant (C mol^-1) # Charges on each of the ions # Charge on the sodium ion # Charge on the potassium ion # Charge on the calcium ion # Charge on the calcium ion # Charge on the chloride ion # Cell parameters # Membrane capacitance, (pF) # Internal volume (mL) # (pF); myocyte capacitance from Nygren, et al, 1998, for scaling # Constants related to external stimulation # Total cycle time (s) # Stimulation time/cycle (s) # Stimulation current magnitude (pA) # Background conductances # Background sodium leakage conductance (pS) # Background potassium leakage conductance (pS), No background K in Nygren1998 # Background chloride leakage conductance (pS) # Background seal leakage - set to 0.5 # (mmol/L) Nygren, et al, 1998 # (mmol/L) Nygren, et al, 1998 # pA/(mmol/L)4 (Nygren1998) - 0.04 - default # same, dimensionless # same, (mmol/L)^4 # Constants related to the sodium-hydrogen exchanger # Constants related to the calcium pump # ICaP = 4.0 (pA), Nygren1998 # kCaP = 0.0002 (mmol/L) # Constants related to the ultra-rapidly rectifying potassium channel # Constants related to I_K_DR # Constants related to the two-pore potassium channel # Based on experimental data from Bob; slope of IV in isopotential recording conditions # Constants related to the calcium-activated potassium channel # Constants related to the TRP1 channel # Constants related to the TRPV4 channel # (nS) # Contants related to I_K_ATP # Constants related to I_Cl_b # Constants related to I_K_ur_ref # pS/pF 
# mV # mV # Constants related to I_K_ATP #mM/L # total concentration # parameter for voltage clamp # Constants related to the sodium-potassium pump, pA, from Nygren et al, 1998 # Constants related to the sodium-calcium exchanger # Constants related to I_K_DR, given in nS/pF per <NAME> communication 3/13/16, assuming C_m as above | 2.069764 | 2 |
src/backend/common/consts/playoff_type.py | guineawheek/ftc-data-take-2 | 266 | 6616484 | <gh_stars>100-1000
from __future__ import annotations
import enum
from typing import Dict, Set, Tuple
from backend.common.consts.comp_level import CompLevel
@enum.unique
class DoubleElimBracket(str, enum.Enum):
    """Which half of a double-elimination bracket an alliance is in."""

    WINNER = "winner"
    LOSER = "loser"
@enum.unique
class PlayoffType(enum.IntEnum):
    """Enumeration of the playoff formats an event can use.

    Integer values are explicit; do not renumber existing members.
    """

    # Standard Brackets
    BRACKET_8_TEAM = 0
    BRACKET_16_TEAM = 1
    BRACKET_4_TEAM = 2
    # 2015 is special
    AVG_SCORE_8_TEAM = 3
    # Round Robin
    ROUND_ROBIN_6_TEAM = 4
    # Double Elimination Bracket
    DOUBLE_ELIM_8_TEAM = 5
    # Festival of Champions
    BO5_FINALS = 6
    BO3_FINALS = 7
    # Custom
    CUSTOM = 8
# Playoff types that are standard single-elimination brackets.
BRACKET_TYPES: Set[PlayoffType] = {
    PlayoffType.BRACKET_8_TEAM,
    PlayoffType.BRACKET_16_TEAM,
    PlayoffType.BRACKET_4_TEAM,
}
# Playoff types that use a double-elimination bracket.
DOUBLE_ELIM_TYPES: Set[PlayoffType] = {
    PlayoffType.DOUBLE_ELIM_8_TEAM,
}
# Names for Rendering
# Human-readable display name for each playoff type.
TYPE_NAMES: Dict[PlayoffType, str] = {
    PlayoffType.BRACKET_8_TEAM: "Elimination Bracket (8 Alliances)",
    PlayoffType.BRACKET_4_TEAM: "Elimination Bracket (4 Alliances)",
    PlayoffType.BRACKET_16_TEAM: "Elimination Bracket (16 Alliances)",
    PlayoffType.AVG_SCORE_8_TEAM: "Average Score (8 Alliances)",
    PlayoffType.ROUND_ROBIN_6_TEAM: "Round Robin (6 Alliances)",
    PlayoffType.DOUBLE_ELIM_8_TEAM: "Double Elimination Bracket (8 Alliances)",
    PlayoffType.BO3_FINALS: "Best of 3 Finals",
    PlayoffType.BO5_FINALS: "Best of 5 Finals",
    PlayoffType.CUSTOM: "Custom",
}
# Maps a sequential match number to (set, match) for a standard
# 8-alliance bracket. Section layout mirrors BRACKET_OCTO_ELIM_MAPPING:
# four quarterfinal sets, two semifinal sets, one finals set.
BRACKET_ELIM_MAPPING: Dict[int, Tuple[int, int]] = {
    # quarterfinals
    1: (1, 1),  # (set, match)
    2: (2, 1),
    3: (3, 1),
    4: (4, 1),
    5: (1, 2),
    6: (2, 2),
    7: (3, 2),
    8: (4, 2),
    9: (1, 3),
    10: (2, 3),
    11: (3, 3),
    12: (4, 3),
    # semifinals
    13: (1, 1),
    14: (2, 1),
    15: (1, 2),
    16: (2, 2),
    17: (1, 3),
    18: (2, 3),
    # finals
    19: (1, 1),
    20: (1, 2),
    21: (1, 3),
    22: (1, 4),
    23: (1, 5),
    24: (1, 6),
}
# Same mapping for a 16-alliance bracket (adds an octofinal round).
BRACKET_OCTO_ELIM_MAPPING: Dict[int, Tuple[int, int]] = {
    # octofinals
    1: (1, 1),  # (set, match)
    2: (2, 1),
    3: (3, 1),
    4: (4, 1),
    5: (5, 1),
    6: (6, 1),
    7: (7, 1),
    8: (8, 1),
    9: (1, 2),
    10: (2, 2),
    11: (3, 2),
    12: (4, 2),
    13: (5, 2),
    14: (6, 2),
    15: (7, 2),
    16: (8, 2),
    17: (1, 3),
    18: (2, 3),
    19: (3, 3),
    20: (4, 3),
    21: (5, 3),
    22: (6, 3),
    23: (7, 3),
    24: (8, 3),
    # quarterfinals
    25: (1, 1),
    26: (2, 1),
    27: (3, 1),
    28: (4, 1),
    29: (1, 2),
    30: (2, 2),
    31: (3, 2),
    32: (4, 2),
    33: (1, 3),
    34: (2, 3),
    35: (3, 3),
    36: (4, 3),
    # semifinals
    37: (1, 1),
    38: (2, 1),
    39: (1, 2),
    40: (2, 2),
    41: (1, 3),
    42: (2, 3),
    # finals
    43: (1, 1),
    44: (1, 2),
    45: (1, 3),
    46: (1, 4),
    47: (1, 5),
    48: (1, 6),
}
# Map match number -> set/match for a 8 alliance double elim bracket
# Based off: https://www.printyourbrackets.com/fillable-brackets/8-seeded-double-fillable.pdf
# Matches 1-6 are ef, 7-10 are qf, 11/12 are sf, 13 is f1, and 14/15 are f2
DOUBLE_ELIM_MAPPING: Dict[int, Tuple[CompLevel, int, int]] = {
# octofinals (winners bracket)
1: (CompLevel.EF, 1, 1),
2: (CompLevel.EF, 2, 1),
3: (CompLevel.EF, 3, 1),
4: (CompLevel.EF, 4, 1),
# octofinals (losers bracket)
5: (CompLevel.EF, 5, 1),
6: (CompLevel.EF, 6, 1),
# quarterfinals (winners bracket)
7: (CompLevel.QF, 1, 1),
8: (CompLevel.QF, 2, 1),
# quarterfinals (losers bracket)
9: (CompLevel.QF, 3, 1),
10: (CompLevel.QF, 4, 1),
# semifinals (winners bracket)
11: (CompLevel.SF, 1, 1),
# semifinals (losers bracket)
12: (CompLevel.SF, 2, 1),
# finals (losers bracket)
13: (CompLevel.F, 1, 1),
# overall finals (winners bracket)
14: (CompLevel.F, 2, 1),
15: (CompLevel.F, 2, 2),
}
| from __future__ import annotations
import enum
from typing import Dict, Set, Tuple
from backend.common.consts.comp_level import CompLevel
@enum.unique
class DoubleElimBracket(str, enum.Enum):
WINNER = "winner"
LOSER = "loser"
@enum.unique
class PlayoffType(enum.IntEnum):
# Standard Brackets
BRACKET_8_TEAM = 0
BRACKET_16_TEAM = 1
BRACKET_4_TEAM = 2
# 2015 is special
AVG_SCORE_8_TEAM = 3
# Round Robin
ROUND_ROBIN_6_TEAM = 4
# Double Elimination Bracket
DOUBLE_ELIM_8_TEAM = 5
# Festival of Champions
BO5_FINALS = 6
BO3_FINALS = 7
# Custom
CUSTOM = 8
BRACKET_TYPES: Set[PlayoffType] = {
PlayoffType.BRACKET_8_TEAM,
PlayoffType.BRACKET_16_TEAM,
PlayoffType.BRACKET_4_TEAM,
}
DOUBLE_ELIM_TYPES: Set[PlayoffType] = {
PlayoffType.DOUBLE_ELIM_8_TEAM,
}
# Names for Rendering
TYPE_NAMES: Dict[PlayoffType, str] = {
PlayoffType.BRACKET_8_TEAM: "Elimination Bracket (8 Alliances)",
PlayoffType.BRACKET_4_TEAM: "Elimination Bracket (4 Alliances)",
PlayoffType.BRACKET_16_TEAM: "Elimination Bracket (16 Alliances)",
PlayoffType.AVG_SCORE_8_TEAM: "Average Score (8 Alliances)",
PlayoffType.ROUND_ROBIN_6_TEAM: "Round Robin (6 Alliances)",
PlayoffType.DOUBLE_ELIM_8_TEAM: "Double Elimination Bracket (8 Alliances)",
PlayoffType.BO3_FINALS: "Best of 3 Finals",
PlayoffType.BO5_FINALS: "Best of 5 Finals",
PlayoffType.CUSTOM: "Custom",
}
BRACKET_ELIM_MAPPING: Dict[int, Tuple[int, int]] = {
1: (1, 1), # (set, match)
2: (2, 1),
3: (3, 1),
4: (4, 1),
5: (1, 2),
6: (2, 2),
7: (3, 2),
8: (4, 2),
9: (1, 3),
10: (2, 3),
11: (3, 3),
12: (4, 3),
13: (1, 1),
14: (2, 1),
15: (1, 2),
16: (2, 2),
17: (1, 3),
18: (2, 3),
19: (1, 1),
20: (1, 2),
21: (1, 3),
22: (1, 4),
23: (1, 5),
24: (1, 6),
}
BRACKET_OCTO_ELIM_MAPPING: Dict[int, Tuple[int, int]] = {
# octofinals
1: (1, 1), # (set, match)
2: (2, 1),
3: (3, 1),
4: (4, 1),
5: (5, 1),
6: (6, 1),
7: (7, 1),
8: (8, 1),
9: (1, 2),
10: (2, 2),
11: (3, 2),
12: (4, 2),
13: (5, 2),
14: (6, 2),
15: (7, 2),
16: (8, 2),
17: (1, 3),
18: (2, 3),
19: (3, 3),
20: (4, 3),
21: (5, 3),
22: (6, 3),
23: (7, 3),
24: (8, 3),
# quarterfinals
25: (1, 1),
26: (2, 1),
27: (3, 1),
28: (4, 1),
29: (1, 2),
30: (2, 2),
31: (3, 2),
32: (4, 2),
33: (1, 3),
34: (2, 3),
35: (3, 3),
36: (4, 3),
# semifinals
37: (1, 1),
38: (2, 1),
39: (1, 2),
40: (2, 2),
41: (1, 3),
42: (2, 3),
# finals
43: (1, 1),
44: (1, 2),
45: (1, 3),
46: (1, 4),
47: (1, 5),
48: (1, 6),
}
# Map match number -> set/match for a 8 alliance double elim bracket
# Based off: https://www.printyourbrackets.com/fillable-brackets/8-seeded-double-fillable.pdf
# Matches 1-6 are ef, 7-10 are qf, 11/12 are sf, 13 is f1, and 14/15 are f2
DOUBLE_ELIM_MAPPING: Dict[int, Tuple[CompLevel, int, int]] = {
# octofinals (winners bracket)
1: (CompLevel.EF, 1, 1),
2: (CompLevel.EF, 2, 1),
3: (CompLevel.EF, 3, 1),
4: (CompLevel.EF, 4, 1),
# octofinals (losers bracket)
5: (CompLevel.EF, 5, 1),
6: (CompLevel.EF, 6, 1),
# quarterfinals (winners bracket)
7: (CompLevel.QF, 1, 1),
8: (CompLevel.QF, 2, 1),
# quarterfinals (losers bracket)
9: (CompLevel.QF, 3, 1),
10: (CompLevel.QF, 4, 1),
# semifinals (winners bracket)
11: (CompLevel.SF, 1, 1),
# semifinals (losers bracket)
12: (CompLevel.SF, 2, 1),
# finals (losers bracket)
13: (CompLevel.F, 1, 1),
# overall finals (winners bracket)
14: (CompLevel.F, 2, 1),
15: (CompLevel.F, 2, 2),
} | en | 0.840782 | # Standard Brackets # 2015 is special # Round Robin # Double Elimination Bracket # Festival of Champions # Custom # Names for Rendering # (set, match) # octofinals # (set, match) # quarterfinals # semifinals # finals # Map match number -> set/match for a 8 alliance double elim bracket # Based off: https://www.printyourbrackets.com/fillable-brackets/8-seeded-double-fillable.pdf # Matches 1-6 are ef, 7-10 are qf, 11/12 are sf, 13 is f1, and 14/15 are f2 # octofinals (winners bracket) # octofinals (losers bracket) # quarterfinals (winners bracket) # quarterfinals (losers bracket) # semifinals (winners bracket) # semifinals (losers bracket) # finals (losers bracket) # overall finals (winners bracket) | 2.611634 | 3 |
code/main.py | mwang87/GNPS_CytoscapeFormatting | 0 | 6616485 | # main.py
# Flask application entry point: importing ``views`` registers the route
# handlers on ``app`` as a side effect.
from app import app
import views
if __name__ == '__main__':
    # NOTE(review): debug=True must not be enabled in production.
    app.run(host='0.0.0.0',port='5051', debug=True)
| # main.py
from app import app
import views
if __name__ == '__main__':
app.run(host='0.0.0.0',port='5051', debug=True)
| none | 1 | 1.568381 | 2 | |
tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py | zhangyujing/tensorflow | 3 | 6616486 | <filename>tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests boosted_trees estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.estimator.python.estimator import boosted_trees
from tensorflow.python.estimator.canned import boosted_trees as canned_boosted_trees
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
NUM_FEATURES = 3
BUCKET_BOUNDARIES = [-2., .5, 12.] # Boundaries for all the features.
INPUT_FEATURES = np.array(
[
[12.5, 1.0, -2.001, -2.0001, -1.999], # feature_0 quantized:[3,2,0,0,1]
[2.0, -3.0, 0.5, 0.0, 0.4995], # feature_1 quantized:[2,0,2,1,1]
[3.0, 20.0, 50.0, -100.0, 102.75], # feature_2 quantized:[2,3,3,0,3]
],
dtype=np.float32)
CLASSIFICATION_LABELS = [[0.], [1.], [1.], [0.], [0.]]
REGRESSION_LABELS = [[1.5], [0.3], [0.2], [2.], [5.]]
FEATURES_DICT = {'f_%d' % i: INPUT_FEATURES[i] for i in range(NUM_FEATURES)}
def _make_train_input_fn(is_classification):
"""Makes train input_fn for classification/regression."""
def _input_fn():
features = dict(FEATURES_DICT)
if is_classification:
labels = CLASSIFICATION_LABELS
else:
labels = REGRESSION_LABELS
return features, labels
return _input_fn
class BoostedTreesEstimatorTest(test_util.TensorFlowTestCase):
  """Tests train/evaluate/predict paths of _BoostedTreesEstimator."""

  def setUp(self):
    # Shared single-output regression head and bucketized feature columns.
    self._head = canned_boosted_trees._create_regression_head(label_dimension=1)
    self._feature_columns = {
        feature_column.bucketized_column(
            feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),
            BUCKET_BOUNDARIES)
        for i in range(NUM_FEATURES)
    }

  def _assert_checkpoint(self, model_dir, expected_global_step):
    # Asserts that training stopped exactly at the expected global step.
    self.assertEqual(expected_global_step,
                     checkpoint_utils.load_variable(model_dir,
                                                    ops.GraphKeys.GLOBAL_STEP))

  def testTrainAndEvaluateEstimator(self):
    input_fn = _make_train_input_fn(is_classification=False)
    est = boosted_trees._BoostedTreesEstimator(
        feature_columns=self._feature_columns,
        n_batches_per_layer=1,
        n_trees=2,
        head=self._head,
        max_depth=5)
    # It will stop after 10 steps because of the max depth and num trees.
    num_steps = 100
    # Train for a few steps, and validate final checkpoint.
    est.train(input_fn, steps=num_steps)
    self._assert_checkpoint(est.model_dir, 11)
    eval_res = est.evaluate(input_fn=input_fn, steps=1)
    self.assertAllClose(eval_res['average_loss'], 0.913176)

  def testInferEstimator(self):
    train_input_fn = _make_train_input_fn(is_classification=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
    est = boosted_trees._BoostedTreesEstimator(
        feature_columns=self._feature_columns,
        n_batches_per_layer=1,
        n_trees=1,
        max_depth=5,
        head=self._head)
    # It will stop after 5 steps because of the max depth and num trees.
    num_steps = 100
    # Train for a few steps, and validate final checkpoint.
    est.train(train_input_fn, steps=num_steps)
    self._assert_checkpoint(est.model_dir, 6)
    predictions = list(est.predict(input_fn=predict_input_fn))
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(5, len(predictions))
    self.assertAllClose([0.703549], predictions[0]['predictions'])
    self.assertAllClose([0.266539], predictions[1]['predictions'])
    self.assertAllClose([0.256479], predictions[2]['predictions'])
    self.assertAllClose([1.088732], predictions[3]['predictions'])
    self.assertAllClose([1.901732], predictions[4]['predictions'])
class BoostedTreesClassifierTrainInMemoryTest(test_util.TensorFlowTestCase):
  """Tests boosted_trees_classifier_train_in_memory end-to-end."""

  def setUp(self):
    # Bucketized feature columns over the shared synthetic features.
    self._feature_columns = {
        feature_column.bucketized_column(
            feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),
            BUCKET_BOUNDARIES)
        for i in range(NUM_FEATURES)
    }

  def _assert_checkpoint(self, model_dir, expected_global_step):
    # Asserts that training stopped exactly at the expected global step.
    self.assertEqual(expected_global_step,
                     checkpoint_utils.load_variable(model_dir,
                                                    ops.GraphKeys.GLOBAL_STEP))

  def testBinaryClassifierTrainInMemoryAndEvalAndInfer(self):
    train_input_fn = _make_train_input_fn(is_classification=True)
    predict_input_fn = numpy_io.numpy_input_fn(
        x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
    est = boosted_trees.boosted_trees_classifier_train_in_memory(
        train_input_fn=train_input_fn,
        feature_columns=self._feature_columns,
        n_trees=1,
        max_depth=5)
    # It will stop after 5 steps because of the max depth and num trees.
    self._assert_checkpoint(est.model_dir, 6)
    # Check eval.
    eval_res = est.evaluate(input_fn=train_input_fn, steps=1)
    self.assertAllClose(eval_res['accuracy'], 1.0)
    # Check predict that all labels are correct.
    predictions = list(est.predict(input_fn=predict_input_fn))
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(5, len(predictions))
    self.assertAllClose([0], predictions[0]['class_ids'])
    self.assertAllClose([1], predictions[1]['class_ids'])
    self.assertAllClose([1], predictions[2]['class_ids'])
    self.assertAllClose([0], predictions[3]['class_ids'])
    self.assertAllClose([0], predictions[4]['class_ids'])
class BoostedTreesRegressorTrainInMemoryTest(test_util.TensorFlowTestCase):
  """Tests boosted_trees_regressor_train_in_memory end-to-end."""

  def setUp(self):
    # Bucketized feature columns over the shared synthetic features.
    self._feature_columns = {
        feature_column.bucketized_column(
            feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),
            BUCKET_BOUNDARIES)
        for i in range(NUM_FEATURES)
    }

  def _assert_checkpoint(self, model_dir, expected_global_step):
    # Asserts that training stopped exactly at the expected global step.
    self.assertEqual(expected_global_step,
                     checkpoint_utils.load_variable(model_dir,
                                                    ops.GraphKeys.GLOBAL_STEP))

  def testRegressorTrainInMemoryAndEvalAndInfer(self):
    train_input_fn = _make_train_input_fn(is_classification=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
    est = boosted_trees.boosted_trees_regressor_train_in_memory(
        train_input_fn=train_input_fn,
        feature_columns=self._feature_columns,
        n_trees=1,
        max_depth=5)
    # It will stop after 5 steps because of the max depth and num trees.
    self._assert_checkpoint(est.model_dir, 6)
    # Check eval.
    eval_res = est.evaluate(input_fn=train_input_fn, steps=1)
    self.assertAllClose(eval_res['average_loss'], 2.2136638)
    # Validate predictions.
    predictions = list(est.predict(input_fn=predict_input_fn))
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(5, len(predictions))
    self.assertAllClose([0.703549], predictions[0]['predictions'])
    self.assertAllClose([0.266539], predictions[1]['predictions'])
    self.assertAllClose([0.256479], predictions[2]['predictions'])
    self.assertAllClose([1.088732], predictions[3]['predictions'])
    self.assertAllClose([1.901732], predictions[4]['predictions'])
if __name__ == '__main__':
googletest.main()
| <filename>tensorflow/contrib/estimator/python/estimator/boosted_trees_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests boosted_trees estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.estimator.python.estimator import boosted_trees
from tensorflow.python.estimator.canned import boosted_trees as canned_boosted_trees
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
NUM_FEATURES = 3
BUCKET_BOUNDARIES = [-2., .5, 12.] # Boundaries for all the features.
INPUT_FEATURES = np.array(
[
[12.5, 1.0, -2.001, -2.0001, -1.999], # feature_0 quantized:[3,2,0,0,1]
[2.0, -3.0, 0.5, 0.0, 0.4995], # feature_1 quantized:[2,0,2,1,1]
[3.0, 20.0, 50.0, -100.0, 102.75], # feature_2 quantized:[2,3,3,0,3]
],
dtype=np.float32)
CLASSIFICATION_LABELS = [[0.], [1.], [1.], [0.], [0.]]
REGRESSION_LABELS = [[1.5], [0.3], [0.2], [2.], [5.]]
FEATURES_DICT = {'f_%d' % i: INPUT_FEATURES[i] for i in range(NUM_FEATURES)}
def _make_train_input_fn(is_classification):
"""Makes train input_fn for classification/regression."""
def _input_fn():
features = dict(FEATURES_DICT)
if is_classification:
labels = CLASSIFICATION_LABELS
else:
labels = REGRESSION_LABELS
return features, labels
return _input_fn
class BoostedTreesEstimatorTest(test_util.TensorFlowTestCase):
def setUp(self):
self._head = canned_boosted_trees._create_regression_head(label_dimension=1)
self._feature_columns = {
feature_column.bucketized_column(
feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),
BUCKET_BOUNDARIES)
for i in range(NUM_FEATURES)
}
def _assert_checkpoint(self, model_dir, expected_global_step):
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(model_dir,
ops.GraphKeys.GLOBAL_STEP))
def testTrainAndEvaluateEstimator(self):
input_fn = _make_train_input_fn(is_classification=False)
est = boosted_trees._BoostedTreesEstimator(
feature_columns=self._feature_columns,
n_batches_per_layer=1,
n_trees=2,
head=self._head,
max_depth=5)
# It will stop after 10 steps because of the max depth and num trees.
num_steps = 100
# Train for a few steps, and validate final checkpoint.
est.train(input_fn, steps=num_steps)
self._assert_checkpoint(est.model_dir, 11)
eval_res = est.evaluate(input_fn=input_fn, steps=1)
self.assertAllClose(eval_res['average_loss'], 0.913176)
def testInferEstimator(self):
train_input_fn = _make_train_input_fn(is_classification=False)
predict_input_fn = numpy_io.numpy_input_fn(
x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
est = boosted_trees._BoostedTreesEstimator(
feature_columns=self._feature_columns,
n_batches_per_layer=1,
n_trees=1,
max_depth=5,
head=self._head)
# It will stop after 5 steps because of the max depth and num trees.
num_steps = 100
# Train for a few steps, and validate final checkpoint.
est.train(train_input_fn, steps=num_steps)
self._assert_checkpoint(est.model_dir, 6)
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEquals(5, len(predictions))
self.assertAllClose([0.703549], predictions[0]['predictions'])
self.assertAllClose([0.266539], predictions[1]['predictions'])
self.assertAllClose([0.256479], predictions[2]['predictions'])
self.assertAllClose([1.088732], predictions[3]['predictions'])
self.assertAllClose([1.901732], predictions[4]['predictions'])
class BoostedTreesClassifierTrainInMemoryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._feature_columns = {
feature_column.bucketized_column(
feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),
BUCKET_BOUNDARIES)
for i in range(NUM_FEATURES)
}
def _assert_checkpoint(self, model_dir, expected_global_step):
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(model_dir,
ops.GraphKeys.GLOBAL_STEP))
def testBinaryClassifierTrainInMemoryAndEvalAndInfer(self):
train_input_fn = _make_train_input_fn(is_classification=True)
predict_input_fn = numpy_io.numpy_input_fn(
x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
est = boosted_trees.boosted_trees_classifier_train_in_memory(
train_input_fn=train_input_fn,
feature_columns=self._feature_columns,
n_trees=1,
max_depth=5)
# It will stop after 5 steps because of the max depth and num trees.
self._assert_checkpoint(est.model_dir, 6)
# Check eval.
eval_res = est.evaluate(input_fn=train_input_fn, steps=1)
self.assertAllClose(eval_res['accuracy'], 1.0)
# Check predict that all labels are correct.
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEquals(5, len(predictions))
self.assertAllClose([0], predictions[0]['class_ids'])
self.assertAllClose([1], predictions[1]['class_ids'])
self.assertAllClose([1], predictions[2]['class_ids'])
self.assertAllClose([0], predictions[3]['class_ids'])
self.assertAllClose([0], predictions[4]['class_ids'])
class BoostedTreesRegressorTrainInMemoryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._feature_columns = {
feature_column.bucketized_column(
feature_column.numeric_column('f_%d' % i, dtype=dtypes.float32),
BUCKET_BOUNDARIES)
for i in range(NUM_FEATURES)
}
def _assert_checkpoint(self, model_dir, expected_global_step):
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(model_dir,
ops.GraphKeys.GLOBAL_STEP))
def testRegressorTrainInMemoryAndEvalAndInfer(self):
train_input_fn = _make_train_input_fn(is_classification=False)
predict_input_fn = numpy_io.numpy_input_fn(
x=FEATURES_DICT, y=None, batch_size=1, num_epochs=1, shuffle=False)
est = boosted_trees.boosted_trees_regressor_train_in_memory(
train_input_fn=train_input_fn,
feature_columns=self._feature_columns,
n_trees=1,
max_depth=5)
# It will stop after 5 steps because of the max depth and num trees.
self._assert_checkpoint(est.model_dir, 6)
# Check eval.
eval_res = est.evaluate(input_fn=train_input_fn, steps=1)
self.assertAllClose(eval_res['average_loss'], 2.2136638)
# Validate predictions.
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEquals(5, len(predictions))
self.assertAllClose([0.703549], predictions[0]['predictions'])
self.assertAllClose([0.266539], predictions[1]['predictions'])
self.assertAllClose([0.256479], predictions[2]['predictions'])
self.assertAllClose([1.088732], predictions[3]['predictions'])
self.assertAllClose([1.901732], predictions[4]['predictions'])
if __name__ == '__main__':
googletest.main()
| en | 0.831662 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests boosted_trees estimators. # Boundaries for all the features. # feature_0 quantized:[3,2,0,0,1] # feature_1 quantized:[2,0,2,1,1] # feature_2 quantized:[2,3,3,0,3] Makes train input_fn for classification/regression. # It will stop after 10 steps because of the max depth and num trees. # Train for a few steps, and validate final checkpoint. # It will stop after 5 steps because of the max depth and num trees. # Train for a few steps, and validate final checkpoint. # It will stop after 5 steps because of the max depth and num trees. # Check eval. # Check predict that all labels are correct. # It will stop after 5 steps because of the max depth and num trees. # Check eval. # Validate predictions. | 2.043384 | 2 |
bikesharing/migrations/0030_auto_20200618_1414.py | mohnbroetchen2/cykel_jenarad | 80 | 6616487 | # Generated by Django 2.2.13 on 2020-06-18 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Location.reported_at to default to None."""

    # Must run after the prior bikesharing migration so the field exists.
    dependencies = [
        ("bikesharing", "0029_bike_non_static_bike_id_unique_20200610_1901"),
    ]
    operations = [
        # NOTE(review): default=None presumably replaces an earlier
        # default on reported_at -- confirm against migration history.
        migrations.AlterField(
            model_name="location",
            name="reported_at",
            field=models.DateTimeField(default=None),
        ),
    ]
| # Generated by Django 2.2.13 on 2020-06-18 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bikesharing", "0029_bike_non_static_bike_id_unique_20200610_1901"),
]
operations = [
migrations.AlterField(
model_name="location",
name="reported_at",
field=models.DateTimeField(default=None),
),
]
| en | 0.757575 | # Generated by Django 2.2.13 on 2020-06-18 12:14 | 1.459735 | 1 |
output/models/nist_data/atomic/int_pkg/schema_instance/nistschema_sv_iv_atomic_int_pattern_4_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 6616488 | <reponame>tefra/xsdata-w3c-tests
from output.models.nist_data.atomic.int_pkg.schema_instance.nistschema_sv_iv_atomic_int_pattern_4_xsd.nistschema_sv_iv_atomic_int_pattern_4 import NistschemaSvIvAtomicIntPattern4
__all__ = [
"NistschemaSvIvAtomicIntPattern4",
]
| from output.models.nist_data.atomic.int_pkg.schema_instance.nistschema_sv_iv_atomic_int_pattern_4_xsd.nistschema_sv_iv_atomic_int_pattern_4 import NistschemaSvIvAtomicIntPattern4
__all__ = [
"NistschemaSvIvAtomicIntPattern4",
] | none | 1 | 0.987019 | 1 | |
operalib/onorma.py | vishalbelsare/operalib | 18 | 6616489 | """
:mod:`operalib.onorma` implements the Operator-valued Naive Online
Regularised Risk Minimization Algorithm (ONORMA).
"""
# Author: <NAME> <<EMAIL>> with help from
# the scikit-learn community.
# License: MIT
from numpy import eye, empty, ravel, vstack, zeros
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_X_y, check_array
from sklearn.utils.validation import check_is_fitted
from .metrics import first_periodic_kernel
from .kernels import DecomposableKernel, DotProductKernel
from .learningrate import Constant, InvScaling
# When adding a new kernel, update this table and the _get_kernel_map
# method
# Lookup table mapping kernel-name strings to their implementing classes.
# The original literal listed 'DotProduct' twice with the same value; the
# duplicate key has been removed (a dict keeps only the last occurrence).
PAIRWISE_KERNEL_FUNCTIONS = {
    'DGauss': DecomposableKernel,
    'DotProduct': DotProductKernel,
    'DPeriodic': DecomposableKernel}
# When adding a new learning rate, update this table and the _get_learning_rate
# method
LEARNING_RATE_FUNCTIONS = {
'constant': Constant,
'invscaling': InvScaling}
class ONORMA(BaseEstimator, RegressorMixin):
"""Operator-valued Naive Online Regularised Risk
Minimization Algorithm .
    The Operator-valued Naive Online Regularised Risk Minimization
    Algorithm (ONORMA) extends the standard kernel-based online learning
    algorithm NORMA from the scalar-valued to the operator-valued
    setting. Truncation is currently not implemented.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
linop_ : callable
Callable which associate to the training points X the Gram matrix (the
Gram matrix being a LinearOperator)
A_ : array, shape = [n_targets, n_targets]
Set when Linear operator used by the decomposable kernel is default or
None.
T_ : integer
Total number of iterations
n_ : integer
Total number of datapoints
p_ : integer
Dimensionality of the outputs
References
----------
* Audiffren, Julien, and <NAME>.
"Online learning with multiple operator-valued kernels."
arXiv preprint arXiv:1311.0222 (2013).
* Kivinen, Jyrki, <NAME>, and <NAME>.
"Online learning with kernels."
IEEE transactions on signal processing 52.8 (2004): 2165-2176.
See also
--------
sklearn.Ridge
Linear ridge regression.
sklearn.KernelRidge
Kernel ridge regression.
sklearn.SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> n_samples, n_features, n_targets = 10, 5, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples, n_targets)
>>> X = rng.randn(n_samples, n_features)
>>> clf = ovk.ONORMA('DGauss', lbda=1.)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
ONORMA(A=None, T=None, eta=1.0, gamma=None, kernel='DGauss', lbda=1.0,
learning_rate='invscaling', mu=0.2, power=0.5, random_state=0,
shuffle=True, truncation=None)
"""
    def __init__(self, kernel='DGauss', lbda=1e-5,
                 T=None, A=None, learning_rate='invscaling', truncation=None,
                 gamma=None, mu=0.2, eta=1., power=.5,
                 shuffle=True, random_state=0):
        """Initialize ONORMA.

        Parameters
        ----------
        kernel : {string, callable}, default='DGauss'
            Kernel mapping used internally. A callable should accept two
            arguments, and should return a LinearOperator.

        lbda : {float}, default=1e-5
            Small positive values of lbda improve the conditioning of the
            problem and reduce the variance of the estimates. Lbda corresponds
            to ``(2*C)^-1`` in other linear models such as LogisticRegression
            or LinearSVC.

        T : {integer}, default=None
            Number of iterations.

        A : {LinearOperator, array-like, sparse matrix}, default=None
            Linear operator used by the decomposable kernel. If default is
            None, which is set to identity matrix of size y.shape[1] when
            fitting.

        mu : {array, LinearOperator}, shape = [n_targets, n_targets]
            Tradeoff between shared and independent components in the Dot
            Product kernel.

        learning_rate : {Callable}
            Learning rate, a function that returns the step size at a given
            step.

        truncation : learning_rate : {Callable}
            TODO

        gamma : {float}, default=None.
            Gamma parameter for the Decomposable Gaussian kernel.
            Ignored by other kernels.

        eta : {float}, default=1.
            Base step size fed to the learning-rate schedule.

        power : {float}, default=.5
            Exponent of the 'invscaling' learning-rate schedule.

        shuffle : {boolean}, default=True
            Whether to shuffle training data.  NOTE(review): not referenced
            by the methods visible here -- presumably used by ``fit``;
            confirm.

        random_state : {integer}, default=0
            Seed controlling the shuffling, presumably; confirm in ``fit``.
        """
        self.kernel = kernel
        self.lbda = lbda
        self.T = T
        self.A = A
        self.learning_rate = learning_rate
        self.truncation = truncation
        self.gamma = gamma
        self.mu = mu
        self.shuffle = shuffle
        self.random_state = random_state
        self.eta = eta
        self.power = power
def _validate_params(self):
# check on self.kernel is performed in method __get_kernel
if self.lbda < 0:
raise ValueError('lbda must be a positive scalar')
if self.mu < 0 or self.mu > 1:
raise ValueError('mu must be a scalar between 0. and 1.')
if self.T is not None:
if self.T <= 0:
raise ValueError('T must be a positive integer')
# if self.A < 0: # Check whether A is S PD would be really expensive
# raise ValueError('A must be a symmetric positive operator')
if self.gamma is not None:
if self.gamma < 0:
raise ValueError('gamma must be positive or default (None)')
def _default_decomposable_op(self, y):
if self.A is not None:
return self.A
elif y.ndim == 2:
return eye(y.shape[1])
else:
return eye(1)
def _get_kernel_map(self, X, y):
# When adding a new kernel, update this table and the _get_kernel_map
# method
if callable(self.kernel):
ov_kernel = self.kernel
elif type(self.kernel) is str:
# 1) check string and assign the right parameters
if self.kernel == 'DGauss':
self.A_ = self._default_decomposable_op(y)
kernel_params = {'A': self.A_, 'scalar_kernel': rbf_kernel,
'scalar_kernel_params': {'gamma': self.gamma}}
elif self.kernel == 'DotProduct':
kernel_params = {'mu': self.mu, 'p': y.shape[1]}
elif self.kernel == 'DPeriodic':
self.A_ = self._default_decomposable_op(y)
self.period_ = self._default_period(X, y)
kernel_params = {'A': self.A_,
'scalar_kernel': first_periodic_kernel,
'scalar_kernel_params': {'gamma': self.theta,
'period':
self.period_}, }
else:
raise NotImplemented('unsupported kernel')
# 2) Uses lookup table to select the right kernel from string
ov_kernel = PAIRWISE_KERNEL_FUNCTIONS[self.kernel](**kernel_params)
else:
raise NotImplemented('unsupported kernel')
return ov_kernel
def _get_learning_rate(self):
if callable(self.learning_rate):
return self.learning_rate
elif type(self.learning_rate) is str:
# 1) check string and assign the right parameters
if self.learning_rate == 'constant':
lr_params = {'eta': self.eta}
elif self.learning_rate == 'invscaling':
lr_params = {'eta': self.eta, 'power': self.power}
else:
raise NotImplemented('unsupported kernel')
lr = LEARNING_RATE_FUNCTIONS[self.learning_rate](**lr_params)
else:
raise NotImplemented('unsupported learning rate')
return lr
    def _decision_function(self, X):
        # Internal prediction used during training: rebuild the Gram
        # LinearOperator over the data seen so far and apply the first
        # t_ * p_ learned coefficients.
        self.linop_ = self.ov_kernel_(self.X_seen_)
        pred = self.linop_(X) * self.coefs_[:self.t_ * self.p_]
        # Multi-output kernels (p > 1) need reshaping to (n_samples, p).
        return pred.reshape(X.shape[0], -1) if self.linop_.p > 1 else pred
    def predict(self, X):
        """Predict using ONORMA model.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Samples.

        Returns
        -------
        C : {array}, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
        """
        check_is_fitted(self, ['coefs_', 't_', 'p_',
                               'X_seen_', 'y_seen_'], all_or_any=all)
        X = check_array(X)
        # Gram operator between the seen training points and X, applied to
        # the learned coefficient vector (same computation as
        # _decision_function, but without caching the operator on self).
        linop = self.ov_kernel_(self.X_seen_)
        pred = linop(X) * self.coefs_[:self.t_ * self.p_]
        return pred.reshape(X.shape[0], -1) if linop.p > 1 else pred
    def partial_fit(self, X, y):
        """Partial fit of ONORMA model.

        Performs one online update per row of ``X``; useful for online
        learning.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data.
        y : {array-like}, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Returns
        -------
        self : returns an instance of self.
        """
        # Normalise single samples to 2-D: (1, n_features) / (1, n_targets).
        n = 1 if X.ndim <= 1 else X.shape[0]
        Xt = X.reshape(n, -1) if X.ndim <= 1 else X
        yt = y.reshape(n, -1) if y.ndim <= 1 else y
        # First call ever (no coefs_/t_ yet), or t_ reset to 0, triggers
        # initialisation from the first sample.
        init = not (hasattr(self, 'coefs_') and hasattr(self, 't_'))
        if hasattr(self, 't_'):
            init = self.t_ == 0
        if init:
            Xtt = Xt[0, :].reshape(1, -1)
            ytt = yt[0, :].reshape(1, -1)
            self.d_ = Xtt.shape[1]
            self.p_ = ytt.shape[1]
            self.learning_rate_ = self._get_learning_rate()
            self.coefs_ = empty(self.p_)
            eta_t = self.learning_rate_(1)
            # First coefficient block: gradient step from a zero predictor.
            self.coefs_[:self.p_] = -ravel(eta_t * (0 - ytt))
            self.X_seen_ = Xtt
            self.y_seen_ = ytt
            self.ov_kernel_ = self._get_kernel_map(self.X_seen_, self.y_seen_)
            self.t_ = 1
        # Reshape if self.coefs_ has not been preallocated
        self.coefs_.resize((self.t_ + (n - 1 if init else n)) * self.p_)
        for idx in range(1 if init else 0, n):
            Xtt = Xt[idx, :].reshape(1, -1)
            ytt = yt[idx, :].reshape(1, -1)
            eta_t = self.learning_rate_(self.t_ + 1)
            # Update weights: new block is a gradient step on the residual.
            self.coefs_[self.t_ * self.p_:(self.t_ + 1) * self.p_] = -ravel(
                eta_t * (self._decision_function(Xtt) - ytt))
            # Shrink all previous coefficients (regularisation step).
            self.coefs_[:self.t_ * self.p_] *= (1. - eta_t * self.lbda / 2)
            # Update seen data
            self.X_seen_ = vstack((self.X_seen_, Xtt))
            self.y_seen_ = vstack((self.y_seen_, ytt))
            # prepare next step
            self.t_ += 1
        return self
    def fit(self, X, y):
        """Fit ONORMA model.

        Runs ``T_`` online updates via :meth:`partial_fit`, cycling over
        the training samples.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data.
        y : {array-like}, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, False, y_numeric=True, multi_output=True)
        self._validate_params()
        # Default number of iterations: one pass over the data.
        self.T_ = X.shape[0] if self.T is None else self.T
        self.t_ = 0
        if y.ndim > 1:
            # Preallocate one coefficient block per iteration and target.
            self.coefs_ = zeros(self.T_ * y.shape[1])
            for i in range(self.T_):
                idx = i % X.shape[0]  # cycle through the samples
                self.partial_fit(X[idx, :], y[idx, :])
        else:
            self.coefs_ = zeros(self.T_)
            for i in range(self.T_):
                idx = i % X.shape[0]
                self.partial_fit(X[idx, :], y[idx])
        return self
| """
:mod:`operalib.ridge` implements Operator-valued Naive Online
Regularised Risk Minimization Algorithm (ONORMA)
"""
# Author: <NAME> <<EMAIL>> with help from
# the scikit-learn community.
# License: MIT
from numpy import eye, empty, ravel, vstack, zeros
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_X_y, check_array
from sklearn.utils.validation import check_is_fitted
from .metrics import first_periodic_kernel
from .kernels import DecomposableKernel, DotProductKernel
from .learningrate import Constant, InvScaling
# When adding a new kernel, update this table and the _get_kernel_map
# method
PAIRWISE_KERNEL_FUNCTIONS = {
'DGauss': DecomposableKernel,
'DotProduct': DotProductKernel,
'DPeriodic': DecomposableKernel,
'DotProduct': DotProductKernel}
# When adding a new learning rate, update this table and the _get_learning_rate
# method
LEARNING_RATE_FUNCTIONS = {
'constant': Constant,
'invscaling': InvScaling}
class ONORMA(BaseEstimator, RegressorMixin):
"""Operator-valued Naive Online Regularised Risk
Minimization Algorithm .
Operator-Valued kernel Operator-valued Naive Online Regularised Risk
Minimization Algorithm (ONORMA) extends the standard kernel-based
online learning algorithm NORMA from scalar-valued to operator-valued
setting. The truncation is currently not implemented.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
linop_ : callable
Callable which associate to the training points X the Gram matrix (the
Gram matrix being a LinearOperator)
A_ : array, shape = [n_targets, n_targets]
Set when Linear operator used by the decomposable kernel is default or
None.
T_ : integer
Total number of iterations
n_ : integer
Total number of datapoints
p_ : integer
Dimensionality of the outputs
References
----------
* Audiffren, Julien, and <NAME>.
"Online learning with multiple operator-valued kernels."
arXiv preprint arXiv:1311.0222 (2013).
* Kivinen, Jyrki, <NAME>, and <NAME>.
"Online learning with kernels."
IEEE transactions on signal processing 52.8 (2004): 2165-2176.
See also
--------
sklearn.Ridge
Linear ridge regression.
sklearn.KernelRidge
Kernel ridge regression.
sklearn.SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> n_samples, n_features, n_targets = 10, 5, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples, n_targets)
>>> X = rng.randn(n_samples, n_features)
>>> clf = ovk.ONORMA('DGauss', lbda=1.)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
ONORMA(A=None, T=None, eta=1.0, gamma=None, kernel='DGauss', lbda=1.0,
learning_rate='invscaling', mu=0.2, power=0.5, random_state=0,
shuffle=True, truncation=None)
"""
def __init__(self, kernel='DGauss', lbda=1e-5,
T=None, A=None, learning_rate='invscaling', truncation=None,
gamma=None, mu=0.2, eta=1., power=.5,
shuffle=True, random_state=0):
"""Initialize ONORMA.
Parameters
----------
kernel : {string, callable}, default='DGauss'
Kernel mapping used internally. A callable should accept two
arguments, and should return a LinearOperator.
lbda : {float}, default=1e-5
Small positive values of lbda improve the conditioning of the
problem and reduce the variance of the estimates. Lbda corresponds
to ``(2*C)^-1`` in other linear models such as LogisticRegression
or LinearSVC.
T : {integer}, default=None
Number of iterations.
A : {LinearOperator, array-like, sparse matrix}, default=None
Linear operator used by the decomposable kernel. If default is
None, wich is set to identity matrix of size y.shape[1] when
fitting.
mu : {array, LinearOperator}, shape = [n_targets, n_targets]
Tradeoff between shared and independant components in the Dot
Product kernel.
learning_rate : {Callable}
Learning rate, a function that return the step size at given step
truncation : learning_rate : {Callable}
TODO
gamma : {float}, default=None.
Gamma parameter for the Decomposable Gaussian kernel.
Ignored by other kernels.
"""
self.kernel = kernel
self.lbda = lbda
self.T = T
self.A = A
self.learning_rate = learning_rate
self.truncation = truncation
self.gamma = gamma
self.mu = mu
self.shuffle = shuffle
self.random_state = random_state
self.eta = eta
self.power = power
def _validate_params(self):
# check on self.kernel is performed in method __get_kernel
if self.lbda < 0:
raise ValueError('lbda must be a positive scalar')
if self.mu < 0 or self.mu > 1:
raise ValueError('mu must be a scalar between 0. and 1.')
if self.T is not None:
if self.T <= 0:
raise ValueError('T must be a positive integer')
# if self.A < 0: # Check whether A is S PD would be really expensive
# raise ValueError('A must be a symmetric positive operator')
if self.gamma is not None:
if self.gamma < 0:
raise ValueError('gamma must be positive or default (None)')
def _default_decomposable_op(self, y):
if self.A is not None:
return self.A
elif y.ndim == 2:
return eye(y.shape[1])
else:
return eye(1)
def _get_kernel_map(self, X, y):
# When adding a new kernel, update this table and the _get_kernel_map
# method
if callable(self.kernel):
ov_kernel = self.kernel
elif type(self.kernel) is str:
# 1) check string and assign the right parameters
if self.kernel == 'DGauss':
self.A_ = self._default_decomposable_op(y)
kernel_params = {'A': self.A_, 'scalar_kernel': rbf_kernel,
'scalar_kernel_params': {'gamma': self.gamma}}
elif self.kernel == 'DotProduct':
kernel_params = {'mu': self.mu, 'p': y.shape[1]}
elif self.kernel == 'DPeriodic':
self.A_ = self._default_decomposable_op(y)
self.period_ = self._default_period(X, y)
kernel_params = {'A': self.A_,
'scalar_kernel': first_periodic_kernel,
'scalar_kernel_params': {'gamma': self.theta,
'period':
self.period_}, }
else:
raise NotImplemented('unsupported kernel')
# 2) Uses lookup table to select the right kernel from string
ov_kernel = PAIRWISE_KERNEL_FUNCTIONS[self.kernel](**kernel_params)
else:
raise NotImplemented('unsupported kernel')
return ov_kernel
def _get_learning_rate(self):
if callable(self.learning_rate):
return self.learning_rate
elif type(self.learning_rate) is str:
# 1) check string and assign the right parameters
if self.learning_rate == 'constant':
lr_params = {'eta': self.eta}
elif self.learning_rate == 'invscaling':
lr_params = {'eta': self.eta, 'power': self.power}
else:
raise NotImplemented('unsupported kernel')
lr = LEARNING_RATE_FUNCTIONS[self.learning_rate](**lr_params)
else:
raise NotImplemented('unsupported learning rate')
return lr
def _decision_function(self, X):
self.linop_ = self.ov_kernel_(self.X_seen_)
pred = self.linop_(X) * self.coefs_[:self.t_ * self.p_]
return pred.reshape(X.shape[0], -1) if self.linop_.p > 1 else pred
def predict(self, X):
"""Predict using ONORMA model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : {array}, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ['coefs_', 't_', 'p_',
'X_seen_', 'y_seen_'], all_or_any=all)
X = check_array(X)
linop = self.ov_kernel_(self.X_seen_)
pred = linop(X) * self.coefs_[:self.t_ * self.p_]
return pred.reshape(X.shape[0], -1) if linop.p > 1 else pred
def partial_fit(self, X, y):
"""Partial fit of ONORMA model.
This method is usefull for online learning for instance.
Must call
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data.
y : {array-like}, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
self : returns an instance of self.
"""
n = 1 if X.ndim <= 1 else X.shape[0]
Xt = X.reshape(n, -1) if X.ndim <= 1 else X
yt = y.reshape(n, -1) if y.ndim <= 1 else y
init = not (hasattr(self, 'coefs_') and hasattr(self, 't_'))
if hasattr(self, 't_'):
init = self.t_ == 0
if init:
Xtt = Xt[0, :].reshape(1, -1)
ytt = yt[0, :].reshape(1, -1)
self.d_ = Xtt.shape[1]
self.p_ = ytt.shape[1]
self.learning_rate_ = self._get_learning_rate()
self.coefs_ = empty(self.p_)
eta_t = self.learning_rate_(1)
self.coefs_[:self.p_] = -ravel(eta_t * (0 - ytt))
self.X_seen_ = Xtt
self.y_seen_ = ytt
self.ov_kernel_ = self._get_kernel_map(self.X_seen_, self.y_seen_)
self.t_ = 1
# Reshape if self.coefs_ has not been preallocated
self.coefs_.resize((self.t_ + (n - 1 if init else n)) * self.p_)
for idx in range(1 if init else 0, n):
Xtt = Xt[idx, :].reshape(1, -1)
ytt = yt[idx, :].reshape(1, -1)
eta_t = self.learning_rate_(self.t_ + 1)
# Update weights
self.coefs_[self.t_ * self.p_:(self.t_ + 1) * self.p_] = -ravel(
eta_t * (self._decision_function(Xtt) - ytt))
self.coefs_[:self.t_ * self.p_] *= (1. - eta_t * self.lbda / 2)
# Update seen data
self.X_seen_ = vstack((self.X_seen_, Xtt))
self.y_seen_ = vstack((self.y_seen_, ytt))
# prepare next step
self.t_ += 1
return self
def fit(self, X, y):
"""Fit ONORMA model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data.
y : {array-like}, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, False, y_numeric=True, multi_output=True)
self._validate_params()
self.T_ = X.shape[0] if self.T is None else self.T
self.t_ = 0
if y.ndim > 1:
self.coefs_ = zeros(self.T_ * y.shape[1])
for i in range(self.T_):
idx = i % X.shape[0]
self.partial_fit(X[idx, :], y[idx, :])
else:
self.coefs_ = zeros(self.T_)
for i in range(self.T_):
idx = i % X.shape[0]
self.partial_fit(X[idx, :], y[idx])
return self
| en | 0.672959 | :mod:`operalib.ridge` implements Operator-valued Naive Online Regularised Risk Minimization Algorithm (ONORMA) # Author: <NAME> <<EMAIL>> with help from # the scikit-learn community. # License: MIT # When adding a new kernel, update this table and the _get_kernel_map # method # When adding a new learning rate, update this table and the _get_learning_rate # method Operator-valued Naive Online Regularised Risk Minimization Algorithm . Operator-Valued kernel Operator-valued Naive Online Regularised Risk Minimization Algorithm (ONORMA) extends the standard kernel-based online learning algorithm NORMA from scalar-valued to operator-valued setting. The truncation is currently not implemented. Attributes ---------- coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s) in kernel space linop_ : callable Callable which associate to the training points X the Gram matrix (the Gram matrix being a LinearOperator) A_ : array, shape = [n_targets, n_targets] Set when Linear operator used by the decomposable kernel is default or None. T_ : integer Total number of iterations n_ : integer Total number of datapoints p_ : integer Dimensionality of the outputs References ---------- * Audiffren, Julien, and <NAME>. "Online learning with multiple operator-valued kernels." arXiv preprint arXiv:1311.0222 (2013). * Kivinen, Jyrki, <NAME>, and <NAME>. "Online learning with kernels." IEEE transactions on signal processing 52.8 (2004): 2165-2176. See also -------- sklearn.Ridge Linear ridge regression. sklearn.KernelRidge Kernel ridge regression. sklearn.SVR Support Vector Regression implemented using libsvm. Examples -------- >>> import operalib as ovk >>> import numpy as np >>> n_samples, n_features, n_targets = 10, 5, 5 >>> rng = np.random.RandomState(0) >>> y = rng.randn(n_samples, n_targets) >>> X = rng.randn(n_samples, n_features) >>> clf = ovk.ONORMA('DGauss', lbda=1.) 
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS ONORMA(A=None, T=None, eta=1.0, gamma=None, kernel='DGauss', lbda=1.0, learning_rate='invscaling', mu=0.2, power=0.5, random_state=0, shuffle=True, truncation=None) Initialize ONORMA. Parameters ---------- kernel : {string, callable}, default='DGauss' Kernel mapping used internally. A callable should accept two arguments, and should return a LinearOperator. lbda : {float}, default=1e-5 Small positive values of lbda improve the conditioning of the problem and reduce the variance of the estimates. Lbda corresponds to ``(2*C)^-1`` in other linear models such as LogisticRegression or LinearSVC. T : {integer}, default=None Number of iterations. A : {LinearOperator, array-like, sparse matrix}, default=None Linear operator used by the decomposable kernel. If default is None, wich is set to identity matrix of size y.shape[1] when fitting. mu : {array, LinearOperator}, shape = [n_targets, n_targets] Tradeoff between shared and independant components in the Dot Product kernel. learning_rate : {Callable} Learning rate, a function that return the step size at given step truncation : learning_rate : {Callable} TODO gamma : {float}, default=None. Gamma parameter for the Decomposable Gaussian kernel. Ignored by other kernels. # check on self.kernel is performed in method __get_kernel # if self.A < 0: # Check whether A is S PD would be really expensive # raise ValueError('A must be a symmetric positive operator') # When adding a new kernel, update this table and the _get_kernel_map # method # 1) check string and assign the right parameters # 2) Uses lookup table to select the right kernel from string # 1) check string and assign the right parameters Predict using ONORMA model. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Samples. Returns ------- C : {array}, shape = [n_samples] or [n_samples, n_targets] Returns predicted values. Partial fit of ONORMA model. 
This method is usefull for online learning for instance. Must call Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data. y : {array-like}, shape = [n_samples] or [n_samples, n_targets] Target values. Returns ------- self : returns an instance of self. # Reshape if self.coefs_ has not been preallocated # Update weights # Update seen data # prepare next step Fit ONORMA model. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data. y : {array-like}, shape = [n_samples] or [n_samples, n_targets] Target values. Returns ------- self : returns an instance of self. | 2.340847 | 2 |
draalcore/test_apps/admin/tests/test_admin.py | jojanper/draalcore | 1 | 6616490 | <reponame>jojanper/draalcore<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# System imports
import logging
# Project imports
from draalcore.test_utils.basetest import BaseTestUser
from draalcore.test_apps.test_models.tests.utils.mixins import TestModelMixin
logger = logging.getLogger(__name__)
class AdminAppTestCase(TestModelMixin, BaseTestUser):
    """Admin app tests.

    Exercises the admin application's action API: listing authenticated
    actions, executing them, rejecting unsupported apps and HTTP methods,
    and the public (unauthenticated) action endpoints.
    """
    APP = 'admin'  # application name passed to every API helper below
    def test_unsupported_action(self):
        """Actions for unsupported applications are queried"""
        # GIVEN app that does not have application level actions
        app = 'dummy'
        # WHEN quering the aplication level actions
        response = self.api.app_actions(app)
        # THEN it should fail
        self.assertTrue(response.error)
    def test_admin_app_actions(self):
        """Admin app actions requiring user authentication are queried"""
        # GIVEN admin app
        # WHEN quering the application level actions
        response = self.api.app_actions(self.APP)
        # THEN it should succeed
        self.assertTrue(response.success)
        # -----
        for action, data in response.data.items():
            # WHEN calling available actions
            response = self.api.app_action(self.APP, action, data['method'], data=None)
            # THEN it should succeed
            self.assertTrue(response.success)
            # AND response data is available
            self.assertEqual(len(response.data), 1)
            # -----
            # WHEN calling action using HTTP method that is not supported
            response = self.api.app_action(self.APP, action, 'GET')
            # THEN it should fail
            self.assertTrue(response.error)
    def test_admin_app_public_actions(self):
        """Public admin actions are queried"""
        # GIVEN unauthenticated user
        self.logout()
        # WHEN quering the application level actions
        response = self.api.app_public_actions(self.APP)
        # THEN it should succeed
        self.assertTrue(response.success)
        # AND expected action data is received
        self.assertTrue('admin-public-action' in response.data)
        # NOTE(review): the second positional argument of assertFalse is the
        # failure *message*, so the trailing ``False`` here is inert --
        # confirm plain assertFalse(...) was intended.
        self.assertFalse(response.data['admin-public-action']['authenticate'], False)
    def test_admin_app_public_action(self):
        """Public admin action is executed"""
        # GIVEN unauthenticated user
        self.logout()
        # WHEN executing action
        kwargs = {'data': None}
        response = self.api.app_public_action(self.APP, 'admin-public-action', 'post', **kwargs)
        # THEN it should succeed
        self.assertTrue(response.success)
        # AND expected action data is received
        self.assertTrue('Ok' in response.data)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# System imports
import logging
# Project imports
from draalcore.test_utils.basetest import BaseTestUser
from draalcore.test_apps.test_models.tests.utils.mixins import TestModelMixin
logger = logging.getLogger(__name__)
class AdminAppTestCase(TestModelMixin, BaseTestUser):
"""Admin app tests"""
APP = 'admin'
def test_unsupported_action(self):
"""Actions for unsupported applications are queried"""
# GIVEN app that does not have application level actions
app = 'dummy'
# WHEN quering the aplication level actions
response = self.api.app_actions(app)
# THEN it should fail
self.assertTrue(response.error)
def test_admin_app_actions(self):
"""Admin app actions requiring user authentication are queried"""
# GIVEN admin app
# WHEN quering the application level actions
response = self.api.app_actions(self.APP)
# THEN it should succeed
self.assertTrue(response.success)
# -----
for action, data in response.data.items():
# WHEN calling available actions
response = self.api.app_action(self.APP, action, data['method'], data=None)
# THEN it should succeed
self.assertTrue(response.success)
# AND response data is available
self.assertEqual(len(response.data), 1)
# -----
# WHEN calling action using HTTP method that is not supported
response = self.api.app_action(self.APP, action, 'GET')
# THEN it should fail
self.assertTrue(response.error)
def test_admin_app_public_actions(self):
"""Public admin actions are queried"""
# GIVEN unauthenticated user
self.logout()
# WHEN quering the application level actions
response = self.api.app_public_actions(self.APP)
# THEN it should succeed
self.assertTrue(response.success)
# AND expected action data is received
self.assertTrue('admin-public-action' in response.data)
self.assertFalse(response.data['admin-public-action']['authenticate'], False)
def test_admin_app_public_action(self):
"""Public admin action is executed"""
# GIVEN unauthenticated user
self.logout()
# WHEN executing action
kwargs = {'data': None}
response = self.api.app_public_action(self.APP, 'admin-public-action', 'post', **kwargs)
# THEN it should succeed
self.assertTrue(response.success)
# AND expected action data is received
self.assertTrue('Ok' in response.data) | en | 0.893069 | #!/usr/bin/env python # -*- coding: utf-8 -*- # System imports # Project imports Admin app tests Actions for unsupported applications are queried # GIVEN app that does not have application level actions # WHEN quering the aplication level actions # THEN it should fail Admin app actions requiring user authentication are queried # GIVEN admin app # WHEN quering the application level actions # THEN it should succeed # ----- # WHEN calling available actions # THEN it should succeed # AND response data is available # ----- # WHEN calling action using HTTP method that is not supported # THEN it should fail Public admin actions are queried # GIVEN unauthenticated user # WHEN quering the application level actions # THEN it should succeed # AND expected action data is received Public admin action is executed # GIVEN unauthenticated user # WHEN executing action # THEN it should succeed # AND expected action data is received | 2.427999 | 2 |
Pytorch/Sex recognize/train_pytorch.py | wu-huipeng/Deep-Learning | 31 | 6616491 | import torch
from torch.autograd import Variable
import torch.optim
import torch.utils.data as data
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import torch.nn.functional as F
import torch.nn
from torchvision import transforms as T
from PIL import Image
from torch.utils.data import Dataset,DataLoader
import os
import time
transform = T.Compose([
T.Resize(128),
T.CenterCrop(128),
T.ToTensor(),
T.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])
])
class dataset(Dataset):
    """Folder-backed image dataset for binary gender classification.

    Every file in ``root`` is one sample; the label is derived from the
    filename: 0 if it contains 'woman', 1 otherwise.
    """
    def __init__(self,root,transform=None):
        # root: directory whose files are the images.
        # transform: callable applied to each PIL image; note __getitem__
        # always calls it, so None would fail -- TODO confirm a transform
        # is always supplied by callers.
        imgs = os.listdir(root)
        self.imgs = [os.path.join(root,img) for img in imgs]
        self.transform = transform
    def __getitem__(self, index):
        img_path = self.imgs[index]
        # Label from the filename only (last path component).
        label = 0 if 'woman' in img_path.split('/')[-1] else 1
        # assumes images open as 3-channel RGB (Normalize uses 3 channels)
        # -- TODO confirm no grayscale/RGBA files in the folder.
        data = Image.open(img_path)
        data = self.transform(data)
        return data,label
    def __len__(self):
        return len(self.imgs)
data = dataset('./man',transform=transform)
data = DataLoader(data,batch_size=128,shuffle=True,drop_last=True,num_workers=0)
class CNN(torch.nn.Module):
    """4-block convolutional classifier for 3x128x128 inputs.

    Three max-pools halve the spatial size 128 -> 64 -> 32 -> 16, so the
    flattened feature map is 128*16*16; the head outputs softmax
    probabilities over 2 classes.
    """
    def __init__(self):
        super(CNN,self).__init__()
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(3,16,kernel_size=3,stride=1,padding=1),
            torch.nn.BatchNorm2d(16),
            torch.nn.ReLU(inplace=True),
            torch.nn.MaxPool2d(kernel_size=2, stride=2) #64
        )
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(16,32,kernel_size=3,stride=1,padding=1),
            torch.nn.BatchNorm2d(32),
            torch.nn.ReLU(inplace=True),
            torch.nn.MaxPool2d(kernel_size=2,stride=2) #32
        )
        # layer3 keeps the spatial size (no pooling).
        self.layer3 = torch.nn.Sequential(
            torch.nn.Conv2d(32,64,kernel_size=3,stride=1,padding=1),
            torch.nn.BatchNorm2d(64),
            torch.nn.ReLU(inplace=True)
        )
        self.layer4 = torch.nn.Sequential(
            torch.nn.Conv2d(64,128,kernel_size=3,stride=1,padding=1),
            torch.nn.BatchNorm2d(128),
            torch.nn.ReLU(inplace=True),
            torch.nn.MaxPool2d(kernel_size=2,stride=2) #16
        )
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(128*16*16,2048),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(2048,2048),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(2048,2)
        )
    def forward(self,x):
        # x: (batch, 3, 128, 128) -> (batch, 2) class probabilities.
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = x.view(x.size(0),-1)
        # Softmax here pairs with the MSE-vs-one-hot loss used in the
        # training script below.
        x = F.softmax(self.fc(x),dim=1)
        return x
# --- Training loop (runs at import time): 25 epochs of SGD with
# MSE loss against one-hot labels, then saves the weights. ---
cnn = CNN()
optimizers = torch.optim.SGD(cnn.parameters(),lr=0.001,momentum=0.9)
losses = torch.nn.MSELoss()
cnn.train()
for i in range(25):
    acc = 0
    for img,label in data:
        # One-hot encode the integer labels so they match the 2-way
        # softmax output expected by MSELoss.
        lb = OneHotEncoder(categories='auto')
        lb.fit(label.reshape(-1,1))
        label = lb.transform(label.reshape(-1,1)).toarray()
        img = Variable(img)
        label = torch.from_numpy(label).float()
        label = Variable(label)
        optimizers.zero_grad()
        predict = cnn(img)
        loss = losses(predict,label)
        loss.backward()
        optimizers.step()
        # Count correct predictions by comparing argmax of output vs label.
        a = torch.argmax(predict,dim=1)
        b = torch.argmax(label.data,dim=1)
        for k in range(len(a)):
            if(a[k] == b[k]): acc += 1
    # Loss printed is only the last batch's loss of the epoch.
    print(loss)
    # NOTE(review): 4128 looks like a hard-coded dataset size
    # (num_batches * batch_size after drop_last) -- confirm it matches
    # the actual folder contents.
    print("accuracy:%2f"%(acc/4128))
torch.save(cnn.state_dict(),'sex.pth')
| import torch
from torch.autograd import Variable
import torch.optim
import torch.utils.data as data
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import torch.nn.functional as F
import torch.nn
from torchvision import transforms as T
from PIL import Image
from torch.utils.data import Dataset,DataLoader
import os
import time
transform = T.Compose([
T.Resize(128),
T.CenterCrop(128),
T.ToTensor(),
T.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])
])
class dataset(Dataset):
def __init__(self,root,transform=None):
imgs = os.listdir(root)
self.imgs = [os.path.join(root,img) for img in imgs]
self.transform = transform
def __getitem__(self, index):
img_path = self.imgs[index]
label = 0 if 'woman' in img_path.split('/')[-1] else 1
data = Image.open(img_path)
data = self.transform(data)
return data,label
def __len__(self):
return len(self.imgs)
data = dataset('./man',transform=transform)
data = DataLoader(data,batch_size=128,shuffle=True,drop_last=True,num_workers=0)
class CNN(torch.nn.Module):
def __init__(self):
super(CNN,self).__init__()
self.layer1 = torch.nn.Sequential(
torch.nn.Conv2d(3,16,kernel_size=3,stride=1,padding=1),
torch.nn.BatchNorm2d(16),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=2, stride=2) #64
)
self.layer2 = torch.nn.Sequential(
torch.nn.Conv2d(16,32,kernel_size=3,stride=1,padding=1),
torch.nn.BatchNorm2d(32),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=2,stride=2) #32
)
self.layer3 = torch.nn.Sequential(
torch.nn.Conv2d(32,64,kernel_size=3,stride=1,padding=1),
torch.nn.BatchNorm2d(64),
torch.nn.ReLU(inplace=True)
)
self.layer4 = torch.nn.Sequential(
torch.nn.Conv2d(64,128,kernel_size=3,stride=1,padding=1),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=2,stride=2) #16
)
self.fc = torch.nn.Sequential(
torch.nn.Linear(128*16*16,2048),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(2048,2048),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(2048,2)
)
def forward(self,x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.view(x.size(0),-1)
x = F.softmax(self.fc(x),dim=1)
return x
cnn = CNN()
optimizers = torch.optim.SGD(cnn.parameters(),lr=0.001,momentum=0.9)
losses = torch.nn.MSELoss()
cnn.train()
for i in range(25):
acc = 0
for img,label in data:
lb = OneHotEncoder(categories='auto')
lb.fit(label.reshape(-1,1))
label = lb.transform(label.reshape(-1,1)).toarray()
img = Variable(img)
label = torch.from_numpy(label).float()
label = Variable(label)
optimizers.zero_grad()
predict = cnn(img)
loss = losses(predict,label)
loss.backward()
optimizers.step()
a = torch.argmax(predict,dim=1)
b = torch.argmax(label.data,dim=1)
for k in range(len(a)):
if(a[k] == b[k]): acc += 1
print(loss)
print("accuracy:%2f"%(acc/4128))
torch.save(cnn.state_dict(),'sex.pth')
| en | 0.431675 | #64 #32 #16 | 2.663641 | 3 |
coop/guide/migrations/0012_merge_20170126_1203.py | jalibras/coop | 1 | 6616492 | <reponame>jalibras/coop
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-26 12:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Empty merge migration: reconciles the two parallel migration
    # branches 0010 and 0011 of the 'guide' app; no schema changes.
    dependencies = [
        ('guide', '0010_auto_20170120_2344'),
        ('guide', '0011_auto_20170126_1023'),
    ]
    operations = [
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-26 12:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('guide', '0010_auto_20170120_2344'),
('guide', '0011_auto_20170126_1023'),
]
operations = [
] | en | 0.758919 | # -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-01-26 12:03 | 1.376877 | 1 |
lambda_check/lambda_function.py | rafty/CheckDailyBackup | 0 | 6616493 | <reponame>rafty/CheckDailyBackup<gh_stars>0
# -*- coding: utf-8 -*-
import os
import datetime
import logging
import boto3
from aws_ec2 import EC2
logger = logging.getLogger()
logger.setLevel(logging.INFO)
SNS_TOPIC_ARN = os.environ['SNS_TOPIC_ARN']
sns = boto3.client('sns')
def backup_alarm(instance_id):
    """Publish an SNS alert and log an error for a failed SSM backup.

    Uses the module-level ``sns`` client and ``SNS_TOPIC_ARN`` from the
    environment.
    """
    message = 'SSM Backup Error.\nInstanceId: {}'.format(instance_id)
    sns.publish(
        TopicArn=SNS_TOPIC_ARN,
        Message=message,
        Subject='Alert! SSM Daily Backup Error!'
    )
    logger.error('SSM Backup Error. InstanceId: {}'.format(instance_id))
def backup_time_range():
    """Return the (start, end) of the 4-hour backup window as UTC
    timestamps formatted ``%Y-%m-%dT%H.%M.%S``."""
    fmt = "%Y-%m-%dT%H.%M.%S"
    window = datetime.timedelta(hours=4)
    end = datetime.datetime.utcnow()
    start = end - window
    return start.strftime(fmt), end.strftime(fmt)
def backup_time(image_name):
    """Extract the 'date T time' timestamp from an image name whose last
    two underscore-separated fields are the date and the time."""
    fields = image_name.split('_')
    return 'T'.join(fields[-2:])
def is_backup_executed(instance_id, backup_image_list):
    """Return True iff some backup image of ``instance_id`` carries a
    timestamp inside the current window from ``backup_time_range``."""
    before, now = backup_time_range()
    # Keep only image names that embed this instance id.
    images_of_instance = [name for name in backup_image_list if instance_id in name]
    logger.info('Images of instance: {}'.format(images_of_instance))
    result = False
    for image_name in images_of_instance:
        _backup_time = backup_time(image_name)
        logger.info('if {} <= {} <= {}'.format(before, _backup_time, now))
        # Lexicographic comparison is valid: all three strings share the
        # zero-padded %Y-%m-%dT%H.%M.%S format.
        if before <= _backup_time <= now:
            result = True
    return result
def check_backup(instance_id_list, backup_image_list):
    """Raise an SNS alarm for every instance without a recent backup."""
    for instance_id in instance_id_list:
        result = is_backup_executed(instance_id, backup_image_list)
        if result is False:
            backup_alarm(instance_id)
        logger.info('result: {}'.format(result))
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Collects tagged EC2 instances and their backup images, then alerts
    for any instance lacking a backup inside the expected window.
    ``event``/``context`` are unused.
    """
    ec2 = EC2()
    instance_id_list = ec2.describe_instances_with_tag()
    logger.info('instance_id_list: {}, '.format(instance_id_list))
    backup_image_list = ec2.describe_images()
    logger.info('instance_image_list: {}, '.format(backup_image_list))
    check_backup(instance_id_list, backup_image_list)
    return True
| # -*- coding: utf-8 -*-
import os
import datetime
import logging
import boto3
from aws_ec2 import EC2
logger = logging.getLogger()
logger.setLevel(logging.INFO)
SNS_TOPIC_ARN = os.environ['SNS_TOPIC_ARN']
sns = boto3.client('sns')
def backup_alarm(instance_id):
message = 'SSM Backup Error.\nInstanceId: {}'.format(instance_id)
sns.publish(
TopicArn=SNS_TOPIC_ARN,
Message=message,
Subject='Alert! SSM Daily Backup Error!'
)
logger.error('SSM Backup Error. InstanceId: {}'.format(instance_id))
def backup_time_range():
time_range_hour = 4
now = datetime.datetime.utcnow()
now_str = now.strftime("%Y-%m-%dT%H.%M.%S")
before = now - datetime.timedelta(hours=time_range_hour)
before_str = before.strftime("%Y-%m-%dT%H.%M.%S")
return before_str, now_str
def backup_time(image_name):
    """Rebuild a 'DATE T TIME' stamp from an image name ending '..._DATE_TIME'."""
    # The last two '_'-separated fields of the name are the date and time.
    tail = image_name.split('_')[-2:]
    return 'T'.join(tail)
def is_backup_executed(instance_id, backup_image_list):
    """Return True if any backup image of *instance_id* is in the time window.

    Timestamps are compared as strings; the "%Y-%m-%dT%H.%M.%S" format sorts
    lexicographically in chronological order, so string comparison is safe.
    """
    before, now = backup_time_range()
    images_of_instance = [name for name in backup_image_list if instance_id in name]
    logger.info('Images of instance: {}'.format(images_of_instance))
    for image_name in images_of_instance:
        _backup_time = backup_time(image_name)
        logger.info('if {} <= {} <= {}'.format(before, _backup_time, now))
        if before <= _backup_time <= now:
            # One matching image is enough; no need to scan the rest.
            return True
    return False
def check_backup(instance_id_list, backup_image_list):
    """Raise an SNS alarm for every instance lacking a recent backup image."""
    for inst in instance_id_list:
        ok = is_backup_executed(inst, backup_image_list)
        if ok is False:
            backup_alarm(inst)
        logger.info('result: {}'.format(ok))
def lambda_handler(event, context):
    """AWS Lambda entry point: verify every tagged instance was backed up.

    event/context: standard Lambda invocation arguments (unused here).
    Always returns True; failures are reported via SNS inside check_backup.
    """
    # EC2 is a project helper (aws_ec2) -- presumably
    # describe_instances_with_tag() yields instance ids and describe_images()
    # yields backup image names; confirm against aws_ec2.py.
    ec2 = EC2()
    instance_id_list = ec2.describe_instances_with_tag()
    logger.info('instance_id_list: {}, '.format(instance_id_list))
    backup_image_list = ec2.describe_images()
    logger.info('instance_image_list: {}, '.format(backup_image_list))
    check_backup(instance_id_list, backup_image_list)
    return True
lib/layers/functions/prior_box.py | keyongyu/ssds.pytorch | 1 | 6616494 | from __future__ import division
import torch
from math import sqrt as sqrt
from math import ceil as ceil
from itertools import product as product
class PriorBox(object):
    """Compute priorbox coordinates in center-offset form for each source
    feature map.

    Fixes: ``get_anchor_number`` used ``else if``, which is a SyntaxError in
    Python (must be ``elif``); a dead ``if False:`` branch and commented-out
    code were removed without changing behavior.
    """

    def __init__(self, image_size, feature_maps, aspect_ratios, scale, archor_stride=None, archor_offest=None, clip=True):
        super(PriorBox, self).__init__()
        self.image_size = image_size      # [height, width]
        self.feature_maps = feature_maps  # [(height, width), ...]
        self.aspect_ratios = aspect_ratios
        # number of priors for feature map location (either 4 or 6)
        self.num_priors = len(aspect_ratios)
        self.clip = clip
        # Scale value: three accepted input formats.
        if isinstance(scale[0], list):
            # absolute pixel sizes -> relative scales (max over both axes)
            self.scales = [max(s[0] / self.image_size[0], s[1] / self.image_size[1]) for s in scale]
        elif isinstance(scale[0], float) and len(scale) == 2:
            # (min_scale, max_scale) -> linearly interpolated scale per layer
            num_layers = len(feature_maps)
            min_scale, max_scale = scale
            self.scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1) for i in range(num_layers)] + [1.0]
        else:
            # already a list of relative scales, e.g. [0.025, 0.08, 0.16, 0.32, 0.6]
            self.scales = scale
        # Anchor stride: fixed 16 * 2**i pixels per feature layer, normalized
        # to the input image size. (archor_stride/archor_offest are kept for
        # interface compatibility but unused by this implementation.)
        num_feature_layers = len(feature_maps)
        self.steps = [(16 * (2 ** i) / image_size[0], 16 * (2 ** i) / image_size[1])
                      for i in range(num_feature_layers)]
        # Anchors are centered inside each feature-map cell.
        self.offset = [[steps[0] * 0.5, steps[1] * 0.5] for steps in self.steps]

    def get_anchor_number(aspect_ratios):
        """Return, per layer, how many anchors a single cell produces.

        NOTE(review): defined without ``self``; call it on the class
        (``PriorBox.get_anchor_number(ars)``), not on an instance.
        """
        anchor_number_list = []
        for aspect_ratio in aspect_ratios:
            num_anch = 1  # the extra sqrt(s_k * s_{k+1}) square anchor
            for ar in aspect_ratio:
                ar_sqrt = sqrt(ar)
                anchor_w = ar_sqrt
                anchor_h = 1.0 / ar_sqrt
                if 0.333 < ar < 3.0:
                    num_anch += 1
                elif ar <= 0.333:  # fixed: was 'else if' (SyntaxError)
                    # tall anchors are tiled horizontally across the cell
                    num_anch += ceil(1.0 / anchor_w)
                else:  # ar >= 3.0: flat anchors are tiled vertically
                    num_anch += ceil(1.0 / anchor_h)
            anchor_number_list.append(num_anch)
        return anchor_number_list

    def forward(self):
        """Generate all prior boxes as an (N, 4) tensor of (cx, cy, w, h),
        in coordinates relative to the input image (clamped to [0, 1] when
        ``clip`` is set)."""
        mean = []
        aspect = self.image_size[1] / self.image_size[0]  # w/h, rescales y
        for k, f in enumerate(self.feature_maps):
            for i, j in product(range(f[0]), range(f[1])):
                cx = j * self.steps[k][1] + self.offset[k][1]
                cy = i * self.steps[k][0] + self.offset[k][0]
                s_k = self.scales[k]
                s_k_y = s_k * aspect
                for ar in self.aspect_ratios[k]:
                    ar_sqrt = sqrt(ar)
                    anchor_w = s_k * ar_sqrt
                    anchor_h = s_k_y / ar_sqrt
                    if 3.0 > ar > 0.333:
                        mean += [cx, cy, anchor_w, anchor_h]
                    elif ar <= 0.333:
                        # tall ratio: tile copies horizontally across the cell
                        x1 = cx - s_k * 0.5
                        x2 = cx + s_k * 0.5
                        while x1 + anchor_w <= x2:
                            mean += [x1 + anchor_w * 0.5, cy, anchor_w, anchor_h]
                            x1 = x1 + anchor_w
                        if x1 < x2 and x1 + anchor_w > x2:
                            # one extra anchor flush with the right edge
                            mean += [x2 - anchor_w * 0.5, cy, anchor_w, anchor_h]
                    else:  # ar >= 3.0: tile copies vertically
                        y1 = cy - s_k_y * 0.5
                        y2 = cy + s_k_y * 0.5
                        while y1 + anchor_h <= y2:
                            mean += [cx, y1 + anchor_h * 0.5, anchor_w, anchor_h]
                            y1 = y1 + anchor_h
                        if y1 < y2 and y1 + anchor_h > y2:
                            mean += [cx, y2 - anchor_h * 0.5, anchor_w, anchor_h]
                # extra square anchor at the geometric mean of adjacent scales
                s_k_prime = sqrt(s_k * self.scales[k + 1])
                mean += [cx, cy, s_k_prime, s_k_prime * aspect]
        # back to torch land
        output = torch.Tensor(mean).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
| from __future__ import division
import torch
from math import sqrt as sqrt
from math import ceil as ceil
from itertools import product as product
class PriorBox(object):
"""Compute priorbox coordinates in center-offset form for each source
feature map.
"""
def __init__(self, image_size, feature_maps, aspect_ratios, scale, archor_stride=None, archor_offest=None, clip=True):
super(PriorBox, self).__init__()
self.image_size = image_size #[height, width]
self.feature_maps = feature_maps #[(height, width), ...]
self.aspect_ratios = aspect_ratios
# number of priors for feature map location (either 4 or 6)
self.num_priors = len(aspect_ratios)
self.clip = clip
# scale value
if isinstance(scale[0], list):
# get max of the result
self.scales = [max(s[0] / self.image_size[0], s[1] / self.image_size[1]) for s in scale]
elif isinstance(scale[0], float) and len(scale) == 2:
num_layers = len(feature_maps)
min_scale, max_scale = scale
self.scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1) for i in range(num_layers)] + [1.0]
else: #[0.025,0.08, 0.16, 0.32, 0.6]
self.scales = scale
#if archor_stride:
# self.steps = [(steps[0] / self.image_size[0], steps[1] / self.image_size[1]) for steps in archor_stride]
#else:
if False:
print("<<<<<<<<<<auto steps>>>>>>>>>>>>>>>>>>")
self.steps = [(1/f_h, 1/f_w) for f_h, f_w in feature_maps]
print(self.steps)
print("<<<<<<<<<<auto steps>>>>>>>>>>>>>>>>>>")
if archor_offest:
self.offset = [[offset[0] / self.image_size[0], offset[1] * self.image_size[1]] for offset in archor_offest]
else:
self.offset = [[steps[0] * 0.5, steps[1] * 0.5] for steps in self.steps]
else:
#self.steps = [(1/f_h, 1/f_w) for f_h, f_w in feature_maps[0:1] ] + \
# [(2/f_h, 2/f_w) for f_h, f_w in feature_maps[0:-1] ]
num_feature_layers= len(feature_maps)
self.steps = [(16*(2**i)/image_size[0], 16*(2**i)/image_size[1]) for i in range(num_feature_layers) ]
self.offset = [[steps[0] * 0.5, steps[1] * 0.5] for steps in self.steps]
#for f_h0, f_w0 in self.feature_maps[0:-1], self.feature_maps[1:])):
# self.steps.append( (2.0/f_h0, 2.0/f_w0))
#self.steps = [(1/f_h, 1/f_w) for f_h, f_w in feature_maps]
def get_anchor_number(aspect_ratios ):
anchor_number_list=[]
for k, aspect_ratio in enumerate(aspect_ratios):
num_anch =1
for ar in aspect_ratio:
ar_sqrt = sqrt(ar)
anchor_w = ar_sqrt
anchor_h = 1.0/ar_sqrt
if 0.333 < ar < 3.0: # 0.6:1.8
num_anch+=1
else if ar <=0.333:
num_anch+= ceil(1.0/anchor_w)
else: #ar >=3.0
num_anch+= ceil(1.0/anchor_h)
anchor_number_list.append(num_anch)
return anchor_number_list
def forward(self):
mean = []
aspect=self.image_size[1]/self.image_size[0] # w/h
#aspect=1.0
# l = 0
for k, f in enumerate(self.feature_maps):
for i, j in product(range(f[0]), range(f[1])):
cx = j * self.steps[k][1] + self.offset[k][1]
cy = i * self.steps[k][0] + self.offset[k][0]
s_k = self.scales[k]
s_k_y = s_k*aspect
# rest of aspect ratios
for ar in self.aspect_ratios[k]:
ar_sqrt = sqrt(ar)
anchor_w = s_k*ar_sqrt
#anchor_h = s_k * aspect/ar_sqrt
anchor_h = s_k_y / ar_sqrt
if 3.0 > ar > 0.333:
mean += [cx, cy, anchor_w, anchor_h]
else:
if ar <= 0.333:
x1=cx-s_k*0.5
x2=cx+s_k*0.5
while x1 + anchor_w <=x2:
mean += [x1+anchor_w*0.5, cy, anchor_w, anchor_h]
x1 = x1+anchor_w
if x1 < x2 and x1+ anchor_w > x2:
mean += [x2-anchor_w*0.5, cy, anchor_w, anchor_h]
else: #ar >= 3.0
y1=cy - s_k_y*0.5
y2=cy + s_k_y*0.5
while y1 + anchor_h <= y2:
mean += [cx, y1 + anchor_h*0.5, anchor_w, anchor_h]
y1 = y1 + anchor_h
if y1 < y2 and y1 + anchor_h > y2:
mean += [cx, y2 - anchor_h*0.5, anchor_w, anchor_h]
# s_k_prime = sqrt(s_k * self.scales[k + 1])
# for ar in self.aspect_ratios[k]:
# ar_sqrt = sqrt(ar)
# archor_w = s_k_prime*ar_sqrt
# if ar > 0.333:
# mean += [cx, cy, archor_w, s_k_prime*aspect/ar_sqrt]
# else:
# x1=cx-s_k_prime*0.5
# x2=cx+s_k_prime*0.5
# while x1 + archor_w <=x2:
# mean += [x1+archor_w*0.5, cy, archor_w, s_k_prime*aspect/ar_sqrt]
# x1 = x1+archor_w
#
# if x1 < x2 and x1+ archor_w >x2:
# mean += [x2-archor_w*0.5, cy, archor_w, s_k_prime*aspect/ar_sqrt]
s_k_prime = sqrt(s_k * self.scales[k + 1])
mean += [cx, cy, s_k_prime, s_k_prime*aspect]
# if isinstance(ar, int):
# if ar == 1:
# # aspect_ratio: 1 Min size
# mean += [cx, cy, s_k, s_k]
#
# # aspect_ratio: 1 Max size
# # rel size: sqrt(s_k * s_(k+1))
# s_k_prime = sqrt(s_k * self.scales[k+1])
# mean += [cx, cy, s_k_prime, s_k_prime]
# else:
# ar_sqrt = sqrt(ar)
# mean += [cx, cy, s_k*ar_sqrt, s_k/ar_sqrt]
# mean += [cx, cy, s_k/ar_sqrt, s_k*ar_sqrt]
# elif isinstance(ar, list):
# mean += [cx, cy, s_k*ar[0], s_k*ar[1]]
# print(f, self.aspect_ratios[k])
# assert False
# back to torch land
output = torch.Tensor(mean).view(-1, 4)
if self.clip:
output.clamp_(max=1, min=0)
return output
| en | 0.542852 | Compute priorbox coordinates in center-offset form for each source feature map. #[height, width] #[(height, width), ...] # number of priors for feature map location (either 4 or 6) # scale value # get max of the result #[0.025,0.08, 0.16, 0.32, 0.6] #if archor_stride: # self.steps = [(steps[0] / self.image_size[0], steps[1] / self.image_size[1]) for steps in archor_stride] #else: #self.steps = [(1/f_h, 1/f_w) for f_h, f_w in feature_maps[0:1] ] + \ # [(2/f_h, 2/f_w) for f_h, f_w in feature_maps[0:-1] ] #for f_h0, f_w0 in self.feature_maps[0:-1], self.feature_maps[1:])): # self.steps.append( (2.0/f_h0, 2.0/f_w0)) #self.steps = [(1/f_h, 1/f_w) for f_h, f_w in feature_maps] # 0.6:1.8 #ar >=3.0 # w/h #aspect=1.0 # l = 0 # rest of aspect ratios #anchor_h = s_k * aspect/ar_sqrt #ar >= 3.0 # s_k_prime = sqrt(s_k * self.scales[k + 1]) # for ar in self.aspect_ratios[k]: # ar_sqrt = sqrt(ar) # archor_w = s_k_prime*ar_sqrt # if ar > 0.333: # mean += [cx, cy, archor_w, s_k_prime*aspect/ar_sqrt] # else: # x1=cx-s_k_prime*0.5 # x2=cx+s_k_prime*0.5 # while x1 + archor_w <=x2: # mean += [x1+archor_w*0.5, cy, archor_w, s_k_prime*aspect/ar_sqrt] # x1 = x1+archor_w # # if x1 < x2 and x1+ archor_w >x2: # mean += [x2-archor_w*0.5, cy, archor_w, s_k_prime*aspect/ar_sqrt] # if isinstance(ar, int): # if ar == 1: # # aspect_ratio: 1 Min size # mean += [cx, cy, s_k, s_k] # # # aspect_ratio: 1 Max size # # rel size: sqrt(s_k * s_(k+1)) # s_k_prime = sqrt(s_k * self.scales[k+1]) # mean += [cx, cy, s_k_prime, s_k_prime] # else: # ar_sqrt = sqrt(ar) # mean += [cx, cy, s_k*ar_sqrt, s_k/ar_sqrt] # mean += [cx, cy, s_k/ar_sqrt, s_k*ar_sqrt] # elif isinstance(ar, list): # mean += [cx, cy, s_k*ar[0], s_k*ar[1]] # print(f, self.aspect_ratios[k]) # assert False # back to torch land | 2.233468 | 2 |
src/sentry/api/serializers/models/event.py | arsh-co/sentry | 0 | 6616495 | <filename>src/sentry/api/serializers/models/event.py
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import Event
@register(Event)
class EventSerializer(Serializer):
    """Serialize an Event model into its REST payload shape."""

    def _get_entries(self, event, user):
        # XXX(dcramer): These are called entries for future-proofing
        pairs = []
        for iface in event.interfaces.itervalues():
            pairs.append((iface, {
                'data': iface.to_json(),
                'type': iface.get_alias(),
            }))
        # Highest display score first; stable for equal scores.
        ranked = sorted(pairs, key=lambda pair: pair[0].get_display_score(),
                        reverse=True)
        return [entry for _iface, entry in ranked]

    def get_attrs(self, item_list, user):
        # Resolve node-store blobs before serializing each event.
        Event.objects.bind_nodes(item_list, 'data')
        return {
            event: {'entries': self._get_entries(event, user)}
            for event in item_list
        }

    def serialize(self, obj, attrs, user):
        return {
            'id': str(obj.id),
            'eventID': str(obj.event_id),
            'entries': attrs['entries'],
            'message': obj.message,
            'platform': obj.platform,
            'dateCreated': obj.datetime,
            'timeSpent': obj.time_spent,
        }
| <filename>src/sentry/api/serializers/models/event.py
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import Event
@register(Event)
class EventSerializer(Serializer):
def _get_entries(self, event, user):
# XXX(dcramer): These are called entries for future-proofing
interface_list = []
for interface in event.interfaces.itervalues():
entry = {
'data': interface.to_json(),
'type': interface.get_alias(),
}
interface_list.append((interface, entry))
interface_list.sort(key=lambda x: x[0].get_display_score(), reverse=True)
return [i[1] for i in interface_list]
def get_attrs(self, item_list, user):
Event.objects.bind_nodes(item_list, 'data')
results = {}
for item in item_list:
results[item] = {
'entries': self._get_entries(item, user)
}
return results
def serialize(self, obj, attrs, user):
d = {
'id': str(obj.id),
'eventID': str(obj.event_id),
'entries': attrs['entries'],
'message': obj.message,
'platform': obj.platform,
'dateCreated': obj.datetime,
'timeSpent': obj.time_spent,
}
return d
| en | 0.919592 | # XXX(dcramer): These are called entries for future-proofing | 2.230091 | 2 |
envoy.distribution.repo/envoy/distribution/repo/__init__.py | Nordix/pytooling | 1 | 6616496 |
from .abstract import ARepoBuildingRunner, ARepoManager, ReleaseConfigDict
from .exceptions import RepoError
from .runner import RepoBuildingRunner
from .cmd import cmd, main
from .deb import (
AAptly, AptlyError,
DebRepoError, DebRepoManager)
__all__ = (
"AAptly",
"AptlyError",
"ARepoBuildingRunner",
"ARepoManager",
"cmd",
"DebRepoError",
"DebRepoManager",
"main",
"ReleaseConfigDict",
"RepoBuildingRunner",
"RepoError")
|
from .abstract import ARepoBuildingRunner, ARepoManager, ReleaseConfigDict
from .exceptions import RepoError
from .runner import RepoBuildingRunner
from .cmd import cmd, main
from .deb import (
AAptly, AptlyError,
DebRepoError, DebRepoManager)
__all__ = (
"AAptly",
"AptlyError",
"ARepoBuildingRunner",
"ARepoManager",
"cmd",
"DebRepoError",
"DebRepoManager",
"main",
"ReleaseConfigDict",
"RepoBuildingRunner",
"RepoError")
| none | 1 | 1.414248 | 1 | |
Ch4_Trees_and_Graphs/09_bst_sequences.py | fatima-rizvi/CtCI-Solutions-6th-Edition | 0 | 6616497 | # A binary search tree was created by traversin through an array from left to right and inserting each element.
# Given a binary search tree with distinct elements, print all possible arrays that could have led to this tree.
from utils import Node
def bst_sequences(bst):
    """Return every insertion order that could have produced the BST *bst*."""
    return bst_sequences_partial([bst])
def bst_sequences_partial(subtrees, partial=None):
    """Recursively enumerate BST insertion orders.

    subtrees: roots currently available for insertion (their ancestors are
              already in *partial*).
    partial:  the insertion prefix built so far (defaults to empty).

    Returns a list of complete insertion sequences (lists of node values).
    """
    # Fix: the original used a mutable default argument (partial=[]);
    # use None and create a fresh list per top-level call instead.
    if partial is None:
        partial = []
    if not len(subtrees):
        return [partial]
    sequences = []
    # Any currently-available subtree root may be inserted next; its children
    # then become available for subsequent positions.
    for index, subtree in enumerate(subtrees):
        new_partial = partial + [subtree.value]
        new_subtrees = subtrees[:index] + subtrees[index + 1:]
        if subtree.left:
            new_subtrees.append(subtree.left)
        if subtree.right:
            new_subtrees.append(subtree.right)
        sequences += bst_sequences_partial(new_subtrees, new_partial)
    return sequences
# # Binary Search Tree
# node_f = Node(14)
# node_e = Node(6)
# node_d = Node(1)
# node_c = Node(10, None, node_f)
# node_b = Node(3, node_d, node_e)
# node_a = Node(8, node_b, node_c)
# Smaller Binary Search Tree
node_t = Node(10)
node_s = Node(3)
node_r = Node(8, node_s, node_t)
# print(bst_sequences(node_a))
print(bst_sequences(node_r))
# Expected result: [[8, 3, 10], [8, 10, 3]]
| # A binary search tree was created by traversin through an array from left to right and inserting each element.
# Given a binary search tree with distinct elements, print all possible arrays that could have led to this tree.
from utils import Node
def bst_sequences(bst):
return bst_sequences_partial([bst])
def bst_sequences_partial(subtrees, partial = []):
if not len(subtrees):
return [partial]
sequences = []
# Use enumerate() to get the count of the current iteration and the value of the item at the current iteration
for index, subtree in enumerate(subtrees):
new_partial = partial + [subtree.value]
new_subtrees = subtrees[:index] + subtrees[index+1:]
if subtree.left:
new_subtrees.append(subtree.left)
if subtree.right:
new_subtrees.append(subtree.right)
sequences += bst_sequences_partial(new_subtrees, new_partial)
return sequences
# # Binary Search Tree
# node_f = Node(14)
# node_e = Node(6)
# node_d = Node(1)
# node_c = Node(10, None, node_f)
# node_b = Node(3, node_d, node_e)
# node_a = Node(8, node_b, node_c)
# Smaller Binary Search Tree
node_t = Node(10)
node_s = Node(3)
node_r = Node(8, node_s, node_t)
# print(bst_sequences(node_a))
print(bst_sequences(node_r))
# Expected result: [[8, 3, 10], [8, 10, 3]]
| en | 0.76284 | # A binary search tree was created by traversin through an array from left to right and inserting each element. # Given a binary search tree with distinct elements, print all possible arrays that could have led to this tree. # Use enumerate() to get the count of the current iteration and the value of the item at the current iteration # # Binary Search Tree # node_f = Node(14) # node_e = Node(6) # node_d = Node(1) # node_c = Node(10, None, node_f) # node_b = Node(3, node_d, node_e) # node_a = Node(8, node_b, node_c) # Smaller Binary Search Tree # print(bst_sequences(node_a)) # Expected result: [[8, 3, 10], [8, 10, 3]] | 3.99784 | 4 |
examples/forecasts_async.py | krabchuk/yaweather | 0 | 6616498 | <gh_stars>0
import asyncio
from yaweather import China, YaWeatherAsync
async def main():
    """Fetch the Beijing forecast and print one line per forecast day."""
    # YaWeatherAsync is an async context manager; 'secret' is a placeholder
    # API key -- replace with a real Yandex.Weather key to run this example.
    async with YaWeatherAsync(api_key='secret') as y:
        res = await y.forecast(China.Beijing)
        for f in res.forecasts:
            day = f.parts.day_short
            print(f'{f.date} | {day.temp} °C, {day.condition}')
asyncio.run(main())
| import asyncio
from yaweather import China, YaWeatherAsync
async def main():
async with YaWeatherAsync(api_key='secret') as y:
res = await y.forecast(China.Beijing)
for f in res.forecasts:
day = f.parts.day_short
print(f'{f.date} | {day.temp} °C, {day.condition}')
asyncio.run(main()) | none | 1 | 3.144871 | 3 | |
vm/nga-python/Memory.py | crcx/retroforth | 65 | 6616499 | <filename>vm/nga-python/Memory.py
import os
import struct
class Memory(list):
    """Flat cell memory for the Nga VM, backed by a plain Python list.

    Memory is zero-filled to *size* cells, then populated either from the
    packed image file *source* (when *initial* is empty) or from *initial*.
    Fix: __init__ previously duplicated load_image's file-reading loop;
    it now delegates, and the file is closed via a context manager.
    """

    def __init__(self, source, initial, size):
        """Create *size* zeroed cells and load the initial image.

        *initial* entries may be plain ints, or [count, value] pairs that
        expand to *count* repeated cells (simple run-length encoding).
        """
        super(Memory, self).__init__([0] * size)
        if len(initial) == 0:
            # No inline image given: read the packed cells from disk.
            self.load_image(source)
        else:
            i = 0
            for cell in initial:
                if isinstance(cell, list):
                    # run-length pair: cell[0] copies of cell[1]
                    for _ in range(0, cell[0]):
                        self[i] = cell[1]
                        i = i + 1
                else:
                    self[i] = cell
                    i = i + 1

    def load_image(self, name):
        """Overwrite the start of memory with 32-bit cells from file *name*."""
        cells = int(os.path.getsize(name) / 4)
        with open(name, "rb") as f:
            data = f.read()
        # "i" = native-byte-order 4-byte signed ints, one per cell.
        for i, cell in enumerate(struct.unpack(cells * "i", data)):
            self[i] = cell

    def size(self):
        """Return the total number of cells."""
        return len(self)
| <filename>vm/nga-python/Memory.py
import os
import struct
class Memory(list):
def __init__(self, source, initial, size):
m = [0] * size
self.extend(m)
if len(initial) == 0:
cells = int(os.path.getsize(source) / 4)
f = open(source, "rb")
i = 0
for cell in list(struct.unpack(cells * "i", f.read())):
self[i] = cell
i = i + 1
f.close()
else:
i = 0
for cell in initial:
if type(cell) == list:
for v in range(0, cell[0]):
self[i] = cell[1]
i = i + 1
else:
self[i] = cell
i = i + 1
def load_image(self, name):
cells = int(os.path.getsize(name) / 4)
f = open(name, "rb")
i = 0
for cell in list(struct.unpack(cells * "i", f.read())):
self[i] = cell
i = i + 1
f.close()
def size(self):
return len(self)
| none | 1 | 2.82479 | 3 | |
singUpSystem/logout.py | xodbox/prediccionf1 | 0 | 6616500 | <filename>singUpSystem/logout.py<gh_stars>0
import webapp2
class MainPage(webapp2.RequestHandler):
    """Handler for /logout: clears the session cookie and redirects home."""

    def get(self):
        """Log the user out of the account."""
        # Blank out the userId cookie so the session is no longer recognized.
        # NOTE(review): no Expires/Max-Age is set, so the cookie is emptied
        # rather than deleted -- confirm downstream checks treat an empty
        # userId as logged out.
        self.response.headers.add_header('Set-Cookie', 'userId="", Path=/')
        redirectTo = '/'
        self.redirect(redirectTo)
app = webapp2.WSGIApplication([ ('/logout', MainPage)],
debug=True)
| <filename>singUpSystem/logout.py<gh_stars>0
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self):
""" Log out de la cuenta. """
self.response.headers.add_header('Set-Cookie', 'userId="", Path=/')
redirectTo = '/'
self.redirect(redirectTo)
app = webapp2.WSGIApplication([ ('/logout', MainPage)],
debug=True)
| es | 0.845423 | Log out de la cuenta. | 2.436819 | 2 |
tests/unittests/test_OIMNSFScience.py | jthiltges/gracc-request | 0 | 6616501 | <gh_stars>0
import unittest
import os
from graccreq.oim import nsfscience
class TestOIMNSFScience(unittest.TestCase):
    """Tests for the OIM field-of-science -> NSF field-of-science mapping."""

    def test_minimal(self):
        """Known fields map correctly; unknown or absent fields map to {}.

        Fix: the method previously ended with ``return True`` -- unittest
        ignores return values and pytest rejects non-None returns from tests.
        """
        # Locate the test mapping CSV relative to this test file.
        curdir = os.path.dirname(os.path.abspath(__file__))
        test_csv = os.path.join(curdir, "../", "mapping-table-test.csv")
        science = nsfscience.NSFScience(url="file://" + test_csv)
        stuff = science.parseDoc({'OIM_FieldOfScience': 'Medical Imaging'})
        self.assertEqual(stuff['OIM_NSFFieldOfScience'], 'Health')
        stuff = science.parseDoc({'OIM_FieldOfScience': 'Evolutionary Sciences'})
        self.assertEqual(stuff['OIM_NSFFieldOfScience'], 'Biological Sciences')
        # Unknown field of science yields an empty mapping.
        stuff = science.parseDoc({'OIM_FieldOfScience': 'Does Not exist'})
        self.assertEqual(len(stuff), 0)
        # A record without the field at all also yields an empty mapping.
        stuff = science.parseDoc({})
        self.assertEqual(len(stuff), 0)
if __name__ == '__main__':
unittest.main()
| import unittest
import os
from graccreq.oim import nsfscience
class TestOIMNSFScience(unittest.TestCase):
def test_minimal(self):
# Get the current file path of this file
curdir = os.path.dirname(os.path.abspath(__file__))
test_csv = os.path.join(curdir, "../", "mapping-table-test.csv")
science = nsfscience.NSFScience(url="file://" + test_csv)
stuff = science.parseDoc({'OIM_FieldOfScience': 'Medical Imaging'})
self.assertEqual(stuff['OIM_NSFFieldOfScience'], 'Health')
stuff = science.parseDoc({'OIM_FieldOfScience': 'Evolutionary Sciences'})
self.assertEqual(stuff['OIM_NSFFieldOfScience'], 'Biological Sciences')
# Test when the field of science does not exist
stuff = science.parseDoc({'OIM_FieldOfScience': 'Does Not exist'})
self.assertEqual(len(stuff), 0)
# When the record doesn't have a message
stuff = science.parseDoc({})
self.assertEqual(len(stuff), 0)
return True
if __name__ == '__main__':
unittest.main() | en | 0.925688 | # Get the current file path of this file # Test when the field of science does not exist # When the record doesn't have a message | 3.021078 | 3 |
cornflow-dags/DAG/graph_coloring/__init__.py | ggsdc/corn | 2 | 6616502 | from cornflow_client import (
get_empty_schema,
ApplicationCore,
)
from typing import List, Dict
import pytups as pt
import os
from .solvers import OrToolsCP
from .core import Instance, Solution
class GraphColoring(ApplicationCore):
    """Cornflow application wrapper for the graph-coloring problem."""
    # Identifier under which the application is registered.
    name = "graph_coloring"
    instance = Instance
    solution = Solution
    # Available solvers; "default" is the OR-Tools CP implementation.
    solvers = dict(default=OrToolsCP)
    # Config schema: a numeric time limit plus the solver choice.
    schema = get_empty_schema(
        properties=dict(timeLimit=dict(type="number")), solvers=list(solvers.keys())
    )
    @property
    def test_cases(self) -> List[Dict]:
        """Load the bundled 'gc_*' data files as instance dictionaries."""
        def read_file(filePath):
            # First line of each file is a header; every remaining line is an
            # edge given as two space-separated node ids.
            with open(filePath, "r") as f:
                contents = f.read().splitlines()
            pairs = (
                pt.TupList(contents[1:])
                .vapply(lambda v: v.split(" "))
                .vapply(lambda v: dict(n1=int(v[0]), n2=int(v[1])))
            )
            return dict(pairs=pairs)
        file_dir = os.path.join(os.path.dirname(__file__), "data")
        files = os.listdir(file_dir)
        test_files = pt.TupList(files).vfilter(lambda v: v.startswith("gc_"))
        return [read_file(os.path.join(file_dir, fileName)) for fileName in test_files]
| from cornflow_client import (
get_empty_schema,
ApplicationCore,
)
from typing import List, Dict
import pytups as pt
import os
from .solvers import OrToolsCP
from .core import Instance, Solution
class GraphColoring(ApplicationCore):
name = "graph_coloring"
instance = Instance
solution = Solution
solvers = dict(default=OrToolsCP)
schema = get_empty_schema(
properties=dict(timeLimit=dict(type="number")), solvers=list(solvers.keys())
)
@property
def test_cases(self) -> List[Dict]:
def read_file(filePath):
with open(filePath, "r") as f:
contents = f.read().splitlines()
pairs = (
pt.TupList(contents[1:])
.vapply(lambda v: v.split(" "))
.vapply(lambda v: dict(n1=int(v[0]), n2=int(v[1])))
)
return dict(pairs=pairs)
file_dir = os.path.join(os.path.dirname(__file__), "data")
files = os.listdir(file_dir)
test_files = pt.TupList(files).vfilter(lambda v: v.startswith("gc_"))
return [read_file(os.path.join(file_dir, fileName)) for fileName in test_files]
| none | 1 | 2.266027 | 2 | |
processing_scripts/repack_arxiv.py | gunjan-bhattarai/the-pile | 380 | 6616503 | <reponame>gunjan-bhattarai/the-pile
import lm_dataformat as lmd
import os
import hashlib
import re
from tqdm import tqdm
def sha256str(s):
    """Return the hex SHA-256 digest of the byte string *s*."""
    return hashlib.sha256(s).hexdigest()
def stableorder(x):
    """Order the strings in *x* by the SHA-256 of their UTF-8 encoding.

    Yields a deterministic ordering independent of filesystem or dict
    iteration order.
    """
    return sorted(x, key=lambda elem: sha256str(elem.encode('utf-8')))
def ls(x):
    """List directory *x* as 'x/name' paths in stable (hash) order."""
    entries = stableorder(os.listdir(x))
    return ['{}/{}'.format(x, fn) for fn in entries]
def fread(fname):
    """Read and return the entire text contents of *fname*."""
    with open(fname) as handle:
        contents = handle.read()
    return contents
def strip_markdown_colons(x):
    """Drop pandoc/markdown ':::' fenced-div lines from *x*."""
    fence_line = re.compile(r'^:::.*?\n', flags=re.MULTILINE)
    return fence_line.sub('', x)
def compose(*fs):
    """Compose functions right-to-left: compose(f, g)(x) == f(g(x))."""
    def composed(value):
        for fn in reversed(fs):
            value = fn(value)
        return value
    return composed
ar = lmd.Archive('arxiv_lmd')
for doc in map(compose(strip_markdown_colons, fread), tqdm(ls('documents'))):
ar.add_data(doc)
ar.commit() | import lm_dataformat as lmd
import os
import hashlib
import re
from tqdm import tqdm
def sha256str(s):
h = hashlib.sha256()
h.update(s)
return h.hexdigest()
def stableorder(x):
arr = [(elem, sha256str(elem.encode('utf-8'))) for elem in x]
arr.sort(key=lambda x: x[1])
return [elem for elem,_ in arr]
def ls(x):
return [x + '/' + fn for fn in stableorder(os.listdir(x))]
def fread(fname):
with open(fname) as fh:
return fh.read()
def strip_markdown_colons(x):
return re.sub(r'^:::.*?\n', '', x, flags=re.MULTILINE)
def compose(*fs):
def _f(x):
for f in reversed(fs):
x = f(x)
return x
return _f
ar = lmd.Archive('arxiv_lmd')
for doc in map(compose(strip_markdown_colons, fread), tqdm(ls('documents'))):
ar.add_data(doc)
ar.commit() | none | 1 | 2.314505 | 2 | |
moff/node/template/collection_node.py | Tikubonn/moff | 0 | 6616504 |
from .node import Node
from moff.node.whitespace_node import WhitespaceNode
import copy
class CollectionNode (Node):
    """A node holding an ordered sequence of child nodes."""

    def __init__(self, nodes=list()):
        # Copy defensively so callers cannot mutate our internal list.
        self.__nodes = list(nodes)

    def add_node(self, node):
        """Append *node* to the end of the collection."""
        self.__nodes.append(node)

    def get_nodes(self):
        """Return a shallow copy of the child list."""
        return copy.copy(self.__nodes)

    # override
    def write(self, stream):
        """Write every child node to *stream* in order."""
        for child in self.get_nodes():
            child.write(stream)

    def trim(self):
        """Strip leading and trailing WhitespaceNode children; return self."""
        children = self.__nodes
        begin = 0
        while begin < len(children) and isinstance(children[begin], WhitespaceNode):
            begin += 1
        finish = len(children)
        while finish > begin and isinstance(children[finish - 1], WhitespaceNode):
            finish -= 1
        self.__nodes = children[begin:finish]
        return self
|
from .node import Node
from moff.node.whitespace_node import WhitespaceNode
import copy
class CollectionNode (Node):
def __init__(self, nodes=list()):
self.__nodes = list(nodes)
def add_node(self, node):
self.__nodes.append(node)
def get_nodes(self):
return copy.copy(self.__nodes)
# override
def write(self, stream):
for node in self.get_nodes():
node.write(stream)
def trim(self):
start = None
for index, node in enumerate(self.__nodes):
if isinstance(node, WhitespaceNode):
start = index + 1
else:
break
end = None
for index, node in reversed(list(enumerate(self.__nodes))):
if isinstance(node, WhitespaceNode):
end = index
else:
break
self.__nodes = self.__nodes[start: end]
return self
| en | 0.650388 | # override | 2.848866 | 3 |
duqo/stoch/get_margs.py | canbooo/pyRDO | 11 | 6616505 | # -*- coding: utf-8 -*-
"""
Created on Tue May 12 21:30:39 2016
WARNING: VERY OLD CODE BUT SEEMS TO WORK SO FAR
@author: Bogoclu
"""
from scipy import stats
import numpy as np
def sp_margs(dist):
    """
    Build a frozen scipy.stats distribution matching *dist*.

    Parameters
    ----------
    dist:
        a Distribution object as defined in pyRDO.uncertainty.model, with a
        ``name`` plus, depending on the family, ``mean``/``std``,
        ``lower_bound``/``upper_bound`` and ``params`` attributes.

    Returns
    -------
    scipy.stats frozen distribution

    Raises
    ------
    ValueError
        if the Bernoulli parameters are invalid.
    NotImplementedError
        if the distribution family is not supported.
    """
    name = dist.name.lower()  # hoisted: was recomputed in every branch
    if name in ('exponential', 'expon'):
        # loc = mean - std so that the resulting mean and std match the inputs
        return stats.expon(dist.mean - dist.std, dist.std)
    if name == 'gumbel':
        scale = dist.std * np.sqrt(6) / np.pi
        loc = dist.mean - scale * np.euler_gamma
        return stats.gumbel_r(loc, scale)
    if name in ('lognormal', 'lognorm'):
        # moment matching: arithmetic mean/std -> log-space parameters
        sigma = np.sqrt(np.log((dist.std / dist.mean) ** 2 + 1))
        logmean = np.log(dist.mean / np.sqrt((dist.std / dist.mean) ** 2 + 1))
        return stats.lognorm(sigma, 0, np.exp(logmean))
    if name in ('normal', 'norm'):
        return stats.norm(dist.mean, dist.std)
    if name == 'uniform':
        return stats.uniform(dist.lower_bound, dist.upper_bound - dist.lower_bound)
    if name == 'triangular':
        if not dist.params:
            # no mode given: assume a symmetric triangle (c = 0.5)
            consta = 0.5
            scale = np.sqrt(18 * (dist.std ** 2) / (consta ** 2 - consta + 1))
            loc = dist.mean - (consta + 1) * scale / 3
        else:
            # mode given in params[0]: solve loc/scale from mean, std and mode
            mean_tmp = dist.mean
            mid_point = dist.params[0]
            aux_var = (-18 * (dist.std ** 2) + mid_point * (2 * mid_point - 3 * mean_tmp))
            aux_var = (9 * (mean_tmp ** 2) - 6 * mid_point * mean_tmp +
                       (mid_point ** 2) + aux_var) / 3
            aux_var = np.sqrt((9 * mean_tmp ** 2 - 6 * mid_point * mean_tmp
                               + mid_point ** 2) / 4 - aux_var)
            loc = (3 * mean_tmp - mid_point) / 2 + aux_var
            scale = 3 * mean_tmp - 2 * loc - mid_point
            if scale < 0:
                # wrong quadratic root picked: take the other solution
                loc = (3 * mean_tmp - mid_point) / 2 - aux_var
                scale = 3 * mean_tmp - 2 * loc - mid_point
            consta = (mid_point - loc) / scale
        return stats.triang(consta, loc, scale)
    if name in ('truncnormal', 'truncnorm'):
        # scipy expects the truncation bounds in standard-normal coordinates
        a = (dist.lower_bound - dist.mean) / dist.std
        b = (dist.upper_bound - dist.mean) / dist.std
        return stats.truncnorm(a, b, dist.mean, dist.std)
    if name == 'bernoulli':
        if not dist.params:
            return stats.bernoulli(0.5)
        cond = np.isfinite(dist.params[0]) and dist.params[0] > 0
        if cond and dist.params[0] < 1:
            return stats.bernoulli(dist.params[0])
        raise ValueError("Distribution parameters are invalid for Bernoulli.")
    # FRECHET AND WEIBULL missing among others
    msg = '%s distribution is not supported yet.' % dist.name
    raise NotImplementedError(msg)
| # -*- coding: utf-8 -*-
"""
Created on Tue May 12 21:30:39 2016
WARNING: VERY OLD CODE BUT SEEMS TO WORK SO FAR
@author: Bogoclu
"""
from scipy import stats
import numpy as np
def sp_margs(dist):
    """Build a frozen scipy.stats distribution from a Distribution object.

    Maps the object's name/mean/std (plus optional params and bounds) onto
    the matching scipy parameterisation.

    Inputs
    ------
    distribution:
        a Distribution object as defined in pyRDO.uncertainy.model
    Returns
    -------
    oMargs:list
        List of scipy frozen_distribution
    """
    if dist.name.lower() == 'exponential' or dist.name.lower() == 'expon':
        # Exponential: scale = std; shifted so the mean matches (loc = mean - std).
        return stats.expon(dist.mean - dist.std, dist.std)
    if dist.name.lower() == 'gumbel':
        # Right-skewed Gumbel via moment matching: std = scale*pi/sqrt(6),
        # mean = loc + scale * Euler-Mascheroni constant.
        scale = dist.std * np.sqrt(6) / np.pi
        loc = dist.mean - scale * np.euler_gamma
        return stats.gumbel_r(loc, scale)
    if dist.name.lower() == 'lognormal' or dist.name.lower() == 'lognorm':
        # Convert (mean, std) of the lognormal itself into the parameters of
        # the underlying normal (standard lognormal moment matching).
        sigma = np.sqrt(np.log((dist.std / dist.mean) ** 2 + 1))
        logmean = np.log(dist.mean / np.sqrt((dist.std / dist.mean) ** 2 + 1))
        return stats.lognorm(sigma, 0, np.exp(logmean))
    if dist.name.lower() == 'normal' or dist.name.lower() == 'norm':
        return stats.norm(dist.mean, dist.std)
    if dist.name.lower() == 'uniform':
        # scipy.stats.uniform takes (loc, scale) = (lower bound, width).
        args = (dist.lower_bound, dist.upper_bound - dist.lower_bound)
        return stats.uniform(*args)
    if dist.name.lower() == 'triangular':
        if not dist.params:
            # No mode supplied: assume a symmetric triangle (c = 0.5) and back
            # out scale/loc from the requested std and mean.
            consta = 0.5
            scale = np.sqrt(18 * (dist.std ** 2) / (consta ** 2 - consta + 1))
            loc = dist.mean - (consta + 1) * scale / 3
        else:
            # Mode given in params[0]: solve the triangular moment equations
            # for loc and scale.  NOTE(review): these inversion formulas are
            # intricate -- verify against a reference before modifying.
            mean_tmp = dist.mean
            mid_point = dist.params[0]
            aux_var = (-18 * (dist.std ** 2) + mid_point * (2 * mid_point - 3 * mean_tmp))
            aux_var = (9 * (mean_tmp ** 2) - 6 * mid_point * mean_tmp +
                       (mid_point ** 2) + aux_var) / 3
            aux_var = np.sqrt((9 * mean_tmp ** 2 - 6 * mid_point * mean_tmp
                               + mid_point ** 2) / 4 - aux_var)
            loc = (3 * mean_tmp - mid_point) / 2 + aux_var
            scale = 3 * mean_tmp - 2 * loc - mid_point
            if scale < 0:
                # Negative width means the other root of the quadratic applies.
                loc = (3 * mean_tmp - mid_point) / 2 - aux_var
                scale = 3 * mean_tmp - 2 * loc - mid_point
            consta = (mid_point - loc) / scale
        return stats.triang(consta, loc, scale)
    if dist.name.lower() == 'truncnormal' or dist.name.lower() == 'truncnorm':
        # scipy expects the truncation points in standard-normal units.
        a = (dist.lower_bound - dist.mean) / dist.std
        b = (dist.upper_bound - dist.mean) / dist.std
        args = (a, b, dist.mean, dist.std)
        return stats.truncnorm(*args)
    if dist.name.lower() == 'bernoulli':
        if not dist.params:
            # Default to a fair coin when no success probability is given.
            return stats.bernoulli(0.5)
        cond = np.isfinite(dist.params[0]) and dist.params[0] > 0
        if cond and dist.params[0] < 1:
            return stats.bernoulli(dist.params[0])
        raise ValueError("Distribution parameters are invalid for Bernoulli.")
    #######################################
    # FRECHET AND WEIBULL missing among others
    #########################################
    msg = '%s distribution is not supported yet.' % dist.name
    # warnings.warn(sWarnMsg)
    raise NotImplementedError(msg)
| en | 0.538294 | # -*- coding: utf-8 -*- Created on Tue May 12 21:30:39 2016 WARNING: VERY OLD CODE BUT SEEMS TO WORK SO FAR @author: Bogoclu Creates a list of scipy distribution objects from a list of strings, corresponding to the distribution names in scipy, vector of Means and vector of Standard Deviations. Inputs ------ distribution: a Distribution object as defined in pyRDO.uncertainy.model Returns ------- oMargs:list List of scipy frozen_distribution ####################################### # FRECHET AND WEIBULL missing among others ######################################### # warnings.warn(sWarnMsg) | 2.866744 | 3 |
bye.py | alphaQuartz/GridWorld_game | 0 | 6616506 | print("My name is Bittu")
print("hi my name is Bachchu")
print("My name is Prasenjit")
| print("My name is Bittu")
print("hi my name is Bachchu")
print("My name is Prasenjit")
| none | 1 | 1.724738 | 2 | |
volume_provider/providers/k8s.py | bento-dbaas/volume-provider | 1 | 6616507 | <filename>volume_provider/providers/k8s.py
import jinja2
import yaml
from kubernetes.client import Configuration, ApiClient, CoreV1Api
from volume_provider.utils.uuid_helper import generate_random_uuid
from volume_provider.credentials.k8s import CredentialK8s, CredentialAddK8s
from volume_provider.providers.base import ProviderBase, CommandsBase
class ProviderK8s(ProviderBase):
    """Volume provider backed by Kubernetes PersistentVolumeClaims (PVCs)."""

    def get_commands(self):
        # Shell-command helper object for this provider (no-op for k8s).
        return CommandsK8s()

    @classmethod
    def get_provider(cls):
        # Provider identifier used for registration/lookup.
        return 'k8s'

    def render_to_string(self, template_contenxt):
        # Render the PVC Jinja2 template with the given context dict.
        env = jinja2.Environment(
            loader=jinja2.PackageLoader('volume_provider', 'templates')
        )
        template = env.get_template('k8s/yamls/persistent_volume_claim.yaml')
        return template.render(**template_contenxt)

    def yaml_file(self, context):
        # Render the PVC template, then parse it into the dict structure the
        # Kubernetes client expects as a request body.
        yaml_file = self.render_to_string(
            context
        )
        return yaml.safe_load(yaml_file)

    def build_client(self):
        # Build a CoreV1Api client authenticated with the bearer token and
        # endpoint taken from this provider's auth_info.
        configuration = Configuration()
        configuration.api_key['authorization'] = "Bearer {}".format(self.auth_info['K8S-Token'])
        configuration.host = self.auth_info['K8S-Endpoint']
        configuration.verify_ssl = self._verify_ssl
        api_client = ApiClient(configuration)
        return CoreV1Api(api_client)

    @property
    def _verify_ssl(self):
        # SSL verification is on unless configured as the string 'false' or 0.
        verify_ssl = self.auth_info.get("K8S-Verify-Ssl", 'false')
        return verify_ssl != 'false' and verify_ssl != 0

    def build_credential(self):
        return CredentialK8s(self.provider, self.environment)

    def get_credential_add(self):
        return CredentialAddK8s

    def _get_snapshot_status(self, snapshot):
        # Snapshots are always reported as available for this provider.
        return 'available'

    def _create_volume(self, volume, snapshot=None, *args, **kwargs):
        # Create a PVC named after a fresh UUID; the volume path is the mount
        # root.  NOTE(review): `snapshot` is ignored here -- confirm whether
        # create-from-snapshot is handled elsewhere.
        volume.owner_address = ''
        volume.identifier = generate_random_uuid()
        volume.resource_id = volume.identifier
        volume.path = "/"
        self.client.create_namespaced_persistent_volume_claim(
            self.auth_info.get("K8S-Namespace", "default"),
            self.yaml_file({
                'STORAGE_NAME': volume.identifier,
                'STORAGE_SIZE': volume.size_gb,
                'STORAGE_TYPE': self.auth_info.get('K8S-Storage-Type', '')
            })
        )
        return volume

    def _delete_volume(self, volume, **kw):
        # Remove the PVC that backs this volume.
        self.client.delete_namespaced_persistent_volume_claim(
            volume.identifier,
            self.auth_info.get("K8S-Namespace", "default"),
        )

    def _resize(self, volume, new_size_kb):
        # Patch the PVC with the new requested size (input given in KB,
        # converted to GB for the template).
        new_size_gb = volume.convert_kb_to_gb(new_size_kb)
        self.client.patch_namespaced_persistent_volume_claim(
            name=volume.identifier,
            namespace=self.auth_info.get("K8S-Namespace", "default"),
            body=self.yaml_file({
                'STORAGE_NAME': volume.identifier,
                'STORAGE_SIZE': new_size_gb,
                'STORAGE_TYPE': self.auth_info.get('K8S-Storage-Type', '')
            })
        )

    def _take_snapshot(self, volume, snapshot, *args):
        # NOTE(review): create_snapshot is not a CoreV1Api method; this path
        # seems to rely on a different client implementation -- verify before
        # touching this code.
        new_snapshot = self.client.create_snapshot(volume)
        snapshot.identifier = str(new_snapshot['snapshot']['id'])
        snapshot.description = new_snapshot['snapshot']['name']

    def _remove_snapshot(self, snapshot, force):
        self.client.delete_snapshot(snapshot.volume, snapshot)
        return True

    def _restore_snapshot(self, snapshot, volume, engine, team_name, db_name):
        # Kick off the restore job, wait for it, then record the restored
        # volume's identifier, resource id and path from the job result.
        restore_job = self.client.restore_snapshot(snapshot.volume, snapshot)
        job_result = self.client.wait_for_job_finished(restore_job['job'])
        volume.identifier = str(job_result['id'])
        export = self.client.export_get(volume)
        volume.resource_id = export['resource_id']
        volume.path = job_result['full_path']

    def _add_access(self, volume, to_address, *args, **kwargs):
        # Per-host access control is not applicable to PVC-backed volumes.
        pass

    def _delete_old_volume(self, volume):
        # Nothing to clean up for this provider.
        pass
class CommandsK8s(CommandsBase):
    # No provider-specific shell commands are needed for Kubernetes volumes.
    pass
| <filename>volume_provider/providers/k8s.py
import jinja2
import yaml
from kubernetes.client import Configuration, ApiClient, CoreV1Api
from volume_provider.utils.uuid_helper import generate_random_uuid
from volume_provider.credentials.k8s import CredentialK8s, CredentialAddK8s
from volume_provider.providers.base import ProviderBase, CommandsBase
class ProviderK8s(ProviderBase):
def get_commands(self):
return CommandsK8s()
@classmethod
def get_provider(cls):
return 'k8s'
def render_to_string(self, template_contenxt):
env = jinja2.Environment(
loader=jinja2.PackageLoader('volume_provider', 'templates')
)
template = env.get_template('k8s/yamls/persistent_volume_claim.yaml')
return template.render(**template_contenxt)
def yaml_file(self, context):
yaml_file = self.render_to_string(
context
)
return yaml.safe_load(yaml_file)
def build_client(self):
configuration = Configuration()
configuration.api_key['authorization'] = "Bearer {}".format(self.auth_info['K8S-Token'])
configuration.host = self.auth_info['K8S-Endpoint']
configuration.verify_ssl = self._verify_ssl
api_client = ApiClient(configuration)
return CoreV1Api(api_client)
@property
def _verify_ssl(self):
verify_ssl = self.auth_info.get("K8S-Verify-Ssl", 'false')
return verify_ssl != 'false' and verify_ssl != 0
def build_credential(self):
return CredentialK8s(self.provider, self.environment)
def get_credential_add(self):
return CredentialAddK8s
def _get_snapshot_status(self, snapshot):
return 'available'
def _create_volume(self, volume, snapshot=None, *args, **kwargs):
volume.owner_address = ''
volume.identifier = generate_random_uuid()
volume.resource_id = volume.identifier
volume.path = "/"
self.client.create_namespaced_persistent_volume_claim(
self.auth_info.get("K8S-Namespace", "default"),
self.yaml_file({
'STORAGE_NAME': volume.identifier,
'STORAGE_SIZE': volume.size_gb,
'STORAGE_TYPE': self.auth_info.get('K8S-Storage-Type', '')
})
)
return volume
def _delete_volume(self, volume, **kw):
self.client.delete_namespaced_persistent_volume_claim(
volume.identifier,
self.auth_info.get("K8S-Namespace", "default"),
)
def _resize(self, volume, new_size_kb):
new_size_gb = volume.convert_kb_to_gb(new_size_kb)
self.client.patch_namespaced_persistent_volume_claim(
name=volume.identifier,
namespace=self.auth_info.get("K8S-Namespace", "default"),
body=self.yaml_file({
'STORAGE_NAME': volume.identifier,
'STORAGE_SIZE': new_size_gb,
'STORAGE_TYPE': self.auth_info.get('K8S-Storage-Type', '')
})
)
def _take_snapshot(self, volume, snapshot, *args):
new_snapshot = self.client.create_snapshot(volume)
snapshot.identifier = str(new_snapshot['snapshot']['id'])
snapshot.description = new_snapshot['snapshot']['name']
def _remove_snapshot(self, snapshot, force):
self.client.delete_snapshot(snapshot.volume, snapshot)
return True
def _restore_snapshot(self, snapshot, volume, engine, team_name, db_name):
restore_job = self.client.restore_snapshot(snapshot.volume, snapshot)
job_result = self.client.wait_for_job_finished(restore_job['job'])
volume.identifier = str(job_result['id'])
export = self.client.export_get(volume)
volume.resource_id = export['resource_id']
volume.path = job_result['full_path']
def _add_access(self, volume, to_address, *args, **kwargs):
pass
def _delete_old_volume(self, volume):
pass
class CommandsK8s(CommandsBase):
pass
| none | 1 | 2.066267 | 2 | |
etiquetas_correios/exc.py | davidrios/etiquetas-correios | 1 | 6616508 | class ErroEtiquetasCorreios(Exception):
pass
class ErroEnderecoInvalido(ErroEtiquetasCorreios):
pass
| class ErroEtiquetasCorreios(Exception):
pass
class ErroEnderecoInvalido(ErroEtiquetasCorreios):
pass
| none | 1 | 1.383394 | 1 | |
console_interaction.py | Silicrex/DiscordReactionTracker | 0 | 6616509 | <gh_stars>0
import os
import json
def get_console_confirmation(question):
    """Prompt on the console until the user answers yes or no.

    Appends " (y/n)" to *question* (no question mark is added; e.g. pass
    'Generate the file?') and keeps re-asking until a recognised answer is
    entered.  Returns True for y/yes, False for n/no.
    """
    while True:
        print('{} (y/n)'.format(question))
        answer = input().lower()
        if answer in ('y', 'yes'):
            return True
        if answer in ('n', 'no'):
            return False
def get_bot_token():
    """Return the bot token from token.json, creating/filling it as needed.

    Flow:
      * token.json exists and parses as JSON -> return the stored token.
      * token.json exists but is invalid JSON -> offer to overwrite it.
      * token.json missing -> offer to generate it, then to write a token.
    Any declined prompt terminates the program via quit().
    """
    if os.path.isfile('token.json'):  # token.json exists, attempt to read the token from it
        with open('token.json') as file:
            try:
                token = json.load(file)  # If successful, then return token
                return token
            except json.JSONDecodeError:
                # Fixed grammar of the user-facing message ("could not parsed").
                print('token.json could not be parsed. Is it formatted correctly?', end='\n\n')
                if get_console_confirmation('Should I overwrite the token to it?'):
                    print()  # Newline
                    return write_token()  # Returns token back
                else:
                    quit()
    else:  # token.json file does not exist in directory
        print('ERROR: Could not find token.json file')
        if not get_console_confirmation('Should I generate it?'):  # Offer to generate token.json in directory
            quit()
        # Confirmed; create an empty file (the handle itself is not needed).
        with open('token.json', 'w'):
            pass
        print()  # Newline
        print('Generated token.json. It should contain your bot token as a double-quote string.')
        if get_console_confirmation('Should I write the token to it?'):
            print()  # Newline
            return write_token()  # Returns token back
        else:
            quit()
def write_token():
    """Ask the user for the bot token, persist it to token.json, return it."""
    with open('token.json', 'w') as token_file:
        print('Enter your token, without quotes:')
        entered = input()
        json.dump(entered, token_file)
    return entered
| import os
import json
def get_console_confirmation(question):
# Adds " (y/n)" text. Does not add the question mark.
# example question input: 'Generate the file?'
while True: # Repeat until 'y'/'yes' or 'n'/'no' are given
print(question + ' (y/n)')
console_response = input().lower()
if console_response in {'y', 'yes'}:
return True
elif console_response in {'n', 'no'}:
return False
def get_bot_token():
if os.path.isfile('token.json'): # token.json exists, attempt to read the token from it
with open('token.json') as file:
try:
token = json.load(file) # If successful, then return token
return token
except json.JSONDecodeError:
print('token.json could not parsed. Is it formatted correctly?', end='\n\n')
if get_console_confirmation('Should I overwrite the token to it?'):
print() # Newline
return write_token() # Returns token back
else:
quit()
else: # token.json file does not exist in directory
print('ERROR: Could not find token.json file')
if not get_console_confirmation('Should I generate it?'): # Offer to generate token.json in directory
quit()
with open('token.json', 'w') as file: # Confirmed; proceed to generate file
pass
print() # Newline
print('Generated token.json. It should contain your bot token as a double-quote string.')
if get_console_confirmation('Should I write the token to it?'):
print() # Newline
return write_token() # Returns token back
else:
quit()
def write_token():
with open('token.json', 'w') as file:
print('Enter your token, without quotes:')
token = input()
json.dump(token, file)
return token | en | 0.676612 | # Adds " (y/n)" text. Does not add the question mark. # example question input: 'Generate the file?' # Repeat until 'y'/'yes' or 'n'/'no' are given # token.json exists, attempt to read the token from it # If successful, then return token # Newline # Returns token back # token.json file does not exist in directory # Offer to generate token.json in directory # Confirmed; proceed to generate file # Newline # Newline # Returns token back | 3.858991 | 4 |
Medium/The k-th Lexicographical String of All Happy Strings of Length n.py | Koushik-Deb/LeetCode | 0 | 6616510 | <filename>Medium/The k-th Lexicographical String of All Happy Strings of Length n.py
# class Solution:
# def getHappyString(self, n: int, k: int) -> str:
# def dfs(ind,nlen,path,res):
# if ind == nlen:
# res.append(path)
# return
# for i in range(3):
# if len(path)==0 or (path[-1]!=ls[i]):
# dfs(ind+1,nlen,path+ls[i],res)
# ls = ['a','b','c']
# vis = [0]*3
# res = []
# dfs(0,n,'',res)
# res.sort()
# if len(res)<k:
# return ''
# return res[k-1]
# class Solution:
# def getHappyString(self, n: int, k: int) -> str:
# k -= 1
# if k // 2 ** (n - 1) > 2: return ''
# result, lookup = '^', {'^': 'abc', 'a': 'bc', 'b': 'ac', 'c': 'ab'}
# for i in reversed(range(n)):
# idx, k = divmod(k, 2 ** i)
# result += lookup[result[-1]][idx]
# return result[1:]
from math import ceil
class Solution:
    def getHappyString(self, n: int, k: int) -> str:
        """Return the k-th (1-indexed) lexicographic happy string of length n.

        A happy string uses only 'a', 'b', 'c' and never repeats a character
        in adjacent positions, so there are 3 * 2**(n-1) of them; returns ""
        when k exceeds that count.  Runs in O(n): each position is chosen by
        counting how many strings share each admissible prefix.
        """
        block = 2 ** (n - 1)  # number of happy strings per first-letter choice
        if k > 3 * block:
            return ""
        # Integer ceiling division replaces math.ceil(k / block): it stays
        # exact for arbitrarily large n, where float division can lose
        # precision and pick the wrong letter.
        result = 'abc'[(k + block - 1) // block - 1]
        nxt = {'a': 'bc', 'b': 'ac', 'c': 'ab'}  # letters allowed after each letter
        while block > 1:
            k = (k - 1) % block + 1  # rank within the chosen sub-block
            block //= 2
            choices = nxt[result[-1]]
            result += choices[(k + block - 1) // block - 1]
        return result
# class Solution:
# def getHappyString(self, n: int, k: int) -> str:
# def dfs(ind,nlen,path,res):
# if ind == nlen:
# res.append(path)
# return
# for i in range(3):
# if len(path)==0 or (path[-1]!=ls[i]):
# dfs(ind+1,nlen,path+ls[i],res)
# ls = ['a','b','c']
# vis = [0]*3
# res = []
# dfs(0,n,'',res)
# res.sort()
# if len(res)<k:
# return ''
# return res[k-1]
# class Solution:
# def getHappyString(self, n: int, k: int) -> str:
# k -= 1
# if k // 2 ** (n - 1) > 2: return ''
# result, lookup = '^', {'^': 'abc', 'a': 'bc', 'b': 'ac', 'c': 'ab'}
# for i in reversed(range(n)):
# idx, k = divmod(k, 2 ** i)
# result += lookup[result[-1]][idx]
# return result[1:]
from math import ceil
class Solution:
def getHappyString(self, n: int, k: int) -> str:
single_ele = 2 ** (n - 1)
if k > 3 * single_ele:
return ""
result = ['a', 'b', 'c'][ceil(k / single_ele) - 1]
while single_ele > 1:
k = (k - 1) % single_ele + 1
single_ele = single_ele // 2
if result[-1] == 'a':
result += ['b', 'c'][ceil(k / single_ele) - 1]
elif result[-1] == 'b':
result += ['a', 'c'][ceil(k / single_ele) - 1]
else:
result += ['a', 'b'][ceil(k / single_ele) - 1]
return result | en | 0.17461 | # class Solution: # def getHappyString(self, n: int, k: int) -> str: # def dfs(ind,nlen,path,res): # if ind == nlen: # res.append(path) # return # for i in range(3): # if len(path)==0 or (path[-1]!=ls[i]): # dfs(ind+1,nlen,path+ls[i],res) # ls = ['a','b','c'] # vis = [0]*3 # res = [] # dfs(0,n,'',res) # res.sort() # if len(res)<k: # return '' # return res[k-1] # class Solution: # def getHappyString(self, n: int, k: int) -> str: # k -= 1 # if k // 2 ** (n - 1) > 2: return '' # result, lookup = '^', {'^': 'abc', 'a': 'bc', 'b': 'ac', 'c': 'ab'} # for i in reversed(range(n)): # idx, k = divmod(k, 2 ** i) # result += lookup[result[-1]][idx] # return result[1:] | 3.139323 | 3 |
spark_deploy/cli/install.py | Sebastiaan-Alvarez-Rodriguez/spark-deploy | 0 | 6616511 | <filename>spark_deploy/cli/install.py
import spark_deploy.cli.util as _cli_util
import spark_deploy.install as _install
import spark_deploy.internal.defaults.install as defaults
'''CLI module to install Spark and Java on a cluster.'''
def subparser(subparsers):
    '''Register subparser modules.

    Adds the 'install' subcommand with all installation-related flags and
    returns the created parser(s) in a list.
    '''
    installparser = subparsers.add_parser('install', help='Install Spark environment on server cluster.')
    installparser.add_argument('--spark-url', dest='spark_url', type=str, default=defaults.spark_url(), help='Spark download URL.')
    installparser.add_argument('--java-url', dest='java_url', type=str, default=defaults.java_url(), help='Java download URL. Make sure the downloaded version is acceptable (between [`java-min`, `java-max`])')
    installparser.add_argument('--java-min', dest='java_min', type=int, default=defaults.java_min(), help='Java minimal version (default={}). 0 means "no limit". use this to ensure a recent-enough version is installed for use with your Spark version.'.format(defaults.java_min()))
    # Bug fix: the help text was copy-pasted from --java-min and wrongly said
    # "Java minimal version" for the maximum bound.
    installparser.add_argument('--java-max', dest='java_max', type=int, default=defaults.java_max(), help='Java maximal version (default={}). 0 means "no limit". use this to ensure a not-too-recent version is installed for use with your Spark version.'.format(defaults.java_max()))
    # NOTE(review): unlike --force-reinstall, '--use-sudo' has no
    # action='store_true', so it consumes a value despite the "If set" help
    # text -- confirm intent before changing the CLI surface.
    installparser.add_argument('--use-sudo', dest='use_sudo', help='If set, uses superuser-priviledged commands during installation. Otherwise, performs local installs, no superuser privileges required.')
    installparser.add_argument('--force-reinstall', dest='force_reinstall', help='If set, we always will re-download and install Spark. Otherwise, we will skip installing if we already have installed Spark.', action='store_true')
    installparser.add_argument('--silent', help='If set, less boot output is shown.', action='store_true')
    installparser.add_argument('--retries', metavar='amount', type=int, default=defaults.retries(), help='Amount of retries to use for risky operations (default={}).'.format(defaults.retries()))
    return [installparser]
def deploy_args_set(args):
    """Report whether this module should handle the parsed CLI output.

    Returns:
        `True` when the parsed subcommand is 'install' (so `deploy()` will
        be called), `False` otherwise.
    """
    is_install = args.command == 'install'
    return is_install
def deploy(parsers, args):
    """Execute the install subcommand.

    Reads the reservation from the CLI helper; returns False when none is
    available, otherwise the result of the installation.
    """
    reservation = _cli_util.read_reservation_cli()
    if not reservation:
        return False
    # Bug fix: `_install` is the imported *module* (import spark_deploy.install
    # as _install); calling the module object raised TypeError.  The callable
    # is the module's `install` function.
    return _install.install(reservation, install_dir=args.install_dir, key_path=args.key_path, spark_url=args.spark_url, java_url=args.java_url, java_min=args.java_min, java_max=args.java_max, use_sudo=args.use_sudo, force_reinstall=args.force_reinstall, silent=args.silent, retries=args.retries)
import spark_deploy.cli.util as _cli_util
import spark_deploy.install as _install
import spark_deploy.internal.defaults.install as defaults
'''CLI module to install Spark and Java on a cluster.'''
def subparser(subparsers):
'''Register subparser modules'''
installparser = subparsers.add_parser('install', help='Install Spark environment on server cluster.')
installparser.add_argument('--spark-url', dest='spark_url', type=str, default=defaults.spark_url(), help='Spark download URL.')
installparser.add_argument('--java-url', dest='java_url', type=str, default=defaults.java_url(), help='Java download URL. Make sure the downloaded version is acceptable (between [`java-min`, `java-max`])')
installparser.add_argument('--java-min', dest='java_min', type=int, default=defaults.java_min(), help='Java minimal version (default={}). 0 means "no limit". use this to ensure a recent-enough version is installed for use with your Spark version.'.format(defaults.java_min()))
installparser.add_argument('--java-max', dest='java_max', type=int, default=defaults.java_max(), help='Java minimal version (default={}). 0 means "no limit". use this to ensure a recent-enough version is installed for use with your Spark version.'.format(defaults.java_max()))
installparser.add_argument('--use-sudo', dest='use_sudo', help='If set, uses superuser-priviledged commands during installation. Otherwise, performs local installs, no superuser privileges required.')
installparser.add_argument('--force-reinstall', dest='force_reinstall', help='If set, we always will re-download and install Spark. Otherwise, we will skip installing if we already have installed Spark.', action='store_true')
installparser.add_argument('--silent', help='If set, less boot output is shown.', action='store_true')
installparser.add_argument('--retries', metavar='amount', type=int, default=defaults.retries(), help='Amount of retries to use for risky operations (default={}).'.format(defaults.retries()))
return [installparser]
def deploy_args_set(args):
'''Indicates whether we will handle command parse output in this module.
`deploy()` function will be called if set.
Returns:
`True` if we found arguments used by this subsubparser, `False` otherwise.'''
return args.command == 'install'
def deploy(parsers, args):
reservation = _cli_util.read_reservation_cli()
return _install(reservation, install_dir=args.install_dir, key_path=args.key_path, spark_url=args.spark_url, java_url=args.java_url, java_min=args.java_min, java_max=args.java_max, use_sudo=args.use_sudo, force_reinstall=args.force_reinstall, silent=args.silent, retries=args.retries) if reservation else False | en | 0.434959 | CLI module to install Spark and Java on a cluster. Register subparser modules Indicates whether we will handle command parse output in this module. `deploy()` function will be called if set. Returns: `True` if we found arguments used by this subsubparser, `False` otherwise. | 2.48725 | 2 |
pipext.py | GarryGaller/pipext | 0 | 6616512 | <reponame>GarryGaller/pipext
#--------------------------------------
"""
Script : pipext.py
Author : <NAME>
Copyright(C): <NAME>, 2017. All rights reserved
Version : 1.0.2
Date : 27.07.2017
"""
#--------------------------------------
__version__ = '1.0.2'
__author__ = '<NAME>'
#--------------------------------------
import os,sys
#from pip import get_installed_distributions
from pip._internal.utils.misc import get_installed_distributions, get_installed_version
#from pip.utils import get_installed_version
#from pip.commands.list import ListCommand
from pip._internal.commands.list import ListCommand
import pip
import argparse
from pprint import pprint
from collections import OrderedDict
def requries(dist):
    """Stringify a distribution's requirements (Requirement.parse objects).

    Returns a list like ['six>=1.9.0', 'foo>=1.0,<2.0'], or '' when the
    distribution declares no dependencies (the empty-string fallback is kept
    for backward compatibility with callers that compare against "").
    """
    results = []
    # dist.requires() -> [Requirement.parse('six>=1.9.0'), ...]
    for req in dist.requires():
        # Join *all* version specifiers.  The old code only used specs[0],
        # silently dropping compound constraints such as '>=1.0,<2.0'.
        spec = ','.join(op + version for op, version in req.specs)
        results.append(req.name + spec)
    return results or ''
def get_list_modules(select,
                     noformat=None,
                     not_required=False,
                     local_only=False,
                     user_only=False,
                     editables_only=False,
                     ):
    """Return an extended list of all installed modules.

    select       : extra pip ListCommand args, e.g. ['-o'] (outdated) or
                   ['-u'] (up-to-date); an empty list means all packages.
    noformat     : 'name' -> sorted project names, 'egg' -> sorted egg names,
                   None -> list of dicts keyed '0 Package' .. '6 Depends'.
    not_required : 1 -> pip's own "not required" filter,
                   2 -> packages that declare no dependencies themselves.
    local_only / user_only / editables_only are forwarded to
    get_installed_distributions().
    """
    list_modules = []
    distribs = get_installed_distributions(
        local_only=local_only,
        user_only=user_only,
        editables_only=editables_only
    )
    # -------------------------------
    # Filter via pip's ListCommand when outdated/up-to-date was requested.
    cmd = ListCommand()
    if '-o' in select:
        options, args = cmd.parser.parse_args(select)
        distribs = cmd.get_outdated(distribs, options)
    if '-u' in select:
        options, args = cmd.parser.parse_args(select)
        distribs = cmd.get_uptodate(distribs, options)
    # ------------------------------
    list_not_required = []
    if not_required == 1:
        # pip's notion: packages no other installed package depends on.
        options, args = cmd.parser.parse_args(select)
        distribs = cmd.get_not_required(distribs, options)
    elif not_required == 2:
        # Our notion: packages whose own requirement list is empty.
        for dist in distribs:
            if requries(dist) == "":
                list_not_required.append(dist)
        distribs = list_not_required
    del list_not_required
    # -------------------------------
    if noformat == 'name':
        for dist in distribs:
            list_modules.append(dist.project_name)
        list_modules.sort()
    elif noformat == 'egg':
        for dist in distribs:
            list_modules.append(dist.egg_name())
        list_modules.sort()
    elif noformat is None:
        # Latest/Type/Python columns only carry data after an outdated or
        # up-to-date query; otherwise they hold the string 'None'.
        for dist in distribs:
            list_modules.append({
                '0 Package': dist.project_name,
                '1 Version': dist.version,
                '2 Location': dist.location,
                '3 Latest': str(dist.latest_version) if set(select) & {'-o', '-u'} else str(None),
                '4 Type': str(dist.latest_filetype) if set(select) & {'-o', '-u'} else str(None),
                '5 Python': str(dist.py_version) if set(select) & {'-o', '-u'} else str(None),
                '6 Depends': str(requries(dist))
            })
        list_modules.sort(key=lambda x: x['0 Package'].lower())
    return list_modules
def update_modules(select, install_opts, distribs=None, exclude=None, extended_opts=None):
    """Update all modules, or only those given in *distribs*.

    >> update_modules(['-o'],['--upgrade'])
    >> update_modules(None,['--upgrade'])
    >> update_modules(None,['--upgrade'],['comtypes','cssselect','pony'])
    """
    if extended_opts is None:
        # (not_required, local_only, user_only, editables_only) defaults.
        extended_opts = (False, False, False, False)
    if not distribs:
        # No explicit module list: take every name matching the selection.
        distribs = [name for name in get_list_modules(select, 'name', *extended_opts)]
    if isinstance(exclude, (list, set, tuple)):
        # Case-insensitive exclusion (names compared lower-cased).
        distribs = set([i.lower() for i in distribs]) - set([i.lower() for i in exclude])
    if not distribs:
        return print('No modules of the specified type were found.')
    for dist_name in distribs:
        pip.main(['install', *install_opts, dist_name])
#-----------------------
def tabulate(modules, columns):
    """Compute column widths and a row-format string for tabular output.

    Args:
        modules: list of dicts keyed '0 Package' .. '6 Depends'.
        columns: indices (0-6) of the columns to include in the template.

    Returns:
        (headers, sformat, sep): headers is a list of (key, width) pairs for
        all seven columns, sformat a str.format template for the selected
        columns, and sep a matching dashed separator line.
    """
    column_keys = ('0 Package', '1 Version', '2 Location', '3 Latest',
                   '4 Type', '5 Python', '6 Depends')
    # Collect cell lengths per column.
    widths = {}
    for row in modules:
        for key in column_keys:
            widths.setdefault(key, []).append(len(row[key]))
    # Column width: widest cell (or the header label itself) plus 2 spaces.
    headers = [(key, max(max(lens) + 2, len(key) + 2))
               for key, lens in widths.items()]
    # Build the format template and the separator row for the chosen columns.
    sformat = ''.join('{' + headers[i][0] + ':' + str(headers[i][1]) + '}'
                      for i in columns)
    separator = {headers[i][0]: (headers[i][1] - 2) * '-' for i in columns}
    sep = sformat.format(**separator)
    return (headers, sformat, sep)
#--------------------------------------
# обработка завершения программы по Ctrl+C
#--------------------------------------
_old_excepthook = sys.excepthook
def on_exit_by_ctrl_c(exctype, value, traceback):
    """sys.excepthook replacement: exit quietly on Ctrl+C, defer otherwise.

    KeyboardInterrupt prints a short notice and exits with status 0; every
    other exception type is forwarded to the saved original excepthook.
    """
    if exctype is not KeyboardInterrupt:
        _old_excepthook(exctype, value, traceback)
        return
    sys.stdout.write('\nExecution aborted\n')
    sys.exit(0)
sys.excepthook = on_exit_by_ctrl_c
#===============================
# обоработка аргументов командной строки
#===============================
def argument_parse(argv=None):
    """Build the command-line parser and parse *argv* (defaults to sys.argv).

    Returns:
        (parser, options): the ArgumentParser plus the vars() dict of the
        parsed namespace.
    """
    description = """
Script : %(prog)s
Author : {author}
Copyright(C): {author}, 2017. All rights reserved
Version : {version}""" .format(
        author=__author__, version=__version__
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=description,
    )
    # Module selection mode: all / outdated / up-to-date.
    parser.add_argument('-s', '--select',
        type=str,
        nargs='+',
        help="""Module selection type:
a|all|full - all installed (by default),
o|out|outdated - outdated,
u|up|uptodate - uptodated
""")
    # --raw and --install are alternative ways to pass pip install options.
    group_main = parser.add_mutually_exclusive_group()
    group_main.add_argument('--raw',
        type=str,
        help="Raw options")
    group_main.add_argument('-i', '--install',
        type=str,
        nargs='+',
        help="""List of parameters to update.
u,U,upgrade: --upgrade,-U;
if,if-needed: --upgrade-strategy only-if-needed;
eager: --upgrade-strategy eager;
f,force: -U --force-reinstall;
i,ignore: --ignore-installed;
""")
    group_i = parser.add_argument_group('install')
    group_i.add_argument('-m', '--modules',
        type=str,
        nargs='+',
        help='The list of modules to update.'
    )
    group_i.add_argument('-e', '--exclude',
        type=str,
        nargs='+',
        help='The list of modules to exclude from the update. Only option -U')
    group_i.add_argument('--no-deps',
        action='store_true',
        default=False,
        help="Don't install package dependencies.")
    # Column selection: include (-c) or exclude (-C), mutually exclusive.
    group_c = parser.add_mutually_exclusive_group()
    group_c.add_argument('-c', '--columns',
        type=int,
        nargs='+',
        help="""List of the number of columns to display in the range 0-6.
By default, when using --select=all displayed columns 0,1,2,6,
when using --select=out - extended version - 0,1,3,4,5,6 - without the Location column.""")
    group_c.add_argument('-C',
        type=int,
        nargs='+',
        help="""List of columns to exclude.""")
    parser.add_argument('--pre',
        action='store_true',
        default=False,
        help="Include pre-release and development versions. By default, pip only finds stable versions.")
    # Output formatting: full table (-f) vs bare names / count (-F).
    group_f = parser.add_mutually_exclusive_group()
    group_f.add_argument('-f', '--format',
        type=str,
        nargs='+',
        default=['header'],
        help="""List of output formatting options:
header|head|h - to show headings,
separator|sep|s - to separate lines.""")
    group_f.add_argument('-F', '--no-format',
        type=str,
        help="""Output only a list of names.
name - for standard names,
egg - names in the format egg,
count - output only the number of modules""")
    # Two flavours of "not required" filtering, mutually exclusive.
    group_nr = parser.add_mutually_exclusive_group()
    group_nr.add_argument('--not-req',
        action='store_true',
        default=False,
        help="List packages that are not dependencies of installed packages (original option).")
    group_nr.add_argument('--not-req2',
        action='store_true',
        default=False,
        help="List of packages that do not have dependencies.")
    # Installation-scope filters, mutually exclusive.
    group_t = parser.add_mutually_exclusive_group()
    group_t.add_argument('--editable',
        action='store_true',
        default=False,
        help="List editable projects.")
    group_t.add_argument('--user',
        action='store_true',
        default=False,
        help="Only output packages installed in user-site.")
    group_t.add_argument('--local',
        action='store_true',
        default=False,
        help="If in a virtualenv that has global access, do not list globally-installed packages.")
    parser.add_argument('--test',
        action='store_true',
        default=False,
        help="Test options")
    parser.add_argument('-v', '--version',
        type=str,
        help="The version of the installed module: --version pip")
    return parser, vars(parser.parse_args(argv))
#===============================
# разбор аргументов
#===============================
def main(argv=None):
    """Entry point: parse CLI options and dispatch.

    Depending on the options this prints one module's version, runs an
    install/upgrade, dumps the parsed options (--test), or prints a plain
    or tabulated listing of installed modules.
    """
    parser,options = argument_parse(argv)
    if options is None:return
    if options['version']:
        # -v/--version NAME: just report that module's installed version.
        return print(get_installed_version(options['version']))
    #-----------------------
    # translate user-friendly aliases into real pip options
    #-----------------------
    transforms_i = {
        ('u','U','upgrade'):['--upgrade'],
        ('if','if-needed' ):['--upgrade-strategy','only-if-needed'],
        # BUG FIX: this key used to be ('eager'), which is just the string
        # 'eager'; set('eager') then became a set of characters, so the
        # "eager" alias could never match a whole option.  A one-element
        # tuple needs the trailing comma.
        ('eager',         ):['--upgrade-strategy','eager'],
        ('f','force'      ):['-U','--force-reinstall'],
        ('i','ignore'     ):['--ignore-installed'],
    }
    transforms_s = {
        ('a','all','full'     ):[],
        ('o','out','outdated' ):['-o'],
        ('u','up','uptodate'  ):['-u'],
    }
    #-----------------------
    # --not-req / --not-req2 pick one of two "not required" filters
    if options['not_req']:
        not_required = 1
    elif options['not_req2']:
        not_required = 2
    else:
        not_required = False
    #-----------------------
    # normalise -s/--select aliases to the internal ListCommand flags
    select = options['select']
    if select is None:
        select = []
    else:
        matches = False
        for key,val in transforms_s.items():
            if set(key) & set(select):
                select = val
                matches = True
        if not matches:
            return parser.print_usage()
    #-----------------------
    if options['pre']:
        if select:
            select.append('--pre')
        else:
            print('The --pre option is ignored.\n')
    extended_opts = not_required,options['local'], options['user'], options['editable']
    #-----------------------
    # work out which table columns to show: -c adds to, -C removes from
    # the default set implied by the selection type
    if options['columns']:
        operation = set.union
        columns = set(options['columns'])
    elif options['C']:
        operation = set.difference
        columns = set(options['C'])
    else:
        operation = set.union
        columns = set()
    if '-o' in select:
        columns = sorted(operation(set([0,1,3,4,5,6]),columns))
    elif '-u' in select:
        columns = sorted(operation(set([0,1,3,4,5]),columns))
    else:
        columns = sorted(operation(set([0,1,2,6]),columns))
    if not columns: return print('Specify at least one column')
    options['columns'] = columns
    if not all(0 <= i < 7 for i in columns):
        return print('[--columns] The column index should be in the range from 0 to 6')
    #-------------------------------
    modules = options['modules']
    exclude = options['exclude']
    install_opts = options['raw']
    if install_opts:
        # --raw takes precedence: pass the option string through verbatim
        install_opts = install_opts.split()
        options['install'] = install_opts
    else:
        if options['install']:
            install_opts = options['install']
            matches = False
            for key,val in transforms_i.items():
                if set(key) & set(install_opts):
                    install_opts = val
                    matches = True
            if not matches:
                return parser.print_usage()
            if options['no_deps']:
                install_opts.append('--no-deps')
            options['install'] = install_opts
            if options['select'] is None:
                # installs default to operating on outdated modules
                select = ['-o']
        else:
            if options['no_deps']:
                print('The --no-deps option is ignored.\n')
    #-----------------------
    options['select'] = select
    #-----------------------
    if options['test']:
        # --test: show the fully resolved option set and stop
        return pprint(options)
    elif install_opts:
        print('Install|upgrade options:{}\nType:{}\nModules:{}\nExclude:{}'.format(
            install_opts,select,modules,exclude)
        )
        print('-'*20)
        return update_modules(
            select,
            install_opts,
            modules,
            exclude,
            extended_opts
        )
    #-----------------------
    # neither an install nor --test: read the formatting options and list
    #-----------------------
    format_opts = set(options['format'])  # renamed from `format` (shadowed the builtin)
    noformat = options['no_format']
    print_header = {'header','head','h'} & format_opts
    print_sep = {'separator','sep','s'} & format_opts
    if noformat not in ['egg','name','count',None]:
        return print('[--no-format] Unknown format. Use egg, name or count.')
    if noformat == 'count':
        return print(len(get_list_modules(select,'name',*extended_opts)))
    modules = get_list_modules(select,noformat,*extended_opts)
    if not modules:
        return print('No modules of the specified type were found.')
    if noformat in ['egg','name']:
        for m in modules:
            print(m)
    #-----------------------
    # tabulated output
    #-----------------------
    else:
        headers,sformat,sep = tabulate(modules,columns)
        #-----------------------
        if print_header:
            print(sep)
            print(sformat.format(**{h[0]:h[0] for h in headers}))
            print(sep)
        for m in modules:
            print(sformat.format(**m))
        if print_sep:
            print(sep)
#===============================
# запуск модуля как скрипт
#===============================
# Script entry point: exit the interpreter with main()'s return value.
if __name__ == '__main__':
    raise SystemExit(main())
| #--------------------------------------
"""
Script : pipext.py
Author : <NAME>
Copyright(C): <NAME>, 2017. All rights reserved
Version : 1.0.2
Date : 27.07.2017
"""
#--------------------------------------
__version__ = '1.0.2'
__author__ = '<NAME>'
#--------------------------------------
import os,sys
#from pip import get_installed_distributions
from pip._internal.utils.misc import get_installed_distributions, get_installed_version
#from pip.utils import get_installed_version
#from pip.commands.list import ListCommand
from pip._internal.commands.list import ListCommand
import pip
import argparse
from pprint import pprint
from collections import OrderedDict
def requries(dist):
    """Stringify a distribution's requirements (Requirement.parse objects).

    Each requirement becomes "<name>" or "<name><op><version>" using only
    its first specifier, e.g. 'six>=1.9.0'.  Returns '' when the
    distribution requires nothing.
    """
    rendered = []
    for requirement in dist.requires():
        text = requirement.name
        # specs look like [('>=', '1.9.0'), ...]; only the first one is kept
        if requirement.specs:
            op, version = requirement.specs[0]
            text += op + version
        rendered.append(text)
    return rendered or ''
def get_list_modules(select,
                     noformat=None,
                     not_required=False,
                     local_only=False,
                     user_only=False,
                     editables_only=False,
                     ):
    """Return an extended listing of installed modules.

    select:   internal ListCommand flags, e.g. ['-o'] (outdated) or
              ['-u'] (up to date); an empty list means every module.
    noformat: None   -> list of per-module dicts keyed '0 Package'..'6 Depends';
              'name' -> sorted list of project names;
              'egg'  -> sorted list of egg names.
    not_required: 1 -> pip's own get_not_required() filter,
                  2 -> keep only modules whose requries() result is empty.
    The remaining flags are forwarded to get_installed_distributions().
    """
    list_modules = []
    distribs = get_installed_distributions(
        local_only=local_only,
        user_only=user_only,
        editables_only=editables_only
    )
    #-------------------------------
    # reuse pip's own ListCommand to filter the outdated / up-to-date sets
    cmd = ListCommand()
    if '-o' in select:
        options,args = cmd.parser.parse_args(select)
        distribs = cmd.get_outdated(distribs,options)
    if '-u' in select:
        options,args = cmd.parser.parse_args(select)
        distribs = cmd.get_uptodate(distribs,options)
    #------------------------------
    list_not_required = []
    if not_required == 1:
        options,args = cmd.parser.parse_args(select)
        distribs = cmd.get_not_required(distribs,options)
    elif not_required == 2:
        # variant 2: modules that themselves have no dependencies
        for dist in distribs:
            if requries(dist) == "":
                list_not_required.append(dist)
        distribs = list_not_required
    del list_not_required
    #-------------------------------
    if noformat == 'name':
        for dist in distribs:
            list_modules.append(dist.project_name)
        list_modules.sort()
    elif noformat == 'egg':
        for dist in distribs:
            list_modules.append(dist.egg_name())
        list_modules.sort()
    elif noformat is None:
        for dist in distribs:
            # latest_* fields are presumably only populated by the -o/-u
            # code paths above — hence the guard on `select` (TODO confirm)
            list_modules.append({
                '0 Package':dist.project_name,
                '1 Version':dist.version,
                '2 Location':dist.location,
                '3 Latest':str(dist.latest_version) if set(select) & {'-o','-u'} else str(None),
                '4 Type':str(dist.latest_filetype) if set(select) & {'-o','-u'} else str(None),
                '5 Python':str(dist.py_version) if set(select) & {'-o','-u'} else str(None),
                '6 Depends':str(requries(dist))
            })
        list_modules.sort(key=lambda x:x['0 Package'].lower())
    return list_modules
def update_modules(select,install_opts,distribs=None,exclude=None,extended_opts=None):
    """Install/upgrade all modules of the selected type, or only the listed ones.

    select        : internal ListCommand flags (e.g. ['-o']) or None.
    install_opts  : options appended to "pip install", e.g. ['--upgrade'].
    distribs      : module names to process; every matching module when falsy.
    exclude       : names to skip (compared case-insensitively).
    extended_opts : (not_required, local_only, user_only, editables_only).

    Examples::
        update_modules(['-o'], ['--upgrade'])
        update_modules(None, ['--upgrade'])
        update_modules(None, ['--upgrade'], ['comtypes', 'cssselect', 'pony'])
    """
    if extended_opts is None:
        extended_opts = (False, False, False, False)
    if not distribs:
        # no explicit list given: take every module matching `select`
        distribs = get_list_modules(select, 'name', *extended_opts)
    if isinstance(exclude, (list, set, tuple)):
        # case-insensitive set difference drops the excluded names
        distribs = {name.lower() for name in distribs} - {name.lower() for name in exclude}
    if not distribs:
        return print('No modules of the specified type were found.')
    for dist_name in distribs:
        pip.main(['install', *install_opts, dist_name])
#-----------------------
def tabulate(modules, columns):
    """Compute column widths and build the row format string and separator.

    Returns (headers, sformat, sep): headers is a list of
    (column name, width) tuples for all seven columns, sformat is a
    str.format template covering only the requested `columns` indices,
    and sep is the matching dashed separator line.
    """
    names = ('0 Package', '1 Version', '2 Location', '3 Latest',
             '4 Type', '5 Python', '6 Depends')
    headers = OrderedDict()
    for name in names:
        # widest cell plus two padding spaces, never narrower than the title
        widest = max(len(row[name]) for row in modules) + 2
        headers[name] = max(widest, len(name) + 2)
    headers = list(headers.items())
    sformat = ''
    separator = {}
    for index in columns:
        name, width = headers[index]
        sformat += '{' + name + ':' + str(width) + '}'
        separator[name] = '-' * (width - 2)
    sep = sformat.format(**separator)
    return (headers, sformat, sep)
#--------------------------------------
# обработка завершения программы по Ctrl+C
#--------------------------------------
# Exit quietly with status 0 when the user presses Ctrl+C instead of
# dumping a KeyboardInterrupt traceback; every other exception type is
# forwarded to the previously installed hook.
_old_excepthook = sys.excepthook
def on_exit_by_ctrl_c(exctype, value, traceback):
    """Custom sys.excepthook: turn Ctrl+C into a clean exit."""
    if exctype != KeyboardInterrupt:
        _old_excepthook(exctype, value, traceback)
        return
    sys.stdout.write('\nExecution aborted\n')
    sys.exit(0)
sys.excepthook = on_exit_by_ctrl_c
#===============================
# обоработка аргументов командной строки
#===============================
def argument_parse(argv=None):
    """Build the command-line parser and parse *argv*.

    Returns (parser, options_dict) where options_dict is vars() of the
    parsed namespace; main() consumes both.
    """
    description = """
Script : %(prog)s
Author : {author}
Copyright(C): {author}, 2017. All rights reserved
Version : {version}""" .format(
    author=__author__,version= __version__
    )
    parser = argparse.ArgumentParser(
        formatter_class = argparse.RawDescriptionHelpFormatter,
        description = description,
    )
    # -s/--select: which modules to list (all / outdated / up to date)
    parser.add_argument('-s','--select',
        type = str,
        nargs = '+',
        help = """Module selection type:
a|all|full - all installed (by default),
o|out|outdated - outdated,
u|up|uptodate - uptodated
""")
    # --raw and -i/--install are mutually exclusive ways to pass install options
    group_main = parser.add_mutually_exclusive_group()
    group_main.add_argument('--raw',
        type = str,
        help = "Raw options")
    group_main.add_argument('-i','--install',
        type = str,
        nargs = '+',
        help = """List of parameters to update.
u,U,upgrade: --upgrade,-U;
if,if-needed: --upgrade-strategy only-if-needed;
eager: --upgrade-strategy eager;
f,force: -U --force-reinstall;
i,ignore: --ignore-installed;
""")
    # install-related extras: module list, exclusions, dependency handling
    group_i = parser.add_argument_group('install')
    group_i.add_argument('-m','--modules',
        type = str,
        nargs = '+',
        help = 'The list of modules to update.'
    )
    group_i.add_argument('-e','--exclude',
        type = str,
        nargs = '+',
        help = 'The list of modules to exclude from the update. Only option -U')
    group_i.add_argument('--no-deps',
        action = 'store_true',
        default = False,
        help = "Don't install package dependencies.")
    # -c/-C choose columns of the tabulated listing (include vs exclude)
    group_c = parser.add_mutually_exclusive_group()
    group_c.add_argument('-c','--columns',
        type = int,
        nargs = '+',
        help = """List of the number of columns to display in the range 0-6.
By default, when using --select=all displayed columns 0,1,2,6,
when using --select=out - extended version - 0,1,3,4,5,6 - without the Location column.""")
    group_c.add_argument('-C',
        type = int,
        nargs = '+',
        help = """List of columns to exclude.""")
    parser.add_argument('--pre',
        action = 'store_true',
        default = False,
        help = "Include pre-release and development versions. By default, pip only finds stable versions.")
    # -f/-F: formatted table output vs bare name listing
    group_f = parser.add_mutually_exclusive_group()
    group_f.add_argument('-f','--format',
        type = str,
        nargs = '+',
        default = ['header'],
        help = """List of output formatting options:
header|head|h - to show headings,
separator|sep|s - to separate lines.""")
    group_f.add_argument('-F','--no-format',
        type = str,
        help = """Output only a list of names.
name - for standard names,
egg - names in the format egg,
count - output only the number of modules""")
    # two different "not required" filters (see main())
    group_nr = parser.add_mutually_exclusive_group()
    group_nr.add_argument('--not-req',
        action = 'store_true',
        default = False,
        help = "List packages that are not dependencies of installed packages (original option).")
    group_nr.add_argument('--not-req2',
        action = 'store_true',
        default = False,
        help = "List of packages that do not have dependencies.")
    # installation-scope filters, mutually exclusive
    group_t = parser.add_mutually_exclusive_group()
    group_t.add_argument('--editable',
        action = 'store_true',
        default = False,
        help = "List editable projects.")
    group_t.add_argument('--user',
        action = 'store_true',
        default = False,
        help = "Only output packages installed in user-site.")
    group_t.add_argument('--local',
        action = 'store_true',
        default = False,
        help = "If in a virtualenv that has global access, do not list globally-installed packages.")
    parser.add_argument('--test',
        action = 'store_true',
        default = False,
        help = "Test options")
    parser.add_argument('-v','--version',
        type = str,
        help = "The version of the installed module: --version pip")
    return parser,vars(parser.parse_args(argv))
#===============================
# разбор аргументов
#===============================
def main(argv=None):
    """Entry point: parse CLI options and dispatch.

    Depending on the options this prints one module's version, runs an
    install/upgrade, dumps the parsed options (--test), or prints a plain
    or tabulated listing of installed modules.
    """
    parser,options = argument_parse(argv)
    if options is None:return
    if options['version']:
        # -v/--version NAME: just report that module's installed version.
        return print(get_installed_version(options['version']))
    #-----------------------
    # translate user-friendly aliases into real pip options
    #-----------------------
    transforms_i = {
        ('u','U','upgrade'):['--upgrade'],
        ('if','if-needed' ):['--upgrade-strategy','only-if-needed'],
        # BUG FIX: this key used to be ('eager'), which is just the string
        # 'eager'; set('eager') then became a set of characters, so the
        # "eager" alias could never match a whole option.  A one-element
        # tuple needs the trailing comma.
        ('eager',         ):['--upgrade-strategy','eager'],
        ('f','force'      ):['-U','--force-reinstall'],
        ('i','ignore'     ):['--ignore-installed'],
    }
    transforms_s = {
        ('a','all','full'     ):[],
        ('o','out','outdated' ):['-o'],
        ('u','up','uptodate'  ):['-u'],
    }
    #-----------------------
    # --not-req / --not-req2 pick one of two "not required" filters
    if options['not_req']:
        not_required = 1
    elif options['not_req2']:
        not_required = 2
    else:
        not_required = False
    #-----------------------
    # normalise -s/--select aliases to the internal ListCommand flags
    select = options['select']
    if select is None:
        select = []
    else:
        matches = False
        for key,val in transforms_s.items():
            if set(key) & set(select):
                select = val
                matches = True
        if not matches:
            return parser.print_usage()
    #-----------------------
    if options['pre']:
        if select:
            select.append('--pre')
        else:
            print('The --pre option is ignored.\n')
    extended_opts = not_required,options['local'], options['user'], options['editable']
    #-----------------------
    # work out which table columns to show: -c adds to, -C removes from
    # the default set implied by the selection type
    if options['columns']:
        operation = set.union
        columns = set(options['columns'])
    elif options['C']:
        operation = set.difference
        columns = set(options['C'])
    else:
        operation = set.union
        columns = set()
    if '-o' in select:
        columns = sorted(operation(set([0,1,3,4,5,6]),columns))
    elif '-u' in select:
        columns = sorted(operation(set([0,1,3,4,5]),columns))
    else:
        columns = sorted(operation(set([0,1,2,6]),columns))
    if not columns: return print('Specify at least one column')
    options['columns'] = columns
    if not all(0 <= i < 7 for i in columns):
        return print('[--columns] The column index should be in the range from 0 to 6')
    #-------------------------------
    modules = options['modules']
    exclude = options['exclude']
    install_opts = options['raw']
    if install_opts:
        # --raw takes precedence: pass the option string through verbatim
        install_opts = install_opts.split()
        options['install'] = install_opts
    else:
        if options['install']:
            install_opts = options['install']
            matches = False
            for key,val in transforms_i.items():
                if set(key) & set(install_opts):
                    install_opts = val
                    matches = True
            if not matches:
                return parser.print_usage()
            if options['no_deps']:
                install_opts.append('--no-deps')
            options['install'] = install_opts
            if options['select'] is None:
                # installs default to operating on outdated modules
                select = ['-o']
        else:
            if options['no_deps']:
                print('The --no-deps option is ignored.\n')
    #-----------------------
    options['select'] = select
    #-----------------------
    if options['test']:
        # --test: show the fully resolved option set and stop
        return pprint(options)
    elif install_opts:
        print('Install|upgrade options:{}\nType:{}\nModules:{}\nExclude:{}'.format(
            install_opts,select,modules,exclude)
        )
        print('-'*20)
        return update_modules(
            select,
            install_opts,
            modules,
            exclude,
            extended_opts
        )
    #-----------------------
    # neither an install nor --test: read the formatting options and list
    #-----------------------
    format_opts = set(options['format'])  # renamed from `format` (shadowed the builtin)
    noformat = options['no_format']
    print_header = {'header','head','h'} & format_opts
    print_sep = {'separator','sep','s'} & format_opts
    if noformat not in ['egg','name','count',None]:
        return print('[--no-format] Unknown format. Use egg, name or count.')
    if noformat == 'count':
        return print(len(get_list_modules(select,'name',*extended_opts)))
    modules = get_list_modules(select,noformat,*extended_opts)
    if not modules:
        return print('No modules of the specified type were found.')
    if noformat in ['egg','name']:
        for m in modules:
            print(m)
    #-----------------------
    # tabulated output
    #-----------------------
    else:
        headers,sformat,sep = tabulate(modules,columns)
        #-----------------------
        if print_header:
            print(sep)
            print(sformat.format(**{h[0]:h[0] for h in headers}))
            print(sep)
        for m in modules:
            print(sformat.format(**m))
        if print_sep:
            print(sep)
#===============================
# запуск модуля как скрипт
#===============================
if __name__== '__main__':
sys.exit(main()) | ru | 0.191155 | #-------------------------------------- Script : pipext.py Author : <NAME> Copyright(C): <NAME>, 2017. All rights reserved Version : 1.0.2 Date : 27.07.2017 #-------------------------------------- #-------------------------------------- #from pip import get_installed_distributions #from pip.utils import get_installed_version #from pip.commands.list import ListCommand Вспомогательная функция для стрингификации объекта Requirement.parse # [Requirement.parse('six>=1.9.0'), ...] # [('>=', '1.9.0')] Получить расширенный список всех инсталлированных модулей #------------------------------- #------------------------------ #------------------------------- Обновить все модули либо только указанные в списке >> update_modules(['-o'],['--upgrade']) >> update_modules(None,['--upgrade']) >> update_modules(None,['--upgrade'],['comtypes','cssselect','pony']) #----------------------- # вычисление отступов #----------------------- # преобразуем в список кортежей # создание строки форматирования для заголовков # создание строки сепаратора #-------------------------------------- # обработка завершения программы по Ctrl+C #-------------------------------------- #=============================== # обоработка аргументов командной строки #=============================== Script : %(prog)s Author : {author} Copyright(C): {author}, 2017. All rights reserved Version : {version} Module selection type: a|all|full - all installed (by default), o|out|outdated - outdated, u|up|uptodate - uptodated List of parameters to update. u,U,upgrade: --upgrade,-U; if,if-needed: --upgrade-strategy only-if-needed; eager: --upgrade-strategy eager; f,force: -U --force-reinstall; i,ignore: --ignore-installed; List of the number of columns to display in the range 0-6. By default, when using --select=all displayed columns 0,1,2,6, when using --select=out - extended version - 0,1,3,4,5,6 - without the Location column. List of columns to exclude. 
List of output formatting options: header|head|h - to show headings, separator|sep|s - to separate lines. Output only a list of names. name - for standard names, egg - names in the format egg, count - output only the number of modules #=============================== # разбор аргументов #=============================== #----------------------- # трансформация опций #----------------------- #----------------------- #----------------------- #----------------------- #----------------------- #------------------------------- #----------------------- #----------------------- #----------------------- # если не обновление и не тест опций - читаем опции форматирования #----------------------- #----------------------- # форматированный вывод #----------------------- #----------------------- #=============================== # запуск модуля как скрипт #=============================== | 2.441006 | 2 |
setup.py | Mrfanfan/ZIFA | 103 | 6616513 | <filename>setup.py
from distutils.core import setup
DESCRIPTION = "Implements Zero-Inflated Factor Analysis"
LONG_DESCRIPTION = DESCRIPTION
NAME = "ZIFA"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
MAINTAINER = "<NAME>"
MAINTAINER_EMAIL = "<EMAIL>"
DOWNLOAD_URL = 'https://github.com/epierson9/ZIFA'
LICENSE = 'MIT'
VERSION = '0.1'
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=DOWNLOAD_URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=['ZIFA'],
package_data={}
)
| <filename>setup.py
# Packaging script for the ZIFA (Zero-Inflated Factor Analysis) package.
# NOTE(review): distutils is deprecated (removed from the stdlib in
# Python 3.12); migrating to setuptools is worth confirming with the owners.
from distutils.core import setup
# Package metadata.  The <NAME>/<EMAIL> values are anonymized placeholders
# left by the dataset export, not literal values to ship.
DESCRIPTION = "Implements Zero-Inflated Factor Analysis"
LONG_DESCRIPTION = DESCRIPTION
NAME = "ZIFA"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
MAINTAINER = "<NAME>"
MAINTAINER_EMAIL = "<EMAIL>"
DOWNLOAD_URL = 'https://github.com/epierson9/ZIFA'
LICENSE = 'MIT'
VERSION = '0.1'
# Register the single 'ZIFA' package; no extra package data is bundled.
setup(name=NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      url=DOWNLOAD_URL,
      download_url=DOWNLOAD_URL,
      license=LICENSE,
      packages=['ZIFA'],
      package_data={}
      )
| none | 1 | 1.095022 | 1 | |
src/main/python/general_analysis/simulation_health_analysis.py | colinsheppard/beam | 0 | 6616514 | import os
from glob import glob
from shutil import copyfile
# detectors is a dictionary that consist key as health metric and
# value as lambda function that can detect health metric from line.
# if we need to add more detector then we have to add that detector in this dictionary
detectors = {
"deadLetter": lambda line: "DeadLetter" in line,
"actorDied": lambda line: "terminated unexpectedly" in line,
"warn": lambda line: " WARN " in line,
"error": lambda line: " ERROR " in line,
"stacktrace": lambda line: line.startswith("\tat ")
}
beam_home = os.getcwd()
def handleJavaErrorLine(line):
    """Extract the message literal from a Java `new RuntimeException("...")` line.

    Returns the first double-quoted string with '%s' placeholders removed,
    or None when the line contains no quoted string.
    """
    tokens = line.split("\"")
    # BUG FIX: split('"') always yields at least one element, so the old
    # `len(tokens) > 0` guard never prevented an IndexError on tokens[1];
    # a message literal exists only when the line contains a quote pair.
    if len(tokens) > 1:
        return tokens[1].replace('%s', '')
def handleScalaToken(tokens):
    """Pick the longest word run out of an interpolated Scala string.

    `tokens` is a line already split on '"'; the quoted part (tokens[1])
    is split on '$' (interpolation markers).  Words of the first segment
    are kept whole, later segments drop their leading (interpolated)
    word; the longest word list wins and is returned joined by spaces.
    """
    best = []
    if len(tokens) > 1:
        segments = tokens[1].strip().split('$')
        for index, segment in enumerate(segments):
            words = segment.strip().replace('\\n', '').split(" ")
            if index > 0:
                words = words[1:]
            if len(words) > len(best):
                best = words
    return " ".join(best)
def handleScalaErrorLine(line):
    """Extract the message from a Scala `new RuntimeException(...)` line.

    Interpolated literals (s"...", f"...") are delegated to
    handleScalaToken; plain string literals return the first quoted part;
    lines without a usable literal return None.
    """
    tokens = line.split("\"")
    interpolated = "(s\"" in line or "(\ns\"" in line or "(f\"" in line
    if interpolated:
        return handleScalaToken(tokens)
    if len(tokens) > 1:
        return tokens[1]
def detect_all_error_types():
    """Scan the project's Java/Scala sources for thrown exception messages.

    Walks <beam_home>/src/main/, extracts the message literal of every
    `new RuntimeException(...)` / `new Exception(...)` occurrence, and
    returns them as a list (entries may be None for lines without a
    usable literal).
    """
    error_list = []
    for dir_path, sub_dir_path, source_files in os.walk(beam_home + '/src/main/'):
        for source_file in source_files:
            path = dir_path + '/' + source_file
            if source_file.endswith('.java'):
                # `with` closes the handle; the original version leaked one
                # open file descriptor per source file scanned.
                with open(path, "r") as source:
                    for line in source:
                        if 'new RuntimeException' in line or 'new Exception' in line:
                            error_list.append(handleJavaErrorLine(line))
            if source_file.endswith('.scala'):
                with open(path, "r") as source:
                    for line in source:
                        if 'new RuntimeException' in line or 'new Exception' in line:
                            error_list.append(handleScalaErrorLine(line))
    return error_list
# ---- script body: analyse the newest beamLog.out for health metrics ----
log_file_location = glob(beam_home + "/output/*/*/beamLog.out")
# newest run first (sorted by file modification time, descending)
log_file_location.sort(key=lambda x: os.path.getmtime(x), reverse=True)
with open(log_file_location[0]) as file:
    # NOTE: `file` is rebound from the handle to the list of lines; the
    # handle itself is closed when the `with` block ends.
    file = file.readlines()
matric_log = {}  # metric name -> list of matching lines ("matric" = metric)
stacktrace_count = 0
error_types = detect_all_error_types()
for line in file:
    # treating stacktrace detector specially because 1 stacktrace consist multiple lines of stacktrace
    if detectors["stacktrace"](line):
        if stacktrace_count == 0:
            # record one entry per contiguous stacktrace block
            matric = matric_log.get("stacktrace", [])
            matric.append(stacktrace_count)
            matric_log["stacktrace"] = matric
        stacktrace_count = stacktrace_count + 1
        continue
    elif stacktrace_count > 0:
        # first non-stacktrace line ends the current stacktrace block
        stacktrace_count = 0
        continue
    # iterating each detector and evaluating if line consist that detector
    for key, value in detectors.items():
        if value(line):
            matric = matric_log.get(key, [])
            matric.append(line)
            matric_log[key] = matric
            break
    # also count occurrences of every exception message found in the sources
    for error_type in error_types:
        if error_type is not None and error_type in line:
            matric = matric_log.get(error_type, [])
            matric.append(line)
            matric_log[error_type] = matric
# write a "<metric>,<count>" summary and copy it next to the analysed log
with open('RunHealthAnalysis.txt', 'w') as file:
    for detector in detectors:
        file.write(detector + "," + str(len(matric_log.get(detector, []))) + "\n")
beam_output_path = os.path.dirname(log_file_location[0])
copyfile('RunHealthAnalysis.txt', beam_output_path + "/runHealthAnalysis.txt")
| import os
from glob import glob
from shutil import copyfile
# detectors is a dictionary that consist key as health metric and
# value as lambda function that can detect health metric from line.
# if we need to add more detector then we have to add that detector in this dictionary
detectors = {
"deadLetter": lambda line: "DeadLetter" in line,
"actorDied": lambda line: "terminated unexpectedly" in line,
"warn": lambda line: " WARN " in line,
"error": lambda line: " ERROR " in line,
"stacktrace": lambda line: line.startswith("\tat ")
}
beam_home = os.getcwd()
def handleJavaErrorLine(line):
tokens = line.split("\"")
if len(tokens) > 0:
return tokens[1].replace('%s', '')
def handleScalaToken(tokens):
out_result = []
if len(tokens) > 1:
split_error_lines = tokens[1].strip().split('$')
first_word = True
for split_error_line in split_error_lines:
words = split_error_line.strip().replace('\\n', '').split(" ")
word_collector = words if first_word else words[1:]
if first_word:
first_word = False
if len(word_collector) > len(out_result):
out_result = word_collector
return " ".join(out_result)
def handleScalaErrorLine(line):
tokens = line.split("\"")
if "(s\"" in line or "(\ns\"" in line or "(f\"" in line:
return handleScalaToken(tokens)
elif len(tokens) > 1:
return tokens[1]
def detect_all_error_types():
error_list = []
for dir_path, sub_dir_path, source_files in os.walk(beam_home + '/src/main/'):
for source_file in source_files:
if source_file.endswith('.java'):
source = open(dir_path + '/' + source_file, "r")
for line in source:
if 'new RuntimeException' in line or 'new Exception' in line:
error_list.append(handleJavaErrorLine(line))
if source_file.endswith('.scala'):
source = open(dir_path + '/' + source_file, "r")
for line in source:
if 'new RuntimeException' in line or 'new Exception' in line:
error_list.append(handleScalaErrorLine(line))
return error_list
log_file_location = glob(beam_home + "/output/*/*/beamLog.out")
log_file_location.sort(key=lambda x: os.path.getmtime(x), reverse=True)
with open(log_file_location[0]) as file:
file = file.readlines()
matric_log = {}
stacktrace_count = 0
error_types = detect_all_error_types()
for line in file:
# treating stacktrace detector specially because 1 stacktrace consist multiple lines of stacktrace
if detectors["stacktrace"](line):
if stacktrace_count == 0:
matric = matric_log.get("stacktrace", [])
matric.append(stacktrace_count)
matric_log["stacktrace"] = matric
stacktrace_count = stacktrace_count + 1
continue
elif stacktrace_count > 0:
stacktrace_count = 0
continue
# iterating each detector and evaluating if line consist that detector
for key, value in detectors.items():
if value(line):
matric = matric_log.get(key, [])
matric.append(line)
matric_log[key] = matric
break
for error_type in error_types:
if error_type is not None and error_type in line:
matric = matric_log.get(error_type, [])
matric.append(line)
matric_log[error_type] = matric
with open('RunHealthAnalysis.txt', 'w') as file:
for detector in detectors:
file.write(detector + "," + str(len(matric_log.get(detector, []))) + "\n")
beam_output_path = os.path.dirname(log_file_location[0])
copyfile('RunHealthAnalysis.txt', beam_output_path + "/runHealthAnalysis.txt")
| en | 0.885324 | # detectors is a dictionary that consist key as health metric and # value as lambda function that can detect health metric from line. # if we need to add more detector then we have to add that detector in this dictionary # treating stacktrace detector specially because 1 stacktrace consist multiple lines of stacktrace # iterating each detector and evaluating if line consist that detector | 2.677316 | 3 |
app/Util/c_code_util.py | MisakaMikoto0502/COVID-19API | 33 | 6616515 | <reponame>MisakaMikoto0502/COVID-19API<gh_stars>10-100
from app.db import c_code
from app.Exceptions import APIException
class CountryCodeUtil(object):
    """Lookup helpers over the bundled `c_code` country-code table."""
    @staticmethod
    def get_all_country_name() -> list:
        """Return every country as a single-entry {name: alpha_3} dict."""
        data = c_code["data"]
        return [{cn["name"]: cn["alpha_3"]} for cn in data]
    @staticmethod
    def get_country_code(alpha_3):
        """Map an alpha-3 country code to its lower-case alpha-2 code.

        Matching is case-insensitive; raises APIException (code 422)
        when the alpha-3 code is not present in the table.
        """
        data = c_code["data"]
        for item in data:
            if item["alpha_3"].lower() == alpha_3.lower():
                return item["alpha_2"].lower()
        raise APIException(
            status=False,
            system={
                "code": 422,
                "message": "Country name not found"
            },
            source=None
        ) | from app.db import c_code
from app.Exceptions import APIException
class CountryCodeUtil(object):
@staticmethod
def get_all_country_name() -> list:
data = c_code["data"]
return [{cn["name"]: cn["alpha_3"]} for cn in data]
@staticmethod
def get_country_code(alpha_3):
data = c_code["data"]
for item in data:
if item["alpha_3"].lower() == alpha_3.lower():
return item["alpha_2"].lower()
raise APIException(
status=False,
system={
"code": 422,
"message": "Country name not found"
},
source=None
) | none | 1 | 2.982286 | 3 | |
Login system/signUp.py | Ved-programmer/Login-System | 4 | 6616516 | <gh_stars>1-10
from hashing import encrypt
import checks
def addToIndex(username, password):
    """Validate credentials and, if accepted, append them to data.txt.

    The password is stored in its encrypt()-ed form, one
    "username-password" record per line.  Returns the checks.check()
    result object so the caller can inspect `.accepted`.
    """
    result = checks.check(username, password, False)
    if result.accepted:
        password = encrypt(password)
        with open("data.txt", "a") as f:
            f.write(f"{username}-{password}\n")
    return result
| from hashing import encrypt
import checks
def addToIndex(username, password):
result = checks.check(username, password, False)
# print(result)
if result.accepted:
password = encrypt(password)
with open("data.txt", "a") as f:
f.write(f"{username}-{password}\n")
return result | en | 0.338828 | # print(result) | 3.573063 | 4 |
flake8_docstrings.py | tylertrussell/flake8-docstrings-catnado | 0 | 6616517 | # -*- coding: utf-8 -*-
"""Implementation of pydocstyle integration with Flake8.
pydocstyle docstrings convention needs error code and class parser for be
included as module into flake8
"""
import fnmatch
import os
from flake8_polyfill import stdin
import pycodestyle
try:
import pydocstyle as pep257
module_name = 'pydocstyle'
except ImportError:
import pep257
module_name = 'pep257'
__version__ = '0.0.1dev1'
__all__ = ('pep257Checker',)
stdin.monkey_patch('pycodestyle')
class EnvironError(pep257.Error):
    """Plugin error D998 wrapping an EnvironmentError raised while reading a file."""
    def __init__(self, err):
        # D998 is this plugin's own code for environment (I/O) problems.
        super(EnvironError, self).__init__(
            code='D998',
            short_desc='EnvironmentError: ' + str(err),
            context=None,
        )
    @property
    def line(self):
        """Return 0 as line number for EnvironmentError."""
        return 0
class AllError(pep257.Error):
    """Plugin error D999 wrapping a pep257.AllError message."""
    def __init__(self, err):
        # Keep only the first line of the underlying error message.
        super(AllError, self).__init__(
            code='D999',
            short_desc=str(err).partition('\n')[0],
            context=None,
        )
    @property
    def line(self):
        """pep257.AllError does not contain line number. Return 0 instead."""
        return 0
class pep257Checker(object):
    """Flake8 plugin class running pydocstyle (pep257) docstring checks."""
    # flake8 plugin registration attributes
    name = 'flake8-docstrings-catnado'
    version = __version__ + ', {0}: {1}'.format(
        module_name, pep257.__version__
    )
    # Filenames flake8 uses to mean "read the source from standard input".
    STDIN_NAMES = set(['stdin', '-', '(none)', None])
    def __init__(self, tree, filename='(none)'):
        """Store the target file, build a checker and load its source."""
        self.tree = tree
        self.filename = filename
        self.checker = pep257.ConventionChecker()
        self.load_source()
    @classmethod
    def parse_options(cls, options):
        """Pass options through to this plugin."""
        cls.ignore_decorators = options.ignore_decorators
        cls.exclude_from_doctest = options.exclude_from_doctest
        # Normalise a single pattern into a one-element list.
        if not isinstance(cls.exclude_from_doctest, list):
            cls.exclude_from_doctest = [cls.exclude_from_doctest]
    @classmethod
    def add_options(cls, parser):
        """Add additional CLI options."""
        parser.add_option('--ignore_decorators', action='store_true')
    def _check_source(self):
        # Yield pydocstyle errors, converting its exceptions into the
        # plugin's own D998/D999 error objects instead of crashing flake8.
        try:
            for err in self.checker.check_source(
                self.source,
                self.filename,
                ignore_decorators=self.ignore_decorators,
            ):
                yield err
        except pep257.AllError as err:
            yield AllError(err)
        except EnvironmentError as err:
            yield EnvironError(err)
    def run(self):
        """Use directly check() api from pydocstyle."""
        # Skip files matching any --exclude_from_doctest glob pattern.
        if self.exclude_from_doctest:
            for pattern in self.exclude_from_doctest:
                if fnmatch.fnmatch(self.filename, pattern):
                    return
        checked_codes = pep257.conventions.pep257 | {'D998', 'D999'}
        for error in self._check_source():
            if isinstance(error, pep257.Error) and error.code in checked_codes:
                # NOTE(sigmavirus24): Fixes GitLab#3
                message = '%s %s' % (error.code, error.short_desc)
                # flake8 expects (line, col, message, checker type) tuples.
                yield (error.line, 0, message, type(self))
    def load_source(self):
        """Load the source for the specified file."""
        if self.filename in self.STDIN_NAMES:
            self.filename = 'stdin'
            self.source = pycodestyle.stdin_get_value()
        else:
            with pep257.tokenize_open(self.filename) as fd:
                self.source = fd.read()
| # -*- coding: utf-8 -*-
"""Implementation of pydocstyle integration with Flake8.
pydocstyle docstrings convention needs error code and class parser for be
included as module into flake8
"""
import fnmatch
import os
from flake8_polyfill import stdin
import pycodestyle
try:
import pydocstyle as pep257
module_name = 'pydocstyle'
except ImportError:
import pep257
module_name = 'pep257'
__version__ = '0.0.1dev1'
__all__ = ('pep257Checker',)
stdin.monkey_patch('pycodestyle')
class EnvironError(pep257.Error):
def __init__(self, err):
super(EnvironError, self).__init__(
code='D998',
short_desc='EnvironmentError: ' + str(err),
context=None,
)
@property
def line(self):
"""Return 0 as line number for EnvironmentError."""
return 0
class AllError(pep257.Error):
def __init__(self, err):
super(AllError, self).__init__(
code='D999',
short_desc=str(err).partition('\n')[0],
context=None,
)
@property
def line(self):
"""pep257.AllError does not contain line number. Return 0 instead."""
return 0
class pep257Checker(object):
"""Flake8 needs a class to check python file."""
name = 'flake8-docstrings-catnado'
version = __version__ + ', {0}: {1}'.format(
module_name, pep257.__version__
)
STDIN_NAMES = set(['stdin', '-', '(none)', None])
def __init__(self, tree, filename='(none)'):
"""Placeholder."""
self.tree = tree
self.filename = filename
self.checker = pep257.ConventionChecker()
self.load_source()
@classmethod
def parse_options(cls, options):
"""Pass options through to this plugin."""
cls.ignore_decorators = options.ignore_decorators
cls.exclude_from_doctest = options.exclude_from_doctest
if not isinstance(cls.exclude_from_doctest, list):
cls.exclude_from_doctest = [cls.exclude_from_doctest]
@classmethod
def add_options(cls, parser):
"""Add additional CLI options."""
parser.add_option('--ignore_decorators', action='store_true')
def _check_source(self):
try:
for err in self.checker.check_source(
self.source,
self.filename,
ignore_decorators=self.ignore_decorators,
):
yield err
except pep257.AllError as err:
yield AllError(err)
except EnvironmentError as err:
yield EnvironError(err)
def run(self):
"""Use directly check() api from pydocstyle."""
if self.exclude_from_doctest:
for pattern in self.exclude_from_doctest:
if fnmatch.fnmatch(self.filename, pattern):
return
checked_codes = pep257.conventions.pep257 | {'D998', 'D999'}
for error in self._check_source():
if isinstance(error, pep257.Error) and error.code in checked_codes:
# NOTE(sigmavirus24): Fixes GitLab#3
message = '%s %s' % (error.code, error.short_desc)
yield (error.line, 0, message, type(self))
def load_source(self):
"""Load the source for the specified file."""
if self.filename in self.STDIN_NAMES:
self.filename = 'stdin'
self.source = pycodestyle.stdin_get_value()
else:
with pep257.tokenize_open(self.filename) as fd:
self.source = fd.read()
| en | 0.760911 | # -*- coding: utf-8 -*- Implementation of pydocstyle integration with Flake8. pydocstyle docstrings convention needs error code and class parser for be included as module into flake8 Return 0 as line number for EnvironmentError. pep257.AllError does not contain line number. Return 0 instead. Flake8 needs a class to check python file. Placeholder. Pass options through to this plugin. Add additional CLI options. Use directly check() api from pydocstyle. # NOTE(sigmavirus24): Fixes GitLab#3 Load the source for the specified file. | 2.321884 | 2 |
setup.py | tonybaloney/fastapi-icontract | 21 | 6616518 | <filename>setup.py
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
import sys
from setuptools import setup, find_packages
# pylint: disable=redefined-builtin
here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name
with open(os.path.join(here, "README.rst"), encoding="utf-8") as fid:
long_description = fid.read() # pylint: disable=invalid-name
with open(os.path.join(here, "requirements.txt"), encoding="utf-8") as fid:
install_requires = [line for line in fid.read().splitlines() if line.strip()]
setup(
name="fastapi-icontract",
version="0.0.2",
description="Specify contracts for FastAPI endpoints.",
long_description=long_description,
url="https://github.com/mristin/fastapi-icontract",
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
license="License :: OSI Approved :: MIT License",
keywords="design-by-contract contracts automatic testing property-based",
packages=find_packages(exclude=["tests"]),
install_requires=install_requires,
# fmt: off
extras_require={
"dev": [
"black==20.8b1",
"mypy==0.812",
"pylint==2.3.1",
"pydocstyle>=2.1.1,<3",
"coverage>=4.5.1,<5",
"docutils>=0.14,<1",
"httpx>=0.16.1,<1",
"requests>=2.25.1,<3",
"uvicorn",
"asyncstdlib>=3.9.0,<4"
],
},
# fmt: on
py_modules=["fastapi_icontract"],
package_data={"fastapi_icontract": ["py.typed"]},
data_files=[(".", ["LICENSE", "README.rst", "requirements.txt"])],
)
| <filename>setup.py
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
import sys
from setuptools import setup, find_packages
# pylint: disable=redefined-builtin
here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name
with open(os.path.join(here, "README.rst"), encoding="utf-8") as fid:
long_description = fid.read() # pylint: disable=invalid-name
with open(os.path.join(here, "requirements.txt"), encoding="utf-8") as fid:
install_requires = [line for line in fid.read().splitlines() if line.strip()]
setup(
name="fastapi-icontract",
version="0.0.2",
description="Specify contracts for FastAPI endpoints.",
long_description=long_description,
url="https://github.com/mristin/fastapi-icontract",
author="<NAME>",
author_email="<EMAIL>",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
license="License :: OSI Approved :: MIT License",
keywords="design-by-contract contracts automatic testing property-based",
packages=find_packages(exclude=["tests"]),
install_requires=install_requires,
# fmt: off
extras_require={
"dev": [
"black==20.8b1",
"mypy==0.812",
"pylint==2.3.1",
"pydocstyle>=2.1.1,<3",
"coverage>=4.5.1,<5",
"docutils>=0.14,<1",
"httpx>=0.16.1,<1",
"requests>=2.25.1,<3",
"uvicorn",
"asyncstdlib>=3.9.0,<4"
],
},
# fmt: on
py_modules=["fastapi_icontract"],
package_data={"fastapi_icontract": ["py.typed"]},
data_files=[(".", ["LICENSE", "README.rst", "requirements.txt"])],
)
| en | 0.684699 | A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject # pylint: disable=redefined-builtin # pylint: disable=invalid-name # pylint: disable=invalid-name # fmt: off # fmt: on | 1.829335 | 2 |
alembic/versions/0c3924deb2b7_create_category_table.py | modist-io/modist-api | 1 | 6616519 | """Create category table.
Revision ID: <KEY>
Revises: <PASSWORD>d<PASSWORD>
Create Date: 2020-04-15 17:01:38.788610
"""
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from modist.models.common import CategoryType
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "f68e6dafb977"
branch_labels = None
depends_on = None
def upgrade():
"""Pushes changes into the database."""
op.create_table(
"category",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("is_active", sa.Boolean(), server_default="true", nullable=False),
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("parent_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("type", sa.Enum(CategoryType), nullable=False),
sa.Column("name", sa.String(length=64), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("depth", sa.Integer(), server_default="0", nullable=False),
sa.Column(
"lineage",
postgresql.ARRAY(postgresql.UUID(as_uuid=True)),
server_default="{}",
nullable=False,
),
sa.ForeignKeyConstraint(["parent_id"], ["category.id"], ondelete="cascade"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("parent_id", "name", "type"),
)
op.create_refresh_updated_at_trigger("category")
op.create_refresh_depth_and_lineage_trigger("category")
def downgrade():
"""Reverts changes performed by upgrade()."""
op.drop_refresh_depth_and_lineage_trigger("category")
op.drop_refresh_updated_at_trigger("category")
op.drop_table("category")
sa.Enum(CategoryType).drop(bind=op.get_bind())
| """Create category table.
Revision ID: <KEY>
Revises: <PASSWORD>d<PASSWORD>
Create Date: 2020-04-15 17:01:38.788610
"""
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from modist.models.common import CategoryType
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "f68e6dafb977"
branch_labels = None
depends_on = None
def upgrade():
"""Pushes changes into the database."""
op.create_table(
"category",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("is_active", sa.Boolean(), server_default="true", nullable=False),
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("parent_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("type", sa.Enum(CategoryType), nullable=False),
sa.Column("name", sa.String(length=64), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("depth", sa.Integer(), server_default="0", nullable=False),
sa.Column(
"lineage",
postgresql.ARRAY(postgresql.UUID(as_uuid=True)),
server_default="{}",
nullable=False,
),
sa.ForeignKeyConstraint(["parent_id"], ["category.id"], ondelete="cascade"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("parent_id", "name", "type"),
)
op.create_refresh_updated_at_trigger("category")
op.create_refresh_depth_and_lineage_trigger("category")
def downgrade():
"""Reverts changes performed by upgrade()."""
op.drop_refresh_depth_and_lineage_trigger("category")
op.drop_refresh_updated_at_trigger("category")
op.drop_table("category")
sa.Enum(CategoryType).drop(bind=op.get_bind())
| en | 0.664474 | Create category table. Revision ID: <KEY> Revises: <PASSWORD>d<PASSWORD> Create Date: 2020-04-15 17:01:38.788610 # revision identifiers, used by Alembic. Pushes changes into the database. Reverts changes performed by upgrade(). | 1.932691 | 2 |
src/pyrefox/__init__.py | nuno-andre/pyrefox | 0 | 6616520 | <filename>src/pyrefox/__init__.py
from .main import Pyrefox
__version__ = 0, 0, 1, 'dev3'
__all__ = ['Pyrefox']
| <filename>src/pyrefox/__init__.py
from .main import Pyrefox
__version__ = 0, 0, 1, 'dev3'
__all__ = ['Pyrefox']
| none | 1 | 1.147801 | 1 | |
src/lib/datasets/build.py | nerminsamet/HoughNet-VID | 5 | 6616521 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import copy
import logging
from . import concat_dataset as D
from src.lib.datasets.dataset_factory import get_dataset
from src.lib.datasets.dataset.vid import VIDDataset
import os
DATA_LIST_TRAIN = ('DET_train_30classes', 'VID_train_15frames')
DATA_LIST_VAL = ('VID_val_videos',)
DATASETS = {
"DET_train_30classes": {
"img_dir": "ILSVRC2015/Data/DET",
"anno_path": "ILSVRC2015/Annotations/DET",
"img_index": "ILSVRC2015/ImageSets/DET_train_30classes.txt"
},
"VID_train_15frames": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_train_15frames.txt"
},
"VID_train_every10frames": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_train_every10frames.txt"
},
"VID_val_frames": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_val_frames.txt"
},
"VID_val_videos": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_val_videos.txt"
}
}
def get_data(name, data_dir, method="base"):
dataset_dict = {
"base": "VIDDataset",
"mega": "VIDMEGADataset",
}
if ("DET" in name) or ("VID" in name):
# data_dir = DATA_DIR
attrs = DATASETS[name]
args = dict(
image_set=name,
data_dir=data_dir,
img_dir=os.path.join(data_dir, attrs["img_dir"]),
anno_path=os.path.join(data_dir, attrs["anno_path"]),
img_index=os.path.join(data_dir, attrs["img_index"])
)
return dict(
factory=dataset_dict[method],
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
def build_dataset(dt, opt, is_train=True, method="base"):
if is_train:
dataset_list = DATA_LIST_TRAIN
else:
dataset_list = DATA_LIST_VAL
if not isinstance(dataset_list, (list, tuple)):
raise RuntimeError(
"dataset_list should be a list of strings, got {}".format(dataset_list)
)
datasets = []
for dataset_name in dataset_list:
data = get_data(dataset_name, opt.data_dir, method)
# factory = getattr(D, data["factory"])
args = data["args"]
if "VID" in data["factory"]:
args["is_train"] = is_train
# dataset = VIDDataset(**args)
dataset = dt(**args, opt=opt)
datasets.append(dataset)
# for testing, return a list of datasets
if not is_train:
return datasets
# for training, concatenate all datasets into a single one
dataset = datasets[0]
if len(datasets) > 1:
dataset = D.ConcatDataset(datasets)
return dataset
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import copy
import logging
from . import concat_dataset as D
from src.lib.datasets.dataset_factory import get_dataset
from src.lib.datasets.dataset.vid import VIDDataset
import os
DATA_LIST_TRAIN = ('DET_train_30classes', 'VID_train_15frames')
DATA_LIST_VAL = ('VID_val_videos',)
DATASETS = {
"DET_train_30classes": {
"img_dir": "ILSVRC2015/Data/DET",
"anno_path": "ILSVRC2015/Annotations/DET",
"img_index": "ILSVRC2015/ImageSets/DET_train_30classes.txt"
},
"VID_train_15frames": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_train_15frames.txt"
},
"VID_train_every10frames": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_train_every10frames.txt"
},
"VID_val_frames": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_val_frames.txt"
},
"VID_val_videos": {
"img_dir": "ILSVRC2015/Data/VID",
"anno_path": "ILSVRC2015/Annotations/VID",
"img_index": "ILSVRC2015/ImageSets/VID_val_videos.txt"
}
}
def get_data(name, data_dir, method="base"):
dataset_dict = {
"base": "VIDDataset",
"mega": "VIDMEGADataset",
}
if ("DET" in name) or ("VID" in name):
# data_dir = DATA_DIR
attrs = DATASETS[name]
args = dict(
image_set=name,
data_dir=data_dir,
img_dir=os.path.join(data_dir, attrs["img_dir"]),
anno_path=os.path.join(data_dir, attrs["anno_path"]),
img_index=os.path.join(data_dir, attrs["img_index"])
)
return dict(
factory=dataset_dict[method],
args=args,
)
raise RuntimeError("Dataset not available: {}".format(name))
def build_dataset(dt, opt, is_train=True, method="base"):
if is_train:
dataset_list = DATA_LIST_TRAIN
else:
dataset_list = DATA_LIST_VAL
if not isinstance(dataset_list, (list, tuple)):
raise RuntimeError(
"dataset_list should be a list of strings, got {}".format(dataset_list)
)
datasets = []
for dataset_name in dataset_list:
data = get_data(dataset_name, opt.data_dir, method)
# factory = getattr(D, data["factory"])
args = data["args"]
if "VID" in data["factory"]:
args["is_train"] = is_train
# dataset = VIDDataset(**args)
dataset = dt(**args, opt=opt)
datasets.append(dataset)
# for testing, return a list of datasets
if not is_train:
return datasets
# for training, concatenate all datasets into a single one
dataset = datasets[0]
if len(datasets) > 1:
dataset = D.ConcatDataset(datasets)
return dataset
| en | 0.727122 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # data_dir = DATA_DIR # factory = getattr(D, data["factory"]) # dataset = VIDDataset(**args) # for testing, return a list of datasets # for training, concatenate all datasets into a single one | 2.064002 | 2 |
naiveBayes/naive_bayes.py | alexstaley/machine-learning | 1 | 6616522 | """
<NAME> -- Student ID: 919519311
Assignment 3 -- February 2020
This program utilizes a naive Bayes classifier
with a Gaussian distribution to classify a data set
conforming to the format of the UCI data sets.
The user should run the program from the command line,
with the file path of the training set as the first argument
and the file path of the validation set as the second, e.g.
python naive_bayes.py /file/path/to/training_set.txt /file/path/to/validation_set.txt
Functions called here are implemented in the Experiment.py file.
"""
import sys
from naiveBayes.Experiment import *
trainingFeatures, trainingLabels = readFile(sys.argv[1], training=True)
validationFeatures, validationLabels = readFile(sys.argv[2], training=False)
print("\n")
mean, stdDev = learnData(trainingFeatures, trainingLabels)
print("\n\n\n")
classificationAccuracy = testBayes(validationFeatures, validationLabels, mean, stdDev)
print("\nClassification accuracy=%6.4f" % classificationAccuracy)
| """
<NAME> -- Student ID: 919519311
Assignment 3 -- February 2020
This program utilizes a naive Bayes classifier
with a Gaussian distribution to classify a data set
conforming to the format of the UCI data sets.
The user should run the program from the command line,
with the file path of the training set as the first argument
and the file path of the validation set as the second, e.g.
python naive_bayes.py /file/path/to/training_set.txt /file/path/to/validation_set.txt
Functions called here are implemented in the Experiment.py file.
"""
import sys
from naiveBayes.Experiment import *
trainingFeatures, trainingLabels = readFile(sys.argv[1], training=True)
validationFeatures, validationLabels = readFile(sys.argv[2], training=False)
print("\n")
mean, stdDev = learnData(trainingFeatures, trainingLabels)
print("\n\n\n")
classificationAccuracy = testBayes(validationFeatures, validationLabels, mean, stdDev)
print("\nClassification accuracy=%6.4f" % classificationAccuracy)
| en | 0.85099 | <NAME> -- Student ID: 919519311 Assignment 3 -- February 2020 This program utilizes a naive Bayes classifier with a Gaussian distribution to classify a data set conforming to the format of the UCI data sets. The user should run the program from the command line, with the file path of the training set as the first argument and the file path of the validation set as the second, e.g. python naive_bayes.py /file/path/to/training_set.txt /file/path/to/validation_set.txt Functions called here are implemented in the Experiment.py file. | 3.74664 | 4 |
tutorials/segmentation_synthetic_unet.py | civodlu/trw | 3 | 6616523 | import trw
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.unet = trw.layers.UNetBase(2, input_channels=3, output_channels=2, channels=[4, 8, 16])
def forward(self, batch):
x = self.unet(batch['image'])
return {
'segmentation': trw.train.OutputSegmentation(output=x, output_truth=batch['mask']),
'segmentation_output': trw.train.OutputEmbedding(x.argmax(dim=1, keepdim=True))
}
def per_epoch_callbacks():
return [
trw.callbacks.CallbackReportingExportSamples(max_samples=5),
trw.callbacks.CallbackEpochSummary(),
]
trainer = trw.train.TrainerV2(callbacks_per_epoch=per_epoch_callbacks())
results = trainer.fit(
trw.train.Options(num_epochs=15),
datasets=trw.datasets.create_fake_symbols_2d_dataset(
nb_samples=1000, image_shape=[256, 256], nb_classes_at_once=1, batch_size=50),
log_path='synthetic_segmentation_unet',
model=Net(),
optimizers_fn=lambda datasets, model: trw.train.create_sgd_optimizers_scheduler_step_lr_fn(
datasets=datasets, model=model, learning_rate=0.05, step_size=50, gamma=0.3))
| import trw
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.unet = trw.layers.UNetBase(2, input_channels=3, output_channels=2, channels=[4, 8, 16])
def forward(self, batch):
x = self.unet(batch['image'])
return {
'segmentation': trw.train.OutputSegmentation(output=x, output_truth=batch['mask']),
'segmentation_output': trw.train.OutputEmbedding(x.argmax(dim=1, keepdim=True))
}
def per_epoch_callbacks():
return [
trw.callbacks.CallbackReportingExportSamples(max_samples=5),
trw.callbacks.CallbackEpochSummary(),
]
trainer = trw.train.TrainerV2(callbacks_per_epoch=per_epoch_callbacks())
results = trainer.fit(
trw.train.Options(num_epochs=15),
datasets=trw.datasets.create_fake_symbols_2d_dataset(
nb_samples=1000, image_shape=[256, 256], nb_classes_at_once=1, batch_size=50),
log_path='synthetic_segmentation_unet',
model=Net(),
optimizers_fn=lambda datasets, model: trw.train.create_sgd_optimizers_scheduler_step_lr_fn(
datasets=datasets, model=model, learning_rate=0.05, step_size=50, gamma=0.3))
| none | 1 | 2.336857 | 2 | |
firestore/errors/__init__.py | Paul-Kim/firestore | 7 | 6616524 | __all__ = ("InvalidFieldError", "ValidationError")
class DuplicateError(Exception):
pass
class InvalidDocumentError(Exception):
pass
class InvalidFieldError(Exception):
pass
class NotFoundError(Exception):
pass
class OfflineDocumentError(Exception):
pass
class PKError(Exception):
pass
class UnknownFieldError(Exception):
pass
class ValidationError(Exception):
pass
| __all__ = ("InvalidFieldError", "ValidationError")
class DuplicateError(Exception):
pass
class InvalidDocumentError(Exception):
pass
class InvalidFieldError(Exception):
pass
class NotFoundError(Exception):
pass
class OfflineDocumentError(Exception):
pass
class PKError(Exception):
pass
class UnknownFieldError(Exception):
pass
class ValidationError(Exception):
pass
| none | 1 | 2.172837 | 2 | |
ts/torch_handler/video_handler.py | hahaxun/serve | 0 | 6616525 | <reponame>hahaxun/serve
# pylint: disable=W0223
# Details : https://github.com/PyCQA/pylint/issues/3098
"""
Base module for all video url handlers
"""
from abc import ABC
import io
import torch
import numpy
import concurrent.futures
import math
import torchvision.io as io
import torchvision
from PIL import Image
from .base_handler import BaseHandler
def read_video(args):
"""
split video and read specific frame
:param args->tuple(content, nfrmae, maxframe)
:return: nth frame of videos
"""
content, nframe, maxframe = args
info = io._probe_video_from_memory(content)
frament = float(info['video_duration'])/ (maxframe + 1) * nframe
start_offset = int(math.floor(frament * (1 / info["video_timebase"])))
end_offset = start_offset + int(info["video_timebase"].denominator / info['video_duration'].denominator)
return io._read_video_from_memory(content, read_audio_stream = 0,
video_pts_range=[start_offset, end_offset])
class VideoHandler(BaseHandler, ABC):
"""
Base class for all vision handlers
"""
def preprocess(self, data):
video_streams = []
for row in data:
data = row.get("data") or row.get("body")
with concurrent.futures.ProcessPoolExecutor() as executor:
results = []
#tmp add 5 frame at here
for result in zip(executor.map(read_video, [(data, i , 5) for i in range(5)])):
results.append(result[0][0])
video_streams.append(torch.stack(results))
return torch.stack(images)
| # pylint: disable=W0223
# Details : https://github.com/PyCQA/pylint/issues/3098
"""
Base module for all video url handlers
"""
from abc import ABC
import io
import torch
import numpy
import concurrent.futures
import math
import torchvision.io as io
import torchvision
from PIL import Image
from .base_handler import BaseHandler
def read_video(args):
"""
split video and read specific frame
:param args->tuple(content, nfrmae, maxframe)
:return: nth frame of videos
"""
content, nframe, maxframe = args
info = io._probe_video_from_memory(content)
frament = float(info['video_duration'])/ (maxframe + 1) * nframe
start_offset = int(math.floor(frament * (1 / info["video_timebase"])))
end_offset = start_offset + int(info["video_timebase"].denominator / info['video_duration'].denominator)
return io._read_video_from_memory(content, read_audio_stream = 0,
video_pts_range=[start_offset, end_offset])
class VideoHandler(BaseHandler, ABC):
"""
Base class for all vision handlers
"""
def preprocess(self, data):
video_streams = []
for row in data:
data = row.get("data") or row.get("body")
with concurrent.futures.ProcessPoolExecutor() as executor:
results = []
#tmp add 5 frame at here
for result in zip(executor.map(read_video, [(data, i , 5) for i in range(5)])):
results.append(result[0][0])
video_streams.append(torch.stack(results))
return torch.stack(images) | en | 0.559682 | # pylint: disable=W0223 # Details : https://github.com/PyCQA/pylint/issues/3098 Base module for all video url handlers split video and read specific frame :param args->tuple(content, nfrmae, maxframe) :return: nth frame of videos Base class for all vision handlers #tmp add 5 frame at here | 2.493271 | 2 |
# config.py -- qtile configuration (repo: loacker/my_qtile_config)
# Copyleft (c) 2016 <NAME> [l0aCk3r]
from libqtile.config import Key, Screen, Group, Drag, Click, hook, Match
from libqtile.command import lazy
from libqtile import layout, bar, widget
import os
import subprocess
mod = "mod4"
alt = "mod1"
printkey = "Print"
shift = "shift"
control = "control"
keys = [
# Switch between windows in current stack pane
Key([mod], "k", lazy.layout.down()),
Key([mod], "j", lazy.layout.up()),
Key([mod], "h", lazy.layout.previous()),
Key([mod], "l", lazy.layout.next()),
# MonadTall layout
Key([mod, shift], "k", lazy.layout.shuffle_up()),
Key([mod, shift], "j", lazy.layout.shuffle_down()),
Key([mod, shift], "h", lazy.layout.swap_left()),
Key([mod, shift], "l", lazy.layout.swap_right()),
Key([mod], "i", lazy.layout.grow()),
Key([mod], "o", lazy.layout.shrink()),
Key([mod, shift], "space", lazy.layout.flip()),
# Stack layout
Key([mod, shift], "j", lazy.layout.client_to_next()),
Key([mod, shift], "k", lazy.layout.client_to_previous()),
Key([mod], "a", lazy.layout.add()),
Key([mod], "d", lazy.layout.delete()),
# Vertical and MonadTall layout
Key([mod], 'm', lazy.layout.maximize()),
Key([mod], 'n', lazy.layout.normalize()),
# Toggle floating
Key([mod], "t", lazy.window.toggle_floating()),
# Toggle full screen
Key([mod], "f", lazy.window.toggle_fullscreen()),
# This should be usefull when floating windows get buried
# Select floating window
Key([alt], "Tab", lazy.group.next_window()),
# Bring to front the buried window
Key([alt], "grave", lazy.window.bring_to_front()),
# Toggle between split and unsplit sides of stack.
# Split = all windows displayed
# Unsplit = 1 window displayed, like Max layout, but still with
# multiple stack panes
Key([mod], "space", lazy.layout.toggle_split()),
# Toggle between different layouts as defined below
Key([mod], "Tab", lazy.next_layout()),
# Kill a window
Key([mod], "w", lazy.window.kill()),
# Restart or shutdown qtile
Key([mod, control], "r", lazy.restart()),
Key([mod, control], "q", lazy.shutdown()),
# Run command
Key([mod], "r", lazy.spawncmd()),
# Dual monitor
Key([mod, alt], "Tab", lazy.window.to_next_screen()), # Don't work
Key([mod, alt], "1", lazy.to_screen(0), lazy.group.toscreen(0)),
Key([mod, alt], "2", lazy.to_screen(1), lazy.group.toscreen(1)),
# Spin up applications
Key([mod], "Return", lazy.spawn("urxvt")),
Key([alt], "c", lazy.spawn("urxvt -e weechat-curses")),
Key([alt], "d", lazy.spawn("firefox")),
Key([alt], "g", lazy.spawn("chromium-browser -disable-prompt-on-repost")),
Key([alt], "s", lazy.spawn("skype")),
Key([alt], "t", lazy.spawn("telegram")),
Key([alt], "m", lazy.spawn("thunderbird")),
Key([alt], "p", lazy.spawn("pavucontrol")),
Key([alt], printkey, lazy.spawn("scrot -sb '%d-%m-%Y_%H-%M-%S_$wx$h_scrot_selection.png' -e 'mv $f ~/pictures/screenshots'")),
Key([mod], printkey, lazy.spawn("scrot -ub '%d-%m-%Y_%H-%M-%S_$wx$h_scrot_window.png' -e 'mv $f ~/pictures/screenshots'")),
]
# Use "xprop WM_CLASS" command to retrieve the wm_class attribute of a window
groups = [
Group("1", matches=[
Match(wm_class=["URxvt"])]),
Group("2", matches=[
Match(wm_class=["Firefox"]),
Match(wm_class=["chromium-browser-chromium"])]),
Group("3", matches=[
Match(wm_class=["Skype"]),
Match(wm_class=["telegram"])]),
Group("4", matches=[
Match(wm_class=["Thunderbird"]),
Match(wm_class=["Pavucontrol"])]),
Group("5"),
Group("6"),
Group("7"),
Group("8"),
Group("9"),
Group("0", matches=[
Match(wm_class=["rdesktop"])]),
]
for i in groups:
# mod1 + number of group = switch to group
keys.append(Key([mod], i.name, lazy.group[i.name].toscreen()))
# mod1 + shift + number of group = switch to & move focused window to group
keys.append(Key([mod, shift], i.name, lazy.window.togroup(i.name)))
layouts = [
layout.Max(),
layout.VerticalTile(),
layout.Stack(num_stacks=2),
layout.MonadTall(),
]
widget_defaults = dict(
font='Andale',
fontsize=12,
padding=3,
)
screens = [
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.Sep(),
widget.TaskList(),
widget.Prompt(),
widget.Systray(),
widget.Sep(),
widget.Mpd(reconnect=True),
widget.Sep(),
widget.Volume(),
widget.Sep(),
widget.Backlight(backlight_name='intel_backlight'),
widget.Sep(),
widget.BatteryIcon(),
widget.Sep(),
widget.Clock(format='%d/%m/%Y %a %I:%M %p'),
],
25,
),
bottom=bar.Bar(
[
widget.CurrentScreen(),
widget.Sep(),
widget.CurrentLayout(),
widget.Spacer(),
widget.Sep(),
widget.DF(partition='/', measure='M'),
widget.DF(partition='/usr'),
widget.DF(partition='/var'),
widget.DF(partition='/opt'),
widget.DF(partition='/home', visible_on_warn=False),
widget.DF(partition='/home/shared'),
widget.Sep(),
widget.TextBox('eth0:'),
widget.Net(interface='eth0'),
widget.Sep(),
widget.TextBox('wlan0:'),
widget.Net(interface='wlan0'),
widget.Sep(),
widget.TextBox('sda:'),
widget.HDDBusyGraph(device='sda'),
widget.TextBox('sdb:'),
widget.HDDBusyGraph(device='sdb'),
widget.Sep(),
widget.MemoryGraph(graph_color='FF0101'),
widget.Sep(),
widget.ThermalSensor(),
widget.CPUGraph(),
],
25,
),
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.Sep(),
widget.TaskList(),
widget.Prompt(),
widget.Systray(),
widget.Sep(),
widget.Mpd(reconnect=True),
widget.Sep(),
widget.Volume(),
widget.Sep(),
widget.Backlight(backlight_name='intel_backlight'),
widget.Sep(),
widget.BatteryIcon(),
widget.Sep(),
widget.Clock(format='%d/%m/%Y %a %I:%M %p'),
],
25,
),
bottom=bar.Bar(
[
widget.CurrentScreen(),
widget.Sep(),
widget.CurrentLayout(),
widget.Spacer(),
widget.Sep(),
widget.DF(partition='/', measure='M'),
widget.DF(partition='/usr'),
widget.DF(partition='/var'),
widget.DF(partition='/opt'),
widget.DF(partition='/home', visible_on_warn=False),
widget.DF(partition='/home/shared'),
widget.Sep(),
widget.TextBox('eth0:'),
widget.Net(interface='eth0'),
widget.Sep(),
widget.TextBox('wlan0:'),
widget.Net(interface='wlan0'),
widget.Sep(),
widget.TextBox('sda:'),
widget.HDDBusyGraph(device='sda'),
widget.TextBox('sdb:'),
widget.HDDBusyGraph(device='sdb'),
widget.Sep(),
widget.MemoryGraph(graph_color='FF0101'),
widget.Sep(),
widget.ThermalSensor(),
widget.CPUGraph(),
],
25,
),
),
]
# Mouse bindings for floating windows:
#   mod + left-button drag  -> move the window (set_position_floating)
#   mod + right-button drag -> resize the window (set_size_floating)
#   mod + middle click      -> raise the window to the front
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()),
    Drag([mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()),
    Click([mod], "Button2", lazy.window.bring_to_front())
]
@hook.subscribe.client_new
def floating(window):
    """Float transient windows and notification/toolbar/splash/dialog clients."""
    transient_for = window.window.get_wm_transient_for()
    wm_type = window.window.get_wm_type()
    if wm_type in ('notification', 'toolbar', 'splash', 'dialog') or transient_for:
        window.floating = True
@hook.subscribe.screen_change
def restart_on_randr(qtile, ev):
    """Restart qtile when the screen layout changes (the event itself is unused)."""
    qtile.cmd_restart()
# Run startup script
@hook.subscribe.startup_once
def autostart():
home = os.path.expanduser('~')
subprocess.call([home + '/.config/qtile/autostart.sh'])
dgroups_key_binder = None
dgroups_app_rules = []
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating()
auto_fullscreen = True
wmname = "LG3D"
main = None
#def main(qtile):
# qtile.cmd_debug()
# vim: set ts=8 sw=4 sts=4 ff=unix ft=python et ai :
| # Copyleft (c) 2016 <NAME> [l0aCk3r]
from libqtile.config import Key, Screen, Group, Drag, Click, hook, Match
from libqtile.command import lazy
from libqtile import layout, bar, widget
import os
import subprocess
mod = "mod4"
alt = "mod1"
printkey = "Print"
shift = "shift"
control = "control"
keys = [
# Switch between windows in current stack pane
Key([mod], "k", lazy.layout.down()),
Key([mod], "j", lazy.layout.up()),
Key([mod], "h", lazy.layout.previous()),
Key([mod], "l", lazy.layout.next()),
# MonadTall layout
Key([mod, shift], "k", lazy.layout.shuffle_up()),
Key([mod, shift], "j", lazy.layout.shuffle_down()),
Key([mod, shift], "h", lazy.layout.swap_left()),
Key([mod, shift], "l", lazy.layout.swap_right()),
Key([mod], "i", lazy.layout.grow()),
Key([mod], "o", lazy.layout.shrink()),
Key([mod, shift], "space", lazy.layout.flip()),
# Stack layout
Key([mod, shift], "j", lazy.layout.client_to_next()),
Key([mod, shift], "k", lazy.layout.client_to_previous()),
Key([mod], "a", lazy.layout.add()),
Key([mod], "d", lazy.layout.delete()),
# Vertical and MonadTall layout
Key([mod], 'm', lazy.layout.maximize()),
Key([mod], 'n', lazy.layout.normalize()),
# Toggle floating
Key([mod], "t", lazy.window.toggle_floating()),
# Toggle full screen
Key([mod], "f", lazy.window.toggle_fullscreen()),
# This should be usefull when floating windows get buried
# Select floating window
Key([alt], "Tab", lazy.group.next_window()),
# Bring to front the buried window
Key([alt], "grave", lazy.window.bring_to_front()),
# Toggle between split and unsplit sides of stack.
# Split = all windows displayed
# Unsplit = 1 window displayed, like Max layout, but still with
# multiple stack panes
Key([mod], "space", lazy.layout.toggle_split()),
# Toggle between different layouts as defined below
Key([mod], "Tab", lazy.next_layout()),
# Kill a window
Key([mod], "w", lazy.window.kill()),
# Restart or shutdown qtile
Key([mod, control], "r", lazy.restart()),
Key([mod, control], "q", lazy.shutdown()),
# Run command
Key([mod], "r", lazy.spawncmd()),
# Dual monitor
Key([mod, alt], "Tab", lazy.window.to_next_screen()), # Don't work
Key([mod, alt], "1", lazy.to_screen(0), lazy.group.toscreen(0)),
Key([mod, alt], "2", lazy.to_screen(1), lazy.group.toscreen(1)),
# Spin up applications
Key([mod], "Return", lazy.spawn("urxvt")),
Key([alt], "c", lazy.spawn("urxvt -e weechat-curses")),
Key([alt], "d", lazy.spawn("firefox")),
Key([alt], "g", lazy.spawn("chromium-browser -disable-prompt-on-repost")),
Key([alt], "s", lazy.spawn("skype")),
Key([alt], "t", lazy.spawn("telegram")),
Key([alt], "m", lazy.spawn("thunderbird")),
Key([alt], "p", lazy.spawn("pavucontrol")),
Key([alt], printkey, lazy.spawn("scrot -sb '%d-%m-%Y_%H-%M-%S_$wx$h_scrot_selection.png' -e 'mv $f ~/pictures/screenshots'")),
Key([mod], printkey, lazy.spawn("scrot -ub '%d-%m-%Y_%H-%M-%S_$wx$h_scrot_window.png' -e 'mv $f ~/pictures/screenshots'")),
]
# Use "xprop WM_CLASS" command to retrieve the wm_class attribute of a window
groups = [
Group("1", matches=[
Match(wm_class=["URxvt"])]),
Group("2", matches=[
Match(wm_class=["Firefox"]),
Match(wm_class=["chromium-browser-chromium"])]),
Group("3", matches=[
Match(wm_class=["Skype"]),
Match(wm_class=["telegram"])]),
Group("4", matches=[
Match(wm_class=["Thunderbird"]),
Match(wm_class=["Pavucontrol"])]),
Group("5"),
Group("6"),
Group("7"),
Group("8"),
Group("9"),
Group("0", matches=[
Match(wm_class=["rdesktop"])]),
]
for i in groups:
# mod1 + number of group = switch to group
keys.append(Key([mod], i.name, lazy.group[i.name].toscreen()))
# mod1 + shift + number of group = switch to & move focused window to group
keys.append(Key([mod, shift], i.name, lazy.window.togroup(i.name)))
layouts = [
layout.Max(),
layout.VerticalTile(),
layout.Stack(num_stacks=2),
layout.MonadTall(),
]
widget_defaults = dict(
font='Andale',
fontsize=12,
padding=3,
)
screens = [
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.Sep(),
widget.TaskList(),
widget.Prompt(),
widget.Systray(),
widget.Sep(),
widget.Mpd(reconnect=True),
widget.Sep(),
widget.Volume(),
widget.Sep(),
widget.Backlight(backlight_name='intel_backlight'),
widget.Sep(),
widget.BatteryIcon(),
widget.Sep(),
widget.Clock(format='%d/%m/%Y %a %I:%M %p'),
],
25,
),
bottom=bar.Bar(
[
widget.CurrentScreen(),
widget.Sep(),
widget.CurrentLayout(),
widget.Spacer(),
widget.Sep(),
widget.DF(partition='/', measure='M'),
widget.DF(partition='/usr'),
widget.DF(partition='/var'),
widget.DF(partition='/opt'),
widget.DF(partition='/home', visible_on_warn=False),
widget.DF(partition='/home/shared'),
widget.Sep(),
widget.TextBox('eth0:'),
widget.Net(interface='eth0'),
widget.Sep(),
widget.TextBox('wlan0:'),
widget.Net(interface='wlan0'),
widget.Sep(),
widget.TextBox('sda:'),
widget.HDDBusyGraph(device='sda'),
widget.TextBox('sdb:'),
widget.HDDBusyGraph(device='sdb'),
widget.Sep(),
widget.MemoryGraph(graph_color='FF0101'),
widget.Sep(),
widget.ThermalSensor(),
widget.CPUGraph(),
],
25,
),
),
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.Sep(),
widget.TaskList(),
widget.Prompt(),
widget.Systray(),
widget.Sep(),
widget.Mpd(reconnect=True),
widget.Sep(),
widget.Volume(),
widget.Sep(),
widget.Backlight(backlight_name='intel_backlight'),
widget.Sep(),
widget.BatteryIcon(),
widget.Sep(),
widget.Clock(format='%d/%m/%Y %a %I:%M %p'),
],
25,
),
bottom=bar.Bar(
[
widget.CurrentScreen(),
widget.Sep(),
widget.CurrentLayout(),
widget.Spacer(),
widget.Sep(),
widget.DF(partition='/', measure='M'),
widget.DF(partition='/usr'),
widget.DF(partition='/var'),
widget.DF(partition='/opt'),
widget.DF(partition='/home', visible_on_warn=False),
widget.DF(partition='/home/shared'),
widget.Sep(),
widget.TextBox('eth0:'),
widget.Net(interface='eth0'),
widget.Sep(),
widget.TextBox('wlan0:'),
widget.Net(interface='wlan0'),
widget.Sep(),
widget.TextBox('sda:'),
widget.HDDBusyGraph(device='sda'),
widget.TextBox('sdb:'),
widget.HDDBusyGraph(device='sdb'),
widget.Sep(),
widget.MemoryGraph(graph_color='FF0101'),
widget.Sep(),
widget.ThermalSensor(),
widget.CPUGraph(),
],
25,
),
),
]
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
@hook.subscribe.client_new
def floating(window):
floating_types = ['notification', 'toolbar', 'splash', 'dialog']
transient = window.window.get_wm_transient_for()
if window.window.get_wm_type() in floating_types or transient:
window.floating = True
@hook.subscribe.screen_change
def restart_on_randr(qtile, ev):
qtile.cmd_restart()
# Run startup script
@hook.subscribe.startup_once
def autostart():
home = os.path.expanduser('~')
subprocess.call([home + '/.config/qtile/autostart.sh'])
dgroups_key_binder = None
dgroups_app_rules = []
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating()
auto_fullscreen = True
wmname = "LG3D"
main = None
#def main(qtile):
# qtile.cmd_debug()
# vim: set ts=8 sw=4 sts=4 ff=unix ft=python et ai : | en | 0.759693 | # Copyleft (c) 2016 <NAME> [l0aCk3r] # Switch between windows in current stack pane # MonadTall layout # Stack layout # Vertical and MonadTall layout # Toggle floating # Toggle full screen # This should be usefull when floating windows get buried # Select floating window # Bring to front the buried window # Toggle between split and unsplit sides of stack. # Split = all windows displayed # Unsplit = 1 window displayed, like Max layout, but still with # multiple stack panes # Toggle between different layouts as defined below # Kill a window # Restart or shutdown qtile # Run command # Dual monitor # Don't work # Spin up applications # Use "xprop WM_CLASS" command to retrieve the wm_class attribute of a window # mod1 + number of group = switch to group # mod1 + shift + number of group = switch to & move focused window to group # Drag floating layouts. # Run startup script #def main(qtile): # qtile.cmd_debug() # vim: set ts=8 sw=4 sts=4 ff=unix ft=python et ai : | 2.259311 | 2 |
src/jupyter_cell_extractor/__init__.py | jaimebw/jupyter_cell_extractor | 0 | 6616527 | <gh_stars>0
import os
from pathlib import Path
from .main import cell_extractor
this_dir = Path(__file__)
TEMPLATE_TPLX = this_dir.parents[0]/"data"/"classic.tplx"
TREEGUIDE = this_dir.parents[0]/"data"/"treeguide.txt"
os.environ["TEMPLATE_TPLX"] = str(TEMPLATE_TPLX)
os.environ["TREEGUIDE"] = str(TREEGUIDE) | import os
from pathlib import Path
from .main import cell_extractor
this_dir = Path(__file__)
TEMPLATE_TPLX = this_dir.parents[0]/"data"/"classic.tplx"
TREEGUIDE = this_dir.parents[0]/"data"/"treeguide.txt"
os.environ["TEMPLATE_TPLX"] = str(TEMPLATE_TPLX)
os.environ["TREEGUIDE"] = str(TREEGUIDE) | none | 1 | 2.20947 | 2 | |
day12.py | t-ah/adventofcode-2019 | 0 | 6616528 | moons = []
with open("input12") as f:
for line in f.readlines():
moons.append([int(x) for x in line.strip()[1:-1].replace("x", "").replace("y","").replace("z","").replace("=","").split(", ")] + [0,0,0])
for _ in range(1000):
for moon in moons:
for i in range(3):
count = 0
for other_moon in moons:
if other_moon[i] > moon[i]:
count += 1
elif other_moon[i] < moon[i]:
count -= 1
moon[i + 3] += count
for moon in moons:
for i in range(3):
moon[i] += moon[i + 3]
result = sum([sum([abs(x) for x in moon[:3]]) * sum([abs(x) for x in moon[3:]]) for moon in moons])
print(result)
# 3088 too low | moons = []
with open("input12") as f:
for line in f.readlines():
moons.append([int(x) for x in line.strip()[1:-1].replace("x", "").replace("y","").replace("z","").replace("=","").split(", ")] + [0,0,0])
for _ in range(1000):
for moon in moons:
for i in range(3):
count = 0
for other_moon in moons:
if other_moon[i] > moon[i]:
count += 1
elif other_moon[i] < moon[i]:
count -= 1
moon[i + 3] += count
for moon in moons:
for i in range(3):
moon[i] += moon[i + 3]
result = sum([sum([abs(x) for x in moon[:3]]) * sum([abs(x) for x in moon[3:]]) for moon in moons])
print(result)
# 3088 too low | en | 0.580615 | # 3088 too low | 3.129054 | 3 |
python/test-nose-1.py | li-ma/homework | 0 | 6616529 | def TestABC():
assert 1 == 2
def testabc():
assert 1 != 2
| def TestABC():
assert 1 == 2
def testabc():
assert 1 != 2
| none | 1 | 1.977424 | 2 | |
mAP/cal_mAP/detection-result.py | Ivan12138/keras-yolo3 | 0 | 6616530 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project: keras-yolo3
@File: detection-result.py.py
@Time: 2022/3/22 16:29
@Author: wanghao
@Description: TODO
"""
import re
import os
dir_project = os.path.abspath(os.path.join(os.getcwd(), "../..")) # 获取上上级目录
dir_result = '/result/result.txt' # yolo批量处理结果的目录
dir_detection_results = '/mAP/input/detection-results' # detection-results目录
surplus = 'A05_helmet/JPEGImages/' # result.txt文件中图片名称多余的部分
if __name__ == '__main__':
with open(dir_project + dir_result, 'r') as f: # 打开文件
filename = f.readlines() # 读取文件
for i in range(len(filename)):
filename[i] = re.sub(surplus, '', filename[i]) # 去除文件名多余的部分
for i in range(len(filename)): # 中按行存放的检测内容,为列表的形式
r = filename[i].split('.jpg ')
file = open(dir_project + dir_detection_results + '/' + r[0] + '.txt', 'w')
t = r[1].split(';')
# 去除空格和换行
t.remove('\n')
if len(t) == 0: # 如果没有对象
file.write('')
else:
for k in range(len(t)):
file.write(t[k] + '\n')
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project: keras-yolo3
@File: detection-result.py.py
@Time: 2022/3/22 16:29
@Author: wanghao
@Description: TODO
"""
import re
import os
dir_project = os.path.abspath(os.path.join(os.getcwd(), "../..")) # 获取上上级目录
dir_result = '/result/result.txt' # yolo批量处理结果的目录
dir_detection_results = '/mAP/input/detection-results' # detection-results目录
surplus = 'A05_helmet/JPEGImages/' # result.txt文件中图片名称多余的部分
if __name__ == '__main__':
with open(dir_project + dir_result, 'r') as f: # 打开文件
filename = f.readlines() # 读取文件
for i in range(len(filename)):
filename[i] = re.sub(surplus, '', filename[i]) # 去除文件名多余的部分
for i in range(len(filename)): # 中按行存放的检测内容,为列表的形式
r = filename[i].split('.jpg ')
file = open(dir_project + dir_detection_results + '/' + r[0] + '.txt', 'w')
t = r[1].split(';')
# 去除空格和换行
t.remove('\n')
if len(t) == 0: # 如果没有对象
file.write('')
else:
for k in range(len(t)):
file.write(t[k] + '\n')
| zh | 0.760325 | #!/usr/bin/env python # -*- coding: UTF-8 -*- @Project: keras-yolo3 @File: detection-result.py.py @Time: 2022/3/22 16:29 @Author: wanghao @Description: TODO # 获取上上级目录 # yolo批量处理结果的目录 # detection-results目录 # result.txt文件中图片名称多余的部分 # 打开文件 # 读取文件 # 去除文件名多余的部分 # 中按行存放的检测内容,为列表的形式 # 去除空格和换行 # 如果没有对象 | 2.508627 | 3 |
losses/__init__.py | yairkit/flowstep3d | 13 | 6616531 | <gh_stars>10-100
from .supervised_losses import SupervisedL1Loss, SupervisedL2Loss, SupervisedL1RegLoss
from .unsupervised_losses import UnSupervisedL1Loss
losses_dict = {
'sv_l1': SupervisedL1Loss,
'sv_l2': SupervisedL2Loss,
'sv_l1_reg': SupervisedL1RegLoss,
'unsup_l1': UnSupervisedL1Loss
}
| from .supervised_losses import SupervisedL1Loss, SupervisedL2Loss, SupervisedL1RegLoss
from .unsupervised_losses import UnSupervisedL1Loss
losses_dict = {
'sv_l1': SupervisedL1Loss,
'sv_l2': SupervisedL2Loss,
'sv_l1_reg': SupervisedL1RegLoss,
'unsup_l1': UnSupervisedL1Loss
} | none | 1 | 1.496988 | 1 | |
tests/test_space/test_space.py | shaoeric/hyperparameter_hunter | 688 | 6616532 | ##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter import Real, Categorical, Integer
from hyperparameter_hunter.feature_engineering import EngineerStep
from hyperparameter_hunter.space.dimensions import RejectedOptional
from hyperparameter_hunter.space.space_core import Space
##################################################
# Import Miscellaneous Assets
##################################################
import pytest
from sys import maxsize
##################################################
# `Space.rvs` with `Categorical` Strings
##################################################
def test_space_rvs():
"""Test that calling `Space.rvs` returns expected values. This is specifically
aimed at ensuring `Categorical` instances containing strings produce the entire
string, rather than the first character, for example"""
space = Space([Integer(50, 100), Categorical(["glorot_normal", "orthogonal"])])
sample_0 = space.rvs(random_state=32)
sample_1 = space.rvs(n_samples=1, random_state=32)
sample_2 = space.rvs(n_samples=2, random_state=32)
sample_3 = space.rvs(n_samples=3, random_state=32)
assert sample_0 == [[73, "glorot_normal"]]
assert sample_1 == [[73, "glorot_normal"]]
assert sample_2 == [[73, "glorot_normal"], [93, "orthogonal"]]
assert sample_3 == [[73, "glorot_normal"], [93, "glorot_normal"], [55, "orthogonal"]]
##################################################
# Dimension Name Error
##################################################
def test_dimension_name_value_error():
with pytest.raises(ValueError, match="Dimension's name must be one of: string, tuple, or .*"):
Real(0.3, 0.9, name=14)
##################################################
# Dimension Contains Tests
##################################################
@pytest.mark.parametrize(
["value", "is_in"], [(1, True), (5, True), (10, True), (0, False), (11, False), ("x", False)]
)
def test_integer_contains(value, is_in):
assert (value in Integer(1, 10)) is is_in
##################################################
# Space Size Tests
##################################################
@pytest.mark.parametrize(
["space", "size"],
[
(Space([Categorical(["a", "b"]), Real(0.1, 0.7)]), maxsize),
(Space([Categorical(["a", "b"]), Integer(1, 5)]), 10),
],
)
def test_space_len(space, size):
assert len(space) == size
##################################################
# Dimension `get_params` Tests
##################################################
#################### `Real.get_params` ####################
@pytest.mark.parametrize(
["given_params", "expected_params"],
[
(
dict(low=0.1, high=0.9),
dict(low=0.1, high=0.9, prior="uniform", transform="identity", name=None),
),
(
dict(low=0.1, high=0.9, transform="normalize", name="Reginald"),
dict(low=0.1, high=0.9, prior="uniform", transform="normalize", name="Reginald"),
),
],
)
def test_real_get_params(given_params, expected_params):
assert Real(**given_params).get_params() == expected_params
#################### `Integer.get_params` ####################
@pytest.mark.parametrize(
["given_params", "expected_params"],
[
(dict(low=17, high=32), dict(low=17, high=32, transform="identity", name=None)),
(
dict(low=32, high=117, transform="normalize", name="Isabella"),
dict(low=32, high=117, transform="normalize", name="Isabella"),
),
],
)
def test_integer_get_params(given_params, expected_params):
assert Integer(**given_params).get_params() == expected_params
#################### `Categorical.get_params` ####################
def dummy_engineer_a(train_inputs, train_targets):
return train_inputs, train_targets
def dummy_engineer_b(train_inputs, non_train_inputs):
return train_inputs, non_train_inputs
def dummy_engineer_c(train_targets, non_train_targets):
return train_targets, non_train_targets
@pytest.mark.parametrize(
["given_params", "expected_params"],
[
(
dict(categories=["a", "b", "c"]),
dict(
categories=("a", "b", "c"),
prior=None,
transform="onehot",
optional=False,
name=None,
),
),
(
dict(categories=["a"], name="Cornelius"),
dict(
categories=("a",), prior=None, transform="onehot", optional=False, name="Cornelius"
),
),
(
dict(categories=[5, 10, 15], prior=[0.6, 0.2, 0.2], transform="identity"),
dict(
categories=(5, 10, 15),
prior=[0.6, 0.2, 0.2],
transform="identity",
optional=False,
name=None,
),
),
(
dict(categories=[dummy_engineer_a, dummy_engineer_b]),
dict(
categories=(dummy_engineer_a, dummy_engineer_b),
prior=None,
transform="onehot",
optional=False,
name=None,
),
),
(
dict(
categories=[
EngineerStep(dummy_engineer_a),
EngineerStep(dummy_engineer_b),
EngineerStep(dummy_engineer_c),
]
),
dict(
categories=(
EngineerStep(dummy_engineer_a),
EngineerStep(dummy_engineer_b),
EngineerStep(dummy_engineer_c),
),
prior=None,
transform="onehot",
optional=False,
name=None,
),
),
],
)
def test_categorical_get_params(given_params, expected_params):
assert Categorical(**given_params).get_params() == expected_params
@pytest.mark.parametrize(
["given_params", "unexpected_params"],
[
(
dict(categories=[EngineerStep(dummy_engineer_a), EngineerStep(dummy_engineer_b)]),
dict(
categories=(EngineerStep(dummy_engineer_a), EngineerStep(dummy_engineer_c)),
prior=None,
transform="onehot",
name=None,
),
),
(
dict(categories=[EngineerStep(dummy_engineer_a, name="some_other_name")]),
dict(
categories=(EngineerStep(dummy_engineer_a),),
prior=None,
transform="onehot",
name=None,
),
),
(
dict(categories=[EngineerStep(dummy_engineer_a, stage="intra_cv")]),
dict(
categories=(EngineerStep(dummy_engineer_a),),
prior=None,
transform="onehot",
name=None,
),
),
(
dict(categories=[EngineerStep(dummy_engineer_a), EngineerStep(dummy_engineer_b)]),
dict(
categories=(EngineerStep(dummy_engineer_b), EngineerStep(dummy_engineer_a)),
prior=None,
transform="onehot",
name=None,
),
),
],
ids=["different_f", "different_name", "different_stage", "different_order"],
)
def test_categorical_not_get_params(given_params, unexpected_params):
"""Silly sanity tests ensuring `Categorical` doesn't think two similar things are the same"""
assert Categorical(**given_params).get_params() != unexpected_params
##################################################
# `Space.get_by_name` Tests
##################################################
GBN_DEFAULT = object()
def space_gbn_0():
return Space([Real(0.1, 0.9, name="foo"), Integer(3, 15, name="bar")])
def space_gbn_1():
return Space([Real(0.1, 0.9, name=("i am", "foo")), Integer(3, 15, name=("i am", "bar"))])
@pytest.mark.parametrize(
["space", "name", "expected"],
[
(space_gbn_0(), "bar", Integer(3, 15, name="bar")),
(space_gbn_1(), ("i am", "bar"), Integer(3, 15, name=("i am", "bar"))),
],
)
def test_get_by_name(space, name, expected):
actual = space.get_by_name(name)
assert actual == expected
@pytest.mark.parametrize(["space", "name"], [(space_gbn_0(), "does_not_exist")])
def test_get_by_name_key_error(space, name):
with pytest.raises(KeyError, match=f"{name} not found in dimensions"):
space.get_by_name(name)
@pytest.mark.parametrize(
["space", "name", "default", "expected"],
[
(space_gbn_0(), "does not exist", GBN_DEFAULT, GBN_DEFAULT),
(space_gbn_1(), ("does", "not", "exist"), GBN_DEFAULT, GBN_DEFAULT),
(space_gbn_0(), "bar", GBN_DEFAULT, Integer(3, 15, name="bar")),
(space_gbn_1(), ("i am", "bar"), GBN_DEFAULT, Integer(3, 15, name=("i am", "bar"))),
],
)
def test_get_by_name_default(space, name, default, expected):
actual = space.get_by_name(name, default=default)
assert actual == expected
@pytest.mark.parametrize(
["space", "name", "expected"],
[
(space_gbn_0(), ("some", "loc", "bar"), Integer(3, 15, name="bar")),
(space_gbn_1(), ("some", "loc", "i am", "bar"), Integer(3, 15, name=("i am", "bar"))),
],
)
def test_get_by_name_use_location(space, name, expected):
for dim in space.dimensions:
if isinstance(dim.name, str):
setattr(dim, "location", ("some", "loc", dim.name))
else:
setattr(dim, "location", ("some", "loc") + dim.name)
actual = space.get_by_name(name, use_location=True)
assert actual == expected
##################################################
# `RejectedOptional` Tests
##################################################
def test_rejected_optional_repr():
assert "{!r}".format(RejectedOptional()) == "RejectedOptional()"
| ##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter import Real, Categorical, Integer
from hyperparameter_hunter.feature_engineering import EngineerStep
from hyperparameter_hunter.space.dimensions import RejectedOptional
from hyperparameter_hunter.space.space_core import Space
##################################################
# Import Miscellaneous Assets
##################################################
import pytest
from sys import maxsize
##################################################
# `Space.rvs` with `Categorical` Strings
##################################################
def test_space_rvs():
"""Test that calling `Space.rvs` returns expected values. This is specifically
aimed at ensuring `Categorical` instances containing strings produce the entire
string, rather than the first character, for example"""
space = Space([Integer(50, 100), Categorical(["glorot_normal", "orthogonal"])])
sample_0 = space.rvs(random_state=32)
sample_1 = space.rvs(n_samples=1, random_state=32)
sample_2 = space.rvs(n_samples=2, random_state=32)
sample_3 = space.rvs(n_samples=3, random_state=32)
assert sample_0 == [[73, "glorot_normal"]]
assert sample_1 == [[73, "glorot_normal"]]
assert sample_2 == [[73, "glorot_normal"], [93, "orthogonal"]]
assert sample_3 == [[73, "glorot_normal"], [93, "glorot_normal"], [55, "orthogonal"]]
##################################################
# Dimension Name Error
##################################################
def test_dimension_name_value_error():
with pytest.raises(ValueError, match="Dimension's name must be one of: string, tuple, or .*"):
Real(0.3, 0.9, name=14)
##################################################
# Dimension Contains Tests
##################################################
@pytest.mark.parametrize(
["value", "is_in"], [(1, True), (5, True), (10, True), (0, False), (11, False), ("x", False)]
)
def test_integer_contains(value, is_in):
assert (value in Integer(1, 10)) is is_in
##################################################
# Space Size Tests
##################################################
@pytest.mark.parametrize(
["space", "size"],
[
(Space([Categorical(["a", "b"]), Real(0.1, 0.7)]), maxsize),
(Space([Categorical(["a", "b"]), Integer(1, 5)]), 10),
],
)
def test_space_len(space, size):
assert len(space) == size
##################################################
# Dimension `get_params` Tests
##################################################
#################### `Real.get_params` ####################
@pytest.mark.parametrize(
["given_params", "expected_params"],
[
(
dict(low=0.1, high=0.9),
dict(low=0.1, high=0.9, prior="uniform", transform="identity", name=None),
),
(
dict(low=0.1, high=0.9, transform="normalize", name="Reginald"),
dict(low=0.1, high=0.9, prior="uniform", transform="normalize", name="Reginald"),
),
],
)
def test_real_get_params(given_params, expected_params):
assert Real(**given_params).get_params() == expected_params
#################### `Integer.get_params` ####################
@pytest.mark.parametrize(
["given_params", "expected_params"],
[
(dict(low=17, high=32), dict(low=17, high=32, transform="identity", name=None)),
(
dict(low=32, high=117, transform="normalize", name="Isabella"),
dict(low=32, high=117, transform="normalize", name="Isabella"),
),
],
)
def test_integer_get_params(given_params, expected_params):
assert Integer(**given_params).get_params() == expected_params
#################### `Categorical.get_params` ####################
def dummy_engineer_a(train_inputs, train_targets):
return train_inputs, train_targets
def dummy_engineer_b(train_inputs, non_train_inputs):
return train_inputs, non_train_inputs
def dummy_engineer_c(train_targets, non_train_targets):
return train_targets, non_train_targets
@pytest.mark.parametrize(
["given_params", "expected_params"],
[
(
dict(categories=["a", "b", "c"]),
dict(
categories=("a", "b", "c"),
prior=None,
transform="onehot",
optional=False,
name=None,
),
),
(
dict(categories=["a"], name="Cornelius"),
dict(
categories=("a",), prior=None, transform="onehot", optional=False, name="Cornelius"
),
),
(
dict(categories=[5, 10, 15], prior=[0.6, 0.2, 0.2], transform="identity"),
dict(
categories=(5, 10, 15),
prior=[0.6, 0.2, 0.2],
transform="identity",
optional=False,
name=None,
),
),
(
dict(categories=[dummy_engineer_a, dummy_engineer_b]),
dict(
categories=(dummy_engineer_a, dummy_engineer_b),
prior=None,
transform="onehot",
optional=False,
name=None,
),
),
(
dict(
categories=[
EngineerStep(dummy_engineer_a),
EngineerStep(dummy_engineer_b),
EngineerStep(dummy_engineer_c),
]
),
dict(
categories=(
EngineerStep(dummy_engineer_a),
EngineerStep(dummy_engineer_b),
EngineerStep(dummy_engineer_c),
),
prior=None,
transform="onehot",
optional=False,
name=None,
),
),
],
)
def test_categorical_get_params(given_params, expected_params):
assert Categorical(**given_params).get_params() == expected_params
@pytest.mark.parametrize(
["given_params", "unexpected_params"],
[
(
dict(categories=[EngineerStep(dummy_engineer_a), EngineerStep(dummy_engineer_b)]),
dict(
categories=(EngineerStep(dummy_engineer_a), EngineerStep(dummy_engineer_c)),
prior=None,
transform="onehot",
name=None,
),
),
(
dict(categories=[EngineerStep(dummy_engineer_a, name="some_other_name")]),
dict(
categories=(EngineerStep(dummy_engineer_a),),
prior=None,
transform="onehot",
name=None,
),
),
(
dict(categories=[EngineerStep(dummy_engineer_a, stage="intra_cv")]),
dict(
categories=(EngineerStep(dummy_engineer_a),),
prior=None,
transform="onehot",
name=None,
),
),
(
dict(categories=[EngineerStep(dummy_engineer_a), EngineerStep(dummy_engineer_b)]),
dict(
categories=(EngineerStep(dummy_engineer_b), EngineerStep(dummy_engineer_a)),
prior=None,
transform="onehot",
name=None,
),
),
],
ids=["different_f", "different_name", "different_stage", "different_order"],
)
def test_categorical_not_get_params(given_params, unexpected_params):
"""Silly sanity tests ensuring `Categorical` doesn't think two similar things are the same"""
assert Categorical(**given_params).get_params() != unexpected_params
##################################################
# `Space.get_by_name` Tests
##################################################
GBN_DEFAULT = object()
def space_gbn_0():
return Space([Real(0.1, 0.9, name="foo"), Integer(3, 15, name="bar")])
def space_gbn_1():
return Space([Real(0.1, 0.9, name=("i am", "foo")), Integer(3, 15, name=("i am", "bar"))])
@pytest.mark.parametrize(
["space", "name", "expected"],
[
(space_gbn_0(), "bar", Integer(3, 15, name="bar")),
(space_gbn_1(), ("i am", "bar"), Integer(3, 15, name=("i am", "bar"))),
],
)
def test_get_by_name(space, name, expected):
actual = space.get_by_name(name)
assert actual == expected
@pytest.mark.parametrize(["space", "name"], [(space_gbn_0(), "does_not_exist")])
def test_get_by_name_key_error(space, name):
with pytest.raises(KeyError, match=f"{name} not found in dimensions"):
space.get_by_name(name)
@pytest.mark.parametrize(
["space", "name", "default", "expected"],
[
(space_gbn_0(), "does not exist", GBN_DEFAULT, GBN_DEFAULT),
(space_gbn_1(), ("does", "not", "exist"), GBN_DEFAULT, GBN_DEFAULT),
(space_gbn_0(), "bar", GBN_DEFAULT, Integer(3, 15, name="bar")),
(space_gbn_1(), ("i am", "bar"), GBN_DEFAULT, Integer(3, 15, name=("i am", "bar"))),
],
)
def test_get_by_name_default(space, name, default, expected):
actual = space.get_by_name(name, default=default)
assert actual == expected
@pytest.mark.parametrize(
["space", "name", "expected"],
[
(space_gbn_0(), ("some", "loc", "bar"), Integer(3, 15, name="bar")),
(space_gbn_1(), ("some", "loc", "i am", "bar"), Integer(3, 15, name=("i am", "bar"))),
],
)
def test_get_by_name_use_location(space, name, expected):
for dim in space.dimensions:
if isinstance(dim.name, str):
setattr(dim, "location", ("some", "loc", dim.name))
else:
setattr(dim, "location", ("some", "loc") + dim.name)
actual = space.get_by_name(name, use_location=True)
assert actual == expected
##################################################
# `RejectedOptional` Tests
##################################################
def test_rejected_optional_repr():
assert "{!r}".format(RejectedOptional()) == "RejectedOptional()"
| de | 0.679538 | ################################################## # Import Own Assets ################################################## ################################################## # Import Miscellaneous Assets ################################################## ################################################## # `Space.rvs` with `Categorical` Strings ################################################## Test that calling `Space.rvs` returns expected values. This is specifically aimed at ensuring `Categorical` instances containing strings produce the entire string, rather than the first character, for example ################################################## # Dimension Name Error ################################################## ################################################## # Dimension Contains Tests ################################################## ################################################## # Space Size Tests ################################################## ################################################## # Dimension `get_params` Tests ################################################## #################### `Real.get_params` #################### #################### `Integer.get_params` #################### #################### `Categorical.get_params` #################### Silly sanity tests ensuring `Categorical` doesn't think two similar things are the same ################################################## # `Space.get_by_name` Tests ################################################## ################################################## # `RejectedOptional` Tests ################################################## | 2.729318 | 3 |
imgrecall.py | user-74/imgrecall | 0 | 6616533 | <filename>imgrecall.py
import os
import sys
import time
from shutil import copy2
from threading import Timer
from datetime import datetime
def img_recall(i):
    """Poll this month's image folder and rescue "recalled" files.

    Watches ``i + 'YYYY-MM'`` once per second.  Every newly added file is
    copied into the global ``output_path`` and scheduled for deletion after
    310 s; if the original disappears (is recalled) before that, the saved
    copy is XOR-decoded to a .png instead.  Runs forever.
    """
    year_month = get_time()
    image_folder = i + year_month
    print("Starting up... Looking for new files in:", image_folder)
    code = 0  # XOR key; derived lazily from the first file that appears
    chopping_list = []  # names of saved copies still pending decode-or-delete
    before = dict([(f, None) for f in os.listdir(image_folder)])
    while 1:
        time.sleep(1)
        after = dict([(f, None) for f in os.listdir(image_folder)])
        added = [f for f in after if not (f in before)]
        removed = [f for f in before if not (f in after)]
        if added:
            chopping_list.extend(added)
            if code == 0:
                # First sighting: infer the XOR key from one new file.
                temp_path = os.path.join(image_folder, added[0])
                code = get_code(temp_path)
            print("Added: ", ", ".join(added))
            for file in added:
                orig = os.path.join(image_folder, file)
                copy2(orig, output_path)
                # Drop our copy after ~5 min unless the source is recalled.
                t = Timer(310.0, cleaner, [file, chopping_list])
                t.start()
        if removed:
            for file in removed:
                if file in chopping_list:
                    temp_path = os.path.join(output_path, file)
                    image_decode(temp_path, file, code)
                    print("Decoded:", file)
                    chopping_list.remove(file)
                    delete_file(temp_path)
            print("Removed: ", ", ".join(removed))
        before = after
def cleaner(file, chopping_list):
    """Timer callback: drop *file* from the pending list and delete its copy."""
    if file not in chopping_list:
        return
    print("Timer up, deleting file:", file)
    delete_file(os.path.join(output_path, file))
    chopping_list.remove(file)
def delete_file(file):
    """Remove *file* from disk if it exists; report the outcome either way."""
    if not os.path.isfile(file):
        print("Error: %s file not found" % file)  # Show an error
        return
    os.remove(file)
    print("Deleted:", file)
def get_code(f):
    """Infer the XOR key from the first byte of the obfuscated file *f*.

    Returns ``first_byte ^ 0xFF`` (presumably because the expected
    plaintext first byte is 0xFF, e.g. a JPEG SOI marker -- TODO confirm),
    or None when the file is empty.

    Fixes over the original: the file handle is now closed (the original
    returned from inside the read loop and leaked it).
    """
    with open(f, "rb") as fh:
        first = fh.read(1)
    if not first:
        return None  # empty file: no byte to derive a key from
    return first[0] ^ 0xFF
def image_decode(f, fn, code):
    """XOR-decode file *f* with key *code* and write ``<output_path>\\<fn>.png``.

    Fix over the original: both file handles are now managed with ``with``,
    so they are closed even if a read/write raises mid-way.
    """
    out = output_path + "\\" + fn + ".png"
    with open(f, "rb") as src, open(out, "wb") as png:
        for chunk in src:
            # Same output bytes as the original per-byte loop, fewer writes.
            png.write(bytes(b ^ code for b in chunk))
def get_time():
    """Return the current year and month formatted as 'YYYY-MM'."""
    now = datetime.today()
    return "%04d-%02d" % (now.year, now.month)
if __name__ == "__main__":
    # Ask for the destination folder and the watched WeChat image folder,
    # validate both, then block forever inside the watcher loop.
    global output_path  # NOTE(review): `global` at module level is a no-op
    output_path = input("output path:")
    if not os.path.isdir(output_path):
        sys.exit("output path is an invalid directory")
    img_folder = input("wechat image folder path:")
    if not os.path.isdir(img_folder):
        sys.exit("the image folder is invalid")
    img_recall(img_folder + "\\")
| <filename>imgrecall.py
import os
import sys
import time
from shutil import copy2
from threading import Timer
from datetime import datetime
def img_recall(i):
    """Poll this month's image folder (``i + 'YYYY-MM'``) once per second;
    copy new files to the global ``output_path``, XOR-decode a copy to .png
    if its original is recalled within 310 s, otherwise delete the copy.
    Runs forever."""
    year_month = get_time()
    image_folder = i + year_month
    print("Starting up... Looking for new files in:", image_folder)
    code = 0  # XOR key; derived lazily from the first file that appears
    chopping_list = []  # saved copies still pending decode-or-delete
    before = dict([(f, None) for f in os.listdir(image_folder)])
    while 1:
        time.sleep(1)
        after = dict([(f, None) for f in os.listdir(image_folder)])
        added = [f for f in after if not (f in before)]
        removed = [f for f in before if not (f in after)]
        if added:
            chopping_list.extend(added)
            if code == 0:
                temp_path = os.path.join(image_folder, added[0])
                code = get_code(temp_path)
            print("Added: ", ", ".join(added))
            for file in added:
                orig = os.path.join(image_folder, file)
                copy2(orig, output_path)
                # Drop our copy after ~5 min unless the source is recalled.
                t = Timer(310.0, cleaner, [file, chopping_list])
                t.start()
        if removed:
            for file in removed:
                if file in chopping_list:
                    temp_path = os.path.join(output_path, file)
                    image_decode(temp_path, file, code)
                    print("Decoded:", file)
                    chopping_list.remove(file)
                    delete_file(temp_path)
            print("Removed: ", ", ".join(removed))
        before = after
def cleaner(file, chopping_list):
    """Timer callback: after the grace period, delete the saved copy of
    *file* if it is still pending in *chopping_list*."""
    if file in chopping_list:
        print("Timer up, deleting file:", file)
        temp_path = os.path.join(output_path, file)
        delete_file(temp_path)
        chopping_list.remove(file)
def delete_file(file):
    """Delete *file* from disk if present; print the outcome."""
    if os.path.isfile(file):
        os.remove(file)
        print("Deleted:", file)
    else: # Show an error
        print("Error: %s file not found" % file)
def get_code(f):
    """Infer the XOR key from the first byte of *f*.

    Returns ``first_byte ^ 0xFF`` (presumably the plaintext starts with
    0xFF, e.g. a JPEG SOI marker -- TODO confirm); returns None implicitly
    when the file is empty.

    NOTE(review): the file handle is never closed (early return from loop).
    """
    file = open(f, "rb")
    for a in file:
        for byte in a:
            code = byte ^ 0xFF
            return code
def image_decode(f, fn, code):
    """XOR every byte of *f* with *code* and write the result to
    ``<output_path>\\<fn>.png``.

    NOTE(review): handles are closed only on the success path; a failure
    mid-loop leaks both file objects.
    """
    file = open(f, "rb")
    out = output_path + "\\" + fn + ".png"
    png = open(out, "wb")
    for a in file:
        for byte in a:
            new_byte = byte ^ code
            png.write(bytes([new_byte]))
    file.close()
    png.close()
def get_time():
    """Return the current year and month as a 'YYYY-MM' string."""
    today = datetime.today()
    year_month = datetime(today.year, today.month, 1)
    # str() of a datetime starts "YYYY-MM-DD ..."; keep the first 7 chars.
    return str(year_month)[:7]
if __name__ == "__main__":
    # Prompt for and validate the two folders, then start the watcher
    # (blocks forever).
    global output_path  # NOTE(review): `global` at module level is a no-op
    output_path = input("output path:")
    if not os.path.isdir(output_path):
        sys.exit("output path is an invalid directory")
    img_folder = input("wechat image folder path:")
    if not os.path.isdir(img_folder):
        sys.exit("the image folder is invalid")
    img_recall(img_folder + "\\")
| en | 0.298294 | # Show an error | 2.989413 | 3 |
emissary/__init__.py | VPH-Share/VPHOP_NMSLoads | 0 | 6616534 | from .core import Response
from .core import create_response, run
# from .envoy import expand_args, run, connect
from .core import __version__
| from .core import Response
from .core import create_response, run
# from .envoy import expand_args, run, connect
from .core import __version__
| en | 0.614134 | # from .envoy import expand_args, run, connect | 1.097014 | 1 |
taxi/__init__.py | abagaria/visgrid | 0 | 6616535 | <reponame>abagaria/visgrid<filename>taxi/__init__.py
from .taxi import Taxi5x5, VisTaxi5x5, Taxi10x10
| from .taxi import Taxi5x5, VisTaxi5x5, Taxi10x10 | none | 1 | 1.065343 | 1 | |
def move(n, a, b, c):
    """Print the moves that transfer *n* discs from peg *a* to peg *c*
    (Tower of Hanoi), using *b* as the auxiliary peg.

    Each move is printed as ``src -- dst``.
    Fixed: the original was missing the colon after ``else`` (SyntaxError).
    """
    if n == 1:
        print(a, '--', c)
    else:
        move(n - 1, a, c, b)
        move(1, a, b, c)
        move(n - 1, b, a, c)
def move(n, a, b, c):
    """Print the moves that transfer *n* discs from peg *a* to peg *c*
    (Tower of Hanoi), using *b* as the auxiliary peg.

    Each move is printed as ``src -- dst``.
    Fixed: the original was missing the colon after ``else`` (SyntaxError).
    """
    if n == 1:
        print(a, '--', c)
    else:
        move(n - 1, a, c, b)
        move(1, a, b, c)
        move(n - 1, b, a, c)
| none | 1 | 3.521336 | 4 | |
web/server/api/api.py | Tyill/zmey | 5 | 6616537 | <reponame>Tyill/zmey
from flask import(
Blueprint
)
bp = Blueprint('api', __name__, url_prefix='/api/v1')
from . import api_user
from . import api_event
from . import api_pipeline
from . import api_pipeline_task
from . import api_scheduler
from . import api_worker
from . import api_task
from . import api_task_template | from flask import(
Blueprint
)
bp = Blueprint('api', __name__, url_prefix='/api/v1')
from . import api_user
from . import api_event
from . import api_pipeline
from . import api_pipeline_task
from . import api_scheduler
from . import api_worker
from . import api_task
from . import api_task_template | none | 1 | 1.443375 | 1 | |
scratch/pyclingo/main.py | potassco/gringo | 423 | 6616538 | import clingo
import sys
def inject_one(val):
    # Term callback, called from ASP as @inject_one(...): returns val doubled.
    return clingo.create_num(val.num() + val.num())
def inject_two(val):
    # Term callback returning a pool of two terms: val and val doubled.
    return [val, clingo.create_num(val.num() + val.num())]
def on_model(m):
    # Model callback for solve(): print each answer set found.
    print m
# --- Smoke-test script for the clingo Python API (Python 2 syntax) ---
c = clingo.Control(sys.argv)
c.add("base", [], "#external c. a :- c. a :- not b. b :- not a. r(@inject_one(2)). r(@inject_two(3)). #program p(k). q(k).")
c.ground([("base", []), ("p", [clingo.create_num(17)])], sys.modules[__name__])
print c.solve(on_model=on_model)
# Build one term of every kind and exercise the inspection API.
vals = [clingo.create_id("p"), clingo.create_str("q"), clingo.create_num(42), clingo.Sup, clingo.Inf]
vals.append(clingo.create_fun("f", vals, True))
vals.append(clingo.create_fun("", vals))
print vals
print vals[2].num()
print vals[0].name()
print vals[1].str()
print vals[3].type() == clingo.Value.SUP
print vals[5].args()
print vals[0] == vals[0], vals[0] != vals[0], vals[0] < vals[0], vals[0] <= vals[0], vals[0] > vals[0], vals[0] >= vals[0]
print vals[5] == vals[6], vals[5] < vals[6], vals[5] > vals[6]
# Flip the external and enumerate models through the iterator interface.
c.assign_external(clingo.create_id("c"), True)
with c.solve_iter() as it:
    for m in it:
        print m
    print it.get(), it.get() == clingo.SolveResult.SAT
# Parse a program covering most ASP language constructs, dump its AST, add it.
program = c.parse("""
#program base(k,t).
p(|1;2|).
a :- b.
a(1,2) :- b(-3,4;5+6).
#true :- #false, 1 < 2. a.
a :- b:c,d.
a;b;c:d. p((1,2)).
1 <= #count { a : b : c, d; e : #true } <= 2.
1 { a; b : c } != 2.
:- 1 <= #count { a : b, c; d } <= 2.
:- 1 <= { a : b, c; d } <= 2.
#const xxx = 17.
#minimize {1@2 : a; 3@4,5 : b, c }.
#show a/2.
#show $a/2.
#show a : b.
#show $a : b.
#edge (a, b) : c.
#edge (a, b; c, d) : e.
#external a : b.
#heuristic a : b, c. [x@y,z]
#heuristic a(1;2) : b, c. [x@y,z]
#project a : b, c.
#project a/3.
:- 1 $* $a $+ $b $+ 2 $< $c $- 1 $< 3*3.
#disjoint { a, b : $c $* 1 $+ 2 : c; 1 : $d : e }.
p(X) :- q(X).
#theory x {
    a {
        + : 1, unary;
        * : 2, binary, left;
        ^ : 3, binary, right
    };
    &b/0 : a, any;
    &c/0 : a, {+,-}, a, directive
}.
&b { f(a) : q } <= a * -b * c.
""")
for node in program:
    print node
c.add_ast(program)
| import clingo
import sys
def inject_one(val):
    # Term callback, called from ASP as @inject_one(...): returns val doubled.
    return clingo.create_num(val.num() + val.num())
def inject_two(val):
    # Term callback returning a pool of two terms: val and val doubled.
    return [val, clingo.create_num(val.num() + val.num())]
def on_model(m):
    # Model callback for solve(): print each answer set found.
    print m
# --- Smoke-test script for the clingo Python API (Python 2 syntax) ---
c = clingo.Control(sys.argv)
c.add("base", [], "#external c. a :- c. a :- not b. b :- not a. r(@inject_one(2)). r(@inject_two(3)). #program p(k). q(k).")
c.ground([("base", []), ("p", [clingo.create_num(17)])], sys.modules[__name__])
print c.solve(on_model=on_model)
# Build one term of every kind and exercise the inspection API.
vals = [clingo.create_id("p"), clingo.create_str("q"), clingo.create_num(42), clingo.Sup, clingo.Inf]
vals.append(clingo.create_fun("f", vals, True))
vals.append(clingo.create_fun("", vals))
print vals
print vals[2].num()
print vals[0].name()
print vals[1].str()
print vals[3].type() == clingo.Value.SUP
print vals[5].args()
print vals[0] == vals[0], vals[0] != vals[0], vals[0] < vals[0], vals[0] <= vals[0], vals[0] > vals[0], vals[0] >= vals[0]
print vals[5] == vals[6], vals[5] < vals[6], vals[5] > vals[6]
# Flip the external and enumerate models through the iterator interface.
c.assign_external(clingo.create_id("c"), True)
with c.solve_iter() as it:
    for m in it:
        print m
    print it.get(), it.get() == clingo.SolveResult.SAT
# Parse a program covering most ASP language constructs, dump its AST, add it.
program = c.parse("""
#program base(k,t).
p(|1;2|).
a :- b.
a(1,2) :- b(-3,4;5+6).
#true :- #false, 1 < 2. a.
a :- b:c,d.
a;b;c:d. p((1,2)).
1 <= #count { a : b : c, d; e : #true } <= 2.
1 { a; b : c } != 2.
:- 1 <= #count { a : b, c; d } <= 2.
:- 1 <= { a : b, c; d } <= 2.
#const xxx = 17.
#minimize {1@2 : a; 3@4,5 : b, c }.
#show a/2.
#show $a/2.
#show a : b.
#show $a : b.
#edge (a, b) : c.
#edge (a, b; c, d) : e.
#external a : b.
#heuristic a : b, c. [x@y,z]
#heuristic a(1;2) : b, c. [x@y,z]
#project a : b, c.
#project a/3.
:- 1 $* $a $+ $b $+ 2 $< $c $- 1 $< 3*3.
#disjoint { a, b : $c $* 1 $+ 2 : c; 1 : $d : e }.
p(X) :- q(X).
#theory x {
    a {
        + : 1, unary;
        * : 2, binary, left;
        ^ : 3, binary, right
    };
    &b/0 : a, any;
    &c/0 : a, {+,-}, a, directive
}.
&b { f(a) : q } <= a * -b * c.
""")
for node in program:
    print node
c.add_ast(program)
| en | 0.252822 | #program p(k). q(k).") #program base(k,t). p(|1;2|). a :- b. a(1,2) :- b(-3,4;5+6). #true :- #false, 1 < 2. a. a :- b:c,d. a;b;c:d. p((1,2)). 1 <= #count { a : b : c, d; e : #true } <= 2. 1 { a; b : c } != 2. :- 1 <= #count { a : b, c; d } <= 2. :- 1 <= { a : b, c; d } <= 2. #const xxx = 17. #minimize {1@2 : a; 3@4,5 : b, c }. #show a/2. #show $a/2. #show a : b. #show $a : b. #edge (a, b) : c. #edge (a, b; c, d) : e. #external a : b. #heuristic a : b, c. [x@y,z] #heuristic a(1;2) : b, c. [x@y,z] #project a : b, c. #project a/3. :- 1 $* $a $+ $b $+ 2 $< $c $- 1 $< 3*3. #disjoint { a, b : $c $* 1 $+ 2 : c; 1 : $d : e }. p(X) :- q(X). #theory x { a { + : 1, unary; * : 2, binary, left; ^ : 3, binary, right }; &b/0 : a, any; &c/0 : a, {+,-}, a, directive }. &b { f(a) : q } <= a * -b * c. | 3.086674 | 3 |
demo_sceneanalysis/stats.py | angus-ai/angus-demos | 0 | 6616539 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
class Stats(object):
    """Accumulate simple audience statistics (age buckets, gender and
    engagement counters) and persist them as JSON.

    Fixes over the original:
      * ``Stats()`` no longer crashes: ``os.path.isfile(None)`` raised
        TypeError when no file was given.
      * ``add_age(50)`` is now filed under "50+"; the original's last
        branch was ``elif age > 50`` so exactly 50 silently fell into the
        default "20-30" bucket.
    """

    def __init__(self, f=None):
        # f: optional path of a JSON stats file to resume from.
        self.stats = self.load(f)
        self.file = f

    def save(self, f=None):
        """Write the stats dict to *f* (defaults to the file given at init)."""
        if f is None and self.file is not None:
            f = self.file
        with open(f, "w") as myfile:
            json.dump(self.stats, myfile)

    def load(self, f):
        """Return the stats stored in file *f*, or an empty dict."""
        # Guard f=None: os.path.isfile(None) raises TypeError.
        if f is not None and os.path.isfile(f):
            with open(f, "r") as myfile:
                return json.load(myfile)
        return dict()

    def add_engaged(self, engaged):
        """Count one engagement observation.

        NOTE: an engaged person increments "engaged" AND decrements
        "not_engaged" (original behaviour, deliberately preserved).
        """
        if engaged is True:
            self.stats["engaged"] = self.stats.setdefault("engaged", 0) + 1
            self.stats["not_engaged"] = self.stats.setdefault("not_engaged", 0) - 1
        else:
            self.stats["not_engaged"] = self.stats.setdefault("not_engaged", 0) + 1

    def add_age(self, age):
        """File *age* (in years) into its decade bucket."""
        if age < 10:
            label = "0-10"
        elif age < 20:
            label = "10-20"
        elif age < 30:
            label = "20-30"
        elif age < 40:
            label = "30-40"
        elif age < 50:
            label = "40-50"
        else:  # fixed: was `elif age > 50`, which mis-filed age == 50
            label = "50+"
        self.stats[label] = self.stats.setdefault(label, 0) + 1

    def add_gender(self, gender):
        """Count one observation of *gender* (e.g. "male" / "female")."""
        self.stats[gender] = self.stats.setdefault(gender, 0) + 1

    def genders(self):
        """Return {"female": n, "male": n} with zero defaults."""
        return {
            "female": self.stats.get("female", 0),
            "male": self.stats.get("male", 0),
        }

    def ages(self):
        """Return a dict of every age bucket with zero defaults."""
        categories = ["0-10", "10-20", "20-30", "30-40", "40-50", "50+"]
        return dict((cat, self.stats.get(cat, 0)) for cat in categories)

    def engaged(self):
        """Return the engaged / not_engaged counters with zero defaults."""
        return {
            "engaged": self.stats.get("engaged", 0),
            "not_engaged": self.stats.get("not_engaged", 0),
        }
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import json
class Stats(object):
    """Accumulates audience statistics (age buckets, gender, engagement)
    and persists them as JSON."""
    def __init__(self, f=None):
        # NOTE(review): self.load(None) calls os.path.isfile(None), which
        # raises TypeError -- the default argument is effectively unusable.
        self.stats = self.load(f)
        self.file = f
    def save(self, f=None):
        """Write the stats dict to *f* (defaults to the file given at init)."""
        if f is None and self.file is not None:
            f = self.file
        with open(f, "w") as myfile:
            json.dump(self.stats, myfile)
    def load(self, f):
        """Return the stats stored in file *f*, or an empty dict if absent."""
        if os.path.isfile(f):
            with open(f, "r") as myfile:
                return json.load(myfile)
        return dict()
    def add_engaged(self, engaged):
        """Count an engagement observation.

        NOTE(review): an engaged person both increments "engaged" and
        decrements "not_engaged" -- confirm this asymmetry is intended.
        """
        if engaged is True:
            value = self.stats.setdefault("engaged", 0)
            value += 1
            self.stats["engaged"] = value
            value = self.stats.setdefault("not_engaged", 0)
            value -= 1
            self.stats["not_engaged"] = value
        else:
            value = self.stats.setdefault("not_engaged", 0)
            value += 1
            self.stats["not_engaged"] = value
    def add_age(self, age):
        """File *age* into a decade bucket.

        NOTE(review): age == 50 matches no branch (last one is `> 50`)
        and silently falls into the default "20-30" bucket.
        """
        label = "20-30"
        if age < 10:
            label = "0-10"
        elif age < 20:
            label = "10-20"
        elif age < 30:
            label = "20-30"
        elif age < 40:
            label = "30-40"
        elif age < 50:
            label = "40-50"
        elif age > 50:
            label = "50+"
        value = self.stats.setdefault(label, 0)
        value += 1
        self.stats[label] = value
    def add_gender(self, gender):
        """Count one observation of *gender*."""
        value = self.stats.setdefault(gender, 0)
        value += 1
        self.stats[gender] = value
    def genders(self):
        """Return female/male counters with zero defaults."""
        result = {
            "female": self.stats.get("female", 0),
            "male": self.stats.get("male", 0),
        }
        return result
    def ages(self):
        """Return all age buckets with zero defaults."""
        categories = ["0-10", "10-20", "20-30", "30-40", "40-50", "50+"]
        result = dict([(cat, self.stats.get(cat, 0)) for cat in categories])
        return result
    def engaged(self):
        """Return engaged/not_engaged counters with zero defaults."""
        result = {
            "engaged": self.stats.get("engaged", 0),
            "not_engaged": self.stats.get("not_engaged", 0),
        }
        return result
RQ3/Autonomous_robot/AmbieGen_mo/Optimize.py | dgumenyuk/Environment_generation | 0 | 6616540 | import config as cf
import numpy as np
from pymoo.optimize import minimize
from MyProblem import MyProblem
from MyTcMutation import MyTcMutation
from MyTcCrossOver import MyTcCrossover
from MyDuplicates import MyDuplicateElimination
from pymoo.util.termination.f_tol import MultiObjectiveSpaceToleranceTermination
from MyTcSampling import MyTcSampling
import matplotlib.pyplot as plt
import os
import shutil
from shutil import make_archive
from zipfile import ZipFile
import json
import time
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.visualization.scatter import Scatter
from pymoo.factory import get_performance_indicator
from RDP import rdp
def build_convergence(res):
    """Plot the best fitness value of each generation and save it as conv.png
    in the configured GA convergence folder."""
    #n_evals = np.array([e.evaluator.n_eval/cf.ga["n_gen"] for e in res.history])
    n_evals = np.arange(0, len(res.history), 1)
    opt = np.array([e.opt[0].F for e in res.history])
    fig, ax1 = plt.subplots(figsize=(12, 4))
    plt.title("Convergence")
    plt.plot(n_evals, opt, "o--")
    plt.xlabel("Number of generations")
    plt.ylabel("Fitness function value")
    #plt.show()
    fig.savefig(cf.files["ga_conv"] + "conv.png")
    plt.close(fig)
def save_results(res):
    """Archive one optimisation run and regenerate its artefacts.

    Zips the current test-case images and files with a unix-timestamp
    prefix into the GA archive folder, wipes the working folders, then
    rebuilds per-test-case images and JSON route files for the first and
    last generations of *res*.  Uses Windows-style paths throughout.
    """
    build_convergence(res)
    if not(os.path.exists(cf.files["tc_img"])):
        os.mkdir(cf.files["tc_img"])
    if not(os.path.exists(cf.files["tc_file"])):
        os.mkdir(cf.files["tc_file"])
    if not(os.path.exists(cf.files["ga_archive"])):
        os.mkdir(cf.files["ga_archive"])
    #if os.listdir(cf.files["tc_img"]):
    #now = datetime.now()
    #dt_string = str(now.strftime("%d/%m/%Y %H:%M:%S"))
    dt_string = str(int(time.time()))  # unix timestamp used as archive prefix
    # create dirtectory
    #os.mkdir(cf.files["ga_archive"] + dt_string)
    # prepare files
    shutil.make_archive(dt_string + "_tc_img", 'zip', cf.files["tc_img"] )
    shutil.make_archive(dt_string + "_tc_file", 'zip', cf.files["tc_file"] )
    shutil.copyfile(".\\config.py", ".\\"+dt_string+"_config.py")
    shutil.copyfile(".\\conv.png", ".\\"+dt_string+"_conv.png")
    #shutil.copyfile(".\\vehicle.py", ".\\"+dt_string+"_vehicle.py")
    zipObj = ZipFile(dt_string + '_results.zip', 'w')
    # Add multiple files to the zip
    zipObj.write(dt_string + "_tc_img.zip")
    zipObj.write(dt_string + "_tc_file.zip")
    zipObj.write(dt_string + "_conv.png")
    zipObj.write(dt_string + "_config.py")
    #zipObj.write(dt_string + "_vehicle.py")
    zipObj.close()
    # move the archive to the destination folder
    shutil.move(dt_string + '_results.zip', cf.files["ga_archive"] + dt_string + '_results.zip')
    # remove files
    os.remove(".\\" + dt_string + "_config.py")
    #os.remove(".\\" + dt_string + "_vehicle.py")
    os.remove(".\\" + dt_string + "_conv.png")
    os.remove(".\\" + "conv.png")
    os.remove(".\\" + dt_string + "_tc_img.zip")
    os.remove(".\\" + dt_string + "_tc_file.zip")
    for folder in os.listdir(cf.files["tc_img"]):
        shutil.rmtree(cf.files["tc_img"] + folder)
    for file in os.listdir(cf.files["tc_file"]):
        os.remove(cf.files["tc_file"] + file)
    # create new folders
    #for gen in range(cf.ga["n_gen"]):
    for gen in [0, len(res.history) - 1]:
        if not(os.path.exists(cf.files["tc_img"] + "generation_" + str(gen))):
            os.mkdir(cf.files["tc_img"] + "generation_" + str(gen))
    # build images and write tc to file
    #for gen in range(cf.ga["n_gen"]):
    for gen in [0, len(res.history) - 1]:
        test_cases = {}
        states_tc = {}
        all_routes = {}
        for i, x in enumerate(res.history[gen].pop.get("X")):
            #road_points = x[0].road_points
            map_points = x[0].map_points
            robot_path_x = x[0].robot_path_x
            robot_path_y = x[0].robot_path_y
            fitness = x[0].fitness
            states = x[0].states
            # Simplify the driven path with Ramer-Douglas-Peucker.
            points = rdp(list(zip(robot_path_x, robot_path_y)), epsilon=1)
            image_car_path(map_points, robot_path_x, robot_path_y, points, fitness, gen, i)
            #build_scenario(map_points, robot_path_x, robot_path_y, gen, i)
            test_cases["tc" + str(i)] = map_points
            states_tc["tc" + str(i)] = states
            routes = []
            p = 0
            while p < len(robot_path_x):
                p_set = {}
                p_set["x"] = robot_path_x[p] - 24
                p_set["y"] = robot_path_y[p] - 24
                routes.append(p_set)
                p += 1
            all_routes["tc" + str(i)] = routes
        save_path3 = os.path.join(cf.files["tc_file"], "solutions_" + str(gen) + ".json")
        with open(save_path3, "w") as outfile:
            json.dump(all_routes, outfile, indent=4)
def build_scenario2(road_points, car_path_x, car_path_y, generation, i):
    """Render only the map obstacles (axis-free) for test case *i* of
    *generation* into the per-generation image folder.

    NOTE(review): car_path_x / car_path_y are accepted but never drawn.
    """
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.set_frame_on(False)
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    road_x = []
    road_y = []
    for p in road_points:
        road_x.append(p[0])
        road_y.append(p[1])
    ax.scatter(road_x, road_y, s=150, marker='s', color='k')
    top = cf.model["map_size"] + 1  # NOTE(review): computed but unused
    bottom = 0 -1
    save_path = os.path.join(cf.files["tc_img"], "generation_" + str(generation))
    fig.savefig(save_path +"\\" + "map_" + str(i) + ".png", bbox_inches='tight',pad_inches = 0)
    plt.close(fig)
def build_scenario(road_points, path, run):
    """Render the map obstacles of one solution as an axis-free image and
    save it as ``map<run>.png`` inside *path*.

    Fixed: the original file name used the undefined local ``i`` — it only
    worked by accident when a module-level ``i`` (the caller's loop index,
    equal to *run*) happened to exist.  It now uses the ``run`` parameter.
    The unused ``top``/``bottom`` locals were dropped.
    """
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.set_frame_on(False)
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    road_x = [p[0] for p in road_points]
    road_y = [p[1] for p in road_points]
    ax.scatter(road_x, road_y, s=150, marker='s', color='k')
    fig.savefig(os.path.join(path, "map" + str(run) + ".png"), bbox_inches='tight', pad_inches=0)
    plt.close(fig)
def image_car_path(road_points, car_path_x, car_path_y, points, fitness, generation, i):
    """Plot the map obstacles, the full robot path and its RDP-approximated
    path, titled with the test-case fitness, and save the figure as
    ``tc_<i>.png`` in the given generation's image folder."""
    fig, ax = plt.subplots(figsize=(12, 12))
    #, nodes[closest_index][0], nodes[closest_index][1], 'go'
    #road_points2 = zip(*road_points)
    road_x = []
    road_y = []
    for p in road_points:
        road_x.append(p[0])
        road_y.append(p[1])
    _x = []
    _y = []
    for p in points:
        _x.append(p[0])
        _y.append(p[1])
    ax.plot(car_path_x, car_path_y, 'or', label="Robot path")
    ax.plot(_x, _y, "ob", label="Approximated path")
    ax.plot(road_x, road_y, '.k', label="Map")
    top = cf.model["map_size"] + 1
    bottom = -1
    ax.set_title( "Test case fitenss " + str(fitness) , fontsize=17)
    ax.set_ylim(bottom, top)
    ax.set_xlim(bottom, top)
    ax.xaxis.set_ticks(np.arange(bottom, top, 5))
    ax.yaxis.set_ticks(np.arange(bottom, top, 5))
    ax.legend()
    save_path = os.path.join(cf.files["tc_img"], "generation_" + str(generation))
    fig.savefig(save_path +"\\" + "tc_" + str(i) + ".png")
    plt.close(fig)
# NSGA-II configured with the project's custom sampling, crossover,
# mutation and duplicate-elimination operators.
algorithm = NSGA2(
    n_offsprings=10,
    pop_size=cf.ga["population"],
    sampling=MyTcSampling(),
    crossover=MyTcCrossover(cf.ga["cross_rate"]),
    mutation=MyTcMutation(cf.ga["mut_rate"]),
    eliminate_duplicates=MyDuplicateElimination(),
)
# Objective-space tolerance termination criterion.
# NOTE(review): defined but unused -- the minimize() calls below pass
# ("n_gen", cf.ga["n_gen"]) instead of this object.
termination = MultiObjectiveSpaceToleranceTermination(tol=0.0025,
                                                      n_last=15,
                                                      nth_gen=5,
                                                      n_max_gen=cf.ga["n_gen"],
                                                      n_max_evals=None)
def calc_novelty(old, new):
    """Return the negated behavioural similarity between two test cases.

    *old* and *new* map test-case ids to ``{"state": ..., "value": ...}``.
    For every id of the shorter dict whose state matches in both, the
    similarity grows by 1 (values within a factor of 2 of each other) or
    0.5 (values differing by a factor of 2 or more).  The negation makes a
    *lower* return value mean *more similar*, so maximising the return
    value favours novel behaviour.

    Fixes over the original: a zero value no longer raises
    ZeroDivisionError, and ids missing from one of the dicts are skipped
    instead of raising KeyError.
    """
    novelty = 0.0
    shorter = new if len(new) <= len(old) else old
    for tc in shorter:
        if tc not in old or tc not in new:
            continue  # id present in only one dict: nothing to compare
        if old[tc]["state"] != new[tc]["state"]:
            continue
        lo, hi = sorted((old[tc]["value"], new[tc]["value"]))
        if lo == 0:
            similar = hi == 0  # both zero -> identical values
        else:
            similar = hi / lo < 2
        novelty += 1 if similar else 0.5
    return -novelty
if __name__ == "__main__":
    # Run the NSGA-II test-case generator for 30 experiments of ~2 CPU-hours
    # each; per run, archive scenarios/maps and record time, best fitness,
    # hypervolume and average novelty into Results.json.
    path_folder = ".\\results"
    if not(os.path.exists(path_folder)):
        os.mkdir(path_folder)
    it = 0
    for it in range(0, 30):
        res_dict = {}
        time_list = []
        m = 0  # run counter within this experiment
        base_path = ".\\results" + str(it) + "\\"
        base_path = os.path.join(base_path)
        if not(os.path.exists(base_path)):
            os.mkdir(base_path)
        start_time = time.time()
        alg_time = 0
        while alg_time < 7200:  # 2-hour time budget per experiment
            #for m in range(30):
            fit_list = []
            print("Evaluation new", m)
            # Byte-swapped millisecond timestamp used as a fresh RNG seed.
            t = int(time.time() * 1000)
            seed = ((t & 0xff000000) >> 24) + ((t & 0x00ff0000) >> 8) + ((t & 0x0000ff00) << 8) + ((t & 0x000000ff) << 24)
            print("Seed ", seed)
            start_time = time.time()
            res = minimize(
                MyProblem(), algorithm, ("n_gen", cf.ga["n_gen"]), seed=seed, verbose=True, save_history=True, eliminate_duplicates=True
            )
            end_time = time.time()
            alg_time += end_time - start_time
            print("time", res.exec_time)
            print("time, sec ", res.exec_time)
            time_list.append(res.exec_time)
            res_dict["run" + str(m)] = {}
            res_dict["run" + str(m)]["time"] = res.exec_time
            hv_values = []
            hv = get_performance_indicator("hv", ref_point=np.array([0, 0]))
            path = os.path.join(base_path, "run" + str(m))
            if not(os.path.exists(path)):
                os.mkdir(path)
            #path = ".\\results\\run" + str(m)
            gen = len(res.history) - 1  # only the final generation is exported
            #for gen in range(0, len(res.history)):
            i = 0
            #print(gen)
            minim = 0
            hv_list = []
            all_routes = {}
            all_maps = {}
            while i < len(res.history[gen].opt):
                result = res.history[gen].pop.get("X")[i]
                hv_item = res.history[gen].pop.get("F")[i]
                hv_list.append(hv_item)
                fit = result[0].fitness
                if fit < minim:
                    minim = fit
                map_points = result[0].map_points
                robot_path_x = result[0].robot_path_x
                robot_path_y = result[0].robot_path_y
                routes = []
                p = 0
                while p < len(robot_path_x):
                    p_set = {}
                    p_set["x"] = robot_path_x[p] - 25
                    p_set["y"] = robot_path_y[p] - 25
                    routes.append(p_set)
                    p += 1
                all_routes["map" + str(i)] = routes
                all_maps["map" + str(i)] = map_points
                out_path = os.path.join(path, "scenarios.json")
                out_path_maps = os.path.join(path, "maps.json")
                #save_path3 = os.path.join(cf.files["tc_file"], "solutions_" + str(gen) + ".json")
                with open(out_path, "w") as outfile:
                    json.dump(all_routes, outfile, indent=4)
                with open(out_path_maps, "w") as outfile:
                    json.dump(all_maps, outfile, indent=4)
                build_scenario(map_points, path, i)
                i += 1
                fit_list.append(minim)
            #print(min(fit_list))
            hv_values.append(hv.calc(np.array(hv_list)))
            # Novelty: compare every final-population member to the first one.
            gen = len(res.history) - 1
            reference = res.history[gen].pop.get("X")[0]
            novelty_list = []
            for i in range(1, len(res.history[gen].opt)):
                current = res.history[gen].pop.get("X")[i]
                nov = calc_novelty(reference[0].states, current[0].states)
                novelty_list.append(nov)
            res_dict["run" + str(m)]["fitness"] = fit_list
            print(min(fit_list))
            res_dict["run" + str(m)]["hv"] = hv_values
            if len(novelty_list) > 0:
                res_dict["run" + str(m)]["novelty"] = sum(novelty_list)/len(novelty_list)
            else:
                res_dict["run" + str(m)]["novelty"] = 0
            with open("Results.json", "w") as f:
                json.dump(res_dict, f, indent=4)
            save_results(res)
            print("Time remaining ", (7200 - (alg_time))/3600, " hours")
m += 1 | import config as cf
import numpy as np
from pymoo.optimize import minimize
from MyProblem import MyProblem
from MyTcMutation import MyTcMutation
from MyTcCrossOver import MyTcCrossover
from MyDuplicates import MyDuplicateElimination
from pymoo.util.termination.f_tol import MultiObjectiveSpaceToleranceTermination
from MyTcSampling import MyTcSampling
import matplotlib.pyplot as plt
import os
import shutil
from shutil import make_archive
from zipfile import ZipFile
import json
import time
from pymoo.algorithms.nsga2 import NSGA2
from pymoo.visualization.scatter import Scatter
from pymoo.factory import get_performance_indicator
from RDP import rdp
def build_convergence(res):
    """Plot the best fitness value of each generation and save it as conv.png
    in the configured GA convergence folder."""
    #n_evals = np.array([e.evaluator.n_eval/cf.ga["n_gen"] for e in res.history])
    n_evals = np.arange(0, len(res.history), 1)
    opt = np.array([e.opt[0].F for e in res.history])
    fig, ax1 = plt.subplots(figsize=(12, 4))
    plt.title("Convergence")
    plt.plot(n_evals, opt, "o--")
    plt.xlabel("Number of generations")
    plt.ylabel("Fitness function value")
    #plt.show()
    fig.savefig(cf.files["ga_conv"] + "conv.png")
    plt.close(fig)
def save_results(res):
    """Archive one optimisation run and regenerate its artefacts.

    Zips the current test-case images and files with a unix-timestamp
    prefix into the GA archive folder, wipes the working folders, then
    rebuilds per-test-case images and JSON route files for the first and
    last generations of *res*.  Uses Windows-style paths throughout.
    """
    build_convergence(res)
    if not(os.path.exists(cf.files["tc_img"])):
        os.mkdir(cf.files["tc_img"])
    if not(os.path.exists(cf.files["tc_file"])):
        os.mkdir(cf.files["tc_file"])
    if not(os.path.exists(cf.files["ga_archive"])):
        os.mkdir(cf.files["ga_archive"])
    #if os.listdir(cf.files["tc_img"]):
    #now = datetime.now()
    #dt_string = str(now.strftime("%d/%m/%Y %H:%M:%S"))
    dt_string = str(int(time.time()))  # unix timestamp used as archive prefix
    # create dirtectory
    #os.mkdir(cf.files["ga_archive"] + dt_string)
    # prepare files
    shutil.make_archive(dt_string + "_tc_img", 'zip', cf.files["tc_img"] )
    shutil.make_archive(dt_string + "_tc_file", 'zip', cf.files["tc_file"] )
    shutil.copyfile(".\\config.py", ".\\"+dt_string+"_config.py")
    shutil.copyfile(".\\conv.png", ".\\"+dt_string+"_conv.png")
    #shutil.copyfile(".\\vehicle.py", ".\\"+dt_string+"_vehicle.py")
    zipObj = ZipFile(dt_string + '_results.zip', 'w')
    # Add multiple files to the zip
    zipObj.write(dt_string + "_tc_img.zip")
    zipObj.write(dt_string + "_tc_file.zip")
    zipObj.write(dt_string + "_conv.png")
    zipObj.write(dt_string + "_config.py")
    #zipObj.write(dt_string + "_vehicle.py")
    zipObj.close()
    # move the archive to the destination folder
    shutil.move(dt_string + '_results.zip', cf.files["ga_archive"] + dt_string + '_results.zip')
    # remove files
    os.remove(".\\" + dt_string + "_config.py")
    #os.remove(".\\" + dt_string + "_vehicle.py")
    os.remove(".\\" + dt_string + "_conv.png")
    os.remove(".\\" + "conv.png")
    os.remove(".\\" + dt_string + "_tc_img.zip")
    os.remove(".\\" + dt_string + "_tc_file.zip")
    for folder in os.listdir(cf.files["tc_img"]):
        shutil.rmtree(cf.files["tc_img"] + folder)
    for file in os.listdir(cf.files["tc_file"]):
        os.remove(cf.files["tc_file"] + file)
    # create new folders
    #for gen in range(cf.ga["n_gen"]):
    for gen in [0, len(res.history) - 1]:
        if not(os.path.exists(cf.files["tc_img"] + "generation_" + str(gen))):
            os.mkdir(cf.files["tc_img"] + "generation_" + str(gen))
    # build images and write tc to file
    #for gen in range(cf.ga["n_gen"]):
    for gen in [0, len(res.history) - 1]:
        test_cases = {}
        states_tc = {}
        all_routes = {}
        for i, x in enumerate(res.history[gen].pop.get("X")):
            #road_points = x[0].road_points
            map_points = x[0].map_points
            robot_path_x = x[0].robot_path_x
            robot_path_y = x[0].robot_path_y
            fitness = x[0].fitness
            states = x[0].states
            # Simplify the driven path with Ramer-Douglas-Peucker.
            points = rdp(list(zip(robot_path_x, robot_path_y)), epsilon=1)
            image_car_path(map_points, robot_path_x, robot_path_y, points, fitness, gen, i)
            #build_scenario(map_points, robot_path_x, robot_path_y, gen, i)
            test_cases["tc" + str(i)] = map_points
            states_tc["tc" + str(i)] = states
            routes = []
            p = 0
            while p < len(robot_path_x):
                p_set = {}
                p_set["x"] = robot_path_x[p] - 24
                p_set["y"] = robot_path_y[p] - 24
                routes.append(p_set)
                p += 1
            all_routes["tc" + str(i)] = routes
        save_path3 = os.path.join(cf.files["tc_file"], "solutions_" + str(gen) + ".json")
        with open(save_path3, "w") as outfile:
            json.dump(all_routes, outfile, indent=4)
def build_scenario2(road_points, car_path_x, car_path_y, generation, i):
    """Render only the map obstacles (axis-free) for test case *i* of
    *generation* into the per-generation image folder.

    NOTE(review): car_path_x / car_path_y are accepted but never drawn.
    """
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.set_frame_on(False)
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    road_x = []
    road_y = []
    for p in road_points:
        road_x.append(p[0])
        road_y.append(p[1])
    ax.scatter(road_x, road_y, s=150, marker='s', color='k')
    top = cf.model["map_size"] + 1  # NOTE(review): computed but unused
    bottom = 0 -1
    save_path = os.path.join(cf.files["tc_img"], "generation_" + str(generation))
    fig.savefig(save_path +"\\" + "map_" + str(i) + ".png", bbox_inches='tight',pad_inches = 0)
    plt.close(fig)
def build_scenario(road_points, path, run):
    """Render the map obstacles of one solution as an axis-free image and
    save it as ``map<run>.png`` inside *path*.

    Fixed: the original file name used the undefined local ``i`` — it only
    worked by accident when a module-level ``i`` (the caller's loop index,
    equal to *run*) happened to exist.  It now uses the ``run`` parameter.
    The unused ``top``/``bottom`` locals were dropped.
    """
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.set_frame_on(False)
    ax.axes.xaxis.set_visible(False)
    ax.axes.yaxis.set_visible(False)
    road_x = [p[0] for p in road_points]
    road_y = [p[1] for p in road_points]
    ax.scatter(road_x, road_y, s=150, marker='s', color='k')
    fig.savefig(os.path.join(path, "map" + str(run) + ".png"), bbox_inches='tight', pad_inches=0)
    plt.close(fig)
def image_car_path(road_points, car_path_x, car_path_y, points, fitness, generation, i):
    """Plot the map obstacles, the full robot path and its RDP-approximated
    path, titled with the test-case fitness, and save the figure as
    ``tc_<i>.png`` in the given generation's image folder."""
    fig, ax = plt.subplots(figsize=(12, 12))
    #, nodes[closest_index][0], nodes[closest_index][1], 'go'
    #road_points2 = zip(*road_points)
    road_x = []
    road_y = []
    for p in road_points:
        road_x.append(p[0])
        road_y.append(p[1])
    _x = []
    _y = []
    for p in points:
        _x.append(p[0])
        _y.append(p[1])
    ax.plot(car_path_x, car_path_y, 'or', label="Robot path")
    ax.plot(_x, _y, "ob", label="Approximated path")
    ax.plot(road_x, road_y, '.k', label="Map")
    top = cf.model["map_size"] + 1
    bottom = -1
    ax.set_title( "Test case fitenss " + str(fitness) , fontsize=17)
    ax.set_ylim(bottom, top)
    ax.set_xlim(bottom, top)
    ax.xaxis.set_ticks(np.arange(bottom, top, 5))
    ax.yaxis.set_ticks(np.arange(bottom, top, 5))
    ax.legend()
    save_path = os.path.join(cf.files["tc_img"], "generation_" + str(generation))
    fig.savefig(save_path +"\\" + "tc_" + str(i) + ".png")
    plt.close(fig)
algorithm = NSGA2(
n_offsprings=10,
pop_size=cf.ga["population"],
sampling=MyTcSampling(),
crossover=MyTcCrossover(cf.ga["cross_rate"]),
mutation=MyTcMutation(cf.ga["mut_rate"]),
eliminate_duplicates=MyDuplicateElimination(),
)
termination = MultiObjectiveSpaceToleranceTermination(tol=0.0025,
n_last=15,
nth_gen=5,
n_max_gen=cf.ga["n_gen"],
n_max_evals=None)
def calc_novelty(old, new):
novelty = 0
#print("OLD", old)
#print("NEW", new)
if len(new) <= len(old):
shorter = new
else:
shorter = old
for tc in shorter:
if old[tc]["state"] == new[tc]["state"]:
value_list = [old[tc]["value"], new[tc]["value"]]
ratio = max(value_list)/min(value_list)
if ratio >= 2:
novelty += 0.5
else:
novelty += 1
#print("NOVELTY", novelty)
return -novelty
if __name__ == "__main__":
path_folder = ".\\results"
if not(os.path.exists(path_folder)):
os.mkdir(path_folder)
it = 0
for it in range(0, 30):
res_dict = {}
time_list = []
m = 0
base_path = ".\\results" + str(it) + "\\"
base_path = os.path.join(base_path)
if not(os.path.exists(base_path)):
os.mkdir(base_path)
start_time = time.time()
alg_time = 0
while alg_time < 7200:
#for m in range(30):
fit_list = []
print("Evaluation new", m)
t = int(time.time() * 1000)
seed = ((t & 0xff000000) >> 24) + ((t & 0x00ff0000) >> 8) + ((t & 0x0000ff00) << 8) + ((t & 0x000000ff) << 24)
print("Seed ", seed)
start_time = time.time()
res = minimize(
MyProblem(), algorithm, ("n_gen", cf.ga["n_gen"]), seed=seed, verbose=True, save_history=True, eliminate_duplicates=True
)
end_time = time.time()
alg_time += end_time - start_time
print("time", res.exec_time)
print("time, sec ", res.exec_time)
time_list.append(res.exec_time)
res_dict["run" + str(m)] = {}
res_dict["run" + str(m)]["time"] = res.exec_time
hv_values = []
hv = get_performance_indicator("hv", ref_point=np.array([0, 0]))
path = os.path.join(base_path, "run" + str(m))
if not(os.path.exists(path)):
os.mkdir(path)
#path = ".\\results\\run" + str(m)
gen = len(res.history) - 1
#for gen in range(0, len(res.history)):
i = 0
#print(gen)
minim = 0
hv_list = []
all_routes = {}
all_maps = {}
while i < len(res.history[gen].opt):
result = res.history[gen].pop.get("X")[i]
hv_item = res.history[gen].pop.get("F")[i]
hv_list.append(hv_item)
fit = result[0].fitness
if fit < minim:
minim = fit
map_points = result[0].map_points
robot_path_x = result[0].robot_path_x
robot_path_y = result[0].robot_path_y
routes = []
p = 0
while p < len(robot_path_x):
p_set = {}
p_set["x"] = robot_path_x[p] - 25
p_set["y"] = robot_path_y[p] - 25
routes.append(p_set)
p += 1
all_routes["map" + str(i)] = routes
all_maps["map" + str(i)] = map_points
out_path = os.path.join(path, "scenarios.json")
out_path_maps = os.path.join(path, "maps.json")
#save_path3 = os.path.join(cf.files["tc_file"], "solutions_" + str(gen) + ".json")
with open(out_path, "w") as outfile:
json.dump(all_routes, outfile, indent=4)
with open(out_path_maps, "w") as outfile:
json.dump(all_maps, outfile, indent=4)
build_scenario(map_points, path, i)
i += 1
fit_list.append(minim)
#print(min(fit_list))
hv_values.append(hv.calc(np.array(hv_list)))
gen = len(res.history) - 1
reference = res.history[gen].pop.get("X")[0]
novelty_list = []
for i in range(1, len(res.history[gen].opt)):
current = res.history[gen].pop.get("X")[i]
nov = calc_novelty(reference[0].states, current[0].states)
novelty_list.append(nov)
res_dict["run" + str(m)]["fitness"] = fit_list
print(min(fit_list))
res_dict["run" + str(m)]["hv"] = hv_values
if len(novelty_list) > 0:
res_dict["run" + str(m)]["novelty"] = sum(novelty_list)/len(novelty_list)
else:
res_dict["run" + str(m)]["novelty"] = 0
with open("Results.json", "w") as f:
json.dump(res_dict, f, indent=4)
save_results(res)
print("Time remaining ", (7200 - (alg_time))/3600, " hours")
m += 1 | en | 0.323744 | #n_evals = np.array([e.evaluator.n_eval/cf.ga["n_gen"] for e in res.history]) #plt.show() #if os.listdir(cf.files["tc_img"]): #now = datetime.now() #dt_string = str(now.strftime("%d/%m/%Y %H:%M:%S")) # create dirtectory #os.mkdir(cf.files["ga_archive"] + dt_string) # prepare files #shutil.copyfile(".\\vehicle.py", ".\\"+dt_string+"_vehicle.py") # Add multiple files to the zip #zipObj.write(dt_string + "_vehicle.py") # move the archive to the destination folder # remove files #os.remove(".\\" + dt_string + "_vehicle.py") # create new folders #for gen in range(cf.ga["n_gen"]): # build images and write tc to file #for gen in range(cf.ga["n_gen"]): #road_points = x[0].road_points #build_scenario(map_points, robot_path_x, robot_path_y, gen, i) #, nodes[closest_index][0], nodes[closest_index][1], 'go' #road_points2 = zip(*road_points) #print("OLD", old) #print("NEW", new) #print("NOVELTY", novelty) #for m in range(30): #path = ".\\results\\run" + str(m) #for gen in range(0, len(res.history)): #print(gen) #save_path3 = os.path.join(cf.files["tc_file"], "solutions_" + str(gen) + ".json") #print(min(fit_list)) | 1.916127 | 2 |
Aula 05/Q5_ComplejidadCuadratica.py | diexmontana/LabADAGrupoC | 0 | 6616541 | <filename>Aula 05/Q5_ComplejidadCuadratica.py
#Q5: Cual es la complejidad de
def imprimir(arr):
# Muestra los elementos de un arreglo n veces
# La primera vez muestra 0 elementos y en cada vuelta muestra un elemento más
n = len(arr)
for i in range(n):
for j in range(i):
print(arr[j])
arreglo = ["a","b","c","d","e"]
imprimir(arreglo)
# j: 0
# j: 0,1
# j: 0, 1, 2
# -> n(n+1) / 2
# Complejidad O(n^2) | <filename>Aula 05/Q5_ComplejidadCuadratica.py
#Q5: Cual es la complejidad de
def imprimir(arr):
# Muestra los elementos de un arreglo n veces
# La primera vez muestra 0 elementos y en cada vuelta muestra un elemento más
n = len(arr)
for i in range(n):
for j in range(i):
print(arr[j])
arreglo = ["a","b","c","d","e"]
imprimir(arreglo)
# j: 0
# j: 0,1
# j: 0, 1, 2
# -> n(n+1) / 2
# Complejidad O(n^2) | es | 0.971683 | #Q5: Cual es la complejidad de # Muestra los elementos de un arreglo n veces # La primera vez muestra 0 elementos y en cada vuelta muestra un elemento más # j: 0 # j: 0,1 # j: 0, 1, 2 # -> n(n+1) / 2 # Complejidad O(n^2) | 3.75268 | 4 |
tools/devperm.py | PThierry/ewok-kernel | 65 | 6616542 | <gh_stars>10-100
#!/usr/bin/env python3
import sys
# with collection, we keep the same device order as the json file
import json, collections
import re
if len(sys.argv) != 2:
print("usage: ", sys.argv[0], "<filename.json>\n");
sys.exit(1);
filename = sys.argv[1];
########################################################
# Ada file header and footer
########################################################
ada_header = """
-- @file devperm.ads
--
-- Copyright 2018 The wookey project team <<EMAIL>>
-- - <NAME>
-- - <NAME>
-- - <NAME>
-- - <NAME>
-- - <NAME>
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- This file has been generated by tools/devmap.py
--
--
with ewok.perm; use ewok.perm;
with soc.devmap; use soc.devmap;
package ewok.devices.perms
with spark_mode => off
is
""";
ada_footer= """);
end ewok.devices.perms;
""";
with open(filename, "r") as jsonfile:
data = json.load(jsonfile, object_pairs_hook=collections.OrderedDict);
def hex_to_adahex(val):
if not re.match(r'^0$', val):
hexa = re.sub(r'0x', '16#', val);
hexa = re.sub(r'$', '#', hexa);
else:
hexa = val;
return hexa;
def bin_to_adabin(val):
if not re.match(r'^0$', val):
hexa = re.sub(r'0b', '2#', val);
hexa = re.sub(r'$', '#', hexa);
else:
hexa = val;
return hexa;
def lookahead(iterable):
"""Pass through all values from the given iterable, augmented by the
information if there are more values to come after the current one
(True), or if it is the last value (False).
"""
# Get an iterator and pull the first value.
it = iter(iterable)
last = next(it)
# Run the iterator to exhaustion (starting from the second value).
for val in it:
# Report the *previous* value (more to come).
yield last, True
last = val
# Report the last value.
yield last, False
def generate_ada():
print(" permissions : constant array (t_periph_id range t_periph_id'succ (t_periph_id'first) .. t_periph_id'last) of ewok.perm.t_perm_name := (");
counter = 1
for device, has_more in lookahead(data):
if device["type"] != "block":
continue;
dev_id = device["name"].upper();
dev_id = re.sub(r'-', '_', dev_id);
if counter > 1:
print(" ,%s => " % dev_id, end='');
else:
print(" %s => " % dev_id, end='');
counter = counter + 1;
# device permissions
print("%s" % device["permission"]);
#print data;
print(ada_header);
generate_ada();
print(ada_footer);
| #!/usr/bin/env python3
import sys
# with collection, we keep the same device order as the json file
import json, collections
import re
if len(sys.argv) != 2:
print("usage: ", sys.argv[0], "<filename.json>\n");
sys.exit(1);
filename = sys.argv[1];
########################################################
# Ada file header and footer
########################################################
ada_header = """
-- @file devperm.ads
--
-- Copyright 2018 The wookey project team <<EMAIL>>
-- - <NAME>
-- - <NAME>
-- - <NAME>
-- - <NAME>
-- - <NAME>
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- This file has been generated by tools/devmap.py
--
--
with ewok.perm; use ewok.perm;
with soc.devmap; use soc.devmap;
package ewok.devices.perms
with spark_mode => off
is
""";
ada_footer= """);
end ewok.devices.perms;
""";
with open(filename, "r") as jsonfile:
data = json.load(jsonfile, object_pairs_hook=collections.OrderedDict);
def hex_to_adahex(val):
if not re.match(r'^0$', val):
hexa = re.sub(r'0x', '16#', val);
hexa = re.sub(r'$', '#', hexa);
else:
hexa = val;
return hexa;
def bin_to_adabin(val):
if not re.match(r'^0$', val):
hexa = re.sub(r'0b', '2#', val);
hexa = re.sub(r'$', '#', hexa);
else:
hexa = val;
return hexa;
def lookahead(iterable):
"""Pass through all values from the given iterable, augmented by the
information if there are more values to come after the current one
(True), or if it is the last value (False).
"""
# Get an iterator and pull the first value.
it = iter(iterable)
last = next(it)
# Run the iterator to exhaustion (starting from the second value).
for val in it:
# Report the *previous* value (more to come).
yield last, True
last = val
# Report the last value.
yield last, False
def generate_ada():
print(" permissions : constant array (t_periph_id range t_periph_id'succ (t_periph_id'first) .. t_periph_id'last) of ewok.perm.t_perm_name := (");
counter = 1
for device, has_more in lookahead(data):
if device["type"] != "block":
continue;
dev_id = device["name"].upper();
dev_id = re.sub(r'-', '_', dev_id);
if counter > 1:
print(" ,%s => " % dev_id, end='');
else:
print(" %s => " % dev_id, end='');
counter = counter + 1;
# device permissions
print("%s" % device["permission"]);
#print data;
print(ada_header);
generate_ada();
print(ada_footer); | en | 0.626887 | #!/usr/bin/env python3 # with collection, we keep the same device order as the json file ######################################################## # Ada file header and footer ######################################################## -- @file devperm.ads -- -- Copyright 2018 The wookey project team <<EMAIL>> -- - <NAME> -- - <NAME> -- - <NAME> -- - <NAME> -- - <NAME> -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. -- You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- -- Unless required by applicable law or agreed to in writing, software -- distributed under the License is distributed on an "AS IS" BASIS, -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -- See the License for the specific language governing permissions and -- limitations under the License. -- -- This file has been generated by tools/devmap.py -- -- with ewok.perm; use ewok.perm; with soc.devmap; use soc.devmap; package ewok.devices.perms with spark_mode => off is ); end ewok.devices.perms; #', val); #', val); Pass through all values from the given iterable, augmented by the information if there are more values to come after the current one (True), or if it is the last value (False). # Get an iterator and pull the first value. # Run the iterator to exhaustion (starting from the second value). # Report the *previous* value (more to come). # Report the last value. # device permissions #print data; | 2.259711 | 2 |
data-hacking/src/task2/lastfm_api/__init__.py | b1r3k/recruitment-challanges | 0 | 6616543 | <reponame>b1r3k/recruitment-challanges
'''
* Author: <NAME>
* Date: 9/14/13
* Time: 2:03 PM
*
* This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
* To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/.
'''
from client import Client | '''
* Author: <NAME>
* Date: 9/14/13
* Time: 2:03 PM
*
* This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
* To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/.
'''
from client import Client | en | 0.669282 | * Author: <NAME> * Date: 9/14/13 * Time: 2:03 PM * * This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License. * To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/. | 1.500366 | 2 |
smicli/_common_options.py | KSchopmeyer/smipyping | 0 | 6616544 | <reponame>KSchopmeyer/smipyping
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines click options that are used for multiple subcommands and that have
the same definition throughout the environment. This allows the characteristics
and help to be defined once and used multiple times.
"""
from __future__ import absolute_import
import click
def add_options(options):
"""
Accumulate multiple options into a list. This list can be referenced as
a click decorator @att_options(name_of_list)
The list is reversed because of the way click processes options
Parameters:
options: list of click.option definitions
Returns:
Reversed list
"""
def _add_options(func):
""" Reverse options list"""
for option in reversed(options):
func = option(func)
return func
return _add_options
# Common options
sort_option = [ # pylint: disable=invalid-name
click.option('-s', '--sort', is_flag=True, required=False,
help='Sort into alphabetical order by classname.')]
namespace_option = [ # pylint: disable=invalid-name
click.option('-n', '--namespace', type=str,
required=False, metavar='<name>',
help='Namespace to use for this operation. If not defined '
'all namespaces are used')]
no_verify_option = [ # pylint: disable=invalid-name
click.option('-N', '--no_verify', default=False, is_flag=True,
help='Disable verification prompt before the change is '
'executed.')]
| # (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines click options that are used for multiple subcommands and that have
the same definition throughout the environment. This allows the characteristics
and help to be defined once and used multiple times.
"""
from __future__ import absolute_import
import click
def add_options(options):
"""
Accumulate multiple options into a list. This list can be referenced as
a click decorator @att_options(name_of_list)
The list is reversed because of the way click processes options
Parameters:
options: list of click.option definitions
Returns:
Reversed list
"""
def _add_options(func):
""" Reverse options list"""
for option in reversed(options):
func = option(func)
return func
return _add_options
# Common options
sort_option = [ # pylint: disable=invalid-name
click.option('-s', '--sort', is_flag=True, required=False,
help='Sort into alphabetical order by classname.')]
namespace_option = [ # pylint: disable=invalid-name
click.option('-n', '--namespace', type=str,
required=False, metavar='<name>',
help='Namespace to use for this operation. If not defined '
'all namespaces are used')]
no_verify_option = [ # pylint: disable=invalid-name
click.option('-N', '--no_verify', default=False, is_flag=True,
help='Disable verification prompt before the change is '
'executed.')] | en | 0.794809 | # (C) Copyright 2017 Inova Development Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Defines click options that are used for multiple subcommands and that have the same definition throughout the environment. This allows the characteristics and help to be defined once and used multiple times. Accumulate multiple options into a list. This list can be referenced as a click decorator @att_options(name_of_list) The list is reversed because of the way click processes options Parameters: options: list of click.option definitions Returns: Reversed list Reverse options list # Common options # pylint: disable=invalid-name # pylint: disable=invalid-name # pylint: disable=invalid-name | 2.310963 | 2 |
main.py | philipperemy/which-of-your-friends-are-on-tinder | 14 | 6616545 | import json
import sys
import tinder_api as ti
from facebook_utils import query_profile_with_graph_api
from tinder_token import get_access_token
if __name__ == '__main__':
credentials = json.load(open('credentials.json', 'r'))
fb_id = credentials['FB_ID']
fb_auth_token = get_access_token(credentials['FB_EMAIL_ADDRESS'], credentials['FB_PASSWORD'])
token = ti.auth_token(fb_auth_token, fb_id)
print('Which of your friends are on Tinder?')
print('----------')
print('FB_ID = {}'.format(fb_id))
print('FB_AUTH_TOKEN = {}'.format(fb_auth_token))
print('TINDER_TOKEN = {}'.format(token))
print('----------')
if not token:
print('could not get Tinder token. Program will exit.')
sys.exit(0)
print('Successfully connected to Tinder servers.')
my_profile = ti.profile(token)
def get(profile_obj, field_name):
try:
return profile[field_name]
except:
return 'N/A'
print('-' * 130)
pattern = '%20s %20s %10s %20s %50s'
print(pattern % ('First Name', 'Last Name', 'Gender', 'Last Facebook Update Time', 'URL'))
for friend in my_profile['friends']:
profile = query_profile_with_graph_api(profile_id=friend, access_token=fb_auth_token)
request = 'https://www.facebook.com/{}'.format(friend)
print(pattern % (get(profile, 'first_name'),
get(profile, 'last_name'),
get(profile, 'gender'),
get(profile, 'updated_time'),
request))
print('-' * 130)
| import json
import sys
import tinder_api as ti
from facebook_utils import query_profile_with_graph_api
from tinder_token import get_access_token
if __name__ == '__main__':
credentials = json.load(open('credentials.json', 'r'))
fb_id = credentials['FB_ID']
fb_auth_token = get_access_token(credentials['FB_EMAIL_ADDRESS'], credentials['FB_PASSWORD'])
token = ti.auth_token(fb_auth_token, fb_id)
print('Which of your friends are on Tinder?')
print('----------')
print('FB_ID = {}'.format(fb_id))
print('FB_AUTH_TOKEN = {}'.format(fb_auth_token))
print('TINDER_TOKEN = {}'.format(token))
print('----------')
if not token:
print('could not get Tinder token. Program will exit.')
sys.exit(0)
print('Successfully connected to Tinder servers.')
my_profile = ti.profile(token)
def get(profile_obj, field_name):
try:
return profile[field_name]
except:
return 'N/A'
print('-' * 130)
pattern = '%20s %20s %10s %20s %50s'
print(pattern % ('First Name', 'Last Name', 'Gender', 'Last Facebook Update Time', 'URL'))
for friend in my_profile['friends']:
profile = query_profile_with_graph_api(profile_id=friend, access_token=fb_auth_token)
request = 'https://www.facebook.com/{}'.format(friend)
print(pattern % (get(profile, 'first_name'),
get(profile, 'last_name'),
get(profile, 'gender'),
get(profile, 'updated_time'),
request))
print('-' * 130)
| none | 1 | 2.904611 | 3 | |
service_utils/__init__.py | jms/compress_service | 1 | 6616546 | <gh_stars>1-10
__author__ = 'jms'
| __author__ = 'jms' | none | 1 | 1.076801 | 1 | |
app/urls.py | underlost/ulost.net | 1 | 6616547 | from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from coreExtend import views as core_views
admin.autodiscover()
admin.site.site_header = 'ulost.net'
urlpatterns = [
url(r'^admin96/', include(admin.site.urls)),
url(r'^', include('coreExtend.urls', namespace='CoreExtend')),
url(r'^', include('redirection.urls', namespace='Redirection')),
#Static
url(r'^$', TemplateView.as_view(template_name="index.html"), name="Index_page"),
url(r'^404/$', TemplateView.as_view(template_name="404.html"), name="404_page"),
url(r'^500/$', TemplateView.as_view(template_name="500.html"), name="500_page"),
url(r'^robots\.txt$', TemplateView.as_view(template_name="robots.txt", content_type='text/plain')),
url(r'^humans\.txt$', TemplateView.as_view(template_name="humans.txt", content_type='text/plain')),
#API
url(r'^api/v1/auth/', include('rest_framework.urls', namespace='rest_framework')),
#url(r'^api/v1/', include('redirection.api', namespace='RedirectionAPI')),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from coreExtend import views as core_views
admin.autodiscover()
admin.site.site_header = 'ulost.net'
urlpatterns = [
url(r'^admin96/', include(admin.site.urls)),
url(r'^', include('coreExtend.urls', namespace='CoreExtend')),
url(r'^', include('redirection.urls', namespace='Redirection')),
#Static
url(r'^$', TemplateView.as_view(template_name="index.html"), name="Index_page"),
url(r'^404/$', TemplateView.as_view(template_name="404.html"), name="404_page"),
url(r'^500/$', TemplateView.as_view(template_name="500.html"), name="500_page"),
url(r'^robots\.txt$', TemplateView.as_view(template_name="robots.txt", content_type='text/plain')),
url(r'^humans\.txt$', TemplateView.as_view(template_name="humans.txt", content_type='text/plain')),
#API
url(r'^api/v1/auth/', include('rest_framework.urls', namespace='rest_framework')),
#url(r'^api/v1/', include('redirection.api', namespace='RedirectionAPI')),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| en | 0.374447 | #Static #API #url(r'^api/v1/', include('redirection.api', namespace='RedirectionAPI')), | 1.906769 | 2 |
model.py | malllabiisc/kg-geometry | 18 | 6616548 | <filename>model.py
import sys
import os
import numpy as np
import cPickle as pickle
import argparse
class Model:
def __init__(self):
self.E = None
self.R = None
self.eNames = None
self.rNames = None
self.modelName = None
self.other_params = {}
def setValues(self, E, R, enames, rnames, mname):
self.E = E
self.R = R
self.eNames = enames
self.rNames = rnames
self.modelName = mname
def saveModel(self, filename):
outdict = {"model":self.modelName, "E":self.E, "R":self.R, "eNames":self.eNames, "rNames":self.rNames}
outdict.update(self.other_params)
pickle.dump(outdict, open(filename, 'wb'))
def loadModel(self, filename):
x = pickle.load(open(filename, 'rb'))
if type(x['model']) == str:
self.E = x['E']
self.R = x['R']
self.eNames = x['eNames']
self.rNames = x['rNames']
self.modelName = x['model']
self.fpos_test = x.get('fpos test', [])
#self.fpos_test = x['fpos test']
x = None
else:
self.E = x['model'].E
self.R = x['model'].R
self.fpos_test = x['fpos test']
self.modelName = "hole"
self.eNames = [str(i) for i in range(self.E.shape[0])]
self.rNames = [str(i) for i in range(self.R.shape[0])]
self.model = x
def loadHolEModel(modelFile, dataFile, outputFile, mname):
holEModel = pickle.load(open(modelFile, 'rb'))['model']
model = Model()
data = pickle.load(open(dataFile, 'rb'))
model.setValues(holEModel.E, holEModel.R, data['entities'], data['relations'], mname)
model.saveModel(outputFile)
def loadComplexModel(modelFile, dataFile, outputFile, mname):
complexModel = pickle.load(open(modelFile, 'rb'))
model = Model()
data = pickle.load(open(dataFile, 'rb'))
model.setValues(complexModel['E'], complexModel['R'], data['entities'], data['relations'], mname)
model.saveModel(outputFile)
def loadTransEModel(entFile, relFile, dataFile, outputFile, mname):
ent2vec = np.array(np.genfromtxt(entFile), dtype=np.float32)
rel2vec = np.array(np.genfromtxt(relFile), dtype=np.float32)
model = Model()
data = pickle.load(open(dataFile, 'rb'))
model.setValues(ent2vec, rel2vec, data['entities'], data['relations'], mname)
model.saveModel(outputFile)
def loadSTransEModel(entFile, relFile, mhFile, mtFile, dataFile, outputFile, mname):
ent2vec = np.array(np.genfromtxt(entFile), dtype=np.float32)
rel2vec = np.array(np.genfromtxt(relFile), dtype=np.float32)
ne, de = ent2vec.shape
nr, dr = rel2vec.shape
mat_h = np.reshape(np.array(np.genfromtxt(mhFile), dtype=np.float32), [nr, dr, de])
mat_t = np.reshape(np.array(np.genfromtxt(mtFile), dtype=np.float32), [nr, dr, de])
model = Model()
data = pickle.load(open(dataFile, 'rb'))
model.setValues(ent2vec, rel2vec, data['entities'], data['relations'], mname)
model.other_params = {"MH":mat_h, "MT":mat_t}
model.saveModel(outputFile)
def loadTransRModel(entFile, relFile, mFile, dataFile, outputFile, mname):
ent2vec = np.array(np.genfromtxt(entFile), dtype=np.float32)
rel2vec = np.array(np.genfromtxt(relFile), dtype=np.float32)
ne, de = ent2vec.shape
nr, dr = rel2vec.shape
mat = np.reshape(np.array(np.genfromtxt(mFile), dtype=np.float32), [nr, dr, de])
model = Model()
data = pickle.load(open(dataFile, 'rb'))
model.setValues(ent2vec, rel2vec, data['entities'], data['relations'], mname)
model.other_params = {"M":mat}
model.saveModel(outputFile)
def getParser():
parser = argparse.ArgumentParser(description="parser for arguments")
parser.add_argument("-m", "--mdir", type=str, help="directory containing model file(s)", required=True)
parser.add_argument("-o", "--odir", type=str, help="output directory to dump new pickle", required=True)
parser.add_argument("-n", "--name", type=str, help="model name", required=True)
parser.add_argument("-d", "--dataname", type=str, help="name of dataset", required=True)
parser.add_argument("--nnegs", type=int, nargs="+", help="#negatives", default=[1,50,100])
parser.add_argument("--dims", type=int, nargs="+", help="#dimensions", default=[50,100,200])
return parser
def main():
    """Command-line driver: convert raw embedding-model output files into
    pickled Model objects.

    For every (#negatives, dimension) combination from --nnegs/--dims the
    expected raw files are looked up under <mdir>/neg_<n>/dim_<d>/ and, if
    present, converted via the loader selected by --name and written to
    <odir>.  Missing inputs and already-existing outputs are skipped.
    """
    parser = getParser()
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse exits on bad arguments; show the full help and exit non-zero.
        parser.print_help()
        sys.exit(1)
    basedir = args.mdir
    # Dispatch table: lower-cased model name -> loader function.
    modelFn = {
        "hole": loadHolEModel,
        "complex": loadComplexModel,
        "distmult": loadComplexModel,
        "transe": loadTransEModel,
        "stranse": loadSTransEModel,
        "transr": loadTransRModel,
    }
    mkey = args.name.lower()
    # Look the loader up by the lower-cased key; the original indexed
    # modelFn[args.name] and raised KeyError for mixed-case names even
    # though the branches below compare args.name.lower().
    if mkey not in modelFn:
        print("Unknown model name: %s" % args.name)
        sys.exit(1)
    for nneg in args.nnegs:
        for dim in args.dims:
            subdir = os.path.join(basedir, "neg_%d" % nneg, "dim_%d" % dim)
            if mkey == "transe":
                entFile = os.path.join(subdir, "entity2vec.bern")
                relFile = os.path.join(subdir, "relation2vec.bern")
                modelargs = {"entFile": entFile, "relFile": relFile,
                             "mname": args.name}
                modelFile = entFile  # file whose existence is checked below
            elif mkey == "transr":
                entFile = os.path.join(subdir, "entity2vec.bern")
                relFile = os.path.join(subdir, "relation2vec.bern")
                matFile = os.path.join(subdir, "A.bern")  # projection matrices
                modelargs = {"entFile": entFile, "relFile": relFile,
                             "mname": args.name, "mFile": matFile}
                modelFile = entFile
            elif mkey == "stranse":
                # File-name pattern emitted by the STransE trainer.
                prefix = "STransE.s%d.neg%d.r0.0001.m1.0.l1_1.i_0.e2000"
                name = prefix % (dim, nneg)
                entFile = os.path.join(subdir, name + ".entity2vec")
                relFile = os.path.join(subdir, name + ".relation2vec")
                mhFile = os.path.join(subdir, name + ".W1")  # head projections
                mtFile = os.path.join(subdir, name + ".W2")  # tail projections
                modelargs = {"entFile": entFile, "relFile": relFile,
                             "mname": args.name, "mhFile": mhFile,
                             "mtFile": mtFile}
                modelFile = entFile
            elif mkey == "hole":
                modelFile = os.path.join(subdir,
                                         "%s.%s.p" % (args.dataname, args.name))
                modelargs = {"modelFile": modelFile, "mname": args.name}
            else:  # "complex" / "distmult"
                modelFile = os.path.join(
                    subdir,
                    "model.l2_0.030000.e_%d.lr_0.500000.lc_0.000000.p" % dim)
                modelargs = {"modelFile": modelFile, "mname": args.name}
            dataFile = "./data/%s.%s.bin" % (args.dataname, args.name)
            outputFile = os.path.join(
                args.odir,
                "%s.%s.n%d.d%d.p" % (args.dataname, args.name, nneg, dim))
            if not os.path.exists(modelFile):
                print("file not there")
                print(modelFile)
                continue
            if os.path.exists(outputFile):
                print("File already exists")
                print(outputFile)
                continue
            modelargs["dataFile"] = dataFile
            modelargs["outputFile"] = outputFile
            print("Model : %s\tData : %s\tDim : %d\tNeg : %d"
                  % (args.name, args.dataname, dim, nneg))
            modelFn[mkey](**modelargs)
# Run the model-conversion driver only when executed as a script.
if __name__ == "__main__":
    main()
| <filename>model.py
import sys
import os
import numpy as np
import cPickle as pickle
import argparse
class Model:
    """Container for a knowledge-graph embedding model.

    Holds the entity/relation embedding matrices (E, R), the matching
    entity/relation name lists, the model name, and any extra per-model
    parameters (e.g. projection matrices in other_params), and
    (de)serializes everything with pickle.
    """

    def __init__(self):
        self.E = None            # entity embedding matrix
        self.R = None            # relation embedding matrix
        self.eNames = None       # entity names, aligned with rows of E
        self.rNames = None       # relation names, aligned with rows of R
        self.modelName = None    # e.g. "transe", "hole", "complex", ...
        self.other_params = {}   # extra arrays (projection matrices etc.)

    def setValues(self, E, R, enames, rnames, mname):
        """Populate the core fields in one call."""
        self.E = E
        self.R = R
        self.eNames = enames
        self.rNames = rnames
        self.modelName = mname

    def saveModel(self, filename):
        """Pickle the model (core fields plus other_params) to *filename*."""
        outdict = {"model": self.modelName, "E": self.E, "R": self.R,
                   "eNames": self.eNames, "rNames": self.rNames}
        outdict.update(self.other_params)
        # 'with' closes the handle deterministically (the original leaked it).
        with open(filename, 'wb') as f:
            pickle.dump(outdict, f)

    def loadModel(self, filename):
        """Load a model pickled either by saveModel (dict whose 'model'
        entry is the model-name string) or a raw HolE training result
        (whose 'model' entry is the trained object itself)."""
        with open(filename, 'rb') as f:
            x = pickle.load(f)
        if isinstance(x['model'], str):
            # Dict layout written by saveModel above.
            self.E = x['E']
            self.R = x['R']
            self.eNames = x['eNames']
            self.rNames = x['rNames']
            self.modelName = x['model']
            self.fpos_test = x.get('fpos test', [])
            x = None  # drop the reference so the loaded dict can be collected
        else:
            # Raw HolE result: 'model' carries the trained object with E/R.
            self.E = x['model'].E
            self.R = x['model'].R
            self.fpos_test = x['fpos test']
            self.modelName = "hole"
            # No name lists in this layout; synthesize index strings.
            self.eNames = [str(i) for i in range(self.E.shape[0])]
            self.rNames = [str(i) for i in range(self.R.shape[0])]
            self.model = x
def loadHolEModel(modelFile, dataFile, outputFile, mname):
    """Convert a pickled HolE training result into a Model pickle.

    modelFile: pickle containing {'model': obj} where obj has E/R arrays.
    dataFile:  pickle with 'entities' and 'relations' name lists.
    outputFile: destination for the converted Model pickle.
    """
    # 'with' closes the handles (the original left both files open).
    with open(modelFile, 'rb') as f:
        holEModel = pickle.load(f)['model']
    with open(dataFile, 'rb') as f:
        data = pickle.load(f)
    model = Model()
    model.setValues(holEModel.E, holEModel.R,
                    data['entities'], data['relations'], mname)
    model.saveModel(outputFile)
def loadComplexModel(modelFile, dataFile, outputFile, mname):
    """Convert a pickled ComplEx/DistMult result into a Model pickle.

    modelFile: pickle containing 'E' and 'R' embedding arrays.
    dataFile:  pickle with 'entities' and 'relations' name lists.
    outputFile: destination for the converted Model pickle.
    """
    # 'with' closes the handles (the original left both files open).
    with open(modelFile, 'rb') as f:
        complexModel = pickle.load(f)
    with open(dataFile, 'rb') as f:
        data = pickle.load(f)
    model = Model()
    model.setValues(complexModel['E'], complexModel['R'],
                    data['entities'], data['relations'], mname)
    model.saveModel(outputFile)
def loadTransEModel(entFile, relFile, dataFile, outputFile, mname):
    """Convert whitespace-delimited TransE embedding text files into a
    Model pickle.

    entFile/relFile: text matrices (one row per entity/relation).
    dataFile: pickle with 'entities' and 'relations' name lists.
    """
    ent2vec = np.array(np.genfromtxt(entFile), dtype=np.float32)
    rel2vec = np.array(np.genfromtxt(relFile), dtype=np.float32)
    # 'with' closes the handle (the original left the file open).
    with open(dataFile, 'rb') as f:
        data = pickle.load(f)
    model = Model()
    model.setValues(ent2vec, rel2vec,
                    data['entities'], data['relations'], mname)
    model.saveModel(outputFile)
def loadSTransEModel(entFile, relFile, mhFile, mtFile, dataFile, outputFile, mname):
    """Convert STransE text output (embeddings + per-relation head/tail
    projection matrices) into a Model pickle.

    mhFile/mtFile hold all projection matrices concatenated; they are
    reshaped to one (dr x de) matrix per relation and stored in
    other_params under "MH"/"MT".
    """
    ent2vec = np.array(np.genfromtxt(entFile), dtype=np.float32)
    rel2vec = np.array(np.genfromtxt(relFile), dtype=np.float32)
    ne, de = ent2vec.shape
    nr, dr = rel2vec.shape
    mat_h = np.reshape(np.array(np.genfromtxt(mhFile), dtype=np.float32),
                       [nr, dr, de])
    mat_t = np.reshape(np.array(np.genfromtxt(mtFile), dtype=np.float32),
                       [nr, dr, de])
    # 'with' closes the handle (the original left the file open).
    with open(dataFile, 'rb') as f:
        data = pickle.load(f)
    model = Model()
    model.setValues(ent2vec, rel2vec,
                    data['entities'], data['relations'], mname)
    model.other_params = {"MH": mat_h, "MT": mat_t}
    model.saveModel(outputFile)
def loadTransRModel(entFile, relFile, mFile, dataFile, outputFile, mname):
    """Convert TransR text output (embeddings + per-relation projection
    matrices) into a Model pickle.

    mFile holds all projection matrices concatenated; it is reshaped to
    one (dr x de) matrix per relation and stored in other_params["M"].
    """
    ent2vec = np.array(np.genfromtxt(entFile), dtype=np.float32)
    rel2vec = np.array(np.genfromtxt(relFile), dtype=np.float32)
    ne, de = ent2vec.shape
    nr, dr = rel2vec.shape
    mat = np.reshape(np.array(np.genfromtxt(mFile), dtype=np.float32),
                     [nr, dr, de])
    # 'with' closes the handle (the original left the file open).
    with open(dataFile, 'rb') as f:
        data = pickle.load(f)
    model = Model()
    model.setValues(ent2vec, rel2vec,
                    data['entities'], data['relations'], mname)
    model.other_params = {"M": mat}
    model.saveModel(outputFile)
def getParser():
    """Build the argparse parser for the model-conversion script."""
    argspec = [
        (("-m", "--mdir"),
         dict(type=str, help="directory containing model file(s)", required=True)),
        (("-o", "--odir"),
         dict(type=str, help="output directory to dump new pickle", required=True)),
        (("-n", "--name"),
         dict(type=str, help="model name", required=True)),
        (("-d", "--dataname"),
         dict(type=str, help="name of dataset", required=True)),
        (("--nnegs",),
         dict(type=int, nargs="+", help="#negatives", default=[1, 50, 100])),
        (("--dims",),
         dict(type=int, nargs="+", help="#dimensions", default=[50, 100, 200])),
    ]
    parser = argparse.ArgumentParser(description="parser for arguments")
    for flags, options in argspec:
        parser.add_argument(*flags, **options)
    return parser
def main():
    """Command-line driver: convert raw embedding-model output files into
    pickled Model objects.

    For every (#negatives, dimension) combination from --nnegs/--dims the
    expected raw files are looked up under <mdir>/neg_<n>/dim_<d>/ and, if
    present, converted via the loader selected by --name and written to
    <odir>.  Missing inputs and already-existing outputs are skipped.
    """
    parser = getParser()
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse exits on bad arguments; show the full help and exit non-zero.
        parser.print_help()
        sys.exit(1)
    basedir = args.mdir
    # Dispatch table: lower-cased model name -> loader function.
    modelFn = {
        "hole": loadHolEModel,
        "complex": loadComplexModel,
        "distmult": loadComplexModel,
        "transe": loadTransEModel,
        "stranse": loadSTransEModel,
        "transr": loadTransRModel,
    }
    mkey = args.name.lower()
    # Look the loader up by the lower-cased key; the original indexed
    # modelFn[args.name] and raised KeyError for mixed-case names even
    # though the branches below compare args.name.lower().
    if mkey not in modelFn:
        print("Unknown model name: %s" % args.name)
        sys.exit(1)
    for nneg in args.nnegs:
        for dim in args.dims:
            subdir = os.path.join(basedir, "neg_%d" % nneg, "dim_%d" % dim)
            if mkey == "transe":
                entFile = os.path.join(subdir, "entity2vec.bern")
                relFile = os.path.join(subdir, "relation2vec.bern")
                modelargs = {"entFile": entFile, "relFile": relFile,
                             "mname": args.name}
                modelFile = entFile  # file whose existence is checked below
            elif mkey == "transr":
                entFile = os.path.join(subdir, "entity2vec.bern")
                relFile = os.path.join(subdir, "relation2vec.bern")
                matFile = os.path.join(subdir, "A.bern")  # projection matrices
                modelargs = {"entFile": entFile, "relFile": relFile,
                             "mname": args.name, "mFile": matFile}
                modelFile = entFile
            elif mkey == "stranse":
                # File-name pattern emitted by the STransE trainer.
                prefix = "STransE.s%d.neg%d.r0.0001.m1.0.l1_1.i_0.e2000"
                name = prefix % (dim, nneg)
                entFile = os.path.join(subdir, name + ".entity2vec")
                relFile = os.path.join(subdir, name + ".relation2vec")
                mhFile = os.path.join(subdir, name + ".W1")  # head projections
                mtFile = os.path.join(subdir, name + ".W2")  # tail projections
                modelargs = {"entFile": entFile, "relFile": relFile,
                             "mname": args.name, "mhFile": mhFile,
                             "mtFile": mtFile}
                modelFile = entFile
            elif mkey == "hole":
                modelFile = os.path.join(subdir,
                                         "%s.%s.p" % (args.dataname, args.name))
                modelargs = {"modelFile": modelFile, "mname": args.name}
            else:  # "complex" / "distmult"
                modelFile = os.path.join(
                    subdir,
                    "model.l2_0.030000.e_%d.lr_0.500000.lc_0.000000.p" % dim)
                modelargs = {"modelFile": modelFile, "mname": args.name}
            dataFile = "./data/%s.%s.bin" % (args.dataname, args.name)
            outputFile = os.path.join(
                args.odir,
                "%s.%s.n%d.d%d.p" % (args.dataname, args.name, nneg, dim))
            if not os.path.exists(modelFile):
                print("file not there")
                print(modelFile)
                continue
            if os.path.exists(outputFile):
                print("File already exists")
                print(outputFile)
                continue
            modelargs["dataFile"] = dataFile
            modelargs["outputFile"] = outputFile
            print("Model : %s\tData : %s\tDim : %d\tNeg : %d"
                  % (args.name, args.dataname, dim, nneg))
            modelFn[mkey](**modelargs)
# Run the model-conversion driver only when executed as a script.
if __name__ == "__main__":
    main()
| en | 0.332365 | #self.fpos_test = x['fpos test'] #for nneg in [1, 50, 100]: #for dim in [50, 100, 200]: #modelFile = os.path.join(basedir, "neg_%d"%nneg, "dim_%d"%dim, "STransE.s%d.neg%d.r0.0001.m1.l1_1.e2000.entity2vec"%(dim, nneg)) #prefix = "STransE.s%d.r0.0001.m1.0.l1_1.i_0.e2000" #prefix= "STransE.s%d.r0.0005.m5.0.l1_1.e2000" #prefix = "STransE.s%d.neg%d.r%s.m%s.l%s.e%s" #prefix = "STransE.s%d.r%s.m%s.l%s.i_0.e%s" #name = prefix%(dim, nneg, r, m, l, e) #name = prefix%(dim, r, m, l, e) #load the model baseDir = "/scratch/home/chandrahas/JointEmbedding.2017/complEx/complex/new_results" modelList = { "./DM/neg_1/dim_50/model.l2_0.010000.e_50.lr_0.100000.lc_0.000000.p" : { 'model': "distmult", 'neg':1, 'dim':50}, "./DM/neg_1/dim_100/model.l2_0.010000.e_100.lr_0.100000.lc_0.000000.p" : { 'model': "distmult", 'neg':1, 'dim':100}, "./DM/neg_1/dim_200/model.l2_0.010000.e_200.lr_0.100000.lc_0.000000.p" : { 'model': "distmult", 'neg':1, 'dim':200}, "./ComplEx/neg_1/dim_50/model.l2_0.010000.e_50.lr_0.100000.lc_0.000000.p" : { 'model': "complex", 'neg':1, 'dim':50}, "./ComplEx/neg_1/dim_100/model.l2_0.010000.e_100.lr_0.100000.lc_0.000000.p" : { 'model': "complex", 'neg':1, 'dim':100} #"./DM/neg_1/dim_50/model.l2_0.010000.e_200.lr_0.100000.lc_0.000000.p" : { 'model': "DistMult", 'neg':1, 'dim':50}, #"./DM/neg_1/dim_50/model.l2_0.010000.e_100.lr_0.100000.lc_0.000000.p" : { 'model': "DistMult", 'neg':1, 'dim':50}, } for filename, mdesc in modelList.iteritems(): outputFile = "./data/fb15k.%s.n%d.d%d.p" % (mdesc['model'], mdesc['neg'], mdesc['dim']) print "Saving model %s to %s ..." 
%(filename, outputFile) loadComplexModel(os.path.join(baseDir, filename), "./data/fb15k.complex.bin", outputFile) baseDir = "/scratch/home/chandrahas/JointEmbedding/TransR/Relation_Extraction" modelList = [ #"TransR" : {'ent':"TransR/entity2vec.bern", "rel":"TransR/relation2vec.bern", "neg":1, "dim":100}, ("TransE", {'ent':"entity2vec.bern", "rel":"relation2vec.bern", "neg":100, "dim":50}), ("TransE", {'ent':"entity2vec.bern", "rel":"relation2vec.bern", "neg":100, "dim":100}), ("TransE", {'ent':"entity2vec.bern", "rel":"relation2vec.bern", "neg":100, "dim":200}), #"TransH" : {'ent':"TransH/entity2vec.txtbern", "rel":"TransH/relation2vec.txtbern", "neg":1, "dim":100}, ] for mname, mdesc in modelList: outputfile = "./data/fb15k.%s.n%d.d%d.p" % (mname, mdesc['neg'], mdesc['dim']) entfile = os.path.join(baseDir, mname, "results", "neg_%d"%mdesc['neg'], "dim_%d"%mdesc['dim'], mdesc['ent']) relfile = os.path.join(baseDir, mname, "results", "neg_%d"%mdesc['neg'], "dim_%d"%mdesc['dim'], mdesc['rel']) datafile = "./data/fb15k.TransE.bin" print "Saving model %s to %s ..." 
% (mname, outputfile) loadTransModel(entfile, relfile, datafile, outputfile, mname) basedir = "/scratch/home/chandrahas/JointEmbedding.2017/STransE/Datasets/FB15k" entfile = os.path.join(basedir, "STransE.s100.r0.0001.m1.l1_1.e2000.entity2vec") relfile = os.path.join(basedir, "STransE.s100.r0.0001.m1.l1_1.e2000.relation2vec") neg = 1 dim = 100 mname = "STransE" datafile = "./data/fb15k.TransE.bin" outputfile = "./data/fb15k.%s.n%d.d%d.p" % (mname, neg, dim) loadSTransEModel(entfile, relfile, datafile, outputfile, mname) basedir = "/scratch/home/chandrahas/JointEmbedding.2017/HOLE/holographic-embeddings/results_geometry" for i in [50, 100, 200]: modelFile = os.path.join(basedir, "neg_1", "dim_%d"%i, "fb15k.hole.p") dataFile = "./data/fb15k.bin" outputFile = "./data/fb15k.hole.n1.d%d.p" % i loadHolEModel(modelFile, dataFile, outputFile) basedir = "./embeddings/fb15k/TransR" for nneg in [1, 50, 100]: for dim in [50, 100, 200]: #modelFile = os.path.join(basedir, "neg_%d"%nneg, "dim_%d"%dim, "STransE.s%d.neg%d.r0.0001.m1.l1_1.e2000.entity2vec"%(dim, nneg)) entFile = os.path.join(basedir, "neg_%d"%nneg, "dim_%d"%dim, "entity2vec.bern") relFile = os.path.join(basedir, "neg_%d"%nneg, "dim_%d"%dim, "relation2vec.bern") dataFile = "./data/fb15k.TransE.bin" outputFile = "./data/fb15k.TransR.n%d.d%d.p" % (nneg, dim) if not os.path.exists(entFile): print "file not there" print entFile continue if os.path.exists(outputFile): print "File already exists" print outputFile continue print "Setting dim : %d\t neg : %d" % (dim, nneg) mname="TransR" loadTransRModel(entFile, relFile, dataFile, outputFile, mname) | 2.774267 | 3 |
back/bookclub/models/invitation.py | wonesy/bookclub | 0 | 6616549 | <reponame>wonesy/bookclub
from pydantic import BaseModel
# Request payload for creating a book-club invitation.
class Invitation(BaseModel):
    # id of the club the invitation is for
    club: int
# Response returned after an invitation is created.
class InvitationResponse(BaseModel):
    # id of the club the invitation is for
    club: int
    # invitation token string -- presumably opaque and handed to the
    # invitee; TODO confirm issuing scheme against the caller
    token: str
| from pydantic import BaseModel
# Request payload for creating a book-club invitation.
class Invitation(BaseModel):
    # id of the club the invitation is for
    club: int
class InvitationResponse(BaseModel):
club: int
token: str | none | 1 | 2.229146 | 2 | |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/python/TimeRange.py | srcarter3/awips2 | 0 | 6616550 | <reponame>srcarter3/awips2
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Provides a AWIPS I GFE compatible wrapper to TimeRange
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------- -------- --------- ---------------------------------------------
# Apr 10, 2008 chammack Initial Creation.
# Sep 30, 2008 1566 wdougher Quit returning TimeRange from overlaps(), etc.
# Sep 16, 2009 2899 njensen Huge performance boost by caching
# Apr 04, 2013 1787 randerso Removed isValid check to allow 0 duration
# time ranges to be used in python
# Feb 06, 2017 5959 randerso Removed Java .toString() calls
#
##
##
# This is a base file that is not intended to be overridden.
##
from com.raytheon.uf.common.time import TimeRange as JavaTimeRange
import AbsTime
import JUtil
class TimeRange(JUtil.JavaWrapperClass):
    """AWIPS I / GFE compatible Python wrapper around the Java
    com.raytheon.uf.common.time.TimeRange.

    Start time, end time, duration and hash are computed lazily and
    cached on first access; all real work is delegated to the wrapped
    Java object.
    """

    def __init__(self, time1, time2=None):
        # Construct either from a single Java TimeRange (time2 omitted) or
        # from two endpoints, each an AbsTime.AbsTime or a Java Date.
        # these vars are only calculated if requested
        self.__hash = None
        self.__start = None
        self.__end = None
        self.__duration = None
        # Single argument assumes a passed in java timerange
        if time2 == None:
            self.__tr = time1
            return
        # Check to see if abstimes or java classes are passed in
        if isinstance(time1, AbsTime.AbsTime):
            time1_java = time1.javaDate()
            self.__start = time1  # pre-seed the cache; AbsTime already known
        else:
            time1_java = time1
        if isinstance(time2, AbsTime.AbsTime):
            time2_java = time2.javaDate()
            self.__end = time2  # pre-seed the cache
        else:
            time2_java = time2
        self.__tr = JavaTimeRange(time1_java, time2_java)

    def startTime(self):
        # Start of the range as an AbsTime; computed once, then cached.
        if not self.__start:
            self.__start = self.__asAbsTime(self.__tr.getStart())
        return self.__start

    def endTime(self):
        # End of the range as an AbsTime; computed once, then cached.
        if not self.__end:
            self.__end = self.__asAbsTime(self.__tr.getEnd())
        return self.__end

    def __asAbsTime(self, date):
        # Convert a Java Date (epoch milliseconds) to an AbsTime (seconds).
        if date is None:
            return None
        A = date.getTime()
        # NOTE(review): A / 1000 is integer division under Python 2/Jython;
        # under Python 3 it would produce a float -- confirm target runtime.
        return AbsTime.AbsTime(A / 1000);

    def duration(self):
        # Duration in seconds (Java side reports milliseconds); cached.
        if not self.__duration:
            self.__duration = self.__tr.getDuration() / 1000
        return self.__duration

    def toJavaObj(self):
        # Expose the wrapped Java TimeRange (used by all binary operations).
        return self.__tr

    def contains(self, timeOrTimeRange):
        # Accepts an AbsTime or a TimeRange; implicitly returns None for
        # any other argument type.
        if isinstance(timeOrTimeRange, AbsTime.AbsTime):
            return self.__tr.contains(timeOrTimeRange.javaDate())
        if isinstance(timeOrTimeRange, TimeRange):
            return self.__tr.contains(timeOrTimeRange.toJavaObj())

    def overlaps(self, timeRange):
        # Delegated predicate: do the two ranges share any time?
        return self.__tr.overlaps(timeRange.toJavaObj())

    def isAdjacentTo(self, timeRange):
        # Delegated predicate.
        return self.__tr.isAdjacentTo(timeRange.toJavaObj())

    # The combinators below return new Python wrappers around the Java
    # results.
    def join(self, timeRange):
        return TimeRange(self.__tr.join(timeRange.toJavaObj()))

    def intersection(self, timeRange):
        return TimeRange(self.__tr.intersection(timeRange.toJavaObj()))

    def gap(self, timeRange):
        return TimeRange(self.__tr.gap(timeRange.toJavaObj()))

    def span(self, timeRange):
        return TimeRange(self.__tr.span(timeRange.toJavaObj()))

    def combineWith(self, timeRange):
        return TimeRange(self.__tr.combineWith(timeRange.toJavaObj()))

    def isValid(self):
        return self.__tr.isValid()

    # Equality and ordering delegate to the Java object's equals/compareTo.
    def __eq__(self, other):
        return self.__tr.equals(other.toJavaObj())

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) < 0

    def __le__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) <= 0

    def __gt__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) > 0

    def __ge__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) >= 0

    def __hash__(self):
        # XOR of start/end unix times, cached.  A computed hash of 0 is
        # recomputed on every call, which is harmless.
        if not self.__hash:
            self.__hash = self.startTime().unixTime() ^ self.endTime().unixTime()
        return self.__hash

    def __str__(self):
        return str(self.__tr)

    def __repr__(self):
        return str(self.__tr)
def javaTimeRangeListToPyList(timeRanges):
    """Convert a Java list of Java TimeRanges into a Python list of
    (startUnixTime, endUnixTime) tuples."""
    return [encodeJavaTimeRange(timeRanges.get(idx))
            for idx in range(timeRanges.size())]
def encodeJavaTimeRange(javaTimeRange):
    """Encode a Java TimeRange as a (startUnixTime, endUnixTime) tuple."""
    wrapped = TimeRange(javaTimeRange)
    return (wrapped.startTime().unixTime(), wrapped.endTime().unixTime())
def allTimes():
    """Return a Python TimeRange spanning the Java 'all times' range."""
    everything = JavaTimeRange.allTimes()
    return TimeRange(everything.getStart(), everything.getEnd())
def default():
    """Return a wrapper around a default-constructed Java TimeRange."""
    return TimeRange(JavaTimeRange())
| ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Provides a AWIPS I GFE compatible wrapper to TimeRange
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------- -------- --------- ---------------------------------------------
# Apr 10, 2008 chammack Initial Creation.
# Sep 30, 2008 1566 wdougher Quit returning TimeRange from overlaps(), etc.
# Sep 16, 2009 2899 njensen Huge performance boost by caching
# Apr 04, 2013 1787 randerso Removed isValid check to allow 0 duration
# time ranges to be used in python
# Feb 06, 2017 5959 randerso Removed Java .toString() calls
#
##
##
# This is a base file that is not intended to be overridden.
##
from com.raytheon.uf.common.time import TimeRange as JavaTimeRange
import AbsTime
import JUtil
class TimeRange(JUtil.JavaWrapperClass):
    """AWIPS I / GFE compatible Python wrapper around the Java
    com.raytheon.uf.common.time.TimeRange.

    Start time, end time, duration and hash are computed lazily and
    cached on first access; all real work is delegated to the wrapped
    Java object.
    """

    def __init__(self, time1, time2=None):
        # Construct either from a single Java TimeRange (time2 omitted) or
        # from two endpoints, each an AbsTime.AbsTime or a Java Date.
        # these vars are only calculated if requested
        self.__hash = None
        self.__start = None
        self.__end = None
        self.__duration = None
        # Single argument assumes a passed in java timerange
        if time2 == None:
            self.__tr = time1
            return
        # Check to see if abstimes or java classes are passed in
        if isinstance(time1, AbsTime.AbsTime):
            time1_java = time1.javaDate()
            self.__start = time1  # pre-seed the cache; AbsTime already known
        else:
            time1_java = time1
        if isinstance(time2, AbsTime.AbsTime):
            time2_java = time2.javaDate()
            self.__end = time2  # pre-seed the cache
        else:
            time2_java = time2
        self.__tr = JavaTimeRange(time1_java, time2_java)

    def startTime(self):
        # Start of the range as an AbsTime; computed once, then cached.
        if not self.__start:
            self.__start = self.__asAbsTime(self.__tr.getStart())
        return self.__start

    def endTime(self):
        # End of the range as an AbsTime; computed once, then cached.
        if not self.__end:
            self.__end = self.__asAbsTime(self.__tr.getEnd())
        return self.__end

    def __asAbsTime(self, date):
        # Convert a Java Date (epoch milliseconds) to an AbsTime (seconds).
        if date is None:
            return None
        A = date.getTime()
        # NOTE(review): A / 1000 is integer division under Python 2/Jython;
        # under Python 3 it would produce a float -- confirm target runtime.
        return AbsTime.AbsTime(A / 1000);

    def duration(self):
        # Duration in seconds (Java side reports milliseconds); cached.
        if not self.__duration:
            self.__duration = self.__tr.getDuration() / 1000
        return self.__duration

    def toJavaObj(self):
        # Expose the wrapped Java TimeRange (used by all binary operations).
        return self.__tr

    def contains(self, timeOrTimeRange):
        # Accepts an AbsTime or a TimeRange; implicitly returns None for
        # any other argument type.
        if isinstance(timeOrTimeRange, AbsTime.AbsTime):
            return self.__tr.contains(timeOrTimeRange.javaDate())
        if isinstance(timeOrTimeRange, TimeRange):
            return self.__tr.contains(timeOrTimeRange.toJavaObj())

    def overlaps(self, timeRange):
        # Delegated predicate: do the two ranges share any time?
        return self.__tr.overlaps(timeRange.toJavaObj())

    def isAdjacentTo(self, timeRange):
        # Delegated predicate.
        return self.__tr.isAdjacentTo(timeRange.toJavaObj())

    # The combinators below return new Python wrappers around the Java
    # results.
    def join(self, timeRange):
        return TimeRange(self.__tr.join(timeRange.toJavaObj()))

    def intersection(self, timeRange):
        return TimeRange(self.__tr.intersection(timeRange.toJavaObj()))

    def gap(self, timeRange):
        return TimeRange(self.__tr.gap(timeRange.toJavaObj()))

    def span(self, timeRange):
        return TimeRange(self.__tr.span(timeRange.toJavaObj()))

    def combineWith(self, timeRange):
        return TimeRange(self.__tr.combineWith(timeRange.toJavaObj()))

    def isValid(self):
        return self.__tr.isValid()

    # Equality and ordering delegate to the Java object's equals/compareTo.
    def __eq__(self, other):
        return self.__tr.equals(other.toJavaObj())

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) < 0

    def __le__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) <= 0

    def __gt__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) > 0

    def __ge__(self, other):
        return self.__tr.compareTo(other.toJavaObj()) >= 0

    def __hash__(self):
        # XOR of start/end unix times, cached.  A computed hash of 0 is
        # recomputed on every call, which is harmless.
        if not self.__hash:
            self.__hash = self.startTime().unixTime() ^ self.endTime().unixTime()
        return self.__hash

    def __str__(self):
        return str(self.__tr)

    def __repr__(self):
        return str(self.__tr)
def javaTimeRangeListToPyList(timeRanges):
    """Convert a Java list of Java TimeRanges into a Python list of
    (startUnixTime, endUnixTime) tuples."""
    return [encodeJavaTimeRange(timeRanges.get(idx))
            for idx in range(timeRanges.size())]
def encodeJavaTimeRange(javaTimeRange):
    """Encode a Java TimeRange as a (startUnixTime, endUnixTime) tuple."""
    wrapped = TimeRange(javaTimeRange)
    return (wrapped.startTime().unixTime(), wrapped.endTime().unixTime())
def allTimes():
    """Return a Python TimeRange spanning the Java 'all times' range."""
    everything = JavaTimeRange.allTimes()
    return TimeRange(everything.getStart(), everything.getEnd())
def default():
    """Return a wrapper around a default-constructed Java TimeRange."""
    return TimeRange(JavaTimeRange())