text string | size int64 | token_count int64 |
|---|---|---|
import io
import os
import pandas as pd
import re
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
# Set Google API authentication
# Set Google API authentication
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "INSERT_FULL_KEY_FILE_PATH"
# ---------------------------------------------------------------------
# Retrieve labels for a batch of images and create a dataframe with one
# row per (image, label) pair, exported as JSON records.
# Folder where images are stored
ImageFolder = "INSERT_FULL_FOLDER_PATH"
# Accumulators, appended in lockstep: one entry per returned label
ImageID = []
Description = []
ImageLabels = pd.DataFrame()
# Instantiates a client
client = vision.ImageAnnotatorClient()
# Get labels for every image in the folder
for file in os.listdir(ImageFolder):
    filename = os.path.basename(file).split('.jpg')[0]  # Image ID (name without .jpg)
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original io.open(...) was never closed), and os.path.join so a
    # missing trailing separator on ImageFolder does not break the path.
    with io.open(os.path.join(ImageFolder, file), 'rb') as image_file:
        content = image_file.read()
    image = types.Image(content=content)
    response = client.label_detection(image=image)  # Gets response from API for image
    labels = response.label_annotations  # Get labels from response
    for label in labels:  # Store image ID and label text for each annotation
        ImageID.append(filename)
        Description.append(label.description)
# Put Image ID and label into data frame
ImageLabels["imageid"] = ImageID
ImageLabels["desc"] = Description
# NOTE: the original called ImageLabels.groupby(ImageID) and discarded the
# result — a no-op, removed here.
# to_json returns None when a path is given; Export kept for compatibility.
Export = ImageLabels.to_json(r'test2.json', orient='records')
| 1,580 | 460 |
# Converted from Elasticsearch Domain example located at:
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#d0e51519
from troposphere import Template, constants
from troposphere.elasticsearch import Domain, EBSOptions
from troposphere.elasticsearch import ElasticsearchClusterConfig
from troposphere.elasticsearch import SnapshotOptions
templ = Template()
templ.add_description('Elasticsearch Domain example')

# Cluster topology: two zone-aware data nodes plus three dedicated masters.
cluster_config = ElasticsearchClusterConfig(
    DedicatedMasterEnabled=True,
    InstanceCount=2,
    ZoneAwarenessEnabled=True,
    InstanceType=constants.ELASTICSEARCH_M3_MEDIUM,
    DedicatedMasterType=constants.ELASTICSEARCH_M3_MEDIUM,
    DedicatedMasterCount=3,
)

# 20 GiB gp2 EBS volume per node.
ebs_options = EBSOptions(
    EBSEnabled=True,
    Iops=0,
    VolumeSize=20,
    VolumeType="gp2",
)

# Wide-open access policy (example/demo only).
access_policy = {
    'Version': '2012-10-17',
    'Statement': [{
        'Effect': 'Allow',
        'Principal': {
            'AWS': '*'
        },
        'Action': 'es:*',
        'Resource': '*'
    }],
}

es_domain = templ.add_resource(Domain(
    'ElasticsearchDomain',
    DomainName="ExampleElasticsearchDomain",
    ElasticsearchClusterConfig=cluster_config,
    EBSOptions=ebs_options,
    SnapshotOptions=SnapshotOptions(AutomatedSnapshotStartHour=0),
    AccessPolicies=access_policy,
    AdvancedOptions={"rest.action.multi.allow_explicit_index": "true"},
))

print(templ.to_json())
| 1,556 | 455 |
def dataops():
    """Placeholder entry point for data operations; currently just logs to stdout."""
    print('doing data ops')
| 43 | 16 |
import time
class FritzScraperCargo(object):
    """Container pairing a scraped payload with the time it was created."""

    def __init__(self, cargo):
        self.cargo = cargo
        # Unix timestamp recording when this cargo object was built.
        self.timestamp = time.time()
| 133 | 47 |
from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('accounts:Login')
app_name = 'accounts'
urlpatterns = [
    # Login page at the app root.
    path('', views.account_login, name='Login'),
    path('logout/',views.logout_view,name='Logout'),
    # Dashboard profile pages.
    path('dashboard/profile/',views.user_profile_view, name='Userprofile'),
    path('dashboard/profile/change-password/',views.changepassword,name='Changepassword'),
    # User creation under the employees dashboard.
    path('dashboard/employees/create-user/', views.register_user_view,name='Register'),
]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
# Starts with a lowercase letter, followed by a sequence of lowercase letters
# and underscores.
# Starts with a lowercase letter, followed by a sequence of lowercase letters
# and underscores.
FUNCTION_PATTERN = re.compile(r'^[a-z][a-z_]*$')
# Properties share the same naming shape as functions.
PROPERTY_PATTERN = FUNCTION_PATTERN
# Starts with an uppercase letter, followed by a sequence of uppercase letters
# and underscores.
CONSTANT_PATTERN = re.compile(r'^[A-Z][A-Z_]*$')
# Like FUNCTION_PATTERN, but starts with 'get_' or 'set_'
GETTER_PATTERN = re.compile(r'^get_[a-z][a-z_]*$')
SETTER_PATTERN = re.compile(r'^set_[a-z][a-z_]*$')
# Starts with an uppercase letter, followed by a sequence of letters.
CLASS_PATTERN = re.compile(r'^[A-Z][A-Za-z]*$')
class AttributeNotFound(AttributeError):
    """Raised when no matching Qt attribute can be resolved for a name."""
class PropertyDescriptor(dict):
    """Marker mapping used to carry property accessor callables.

    Behaves exactly like a dict; the subclass exists so callers can
    distinguish a property descriptor from an ordinary attribute value.
    """
def _camelcase(name, capitalize=False):
# foo_bar_baz => fooBarBaz (or FooBarBaz if capitalize=True)
comps = name.split('_')
if capitalize:
return ''.join(c.title() for c in comps)
return ''.join(comps[:1] + [c.title() for c in comps[1:]])
def find_module_attribute(qt_module, name):
    """Resolve a Pythonic alias to an attribute of a Qt module.

    Candidate Qt names are collected in a fixed precedence order and the
    first one actually present on the module is returned. Raises
    AttributeNotFound when no candidate exists.
    """
    candidates = []
    if CONSTANT_PATTERN.match(name):
        # Constant macro name (e.g. VERSION_STR => QT_VERSION_STR)
        candidates.append('QT_' + name)
    if CLASS_PATTERN.match(name):
        # Class name (e.g. MutexLocker => QMutexLocker)
        candidates.append('Q' + name)
        # Some special cases (e.g. Signal => pyqtSignal)
        if qt_module.__name__.startswith('PyQt'):
            candidates.append('pyqt' + name)
    if GETTER_PATTERN.match(name):
        # Global function name (e.g. get_version => qVersion)
        candidates.append('q' + _camelcase(name[4:], capitalize=True))
    if FUNCTION_PATTERN.match(name):
        # Global function name (e.g. float_distance => qFloatDistance)
        candidates.append('q' + _camelcase(name, capitalize=True))
        # Macro name (e.g. return_arg => Q_RETURN_ARG)
        candidates.append('Q_' + name.upper())
        # Macro name (e.g. translate_noop => QT_TRANSLATE_NOOP)
        candidates.append('QT_' + name.upper())
    for qt_name in candidates:
        try:
            return getattr(qt_module, qt_name)
        except AttributeError:
            continue
    raise AttributeNotFound
def find_object_attribute(obj, name, old_getattribute):
    """Resolve a snake_case attribute alias on a Qt object instance.

    Tries, in order: a Qt meta-object property (returned as a
    PropertyDescriptor holding the read/write/reset callables), a
    getter-style method, a plain camelCase method, and an enum-style
    constant. Raises AttributeNotFound if nothing matches.
    """
    # Property name (e.g. object_name => objectName)
    if PROPERTY_PATTERN.match(name):
        qt_name = _camelcase(name)
        meta = obj.metaObject()
        # BUG FIX: look the property index up once instead of twice.
        idx = meta.indexOfProperty(qt_name)
        if idx >= 0:
            # Has matching Qt property. Build a Python property to match.
            qt_prop = meta.property(idx)
            descriptor = PropertyDescriptor()
            if qt_prop.isReadable():
                descriptor['fget'] = qt_prop.read
            if qt_prop.isWritable():
                descriptor['fset'] = qt_prop.write
            if qt_prop.isResettable():
                descriptor['fdel'] = qt_prop.reset
            return descriptor
    if GETTER_PATTERN.match(name):
        qt_name = _camelcase(name[4:])  # Strip "get_" prefix
        try:
            return old_getattribute(obj, qt_name)
        except AttributeError:
            pass
    if FUNCTION_PATTERN.match(name):  # Either a setter or a regular method.
        qt_name = _camelcase(name)
        try:
            return old_getattribute(obj, qt_name)
        except AttributeError:
            pass
    if CONSTANT_PATTERN.match(name):
        # Enum names (e.g. READ_WRITE => ReadWrite)
        qt_name = _camelcase(name, capitalize=True)
        try:
            return old_getattribute(obj, qt_name)
        except AttributeError:
            pass
    raise AttributeNotFound
| 4,222 | 1,428 |
from collections import defaultdict
import librosa
import os
import numpy as np
import h5py
class Audio:
    """One audio file plus lazily-loaded signal, feature, and token data.

    Caches are HDF5 files under the project's cache folder, one file per
    kind ('audio', 'features', 'tokens').
    """
    # Class-level defaults; per-instance values are set in __init__.
    filename = None
    project = None
    bucket = None
    tempo = None
    beats = None
    features = None
    tokens = None
    loaded_from_cache = False
    has_changed = False

    def __init__(self, filename, project):
        self.filename = filename
        self.project = project
        self.features = defaultdict()
        self.tokens = defaultdict()
        # Dirty flags: which caches need re-persisting.
        self.signal_has_changed = False
        self.feature_has_changed = False
        self.token_has_changed = False
        self.y = None   # audio signal (lazy-loaded)
        self.sr = None  # sample rate (lazy-loaded)

    def load(self):
        """Pre-populate features and tokens from the cache when enabled."""
        if self.project.cache_features:
            self.__load_features_from_cache()
        self.__load_tokens_from_cache()

    def add_feature(self, feature_name, feature):
        """Store a named feature and mark the feature cache dirty."""
        self.features[feature_name] = feature
        self.feature_has_changed = True

    def add_tokens(self, tokens_key, tokens):
        """Store tokens under a key and mark the token cache dirty."""
        self.tokens[tokens_key] = tokens
        self.token_has_changed = True

    def persist(self):
        """Write every dirty cache kind that the project has enabled."""
        if self.project.cache_features and self.feature_has_changed:
            self.persist_features()
        if self.project.cache_tokens and self.token_has_changed:
            self.persist_tokens()
        if self.project.cache_signal and self.signal_has_changed:
            self.persist_signal()

    def signal(self):
        """Return (y, sr), loading from cache or file on first access."""
        if self.y is None:
            self.y, self.sr = self.__load_signal()
        return (self.y, self.sr)

    def cleanup(self):
        """Drop the in-memory signal so it can be garbage-collected."""
        self.y = None
        self.sr = None

    def persist_features(self):
        """Write each feature as an HDF5 dataset."""
        self.__create_cache_folder()
        print('dumping features', self.filename)
        with h5py.File(self.cache_filename('features'), "w") as f:
            for key in self.features.keys():
                f.create_dataset(key, data=self.features[key])
        self.feature_has_changed = False

    def persist_tokens(self):
        """Write each token entry as an HDF5 file attribute.

        FIX: create the cache folder first, consistent with
        persist_features/persist_signal (the original omitted this and
        would fail when the folder did not yet exist).
        """
        self.__create_cache_folder()
        print('dumping tokens', self.filename)
        with h5py.File(self.cache_filename('tokens'), "w") as f:
            for key in self.tokens.keys():
                f.attrs[key] = self.tokens[key]
        self.token_has_changed = False

    def persist_signal(self):
        """Write the raw signal and its sample rate to the audio cache."""
        self.__create_cache_folder()
        print('dumping audio', self.filename)
        with h5py.File(self.cache_filename('audio'), "w") as f:
            f.create_dataset('y', data=self.y)
            f.attrs["sr"] = self.sr
        self.signal_has_changed = False

    def clean_cache(self, file_type_str):
        """Delete the cache file of the given kind, if present.

        BUG FIX: cache_filename_exists() was called without its required
        file_type_str argument, which raised TypeError on every call.
        """
        if self.cache_filename_exists(file_type_str):
            os.remove(self.cache_filename(file_type_str))

    def __load_signal(self):
        # Cache hit returns a non-empty tuple (truthy); otherwise decode the file.
        return self.__load_signal_from_cache() or self.__load_signal_from_file()

    def __load_signal_from_file(self):
        print('loading signal from file - %s' % self.filename)
        self.y, self.sr = librosa.load(self.filename)
        self.signal_has_changed = True
        return (self.y, self.sr)

    def __load_signal_from_cache(self):
        if not self.cache_filename_exists('audio'):
            return None
        print('loading signal from cache - %s' % self.filename)
        with h5py.File(self.cache_filename('audio'), 'r') as f:
            self.y = np.array(f['y'])
            self.sr = f.attrs["sr"]
            return (self.y, self.sr)

    def __load_features_from_cache(self):
        if not self.cache_filename_exists('features'):
            return
        with h5py.File(self.cache_filename('features'), 'r') as f:
            for k in f.keys():
                self.features[k] = np.array(f[k])

    def __load_tokens_from_cache(self):
        if not self.cache_filename_exists('tokens'):
            return
        with h5py.File(self.cache_filename('tokens'), 'r') as f:
            for k in f.attrs.keys():
                self.tokens[k] = f.attrs[k]
        self.token_has_changed = False

    def cache_filename(self, file_type_str):
        """Path of the HDF5 cache file for the given kind."""
        return self.__cache_folder() + ('/%s.hdf5' % file_type_str)

    def cache_filename_exists(self, file_type_str):
        return os.path.isfile(self.cache_filename(file_type_str))

    def __create_cache_folder(self):
        os.makedirs(self.__cache_folder(), exist_ok=True)

    def __cache_folder(self):
        # NOTE(review): plain concatenation — assumes project.cache_folder
        # ends with a path separator; confirm against callers.
        fld = self.project.cache_folder
        return fld + self.filename
| 3,917 | 1,332 |
import json
import os
import urllib.request, urllib.parse
from collections import OrderedDict
from typing import List, Union, Dict
class SOLR:
    """
    Wrapper for the SOLR retrieval engine.

    Each constructor argument may be passed as None to fall back to the
    corresponding SOLR_* environment variable, then to a hard default.
    """
    def __init__(
        self,
        host: str = "localhost",
        port: int = 8983,
        cookie: dict = None,
        collection: str = "trec",
        file_number: int = 100,
        ir_model: str = "DFR",
    ):
        self.host = host
        if self.host is None:
            self.host = os.environ.get("SOLR_HOST", "localhost")
        self.port = port
        if self.port is None:
            self.port = int(os.environ.get("SOLR_PORT", 8983))
        self.cookie = cookie
        self.collection = collection
        if self.collection is None:
            self.collection = os.environ.get("SOLR_COLLECTION", "trec")
        self.file_number = file_number
        if self.file_number is None:
            self.file_number = int(os.environ.get("SOLR_FILENUMBER", 100))
        self.ir_model = ir_model
        if self.ir_model is None:
            self.ir_model = os.environ.get("SOLR_IRMODEL", "DFR")

    def __rget(self, path: str = "select", params: Union[Dict, None] = None):
        """Issue a GET against the collection and return the response docs.

        BUG FIX: the old default path 'select?' combined with the '?'
        appended below produced a 'select??' URL, and params=None crashed
        on len(params); the default is now 'select' and params defaults
        to an empty dict.
        """
        url = f"http://{self.host}:{self.port}/solr/{self.collection}/{path}"
        params = params or {}

        def __format_param(key):
            # A scalar value is treated as a one-element list; repeated
            # keys become repeated query parameters.
            values = params[key]
            if type(values) is not list:
                values = [values]
            return "&".join(map(lambda value: key + "=" + str(value), values))

        if len(params) > 0:
            url = f"{url}?{'&'.join(map(__format_param, params.keys()))}"
        request = urllib.request.urlopen(url)
        return json.load(request)["response"]["docs"]

    def query(
        self,
        query: str,
        fields: List[str] = None,
        rows: int = 15,
        sort: str = "score asc",
    ):
        """Run a doctext query and return the matching documents."""
        params = {
            "fl": (
                urllib.parse.quote(" ".join(fields))
                if fields is not None
                # BUG FIX: stray trailing '&' removed from the default
                # field list (it produced 'fl=...&&q=...').
                else "docno%2Cscore%2Cdoctext"
            ),
            "q": f"doctext%3A({query})",
            "rows": f"{rows}",
            "sort": urllib.parse.quote(sort),
        }
        return self.__rget(path="select", params=params)
| 2,634 | 824 |
__author__ = 'tinglev@kth.se'
import os
import re
import logging
from slackclient import SlackClient
def init():
    """Create an authenticated Slack client, verify auth, and open RTM.

    Returns the connected SlackClient instance.
    """
    log = logging.getLogger(__name__)
    client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
    auth_response = client.api_call("auth.test")
    log.debug('Auth test response: %s', auth_response)
    log.debug('Bot ID is "%s"', auth_response["user_id"])
    client.rtm_connect(with_team_state=False, auto_reconnect=True)
    return client
def mention_to_user_id(mention):
    """Extract the user id from a Slack mention like '<@U123>', else None."""
    match = re.search(r'^<@(.+)>$', mention)
    return match.group(1) if match else None
def user_id_to_mention(user_id):
    """Wrap a Slack user id in mention syntax ('<@U123>')."""
    return '<@{}>'.format(user_id)
def get_rtm_messages(events):
    """Return only the RTM events whose type is 'message'."""
    return [event for event in events if event["type"] == "message"]
def message_is_command(message):
    """Parse a message for the bot trigger (BOT_TRIGGER env, default '!pingis').

    Returns (command_text, user, channel) when the message matches, and
    (None, None, None) otherwise (including for edited messages, which
    lack a top-level 'text' key).
    """
    # BUG FIX: define log before the try block — the original assigned it
    # inside try, so an exception raised before that line made the except
    # handler fail with NameError.
    log = logging.getLogger(__name__)
    trigger_text = os.environ.get('BOT_TRIGGER') or '!pingis'
    trigger_regex = r'^{0} (.+)'.format(trigger_text)
    try:
        matches = re.search(trigger_regex, message['text'])
        if matches and matches.group(1):
            return matches.group(1).strip(), message['user'], message['channel']
    except Exception as err:
        log.debug('Edited message ignored "%s". Error: "%s".', message, err)
    return (None, None, None)
def send_ephemeral(slack_client, channel, user, message, default_message=None):
    """Post an ephemeral message to a user in a channel.

    Falls back to default_message when message is falsy.
    """
    log = logging.getLogger(__name__)
    log.debug('Sending eph to ch "%s" user "%s" msg "%s"', channel, user, message)
    slack_client.api_call(
        "chat.postEphemeral",
        channel=channel,
        user=user,
        text=message or default_message,
    )
def get_user_info(slack_client, slack_user_id):
    """Fetch a user's profile via the Slack 'users.info' API call."""
    log = logging.getLogger(__name__)
    log.debug('Calling "users.info" on slack api')
    result = slack_client.api_call('users.info', user=slack_user_id)
    log.debug('Got user %s', result)
    return result
def get_user_list(slack_client):
    """Fetch the workspace member list via the Slack 'users.list' API."""
    log = logging.getLogger(__name__)
    log.debug('Calling "users.list" on slack api')
    return slack_client.api_call('users.list')
def get_user_from_user_list(user_list, user_id):
    """Find the member with the given id in a users.list response.

    Returns the member dict, or None when the list is missing or the id
    is not present.
    """
    log = logging.getLogger(__name__)
    if 'members' not in user_list:
        return None
    for member in user_list['members']:
        if 'id' in member and member['id'] == user_id:
            log.debug('Found user %s in user_list', user_id)
            return member
    return None
def get_user_image_url(user):
    """Return the 192px profile image URL from a users.info payload, or None."""
    image_version = 'image_192'
    log = logging.getLogger(__name__)
    if 'user' in user and 'profile' in user['user']:
        profile = user['user']['profile']
        if image_version in profile:
            log.debug('Found user image for user %s', user['user']['id'])
            return profile[image_version]
    return None
def send_message(slack_client, channel, message, default_message=None):
    """Post a message to a channel, falling back to default_message."""
    log = logging.getLogger(__name__)
    log.debug('Sending msg to ch "%s" msg "%s"', channel, message)
    response = slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        text=message or default_message,
    )
    log.debug('Response from api was: %s', response)
def send_block_message(slack_client, channel, blocks):
    """Post a Block Kit message (list of block dicts) to a channel."""
    log = logging.getLogger(__name__)
    log.debug('Sending block message to ch "%s" blocks "%s"', channel, blocks)
    response = slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        blocks=blocks,
    )
    log.debug('Response from api was: %s', response)
def rtm_read(slack_client):
    """Read pending events from the client's RTM websocket connection."""
    return slack_client.rtm_read()
| 3,773 | 1,242 |
""" PLT tools module imports to create a better module interface """
from aggregationtools.ep_curve import EPCurve, EPType
from aggregationtools.plt import PLT
from aggregationtools.elt import ELT | 196 | 53 |
""":mod:`gitconfig_parser.parser` -- Parser implementation
"""
from pyparsing import (
OneOrMore, restOfLine, Group, ZeroOrMore,
CharsNotIn, Suppress, Word, alphanums, Literal, pythonStyleComment)
def build_parser():
    """Build a pyparsing grammar for git-config style INI files.

    The result parses into named groups: 'sections' -> [('section',
    'properties' -> [(key, value), ...]), ...].
    """
    key = Word(alphanums).setResultsName('key')
    # Everything after '=' up to end of line, stripped of surrounding whitespace.
    value = restOfLine.setParseAction(
        lambda string, location, tokens: tokens[0].strip()
    ).setResultsName('value')
    property_ = Group(key + Suppress(Literal('=')) + value)
    properties = Group(OneOrMore(property_)).setResultsName('properties')
    # Section header: '[name]' — anything except ']' inside the brackets.
    section_name = (Suppress('[') + OneOrMore(CharsNotIn(']')) +
                    Suppress(']')).setResultsName('section')
    section = Group(section_name + properties)
    ini_file = ZeroOrMore(section).setResultsName('sections')
    # '#'-style comments are ignored anywhere.
    # NOTE(review): git config also allows ';' comments — not handled here.
    ini_file.ignore(pythonStyleComment)
    return ini_file
def parse_file(file_):
    """Parse a git-config style file object/path, requiring full consumption."""
    return build_parser().parseWithTabs().parseFile(file_, parseAll=True)
| 952 | 296 |
from chroma_core.models import Command
from chroma_core.models.host import ManagedHost
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
import mock
from tests.unit.chroma_api.chroma_api_test_case import ChromaApiTestCase
class TestCommandResource(ChromaApiTestCase):
    def test_host_lists(self):
        """Commands that take a list of hosts as an argument should have
        the host URIs converted to host IDs (for use with HostListMixin)."""
        from chroma_api.urls import api
        # Create two hosts that the command's job arguments will reference.
        hosts = []
        for i in range(0, 2):
            address = 'myserver_%d' % i
            host = ManagedHost.objects.create(
                address = address,
                fqdn = address,
                nodename = address)
            hosts.append(host)
        # Stub out the job scheduler client so no jobs actually run.
        with mock.patch("chroma_core.services.job_scheduler.job_scheduler_client.JobSchedulerClient.command_run_jobs",
                mock.Mock(return_value = Command.objects.create().id)):
            response = self.api_client.post("/api/command/", data={
                'message': "Test command",
                'jobs': [
                    {
                        'class_name': 'UpdateNidsJob',
                        'args': {'hosts': [api.get_resource_uri(h) for h in hosts]}
                    }
                ]
            })
            self.assertEqual(response.status_code, 201)
        # The API layer should have rewritten the 'hosts' URI list into
        # a 'host_ids' string before forwarding to the scheduler.
        host_ids = "[%s]" % ", ".join([str(h.id) for h in hosts])
        JobSchedulerClient.command_run_jobs.assert_called_once_with([{'class_name': 'UpdateNidsJob', 'args': {'host_ids': host_ids}}], 'Test command')
| 1,641 | 470 |
from django import forms
from problems.models import ExportPackage
class ExportForm(forms.ModelForm):
    """Form that creates an ExportPackage bound to a problem revision."""

    class Meta:
        model = ExportPackage
        fields = ('exporter', 'export_format',)

    def __init__(self, *args, **kwargs):
        # Context objects are injected by the view, not submitted as form data.
        self.problem = kwargs.pop('problem')
        self.revision = kwargs.pop('revision')
        self.creator = kwargs.pop('user')
        super(ExportForm, self).__init__(*args, **kwargs)

    def save(self, **kwargs):
        # Build the instance without hitting the DB, attach the injected
        # context, then persist it ourselves.
        package = super(ExportForm, self).save(commit=False)
        package.problem = self.problem
        package.commit_id = self.revision.commit_id
        package.creator = self.creator
        package.save()
        return package
| 743 | 216 |
class ValidationError(Exception):
    """Raised when schema validation fails.

    Carries the schema that was validated against and the mapping of
    field errors; the exception message is the stringified error map.
    """

    def __init__(self, schema: 'Schema', errors: dict):
        self.schema = schema
        self.errors = errors
        super().__init__(str(errors))
| 186 | 51 |
import socket
import sys
from pynput.keyboard import Key, Controller
import time
TCP_IP = sys.argv[1]
TCP_PORT = 5005
BUFFER_SIZE = 20  # Normally 1024, but we want fast response
keyboard = Controller()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
print("Server: ", TCP_IP)
# BUG FIX: initialise conn before the loop — the original referenced it in
# the except/finally blocks, raising NameError if interrupted before the
# first accept().
conn = None
while 1:
    try:
        print("Waiting...")
        conn, addr = s.accept()
        print('Connection address: ', addr)
        # One short command per connection; empty data ends the server.
        data = conn.recv(BUFFER_SIZE)
        if not data: break
        print("received data:", data,)
        # Translate commands into simulated arrow-key presses.
        if data == b'forward':
            keyboard.press(Key.right)
            keyboard.release(Key.right)
        elif data == b'backward':
            keyboard.press(Key.left)
            keyboard.release(Key.left)
    except KeyboardInterrupt:
        if conn:
            conn.close()
        print("W: interrupt received, stopping…")
        break
    finally:
        # clean up the per-iteration connection (guarded: may be None)
        if conn:
            conn.close()
# Read the 1-based start coordinates and the launch direction token.
xyw = input().split()
X = int(xyw[0]) - 1
Y = int(xyw[1]) - 1
# 9x9 board of characters, row-major (C[Y][X]).
C = [list(input()) for _ in range(9)]

# Map each direction token to a (dx, dy) unit step; this replaces eight
# near-identical if/elif branches in the original (unused flg removed).
DIRECTIONS = {
    'R': (1, 0), 'L': (-1, 0), 'U': (0, -1), 'D': (0, 1),
    'RU': (1, -1), 'RD': (1, 1), 'LU': (-1, -1), 'LD': (-1, 1),
}

ans = ''
if xyw[2] in DIRECTIONS:
    dx, dy = DIRECTIONS[xyw[2]]
    for _ in range(4):
        ans += C[Y][X]
        # Bounce each axis independently when sitting on a wall and
        # heading into it (equivalent to the original per-branch checks;
        # within 4 steps at most one wall per axis can be reached).
        if (X == 8 and dx > 0) or (X == 0 and dx < 0):
            dx = -dx
        if (Y == 8 and dy > 0) or (Y == 0 and dy < 0):
            dy = -dy
        X += dx
        Y += dy
print(ans)
| 1,991 | 847 |
# -*- coding: utf-8 -*-
# @Time : 2020/7/7 14:16
# @Author : CoderCharm
# @File : auth.py
# @Software: PyCharm
# @Desc :
"""
用户模块
"""
from datetime import datetime
from sqlalchemy import BIGINT, Column, DateTime, Integer, SmallInteger, VARCHAR
from app.api.db.baseClass import Base
from app.api.db.baseClass import gen_uuid
class AdminUser(Base):
    """
    Administrator account table.
    """
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(VARCHAR(32), default=gen_uuid, unique=True, comment="用户id")
    email = Column(VARCHAR(128), unique=True, index=True, nullable=False, comment="邮箱")
    phone = Column(VARCHAR(16), unique=True, index=True, nullable=True, comment="手机号")
    nickname = Column(VARCHAR(128), comment="管理员昵称")
    avatar = Column(VARCHAR(256), comment="管理员头像")
    hashed_password = Column(VARCHAR(128), nullable=False, comment="密码")
    is_active = Column(Integer, default=False, comment="邮箱是否激活 0=未激活 1=激活", server_default="0")
    role_id = Column(Integer, comment="角色表")
    __table_args__ = ({'comment': '管理员表'})
class AdminRole(Base):
    """
    Simple role table for administrators.
    """
    # NOTE(review): both id and role_id are primary_key=True, which creates
    # a composite primary key — confirm this is intended.
    id = Column(Integer, primary_key=True, autoincrement=True)
    role_id = Column(Integer, primary_key=True, index=True, comment="角色Id")
    role_name = Column(VARCHAR(64), comment="角色名字")
    permission_id = Column(BIGINT, comment="权限ID")
    re_mark = Column(VARCHAR(128), comment="备注信息")
    __table_args__ = ({'comment': '管理员角色'})
class MallUser(Base):
    """
    Shop customer account table.
    """
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(VARCHAR(32), default=gen_uuid, index=True, unique=True, comment="用户id")
    nickname = Column(VARCHAR(128), comment="用户昵称(显示用可更改)")
    username = Column(VARCHAR(128), comment="用户名(不可更改)")
    avatar = Column(VARCHAR(256), nullable=True, comment="用户头像")
    hashed_password = Column(VARCHAR(128), nullable=False, comment="密码")
    phone = Column(VARCHAR(16), unique=True, index=True, nullable=True, comment="手机号")
    gender = Column(SmallInteger, default=0, comment="性别 0=未知 1=男 2=女", server_default="0")
    register_time = Column(DateTime, default=datetime.now, comment="注册事件")
    last_login_time = Column(DateTime, default=datetime.now, comment="上次登录时间")
    last_login_ip = Column(VARCHAR(64), nullable=True, comment="上次登录IP")
    register_ip = Column(VARCHAR(64), nullable=True, comment="注册IP")
    weixin_openid = Column(VARCHAR(64), nullable=True, comment="微信openId")
    country = Column(VARCHAR(64), nullable=True, comment="国家")
    province = Column(VARCHAR(64), nullable=True, comment="省")
    city = Column(VARCHAR(64), nullable=True, comment="市")
    __table_args__ = ({'comment': '用户表'})
class MallAddress(Base):
    """
    Per-user delivery address table.
    """
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(VARCHAR(64), comment="用户昵称")
    user_id = Column(VARCHAR(32), comment="用户id")
    country_id = Column(Integer, comment="国家Id")
    province_id = Column(Integer, comment="省id")
    city_id = Column(Integer, comment="市id")
    district_id = Column(Integer, comment="区id")
    address = Column(VARCHAR(128), comment="详细地址")
    phone = Column(VARCHAR(64), comment="手机号")
    is_default = Column(SmallInteger, default=0, comment="是否默认地址", server_default="0")
    __table_args__ = ({'comment': '地址表'})
class MallSearchHistory(Base):
    """
    Search history records, one row per search.
    """
    id = Column(Integer, primary_key=True, autoincrement=True)
    keyword = Column(VARCHAR(64), comment="搜索关键词")
    search_origin = Column(SmallInteger, default=1, comment="搜索来源 1=小程序 2=APP 3=PC", server_default="1")
    user_id = Column(VARCHAR(32), index=True, comment="用户id")
    __table_args__ = ({'comment': '搜索记录'})
class MallSiteNotice(Base):
    """
    Site-wide notice messages with an active time window.
    """
    id = Column(Integer, primary_key=True, autoincrement=True)
    enabled = Column(SmallInteger, default=1, comment="是否开启 0=为开启 1=开启", server_default="1")
    content = Column(VARCHAR(256), comment="全局消息通知")
    start_time = Column(DateTime, comment="开始时间")
    end_time = Column(DateTime, comment="结束时间")
    __table_args__ = ({'comment': '站点消息'})
| 4,119 | 1,619 |
# -*- coding: utf-8 -*-
"""Implementation of CMSPluginBase class for ``cmsplugin-markdown``."""
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from djangocms_markdown.models import Markdown
class MarkdownPlugin(CMSPluginBase):
    """django CMS plugin that renders a Markdown model instance."""
    model = Markdown
    name = _('Markdown')
    render_template = 'djangocms_markdown/markdown.html'
    change_form_template = 'djangocms_markdown/change_form.html'
    def render(self, context, instance, placeholder):
        # Let the base class populate context, then expose the raw
        # markdown body to the template as 'text'.
        super(MarkdownPlugin, self).render(context, instance, placeholder)
        context['text'] = instance.body
        return context
plugin_pool.register_plugin(MarkdownPlugin)
| 729 | 227 |
from concurrent.futures import ProcessPoolExecutor
from storage import ClientStorage
from aiohttp import web, hdrs
import aiofiles
import tempfile
import asyncio
import logging
import shutil
import os
MAX_FILE_SIZE = 20 * 1024 * 1024  # per-file upload cap (20 MiB)
PROCESS_POOL_SIZE = 8
STATIC_PATH = '../client/dist'
TEMP_PATH = 'temp'
logger = logging.getLogger('Server')
logging.basicConfig(
    level=logging.CRITICAL,
    format='[%(levelname)s] %(name)s: %(message)s',
)
# NOTE(review): ClientStorage is assigned as a CLASS (no parentheses), so
# loop/pool below become class attributes shared by every instance —
# confirm this is intended rather than `clients = ClientStorage()`.
clients = ClientStorage
clients.loop = loop = asyncio.get_event_loop()
clients.pool = pool = ProcessPoolExecutor(PROCESS_POOL_SIZE)
app = web.Application()
routes = web.RouteTableDef()
# Index page
@routes.get('/')
async def index(_):
    """Serve the SPA entry point for the root URL."""
    return web.FileResponse(STATIC_PATH + '/index.html')
# Upload multiple files
@routes.post('/upload')
@routes.route('OPTIONS', '/upload')
async def upload(request: web.Request):
    """Accept a multipart upload of 'files[]' parts into a temp dir.

    Returns 200 with the new client storage uid, 400 when no files were
    sent, and 403 when any file exceeds MAX_FILE_SIZE.
    """
    if request.method == 'OPTIONS':
        # CORS preflight
        return web.Response(status=200)
    reader = await request.multipart()
    tempdir = tempfile.mkdtemp(dir=TEMP_PATH)
    files = []
    while True:
        field = await reader.next()
        if field is None: break
        if field.name != 'files[]': continue
        size = 0
        path = os.path.join(tempdir, str(len(files)))
        file = await aiofiles.open(path, mode='wb')
        while True:
            if size > MAX_FILE_SIZE:
                # BUG FIX: web.Response is not an exception, so the
                # original `raise web.Response(...)` crashed with a
                # TypeError; return the response (and close the open
                # file handle) instead.
                await file.close()
                shutil.rmtree(tempdir, ignore_errors=True)
                return web.Response(status=403, text='Too large file')
            chunk = await field.read_chunk()
            if not chunk: break
            size += len(chunk)
            await file.write(chunk)
        await file.flush()
        await file.close()
        files.append(path)
    if not files: return web.Response(status=400, text='No files')
    client = ClientStorage()
    future = client.handle_upload(files)
    asyncio.ensure_future(future, loop=loop)
    logging.critical('New client storage: ' + client.uid)
    return web.Response(status=200, text=client.uid)
@routes.post('/next')
async def get_next(request):
    """Return the current client's status and, when ready, the next jump.

    The request body may contain JSON {'time': ...}; a missing or
    malformed body defaults to time 0.
    """
    try:
        data = await request.json()
    # BUG FIX: bare `except:` also swallowed BaseExceptions such as
    # asyncio.CancelledError; catch Exception only.
    except Exception:
        data = {'time': 0}
    if not clients.clients: return web.HTTPNotFound()
    # Could get client by uid
    client = next(iter(clients.clients.values()))
    result = {
        'status': client.status,
        'ready': client.status == 'ready',
    }
    if client.status == 'ready':
        from_, to = client.next_jump(data['time'])
        result['from'], result['to'] = from_, to
    return web.json_response(data=result)
# Disable CORS globally
# NOTE(review): using `.append` as a decorator registers the handler on the
# signal but binds `on_prepare` to None (append returns None) — it works,
# but the name is dead afterwards.
@app.on_response_prepare.append
async def on_prepare(_, response):
    # Allow any origin, the methods we serve, and common request headers.
    response.headers[hdrs.ACCESS_CONTROL_ALLOW_ORIGIN] = '*'
    response.headers[hdrs.ACCESS_CONTROL_ALLOW_METHODS] = 'OPTIONS, GET, POST'
    response.headers[hdrs.ACCESS_CONTROL_ALLOW_HEADERS] = (
        'Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With'
    )
if __name__ == '__main__':
    # Cleanup temp dir
    shutil.rmtree(TEMP_PATH, ignore_errors=True)
    os.mkdir(TEMP_PATH)
    # Serve static & register routes
    routes.static('/', STATIC_PATH)
    app.add_routes(routes)
    # BUG FIX: os.getenv returns a string when PORT is set in the
    # environment; web.run_app expects an integer port.
    port = int(os.getenv('PORT', 5000))
    web.run_app(app, port=port)
| 3,226 | 1,075 |
import datetime
# Interactions for inpatient care
# "review_and_consultation",
# "bd_hypoglycaemic_ep",
# "bd_hyperglycaemic_ep",
# "bd_lower_limb_compl",
# "enhanced_independence",
# "retinal_procedure",
# "amputation"
# Inpatient interaction 1: Inpatient review and consultation (might take out if not in spell)
def review_and_consultation(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "review and consultation",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "review and consultation",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, PSSRU 2018-19
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 2: Hypoglycaemic episode bed day
def bd_hypoglycaemic_ep(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "hypoglycaemic ep bd",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "hypoglycaemic ep bd",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, PSSRU 2018-19
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 3: Hyperglycaemic episode bed day
def bd_hyperglycaemic_ep(patient, environment, patient_time):
encounter = {
"resource_type": "Encounter",
"name" : "hyperglycaemic ep bd",
"start": patient_time,
}
entry = {
"resource_type" : "Observation",
"name": "hyperglycaemic ep bd",
"start": encounter["start"] + datetime.timedelta(minutes=15),
"cost": 3053, # NEL long stay from PSSRU 2018-19 - to be updated
"glucose": -1, # dummy glucose impact, to be updated
"carbon": 5032, # carbon impact, PSSRU 2018-19
}
new_patient_record_entries = [encounter, entry]
next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
next_environment_id_to_time = {
2: datetime.timedelta(days=10), # TODO: from initial patient_time (not last)
30: datetime.timedelta(days=20),
40: datetime.timedelta(days=20),
}
update_data = {"new_patient_record_entries": new_patient_record_entries}
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
# Inpatient interaction 4: Lower limb complications bed day
def bd_lower_limb_ep(patient, environment, patient_time):
    """Record one lower-limb-complications bed day for the patient.

    Builds an Encounter at ``patient_time`` and an Observation 15 minutes
    later with cost/glucose/carbon impacts, and returns the next-environment
    transition probabilities and delays.

    Args:
        patient: patient state, returned unchanged.
        environment: current environment, returned unchanged.
        patient_time: datetime at which the encounter starts.

    Returns:
        Tuple of (patient, environment, update_data,
        next_environment_id_to_prob, next_environment_id_to_time).
    """
    encounter = dict(
        resource_type="Encounter",
        name="lower limb ep bd",
        start=patient_time,
    )
    observation = dict(
        resource_type="Observation",
        name="lower limb ep bd",
        start=patient_time + datetime.timedelta(minutes=15),
        cost=3053,  # NEL long stay from PSSRU 2018-19 - to be updated
        glucose=-1,  # dummy glucose impact, to be updated
        carbon=5032,  # carbon impact, to be updated
    )
    update_data = dict(new_patient_record_entries=[encounter, observation])
    next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
    next_environment_id_to_time = {
        2: datetime.timedelta(days=10),  # TODO: from initial patient_time (not last)
        30: datetime.timedelta(days=20),
        40: datetime.timedelta(days=20),
    }
    return (
        patient,
        environment,
        update_data,
        next_environment_id_to_prob,
        next_environment_id_to_time,
    )
# Inpatient interaction 5: Enhanced independence
def enhanced_indep(patient, environment, patient_time):
    """Record an enhanced-independence inpatient interaction.

    Appends an Encounter at ``patient_time`` and an Observation 15 minutes
    later (cost/glucose/carbon impacts) to the patient record, then returns
    the onward transition probabilities and delays.

    Args:
        patient: patient state, returned unchanged.
        environment: current environment, returned unchanged.
        patient_time: datetime at which the encounter starts.

    Returns:
        Tuple of (patient, environment, update_data,
        next_environment_id_to_prob, next_environment_id_to_time).
    """
    start = patient_time
    update_data = {
        "new_patient_record_entries": [
            {
                "resource_type": "Encounter",
                "name": "enhanced independence",
                "start": start,
            },
            {
                "resource_type": "Observation",
                "name": "enhanced independence",
                "start": start + datetime.timedelta(minutes=15),
                "cost": 3053,  # NEL long stay from PSSRU 2018-19 - to be updated
                "glucose": -1,  # dummy glucose impact, to be updated
                "carbon": 5032,  # carbon impact, to be updated
            },
        ]
    }
    next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
    next_environment_id_to_time = {
        2: datetime.timedelta(days=10),  # TODO: from initial patient_time (not last)
        30: datetime.timedelta(days=20),
        40: datetime.timedelta(days=20),
    }
    return (
        patient,
        environment,
        update_data,
        next_environment_id_to_prob,
        next_environment_id_to_time,
    )
# Inpatient interaction 6: Retinal procedure
def retinal_procedure(patient, environment, patient_time):
    """Record a retinal-procedure inpatient interaction.

    Creates an Encounter at ``patient_time`` and a derived Observation
    15 minutes later carrying the cost/glucose/carbon impacts, and returns
    the transition probabilities and delays for the next environments.

    Args:
        patient: patient state, returned unchanged.
        environment: current environment, returned unchanged.
        patient_time: datetime at which the encounter starts.

    Returns:
        Tuple of (patient, environment, update_data,
        next_environment_id_to_prob, next_environment_id_to_time).
    """
    encounter = {
        "resource_type": "Encounter",
        "name": "retinal procedure",
        "start": patient_time,
    }
    # The observation shares the encounter's name; only the other fields differ.
    observation = {
        **encounter,
        "resource_type": "Observation",
        "start": encounter["start"] + datetime.timedelta(minutes=15),
        "cost": 3053,  # NEL long stay from PSSRU 2018-19 - to be updated
        "glucose": -1,  # dummy glucose impact, to be updated
        "carbon": 5032,  # carbon impact, to be updated
    }
    update_data = {"new_patient_record_entries": [encounter, observation]}
    next_environment_id_to_prob = {2: 0.5, 30: 0.3, 40: 0.2}
    next_environment_id_to_time = {
        2: datetime.timedelta(days=10),  # TODO: from initial patient_time (not last)
        30: datetime.timedelta(days=20),
        40: datetime.timedelta(days=20),
    }
    return (
        patient,
        environment,
        update_data,
        next_environment_id_to_prob,
        next_environment_id_to_time,
    )
# Inpatient interaction 7: Amputation
def amputation(patient, environment, patient_time):
    """Record an amputation inpatient interaction.

    Adds an Encounter at ``patient_time`` plus an Observation 15 minutes
    later with cost/glucose/carbon impacts, then returns the probabilities
    and delays for moving to the candidate next environments.

    Args:
        patient: patient state, returned unchanged.
        environment: current environment, returned unchanged.
        patient_time: datetime at which the encounter starts.

    Returns:
        Tuple of (patient, environment, update_data,
        next_environment_id_to_prob, next_environment_id_to_time).
    """
    event_name = "amputation"
    admission = {"resource_type": "Encounter", "name": event_name, "start": patient_time}
    impacts = {
        "resource_type": "Observation",
        "name": event_name,
        "start": patient_time + datetime.timedelta(minutes=15),
        "cost": 3053,  # NEL long stay from PSSRU 2018-19 - to be updated
        "glucose": -1,  # dummy glucose impact, to be updated
        "carbon": 5032,  # carbon impact, to be updated
    }
    record_entries = [admission, impacts]
    transition_probs = {2: 0.5, 30: 0.3, 40: 0.2}
    transition_delays = {
        2: datetime.timedelta(days=10),  # TODO: from initial patient_time (not last)
        30: datetime.timedelta(days=20),
        40: datetime.timedelta(days=20),
    }
    return (
        patient,
        environment,
        {"new_patient_record_entries": record_entries},
        transition_probs,
        transition_delays,
    )
import time
import board
import busio
from digitalio import DigitalInOut, Direction # pylint: disable=unused-import
import adafruit_miniesptool
print("ESP32 Nina-FW")
uart = busio.UART(board.TX, board.RX, baudrate=115200, timeout=1)
resetpin = DigitalInOut(board.D5)
gpio0pin = DigitalInOut(board.D6)
esptool = adafruit_miniesptool.miniesptool(uart, gpio0pin, resetpin,
flashsize=4*1024*1024)
esptool.sync()
print("Synced")
print("Found:", esptool.chip_name)
if esptool.chip_name != "ESP32":
raise RuntimeError("This example is for ESP32 only")
esptool.baudrate = 912600
print("MAC ADDR: ", [hex(i) for i in esptool.mac_addr])
# Note: Make sure to use the LATEST nina-fw binary release!
esptool.flash_file("NINA_W102-1.3.1.bin",0x0,'3f9d2765dd3b7b1eab61e1eccae73e44')
esptool.reset()
time.sleep(0.5)
| 881 | 387 |
import sys, os
from utils.logger_setup import LoggerSetup
logger = LoggerSetup.get_logger()
logger.info("client.inst_builder package intialized")
| 148 | 44 |
birth_year = input('Birth year: ')
print(type(birth_year))
age = 2020 - int(birth_year)
print(type(age))
print(age)
| 122 | 57 |
#Import our configuration functions
from models import Film
from config import get_session
#Get instances for working with DB
session = get_session()
#***********************Working with db********************
#Update spcify row v1
film = session.query(Film).filter(Film.id == 3).one()
session.delete(film)
session.commit()
#Update spcify row v2
films = Film.__table__.delete().where(Film.id.in_([2, 4, 5]))
session.execute(films)
session.commit() | 451 | 141 |
import glob
import json
import pickle
import gzip
import os
import hashlib
import re
import bs4
import concurrent.futures
names = set([name.split('/').pop() for name in glob.glob('hrefs/*')])
size = len(names)
def _map(arg):
urls = set()
index, size, name = arg
print(index, '/', size, name)
try:
html = gzip.decompress(open(f'htmls/{name}', 'rb').read()).decode()
except Exception as ex:
return []
soup = bs4.BeautifulSoup(html)
for a in soup.find_all('a', href=True):
href = a.get('href')
href = re.sub(r'\?.*?', '', href)
href = '/'.join(filter(lambda x:'='not in x, href.split('/')))
#print(href)
if 'https://book.dmm.com' in href:
urls.add(href)
return urls
urls = set()
args = [(index, size, name) for index, name in enumerate(names)]
_map(args[0])
with concurrent.futures.ProcessPoolExecutor(max_workers=16) as exe:
for _urls in exe.map(_map, args):
for url in _urls:
urls.add(url)
print(urls)
open('urls.pkl.gz', 'wb').write(gzip.compress(pickle.dumps(urls)))
| 1,045 | 398 |
"""
Module that holds all information of a session of scrounger
"""
# custom module imports
from sys import path as _path
# config imports
from scrounger.utils.config import _SCROUNGER_HOME
class Session(object):
_name = ""
_rows, _columns = 128, 80
options = {}
global_options = {}
devices = {}
results = {}
exceptions = [] # unused
_available_modules = None
_module_instance = None
_current_module = None
_module_class = None
prompt = None
def __init__(self, name):
from os import popen, path
# helper functions
from scrounger.utils.general import execute
# used to find the available modules
import scrounger.modules
self._name = name
self._rows, self._columns = popen('stty size', 'r').read().split()
self._rows, self._columns = int(self._rows), int(self._columns)
if self._columns < 128: self._columns = 128
# need to add / to then replace it
modules_path = "{}/".format(scrounger.modules.__path__[0])
modules = execute("find {} -name '*.py'".format(modules_path))
self._available_modules = [
module.replace(modules_path, "").replace(".py", "")
for module in modules.split("\n")
if module and "__" not in module
]
# add custom modules
modules_path = "{}/modules/".format(_SCROUNGER_HOME)
modules = execute("find {} -name \"*.py\"".format(modules_path))
# add path to sys.path
_path.append(modules_path)
self._available_modules += [
module.replace(modules_path, "").replace(".py", "")
for module in modules.split("\n")
if module and "__" not in module
]
# fix for macos
self._available_modules = [
module[1:] if module.startswith("/") else module
for module in sorted(self._available_modules)
]
# public vars to be used by calling modules
self.options = {}
self.global_options = {
"debug": "False",
"device": "",
"output": "",
"verbose": "False"
}
self.devices = {}
self.results = {}
self.exceptions = [] # unused
self.prompt = None
# initialize private vars
self._module_instance = None
self._current_module = None
self._module_class = None
def modules(self):
"""
Returns the available modules
:return: returns a list with the available modules
"""
return self._available_modules
def back(self):
"""Returns to the main state"""
self._module_instance = None
self._current_module = None
self._module_class = None
def use(self, module):
self._current_module = module
if module.startswith("custom/"):
self._module_class = __import__("{}".format(
module.replace("/", ".")), fromlist=["Module"])
else:
self._module_class = __import__("scrounger.modules.{}".format(
module.replace("/", ".")), fromlist=["Module"])
if not hasattr(self._module_class, "Module"):
self._current_module = None
self._module_class = None
raise Exception("Missing `Module` class")
self._module_instance = self._module_class.Module()
if not hasattr(self._module_class.Module, "meta") or not hasattr(
self._module_instance, "options"):
self._module_instance = None
self._current_module = None
self._module_class = None
raise Exception("Missing required variables")
def module_options(self):
"""
Returns the options dict for the current module or None if no module
is active
:return: a dict with the required options
"""
if self._module_instance:
return self._module_instance.options
return None
def module(self):
"""
Returns the current active module or None if no module is active
:return: a str with the current module
"""
return self._current_module
def instance(self):
"""
Returns an instance with the current active module or None if no module
is active
:return: an object representing an inatance of the current active module
"""
return self._module_instance
def name(self):
"""
Returns the name of a session
:return: a str with the session name
"""
return self._name
def to_dict(self):
"""
Returns a dict representing the current sesssion
:return: a dict representing the session
"""
return {
"name": self._name,
"devices": [
{
"id": self.devices[device]["device"].device_id(),
"type": self.devices[device]["type"],
"no": device
} for device in self.devices
],
"results": self.results, # TODO: if object, need to reproduce it
"global": self.global_options,
"options": self.options,
"current": self._current_module,
"prompt": self.prompt
}
def __str__(self):
return "Session {}".format(self.name())
def load_sessions(filename):
"""
Loads a list of sessions from a file
:param str filename: the file path to load the sessions from
:return: a list of Session objects
"""
from scrounger.core.device import IOSDevice, AndroidDevice
from scrounger.utils.general import file_exists
from json import loads
if not file_exists(filename):
return []
with open(filename, "r") as fp:
content = fp.read()
sessions = []
try:
json_sessions = loads(content)
except Exception as e:
# error loading sessions files
return []
for json_session in json_sessions["sessions"]:
session = Session(json_session["name"])
for json_device in json_session["devices"]:
if json_device["type"] == "ios":
device = IOSDevice(json_device["id"])
else:
device = AndroidDevice(json_device["id"])
session.devices[json_device["no"]] = {
"device": device,
"type": json_device["type"]
}
session.results = json_session["results"]
session.global_options = json_session["global"]
session.options = json_session["options"]
if json_session["current"]:
session.use(json_session["current"])
session.prompt = json_session["prompt"]
sessions += [session]
return sessions
def save_sessions(sessions, filename):
"""
Saves a list of session into a file
:param list sessions: a list of Session objects
:param str filename: the filepath to save the sessions to
:return: nothing
"""
from json import dumps
with open(filename, "w") as fp:
fp.write(dumps(
{"sessions": [session.to_dict() for session in sessions]}
))
| 7,339 | 1,968 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for treegen.cnf_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from deepmath.treegen import cnf_model
from deepmath.treegen import cnf_model_test_lib
flags = tf.flags
FLAGS = flags.FLAGS
class CnfModelTest(tf.test.TestCase):
# From l102_finseq_1, in test0.jsonl
# v7_ordinal1(X1) | ~m1_subset_1(X1, k4_ordinal1())
tiny_expr = json.loads(
'''{"clauses": [{"positive": true, "params": [{"var": "X1"}], "pred":
"v7_ordinal1"}, {"positive": false, "params": [{"var": "X1"}, {"params":
[], "func": "k4_ordinal1"}], "pred": "m1_subset_1"}]}''')
# From l102_modelc_2, in test0.jsonl
huge_expr = json.loads(
'''{"clauses": [{"positive": true, "params": [{"params": [], "func":
"esk4_0"}, {"var": "X1"}, {"var": "X1"}, {"var": "X1"}], "pred":
"r5_modelc_2"}, {"positive": false, "equal": [{"params": [{"var": "X1"},
{"params": [{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var":
"X2"}, {"var": "X3"}], "func": "esk2_4"}], "func": "k1_funct_1"},
{"params": [{"var": "X1"}, {"params": [{"params": [], "func": "esk4_0"},
{"var": "X1"}, {"var": "X2"}, {"var": "X3"}], "func": "esk2_4"}], "func":
"k1_funct_1"}]}, {"positive": false, "params": [{"var": "X1"}, {"params":
[{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var": "X2"}, {"var":
"X3"}], "func": "esk2_4"}, {"params": [], "func": "esk4_0"}], "pred":
"epred2_3"}, {"positive": false, "params": [{"var": "X1"}], "pred":
"v7_ordinal1"}, {"positive": false, "params": [{"params": [{"params": [],
"func": "esk4_0"}, {"var": "X1"}, {"var": "X2"}, {"var": "X3"}], "func":
"esk2_4"}], "pred": "v3_modelc_2"}, {"positive": false, "params":
[{"params": [{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var":
"X2"}, {"var": "X3"}], "func": "esk2_4"}, {"params": [], "func":
"k5_numbers"}], "pred": "m2_finseq_1"}, {"positive": false, "params":
[{"params": [{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var":
"X2"}, {"var": "X3"}], "func": "esk2_4"}], "pred": "v1_modelc_2"},
{"positive": false, "params": [{"params": [], "func": "esk4_0"},
{"params": [], "func": "esk5_0"}, {"var": "X1"}], "pred": "r4_modelc_2"},
{"positive": false, "params": [{"var": "X1"}, {"params": [{"params":
[{"params": [], "func": "k9_modelc_2"}, {"params": [{"params": [], "func":
"esk4_0"}], "func": "u1_struct_0"}], "func": "k2_zfmisc_1"}], "func":
"k1_zfmisc_1"}], "pred": "m1_subset_1"}, {"positive": false, "params":
[{"var": "X1"}, {"params": [{"params": [{"params": [], "func":
"k15_modelc_2"}, {"params": [{"params": [], "func": "esk4_0"}], "func":
"u1_modelc_2"}], "func": "k2_zfmisc_1"}], "func": "k1_zfmisc_1"}], "pred":
"m1_subset_1"}, {"positive": false, "params": [{"var": "X1"}, {"params":
[], "func": "k9_modelc_2"}, {"params": [{"params": [], "func": "esk4_0"}],
"func": "u1_struct_0"}], "pred": "v1_funct_2"}, {"positive": false,
"params": [{"var": "X1"}, {"params": [], "func": "k15_modelc_2"},
{"params": [{"params": [], "func": "esk4_0"}], "func": "u1_modelc_2"}],
"pred": "v1_funct_2"}, {"positive": false, "params": [{"var": "X1"}],
"pred": "v1_funct_1"}, {"positive": false, "params": [{"var": "X1"}],
"pred": "v1_funct_1"}]}''')
def testSeqModelMemorizesTinyExpr(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=200,
extra_hparams='depth=1',
model_class=cnf_model.CNFSequenceModel)
def testSeqModelMemorizesTinyExprMaskedXent(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=200,
extra_hparams='depth=1,masked_xent=true',
model_class=cnf_model.CNFSequenceModel)
def testSeqModelWorksWithTinyHugeExpr(self):
cnf_model_test_lib.test_memorization(
self, [self.tiny_expr, self.huge_expr],
num_iterations=1,
model_class=cnf_model.CNFSequenceModel)
def testSeqModelWorksWithTinyHugeExprMaskedXent(self):
cnf_model_test_lib.test_memorization(
self, [self.tiny_expr, self.huge_expr],
num_iterations=1,
extra_hparams='masked_xent=true',
model_class=cnf_model.CNFSequenceModel)
def testTreeModelMemorizesTinyExprStdFixedZ(self):
cnf_model_test_lib.test_memorization(self, self.tiny_expr)
def testTreeModelMemorizesTinyExprStdVae(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='objective=vae,min_kl_weight=1')
def testTreeModelMemorizesTinyExprStdIwaeMcSamples2(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='objective=iwae,min_kl_weight=1,mc_samples=2')
def testTreeModelMemorizesTinyExprStdVaeMix(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='objective=vae_mix,batch_size=3',
num_iterations=150)
def testTreeModelMemorizesTinyExprAuxLstmFixedZ(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='model_variants=[aux_lstm]')
def testTreeModelMemorizesTinyExprUncondSibFixedZ(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='model_variants=[uncond_sib]')
def testTreeModelMemorizesTinyExprGatedSigmoidFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated],gate_type=sigmoid')
def testTreeModelMemorizesTinyExprGatedSoftmaxFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated],gate_type=softmax')
def testTreeModelMemorizesTinyExprGatedTiedFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated],gate_tied=true')
def testTreeModelMemorizesTinyExprAuxLstmUncondSibFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[aux_lstm,uncond_sib]')
def testTreeModelMemorizesTinyExprAuxLstmVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=80,
extra_hparams='model_variants=[aux_lstm],objective=vae,min_kl_weight=1')
def testTreeModelMemorizesTinyExprAuxLstmGatedUncondSibFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[aux_lstm,gated,uncond_sib]')
def testTreeModelMemorizesTinyExprGatedUncondSibVae(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='model_variants=[gated,uncond_sib]')
def testTreeModelMemorizesTinyExprAuxLstmGatedLayerNormUncondSibVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[aux_lstm,gated,layer_norm,uncond_sib]')
def testTreeModelMemorizesTinyExprGatedLayerNormUncondSibVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated,layer_norm,uncond_sib]')
def testTreeModelWorksWithTinyExprTanhMostVariationssVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=1,
extra_hparams='model_variants=[gated,layer_norm,rev_read,uncond_sib],'
'act_fn=tanh,objective=vae,min_kl_weight=1')
def testTreeModelMemorizesTinyExprMostVariationsDeepFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated,layer_norm,rev_read,uncond_sib],'
'highway_layers=5,op_hidden=1')
if __name__ == '__main__':
tf.test.main()
| 8,703 | 3,352 |
# -*- coding: utf-8 -*-
import os
import sys
import io
import importlib
import math
from typing import Dict, Iterable, List, Optional, Tuple
from games.game_types import Move, Player
from games.game_types import Point
from games.game_types import game_name_dict
from games.game_types import game_state_dict
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')
COLS = 'ABCDEFGHJKLMNOPQRSTUVWXYZ'
EMPTY = 0
STONE_TO_CHAR = {
EMPTY: '━╋━',
Player.black.value: ' ○ ',
Player.white.value: ' ● ',
}
def get_rule_constructor(game_name: str, rule_name: str):
module = importlib.import_module(f'games.{game_name_dict[game_name]}.rule')
constructor = getattr(module, rule_name)
return constructor
def get_game_state_constructor(name: str):
module = importlib.import_module(f'games.{game_name_dict[name]}.{game_name_dict[name]}_game_state')
constructor = getattr(module, game_state_dict[name])
return constructor
def print_turn(game_state) -> None:
print(f'{game_state.player.name} turn!')
sys.stdout.flush()
def print_move(player_move: Move) -> None:
if player_move is not None:
player = player_move[0]
move = player_move[1]
if move.is_pass:
move_str = 'passes'
else:
move_str = '%s%d' % (COLS[move.point.col], move.point.row + 1)
print('%s %s' % (player, move_str))
sys.stdout.flush()
def print_board(board) -> None:
board_size = board.get_board_size()
for row in range(board_size - 1, -1, -1):
bump = " " if row <= board_size else ""
line = []
for col in range(0, board_size):
stone = board.get(Point(row=row, col=col))
line.append(STONE_TO_CHAR[stone])
print('%s%2d %s' % (bump, row + 1, ''.join(line)))
print(' ' + ' '.join(COLS[:board_size]))
sys.stdout.flush()
def print_visit_count(visit_counts: Optional[Iterable[int]]) -> None:
if visit_counts is not None:
board_size = int(math.sqrt(len(visit_counts)))
for row in range(board_size - 1, -1, -1):
bump = " " if row <= board_size else ""
print('\n%s%2d' % (bump, row + 1), end='')
for col in range(0, board_size):
visit_count = visit_counts[row * board_size + col]
print('%4d ' % (visit_count), end='')
print('')
print(' ' + ' '.join(COLS[:board_size]))
sys.stdout.flush()
def print_winner(winner: Player) -> None:
if winner is Player.both:
print("DRAW!!!")
else:
print(winner.name, "WINS!!!")
sys.stdout.flush()
def point_from_coords(coords: Tuple[int, int]) -> Point:
col = COLS.index(coords[0])
row = int(coords[1:]) - 1
return Point(row=row, col=col)
def is_on_grid(point: Point, board_size: int) -> bool:
"""[summary]
check point is on grid
Args:
point (Point): [description]
board_size (int): Size of board.
Returns:
bool: Is point on board.
"""
return 0 <= point.row < board_size and 0 <= point.col < board_size
def get_agent_filename(game_name: str, version: int, postfix: str = "", prefix: str = "") -> str:
cur_file_path = os.path.abspath(__file__)
project_path = os.path.dirname(os.path.dirname(cur_file_path))
dir_path = os.path.join(project_path, f'trained_models/{game_name}')
file_name = f'{postfix}-v{version}{prefix}.pth'
os.makedirs(dir_path, exist_ok=True)
return os.path.join(dir_path, file_name)
def copy_list(input_list: List) -> List:
ret = input_list.copy()
for idx, item in enumerate(ret):
ret[idx] = item
return ret
def copy_dict(input_dict: Dict) -> Dict:
ret = input_dict.copy()
for key, value in ret.items():
ret[key] = value
return ret
| 3,905 | 1,398 |
DEFAULT_BAUDRATE = 38400
import sys
if (hasattr(sys, 'implementation') and
sys.implementation.name == 'micropython'):
# if using pyBoard
from pyb import UART as uart_base
else:
from serial import Serial as uart_base
from obdlib.logging import logger
class UART(object):
def __init__(self):
self.bus_name = uart_base.__name__
self.bus = None
self.map = {}
def connection(self, port, baudrate=DEFAULT_BAUDRATE):
try:
self.bus = uart_base(port, baudrate)
self._mapping()
except Exception as err:
# logging exception
logger.error(err)
return None
return self
def __getattr__(self, item):
def args_wrapper(*args, **kwargs):
try:
response = getattr(self.bus, item)(*args, **kwargs)
except AttributeError:
response = self._invoke_mapping(item, *args, **kwargs)
return response
return args_wrapper
def _invoke_mapping(self, method, *args, **kwargs):
try:
item = self.map[self.bus_name][method]
return getattr(self.bus, item)(*args, **kwargs) if item else None
except KeyError:
raise Exception(
"Unregistered method or attribute {}".format(method))
def _mapping(self):
self.map = {
"UART": {
"close": "deinit",
"flushInput": "",
"flushOutput": ""
},
}
| 1,545 | 432 |
import sys
import subprocess
from collections import defaultdict
nested_dict = lambda: defaultdict(nested_dict)
# arguments
if len(sys.argv) <= 1:
sys.exit("Usage: combineTE.py [reformatted .gff] [gap size, default=150]")
gff = sys.argv[1]
# quick check number of line of the file
sh = subprocess.run(['wc', '-l',gff], stdout=subprocess.PIPE)
totalline = str(int(sh.stdout.split()[0]))
cnt = 0
d = nested_dict()
lastchrom = ""
# Progress
dcnt = 0
# Check number of row of header
with open(gff, "r") as f:
for line in f:
cnt += 1
if not line.startswith("#"):
cnt -= 1
break
print("##gff-version 3")
print("##repeatcraft")
with open(gff, "r") as f:
for i in range(cnt):
next(f)
for line in f:
# Progress
dcnt += 1
sys.stderr.write("\rProgress:" + str(dcnt) + "/"+ totalline+ "...")
col = line.rstrip().split("\t")
# Extract attribute
cattrD = {}
cattr = col[8].split(";")
for i in cattr:
k, v = i.split("=")
cattrD[k] = v
cattrD["Tstart"] = int(cattrD["Tstart"])
cattrD["Tend"] = int(cattrD["Tend"])
# if changing to the last column, need to print the what havn't print out (lastcol for all families in last chrom)
if col[0] != lastchrom:
if lastchrom != "":
#print("new chrom, print remaining lastcol") # debug
for family in d[lastchrom]:
print(*d[lastchrom][family]["lastcol"],sep="\t")
lastchrom = col[0] # Update lastcol
if d[col[0]][cattrD["ID"]]: # not the first family on this chrom
#print("not first family") # debug
if (int(col[3]) - d[col[0]][cattrD["ID"]]["lastend"]) > 150 or col[0] != d[col[0]][cattrD["ID"]]["lastcol"][
0]:
#print("larger than 150") # debug
# don't need to group the two records
# print the lastest record of the lastest family group without adding new label
col2print = d[col[0]][cattrD["ID"]]["lastcol"]
print(*col2print, sep = "\t")
# update the dictionary
d[col[0]][cattrD["ID"]]["lastcol"] = col
d[col[0]][cattrD["ID"]]["lastend"] = int(col[4])
d[col[0]][cattrD["ID"]]["Tstart"] = cattrD["Tstart"]
d[col[0]][cattrD["ID"]]["Tend"] = cattrD["Tend"]
d[col[0]][cattrD["ID"]]["lastTElabel"] = False
else:
#print("less than 150") # debug
# Is the lastcol carrying a TEgroup label?
if d[col[0]][cattrD["ID"]]["lastTElabel"]:
#print("last one have label") # debug
# check consensus information (all in last group)
groupnumber = d[col[0]][cattrD["ID"]]["groupcnt"]
o = False
for i in range(cattrD["Tstart"],cattrD["Tend"]):
if i in list(d[col[0]][cattrD["ID"]][groupnumber]):
o = True
break
if o: # overlap with some copies in the group, break the grouping
#print("consensus overlap") # debug
print(*d[col[0]][cattrD["ID"]]["lastcol"], sep="\t")
d[col[0]][cattrD["ID"]]["lastcol"] = col
d[col[0]][cattrD["ID"]]["lastend"] = int(col[4])
d[col[0]][cattrD["ID"]]["Tstart"] = cattrD["Tstart"]
d[col[0]][cattrD["ID"]]["Tend"] = cattrD["Tend"]
d[col[0]][cattrD["ID"]]["lastTElabel"] = True
else:
#print("consensus pass") # debug
# print the lastcol directly
print(*d[col[0]][cattrD["ID"]]["lastcol"], sep="\t")
# Update consensus coverage
for i in range(cattrD["Tstart"],cattrD["Tend"]):
d[col[0]][cattrD["ID"]][groupnumber][i] = 1
# Update last col using label from last label
attr = ";TEgroup=" + col[0] + "|" + cattrD["ID"] + "|" + str(d[col[0]][cattrD["ID"]]["groupcnt"])
col[8] = col[8] + attr
d[col[0]][cattrD["ID"]]["lastcol"] = col
d[col[0]][cattrD["ID"]]["lastend"] = int(col[4])
d[col[0]][cattrD["ID"]]["Tstart"] = cattrD["Tstart"]
d[col[0]][cattrD["ID"]]["Tend"] = cattrD["Tend"]
d[col[0]][cattrD["ID"]]["lastTElabel"] = True
else: # the lastcol is the first element is this group, just need to check if last and current copies overlap
#print("last copy no label") # debug
o = min(d[col[0]][cattrD["ID"]]["Tend"], cattrD["Tstart"]) - max(d[col[0]][cattrD["ID"]]["Tstart"], cattrD["Tend"])
if o > 0: # Consensus position overlap, don't need to group them just print
#print("consensus overlap") # debug
print(*d[col[0]][cattrD["ID"]]["lastcol"], sep="\t")
d[col[0]][cattrD["ID"]]["lastcol"] = col
d[col[0]][cattrD["ID"]]["lastend"] = int(col[4])
d[col[0]][cattrD["ID"]]["Tstart"] = cattrD["Tstart"]
d[col[0]][cattrD["ID"]]["Tend"] = cattrD["Tend"]
d[col[0]][cattrD["ID"]]["lastTElabel"] = True
else: # can open a new group now and update the attr of the last and current copies
#print("consensus pass") # debug
# Make a new label for lastcol and current col
if d[col[0]][cattrD["ID"]]["groupcnt"]: # Is there a previous family group in the same chrom?
d[col[0]][cattrD["ID"]]["groupcnt"] = d[col[0]][cattrD["ID"]]["groupcnt"] + 1
else:
d[col[0]][cattrD["ID"]]["groupcnt"] = 1
# Mark down the consensus coverage
groupnumber = d[col[0]][cattrD["ID"]]["groupcnt"]
for i in range(d[col[0]][cattrD["ID"]]["Tstart"],d[col[0]][cattrD["ID"]]["Tend"]):
d[col[0]][cattrD["ID"]][groupnumber][i] = 1
for i in range(cattrD["Tstart"],cattrD["Tend"]):
d[col[0]][cattrD["ID"]][groupnumber][i] = 1
# Print lastcol
lastcol2print = d[col[0]][cattrD["ID"]]["lastcol"]
attr = ";TEgroup=" + col[0] + "|" + cattrD["ID"] + "|" + str(d[col[0]][cattrD["ID"]]["groupcnt"])
lastcol2print[8] = lastcol2print[8] + attr
print(*lastcol2print,sep="\t")
# Update lastcol
col[8] = col[8] + attr
d[col[0]][cattrD["ID"]]["lastcol"] = col
d[col[0]][cattrD["ID"]]["lastend"] = int(col[4])
d[col[0]][cattrD["ID"]]["Tstart"] = cattrD["Tstart"]
d[col[0]][cattrD["ID"]]["Tend"] = cattrD["Tend"]
d[col[0]][cattrD["ID"]]["lastTElabel"] = True
else: # first family on this chrom
#print("first element on this chrom") # debug
d[col[0]][cattrD["ID"]]["lastcol"] = col
d[col[0]][cattrD["ID"]]["lastend"] = int(col[4])
d[col[0]][cattrD["ID"]]["Tstart"] = cattrD["Tstart"]
d[col[0]][cattrD["ID"]]["Tend"] = cattrD["Tend"]
d[col[0]][cattrD["ID"]]["lastTElabel"] = False
# print the last record for all families from the last chrom
#print("print remaining lastcol") # debug
for family in d[lastchrom]:
print(*d[lastchrom][family]["lastcol"],sep="\t")
| 6,411 | 2,995 |
import numpy as np
import time
import sklearn as sk
from sklearn import metrics
import gc
import sys
import pickle
init_t = time.time()
at = sys.argv[1]
ref_envs = np.load('./data/train_envs.npy',
allow_pickle=True).item()[at]
reps_dict = np.load('./data/red_reps_dict.npy', allow_pickle=True)
attypes = np.load('./data/attypes.npy', allow_pickle=True)
gc.collect()
t1 = time.time()
with open('progress_gen__diffs_{}.txt'.format(at), 'a') as file:
file.write('Reps loaded, time: {} \n'.format(time.time() - t1))
atom_projections = []
with open('progress_gen__diffs_{}.txt'.format(at), 'a') as file:
file.write('Starting train products at {} \n'.format(time.time() - init_t))
atom_diffs = []
t1 = time.time()
for i in range(len(reps_dict[:])):
repd = reps_dict[i]
rep_at_envs = repd[at]
if len(rep_at_envs) > 0:
atom_diff = sk.metrics.pairwise_distances(
rep_at_envs, ref_envs, n_jobs=-1).T
else:
atom_diff = []
atom_diffs.append(atom_diff)
if i % 100 == 0:
with open('progress_gen__diffs_{}.txt'.format(at), 'a') as file:
file.write(' Train mol {}, cost: {} \n'.format(
i, time.time() - t1))
t1 = time.time()
with open('./euclideans/{}_diffs.npy'.format(at), "wb") as fp:
pickle.dump(atom_diffs, fp)
| 1,352 | 537 |
from json import loads
import argparse
from urllib.request import urlopen
from pprint import pprint
from datetime import datetime
def main():
    """CLI entry point: parse the date range, fetch bitcoin market data and
    print the longest bearish trend, the highest-volume day, and the optimal
    buy/sell dates for the period."""
    parser = argparse.ArgumentParser()
    parser.add_argument("start_date", help="Start date in format DD.MM.YYYY",
                        type=lambda s: datetime.strptime(s, '%d.%m.%Y'))
    parser.add_argument("end_date", help="End date in format DD.MM.YYYY",
                        type=lambda s: datetime.strptime(s, '%d.%m.%Y'))
    args = parser.parse_args()
    if args.start_date > args.end_date:
        parser.error("start_date must be before end_date")

    # Can be changed to use different coins
    coin = "bitcoin"

    coin_history = get_price_history(args.start_date, args.end_date, coin)
    prices = parse_data_history(coin_history, args.start_date, args.end_date, "prices")
    volumes = parse_data_history(coin_history, args.start_date, args.end_date, "total_volumes")

    bear = get_longest_bear(prices)
    top_volume = get_highest_volume(volumes)
    buy_sell = get_optimal_investment(prices)

    print(f"Longest bearish trend: {bear[0]}")
    print(
        f"Highest volume: {top_volume[0]} on "
        f"{top_volume[1].strftime('%d.%m.%Y')}"
    )
    if buy_sell[0] == 0:
        print(f"Optimal investment dates: do not invest in this period")
    else:
        print(
            f"Optimal investment dates: buy on "
            f"{buy_sell[0].strftime('%d.%m.%Y')} and sell on "
            f"{buy_sell[1].strftime('%d.%m.%Y')}"
        )
def get_highest_volume(volume_history):
    """Returns the date with highest trading volume.

    Return is format: [highest volume, date with highest volume].
    Timestamps in volume_history are milliseconds since the epoch.
    """
    best_volume, best_ts = 0, 0
    for ts, volume in volume_history.items():
        if volume > best_volume:
            best_volume, best_ts = volume, ts
    return [best_volume, datetime.fromtimestamp(best_ts / 1000)]
def get_optimal_investment(price_history):
    """Returns optimal dates to buy and sell the given coin.

    price_history maps millisecond timestamps to prices.  Candidate sell
    days are scanned from the highest price downwards and candidate buy
    days from the lowest price upwards; branches that cannot beat the best
    profit found so far are pruned.

    Return is format: [buy datetime, sell datetime], or [0, 0] when no
    profitable trade exists in the period.

    (Cleanup: the unused locals ``complete``, ``current_time`` and
    ``current_iteration`` from the original were removed.)
    """
    # Timestamps ordered by ascending price.
    sorted_history = sorted(price_history, key=price_history.get)
    largest_profit = 0
    largest_profit_dates = [0, 0]
    # Loop from largest price down
    for current_end_time in reversed(sorted_history):
        current_end_price = price_history[current_end_time]
        if (current_end_price - price_history[sorted_history[0]]
                < largest_profit):
            # Even buying at the global minimum cannot beat the best deal:
            # no later (cheaper) sell price can either, so stop.
            break
        # Loop from smallest price up
        for current_start_time in sorted_history:
            current_start_price = price_history[current_start_time]
            if current_end_price - current_start_price > largest_profit:
                if current_end_time > current_start_time:
                    # Found a buy day before this sell day: best possible
                    # deal for the current sell price.
                    largest_profit = current_end_price - current_start_price
                    largest_profit_dates = [
                        datetime.fromtimestamp(current_start_time / 1000),
                        datetime.fromtimestamp(current_end_time / 1000),
                    ]
                    break
            else:
                # Remaining buy prices only get higher; profit can only shrink.
                break
    return largest_profit_dates
def get_longest_bear(price_history):
    """Get longest bear trend length and dates.

    Return is format: [longest bear length, start date, end date].
    A bear day is one whose price is strictly lower than the previous day's.
    """
    prev_price = 0
    best_len, best_start, best_end = 0, 0, 0
    run_start, run_len = 0, 0
    for ts, price in price_history.items():
        if price < prev_price:
            run_len += 1
        else:
            # Price did not fall: this day restarts a potential bear run.
            run_start, run_len = ts, 0
        if run_len > best_len:
            best_len, best_start, best_end = run_len, run_start, ts
        prev_price = price
    return [
        best_len,
        datetime.fromtimestamp(best_start / 1000),
        datetime.fromtimestamp(best_end / 1000),
    ]
def parse_data_history(data, start_date, end_date, data_type):
    """Parse price history to include only datapoints closest to midnight UTC."""
    DAY_MS = 86400000
    datapoints = {}
    series = data[data_type]
    prev = series[0]
    expected = int(datetime.timestamp(start_date) * 1000)
    end_ts = int(datetime.timestamp(end_date) * 1000)
    for point in series:
        ts, value = point[0], point[1]
        if ts == expected:
            # Exact midnight sample: keep it and expect the next day.
            datapoints[ts] = value
            expected += DAY_MS
        elif ts > expected:
            # We stepped past midnight: keep whichever of the two
            # surrounding samples lies closer to it.
            if abs(ts - expected) < abs(prev[0] - expected):
                datapoints[ts] = value
            else:
                datapoints[prev[0]] = prev[1]
            expected += DAY_MS
            if end_ts < expected:
                break
        prev = point
    return datapoints
def get_price_history(start_date, end_date, coin):
    """Get price history for given coin and date period.

    Queries the CoinGecko ``market_chart/range`` endpoint and returns the
    decoded JSON response as a dict (keys include "prices" and
    "total_volumes").
    """
    # 3600 added to end date in order to
    # make sure end date midnight is also included
    with urlopen(f"https://api.coingecko.com/api/v3/coins/{coin}"
                 f"/market_chart/range?vs_currency=eur&from="
                 f"{str(datetime.timestamp(start_date) - 3600)}&to="
                 f"{str(datetime.timestamp(end_date) + 3600)}") as response:
        response_content = response.read()
    # Bug fix: the original called .decode('utf-8') and discarded the result
    # (str/bytes are immutable); decode explicitly before parsing.
    json_response = loads(response_content.decode('utf-8'))
    return json_response
# Run the CLI entry point only when executed as a script.
if __name__ == '__main__':
    main()
| 6,540 | 1,987 |
import urllib3

# The rating page is fetched with verify=False below; silence the
# InsecureRequestWarning urllib3 would otherwise emit on every request.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Admissions rating page to scrape (ITMO bachelor programme 261).
url = "https://abit.itmo.ru/bachelor/rating_rank/all/261/"
# Applicant to highlight in the generated spreadsheet.
required_name = 'Шараев Павел Ильдарович'
def write_to_file(result):
    """Write the ranked applicant rows to ``result.xlsx``.

    Rows get a global number and a per-admission-condition number; condition
    groups alternate between gray and white backgrounds, and the row whose
    full name equals ``required_name`` is highlighted in green.
    """
    global required_name
    import xlsxwriter
    with xlsxwriter.Workbook('result.xlsx') as workbook:
        worksheet = workbook.add_worksheet('Таблица')
        # Header row: column titles matching the source table.
        worksheet.write_row(0, 0, ['Номер', 'Номер в конкурсной группе', 'Условие поступления', '№ п/п',
                                   'Номер заявления', 'ФИО', 'Вид', 'М', 'Р', 'И', 'ЕГЭ+ИД', 'ЕГЭ', 'ИД',
                                   'Наличие согласия на зачисление', 'Преимущественное право', 'Олимпиада', 'Статус'])
        data_format1 = workbook.add_format({'bg_color': '#16de69'})  # green highlight
        gray = workbook.add_format({'bg_color': '#dbdbdb'})
        white = workbook.add_format({'bg_color': '#ffffff'})
        current_color = gray
        last_color = white
        last_cond = result[0][0]  # admission condition of the current group
        j = 1                     # row counter within the current group
        for i in range(len(result)):
            if i > 0 and result[i][0] == last_cond:
                j += 1
            else:
                # New condition group: restart the per-group counter and
                # swap the alternating background colors.
                j = 1
                current_color, last_color = last_color, current_color
                last_cond = result[i][0]
            # result[i][3] is the applicant's full name.
            if required_name == result[i][3]:
                worksheet.write_row(i + 1, 0, [i + 1, j] + result[i], data_format1)
            else:
                worksheet.write_row(i + 1, 0, [i + 1, j] + result[i], current_color)
def cmp_items(a, b):
    """Old-style comparator ranking applicant rows (use with cmp_to_key).

    Sort priority, each descending: consent to enrol (index 11), admission
    condition rank (index 0), preferential right (index 12), total score
    ЕГЭ+ИД (index 8).  Returns -1 when ``a`` ranks higher, 1 when ``b``
    ranks higher, else 0.
    """
    def convert(l):
        # Map textual fields to comparable numbers.  Operates on the copy
        # made below, so the caller's row is left untouched.
        condtion_key = {'без вступительных испытаний': 4,
                        'на бюджетное место в пределах особой квоты': 3,
                        'на бюджетное место в пределах целевой квоты': 2,
                        'по общему конкурсу': 1,
                        'на контрактной основе': 0}
        l[0] = condtion_key[l[0]]
        l[8] = int(l[8] or 0)
        l[11] = 1 if l[11] == 'Да' else 0
        l[12] = 1 if l[12] == 'Да' else 0
        return l

    a = convert(a.copy())
    b = convert(b.copy())
    if a[11] != b[11]:  # Наличие согласия на зачисление
        return -1 if a[11] > b[11] else 1
    if a[0] != b[0]:    # Условие поступления (бви, контракт ...)
        return -1 if a[0] > b[0] else 1
    if a[12] != b[12]:  # Преимущественное право
        # Bug fix: the original's second branch re-tested a[12] > b[12]
        # instead of a[12] < b[12], so a row where only ``b`` had the
        # preferential right fell through to the score comparison.
        return -1 if a[12] > b[12] else 1
    if a[8] != b[8]:    # ЕГЭ+ИД
        return -1 if a[8] > b[8] else 1
    return 0
# Admission condition carried over from the most recent group-header row;
# later rows of the same group omit the condition cell (it uses rowspan).
last_condition = ''


def parse_row(row):
    """Parse one <tr> of the rating table into a flat list of 15 fields.

    Rows that start a new admission-condition group have 15 <td> cells (the
    first spans the whole group); other rows have 14 and reuse the
    module-level ``last_condition``.
    """
    def to_int_possible(a):
        # Score cells may be empty; fall back to '' instead of raising.
        try:
            r = int(a)
        except:
            r = ''
        return r
    global last_condition
    cells = row.find_all('td')
    if len(cells) == 15:
        # Group-header row: remember its condition and drop the rowspan
        # cell so the remaining indices line up with ordinary rows.
        last_condition = cells[0].getText()
        cells = row.find_all('td', {'rowspan': None})
    condition = last_condition
    number_1 = int(cells[0].getText())
    number_2 = int(cells[1].getText())
    full_name = cells[2].getText()
    mode = cells[3].getText()
    m = to_int_possible(cells[4].getText())
    r = to_int_possible(cells[5].getText())
    i = to_int_possible(cells[6].getText())
    exam_and_ia = to_int_possible(cells[7].getText())
    exam = to_int_possible(cells[8].getText())
    ia = to_int_possible(cells[9].getText())
    agreement = cells[10].getText()
    advantage = cells[11].getText()
    olympiad = cells[12].getText()
    status = cells[13].getText()
    res = [condition, number_1, number_2, full_name,
           mode, m, r, i,
           exam_and_ia, exam, ia,
           agreement, advantage, olympiad, status]
    return res
def main():
    """Download the rating page, parse and rank the table, export to Excel."""
    print('Скачиваю страницу:', url)
    import requests
    page = requests.get(url, verify=False)  # получаем страницу
    print('Ищу таблицу')
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(page.text, features='html.parser')  # парсим таблицу
    table_rows = soup.find_all('tr', {'class': None})  # получаем строки
    print('Начинаю парсить таблицу')
    parsed = [parse_row(tr) for tr in table_rows]
    print('Ранжирую таблицу')
    from functools import cmp_to_key
    ranked = sorted(parsed, key=cmp_to_key(cmp_items))
    print('Вывожу таблицу в файл')
    write_to_file(ranked)
    print('Готово!')


if __name__ == '__main__':
    main()
| 4,504 | 1,650 |
### EXAMPLE JOB SUBMISSION ###
# sbatch -A SternbergGroup --gres gpu --mem=64000 -t 15:00:00 --ntasks 10 --nodes 1 --job-name "bcbg" --wrap "python bcbg.py"

### FILENAME CHANGE BEFORE RUNNING ###
# Name used in progress messages (and output artifacts) for this run.
model_name= 'blah'
# Path to the AnnData (.h5ad) file this run operates on.
adata_file='../blah.h5ad'
######################################
import sys
import warnings; warnings.simplefilter('ignore')
import os
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from scvi.dataset import GeneExpressionDataset
from scvi.models import VAE
from scvi.inference import UnsupervisedTrainer
import torch
import anndata
import scvi
import datetime
import plotly.express as px
import plotly.graph_objects as go
from anndata import AnnData
from umap import UMAP
from fastTSNE import TSNE
from fastTSNE.callbacks import ErrorLogger
import plotnine as p
# Announce which model this plotting run is for.
print('Starting makeplotz with model:', model_name)
##### PLOTTING FUNCTIONS ######
def isnotebook():
    """Return True when running inside a Jupyter notebook/qtconsole.

    Used to skip drawing figures (and wasting time) in non-interactive runs.
    """
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        return False  # Probably standard Python interpreter
    if shell_name == 'ZMQInteractiveShell':
        return True   # Jupyter notebook or qtconsole
    return False      # Terminal IPython or some other shell
def derplot(adata=None, filename='derplot',embedding='tsne',feature='sample_type_tech',
            size=(12, 12), save=False, draw=False, psize=3):
    """Scatter plot of an embedding (tsne/umap columns in adata.obs) colored
    by one obs feature, using plotnine; optionally drawn and/or saved."""
    start = datetime.datetime.now()
    p.options.figure_size = size
    savename = filename + '.' + embedding + '.' + feature + '.png'
    print(start.strftime("%H:%M:%S"), 'Starting ... \t', savename, )
    p.theme_set(p.theme_classic())
    pt = (
        p.ggplot(p.aes(embedding + '0', embedding + '1', color=feature), adata.obs)
        + p.geom_point(size=psize, alpha=1, stroke=0)
        + p.guides(color=p.guide_legend(override_aes={'size': 15}))
    )
    # Only draw when interactive; saving works either way.
    if isnotebook() and draw:
        pt.draw()
    if save:
        pt.save(savename, format='png', dpi=200)
    delta = datetime.datetime.now() - start
    print(start.strftime("%H:%M:%S"), str(int(delta.total_seconds())), 's to make: \t', savename)
    return pt
def wraplot(adata=None, filename='wraplot',embedding='tsne',feature='sample_type_tech',
            size=(12, 12), color=None, save=False, draw=False, psize=3):
    """Facet-wrapped scatter plot of an embedding, one panel per level of
    ``feature``, with all other cells shown in light grey as background.

    ``color`` defaults to ``feature`` when not given; the save name encodes
    the choice.  Optionally drawn (interactive only) and/or saved.
    """
    start = datetime.datetime.now()
    p.options.figure_size = size
    # Idiom fix: compare to None with `is`, and build savename once per branch
    # (the original computed it twice on the default path).
    if color is None:
        color = feature
        savename = filename + '.' + embedding + '.' + feature + '.png'
    else:
        savename = filename + '.' + embedding + '.' + feature + '.' + str(color) + '.png'
    print(start.strftime("%H:%M:%S"), 'Starting ... \t', savename, )
    pt = (
        p.ggplot(p.aes(x=embedding + '0', y=embedding + '1', color=color), adata.obs)
        + p.geom_point(color='lightgrey', shape='.', data=adata.obs.drop(feature, axis=1))
        + p.geom_point(shape='.', size=psize, alpha=1, stroke=0)
        + p.theme_minimal()
        + p.facet_wrap('~' + feature)
        + p.guides(color=p.guide_legend(override_aes={'size': 10}))
    )
    if isnotebook() and draw:
        pt.draw()
    if save:
        pt.save(savename, format='png', dpi=200)
    delta = datetime.datetime.now() - start
    print(start.strftime("%H:%M:%S"), str(int(delta.total_seconds())), 's to make: \t', savename)
    return pt
| 3,503 | 1,201 |
"""
In this example you'll see how to supply an already-initialized instance as injectable.
For whatever reason we have already initialized an instance of ``Application`` and
assigned it to the ``app`` variable so we use the
:meth:`injectable_factory <injectable.injectable_factory>` decorator in a lambda which
in turn just returns the existing ``app``.
Now our ``InjectingExistingInstance`` example class can be injected with our existing
``Application`` instance.
.. seealso::
The :meth:`injectable_factory <injectable.injectable_factory>` decorator can also be
used in regular functions and not just in lambdas. The :ref:`factory_example` shows
how to use it.
"""
# sphinx-start
from examples import Example
from examples.injecting_existing_instance.app import Application
from injectable import autowired, Autowired, load_injection_container
class InjectingExistingInstance(Example):
    """Example that is injected with the pre-built ``Application`` instance."""

    @autowired
    def __init__(
        self,
        app: Autowired(Application),
    ):
        # The container resolves ``app`` to the existing instance registered
        # via the injectable_factory lambda in app.py.
        self.app = app

    def run(self):
        # Prints the number carried by the injected application.
        print(self.app.number)
        # 42
def run_example():
    """Load the injection container, then build and run the example class."""
    load_injection_container()
    example = InjectingExistingInstance()
    example.run()


if __name__ == "__main__":
    run_example()
| 1,244 | 352 |
from fHDHR.exceptions import TunerError
from .tuner import Tuner
class Tuners():
    """Pool of fHDHR tuner slots.

    Creates ``tuner_count`` numbered Tuner objects (1-based) and mediates
    grabbing/releasing them and resolving channel stream information.
    """

    def __init__(self, fhdhr, epg, channels):
        self.fhdhr = fhdhr
        self.channels = channels
        self.epg = epg
        # Number of tuner slots comes from configuration.
        self.max_tuners = int(self.fhdhr.config.dict["fhdhr"]["tuner_count"])
        self.tuners = {}
        for i in range(1, self.max_tuners + 1):
            self.tuners[i] = Tuner(fhdhr, i, epg)

    def tuner_grab(self, tuner_number):
        """Grab a specific tuner by number.

        Raises TunerError when the tuner does not exist or is unavailable.
        """
        if int(tuner_number) not in list(self.tuners.keys()):
            self.fhdhr.logger.error("Tuner %s does not exist." % str(tuner_number))
            raise TunerError("806 - Tune Failed")
        # TunerError will raise if unavailable
        self.tuners[int(tuner_number)].grab()
        return tuner_number

    def first_available(self):
        """Grab and return the first free tuner number; raise when all busy."""
        if not self.available_tuner_count():
            raise TunerError("805 - All Tuners In Use")
        for tunernum in list(self.tuners.keys()):
            try:
                self.tuners[int(tunernum)].grab()
            except TunerError:
                continue
            else:
                return tunernum
        raise TunerError("805 - All Tuners In Use")

    def tuner_close(self, tunernum):
        """Release the given tuner."""
        self.tuners[int(tunernum)].close()

    def status(self):
        """Return a mapping of tuner number -> status dict for every tuner."""
        all_status = {}
        for tunernum in list(self.tuners.keys()):
            all_status[tunernum] = self.tuners[int(tunernum)].get_status()
        return all_status

    def _count_by_status(self, status):
        # Consolidates the previously duplicated counting loops of
        # available_tuner_count()/inuse_tuner_count().
        return sum(
            1 for tunernum in list(self.tuners.keys())
            if self.tuners[int(tunernum)].get_status()["status"] == status
        )

    def available_tuner_count(self):
        """Number of tuners currently Inactive (free)."""
        return self._count_by_status("Inactive")

    def inuse_tuner_count(self):
        """Number of tuners currently Active (streaming)."""
        return self._count_by_status("Active")

    def get_stream_info(self, stream_args):
        """Resolve the channel URI and content type for a stream request.

        Raises TunerError when the channel has no stream URL.
        """
        stream_args["channelUri"] = self.channels.get_channel_stream(str(stream_args["channel"]))
        if not stream_args["channelUri"]:
            raise TunerError("806 - Tune Failed")
        channelUri_headers = self.fhdhr.web.session.head(stream_args["channelUri"]).headers
        stream_args["true_content_type"] = channelUri_headers['Content-Type']
        # Playlist/text responses get remuxed as MPEG video downstream.
        if stream_args["true_content_type"].startswith(tuple(["application/", "text/"])):
            stream_args["content_type"] = "video/mpeg"
        else:
            stream_args["content_type"] = stream_args["true_content_type"]
        return stream_args
| 2,736 | 921 |
from rest_framework import serializers
from shared.serializer import NoNullSerializer, OfficerPercentileSerializer
class CoaccusedSerializer(OfficerPercentileSerializer):
    """Serializer for the officer involved in a TRR, including percentile
    fields inherited from OfficerPercentileSerializer."""
    id = serializers.IntegerField()
    full_name = serializers.CharField()
    allegation_count = serializers.IntegerField()
class TRRDetailSerializer(NoNullSerializer):
    """Serializer for a Tactical Response Report (use-of-force record)."""
    kind = serializers.SerializerMethodField()
    trr_id = serializers.IntegerField(source='id')
    to = serializers.CharField(source='v2_to')
    taser = serializers.NullBooleanField()
    firearm_used = serializers.NullBooleanField()
    date = serializers.SerializerMethodField()
    address = serializers.SerializerMethodField()
    officer = serializers.SerializerMethodField()

    def get_kind(self, obj):
        # All TRRs are presented as FORCE items.
        return 'FORCE'

    def get_date(self, obj):
        # Date portion of trr_datetime, ISO formatted.
        return obj.trr_datetime.date().strftime('%Y-%m-%d')

    def get_address(self, obj):
        # Join block and street, skipping whichever parts are missing.
        return ' '.join(filter(None, [obj.block, obj.street]))

    def get_officer(self, obj):
        return CoaccusedSerializer(obj.officer).data
| 1,057 | 306 |
# Copyright (C) 2014 Colin Bernet
# https://github.com/cbernet/heppy/blob/master/LICENSE
import math
import copy
class Value(object):
    """A measured value with a symmetric uncertainty.

    Supports ``+``, ``-`` and ``/``: absolute errors are added in quadrature
    for addition/subtraction, relative errors in quadrature for division.
    """

    def __init__(self, val, err):
        self.val = val
        self.err = err

    def relerr(self):
        """Relative error |err / val|; raises ZeroDivisionError when val == 0."""
        return abs(self.err / self.val)

    def __eq__(self, other):
        return self.val == other.val and self.err == other.err

    def __iadd__(self, other):
        self.val += other.val
        # Absolute errors add in quadrature.
        self.err = math.sqrt( self.err*self.err + other.err*other.err)
        return self

    def __add__(self, other):
        new = copy.deepcopy(self)
        new += other
        return new

    def __isub__(self, other):
        self.val -= other.val
        self.err = math.sqrt( self.err*self.err + other.err*other.err)
        return self

    def __sub__(self, other):
        new = copy.deepcopy(self)
        new -= other
        return new

    def __idiv__(self, other):
        # Relative errors add in quadrature under division; computed before
        # mutating val so self.relerr() still reflects the pre-division value.
        relerr = math.sqrt( self.relerr()*self.relerr() + other.relerr()*other.relerr())
        self.val /= other.val
        self.err = relerr * self.val
        return self

    def __div__(self, other):
        new = copy.deepcopy(self)
        new /= other
        return new

    # Bug fix: __div__/__idiv__ are Python 2 operator names and are never
    # called by `/` under Python 3.  Alias the Python 3 names so division
    # keeps working on both versions without changing the existing API.
    __itruediv__ = __idiv__
    __truediv__ = __div__

    def __str__(self):
        return '{val:10.3f} +- {err:8.3f} ({relerr:5.2f}%)'.format(val=self.val,
                                                                   err=self.err,
                                                                   relerr=self.relerr()*100)
| 1,478 | 483 |
import os
import asyncio
import concurrent.futures
import requests
import aiohttp
from mlimages.util.file_api import FileAPI
import mlimages.util.log_api as LogAPI
class API():
    """Base class for image-dataset crawlers.

    Wraps an asyncio/aiohttp download pipeline: subclasses implement
    ``_gather`` to discover image URLs, and this class downloads them in
    parallel under ``data_root``.
    NOTE(review): uses a pre-1.0 aiohttp API (``ProxyConnector``,
    ``aiohttp.Timeout``) — confirm the pinned aiohttp version before upgrading.
    """

    def __init__(self, data_root, proxy="", proxy_user="", proxy_password="", parallel=-1, limit=-1, timeout=10, debug=False):
        # File helpers rooted at data_root; relative paths resolve under it.
        self.file_api = FileAPI(data_root)
        self.proxy = proxy
        self.proxy_user = proxy_user
        self.proxy_password = proxy_password
        # Fall back to 4 concurrent downloads when no positive value is given.
        self.parallel = parallel if parallel > 0 else 4
        self.limit = limit
        self.timeout = timeout
        self.logger = LogAPI.create_logger(type(self).__name__, debug)

    def _gather(self):
        """Subclass hook: collect download targets. Must be overridden."""
        raise Exception("API has to implements gather method")

    def create_session(self, loop):
        """Create an aiohttp ClientSession honoring the proxy settings and
        the connection-parallelism limit."""
        conn = None
        if self.proxy and self.proxy_user:
            # Authenticated proxy.
            conn = aiohttp.ProxyConnector(
                loop=loop,
                limit=self.parallel,
                proxy=self.proxy,
                proxy_auth=aiohttp.BasicAuth(self.proxy_user, self.proxy_password)
            )
        elif self.proxy:
            conn = aiohttp.ProxyConnector(loop=loop, limit=self.parallel, proxy=self.proxy)
        else:
            conn = aiohttp.TCPConnector(loop=loop, limit=self.parallel)
        session = aiohttp.ClientSession(connector=conn)
        return session

    async def _download_images(self, session, relative, image_urls):
        """Download ``image_urls`` into ``relative``, ``parallel`` at a time,
        stopping once ``limit`` successful fetches are reached."""
        self.file_api.prepare_dir(relative)
        successed = 0
        for urls in [image_urls[i:i+self.parallel] for i in range(0, len(image_urls), self.parallel)]:
            done, pendings = await asyncio.wait([self.fetch_image(session, relative, u) for u in urls])
            for d in done:
                try:
                    successed += 1 if d.result() else 0
                except:
                    pass  # a failed fetch simply does not count as a success
            # NOTE(review): with the default limit=-1 this is true after the
            # first batch — confirm callers always pass a positive limit.
            if successed >= self.limit:
                break

    async def fetch_image(self, session, relative, image_url):
        """Fetch one image; return True when the file was written to disk."""
        fname = self.file_api.get_file_name(image_url)
        p = os.path.join(relative, fname)
        fetched = False
        try:
            with aiohttp.Timeout(self.timeout):
                async with session.get(image_url) as r:
                    # Only keep direct hits: a redirect that changes the file
                    # name is treated as a miss.
                    if r.status == 200 and self.file_api.get_file_name(r.url) == fname:
                        c = await r.read()
                        if c:
                            with open(self.file_api.to_abs(p), "wb") as f:
                                f.write(c)
                            fetched = True
        except FileNotFoundError as ex:
            self.logger.error("{0} is not found.".format(p))
        except concurrent.futures._base.TimeoutError as tx:
            self.logger.warning("{0} is timeouted.".format(image_url))
        except Exception as ex:
            self.logger.warning("fetch image is failed. url: {0}, cause: {1}".format(image_url, str(ex)))
        return fetched

    def download_dataset(self, url, relative):
        """Stream a dataset archive from ``url`` to ``relative`` in 1 KiB
        chunks, fsync-ing as it goes."""
        r = requests.get(url, stream=True)
        if r.ok:
            with self.file_api.open_with_mkdir(relative) as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
                        f.flush()
                        os.fsync(f.fileno())
| 3,317 | 963 |
# -*- coding: utf-8 -*-
def main():
    """Read n lines of cards from stdin, count 'R' and 'B' occurrences and
    print the winner (TAKAHASHI / AOKI / DRAW)."""
    n = int(input())
    reds = 0
    blues = 0
    for _ in range(n):
        line = input()
        reds += line.count('R')
        blues += line.count('B')
    if reds > blues:
        print('TAKAHASHI')
    elif reds < blues:
        print('AOKI')
    else:
        print('DRAW')


if __name__ == '__main__':
    main()
| 401 | 176 |
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Lab 3B - Depth Camera Cone Parking
"""
########################################################################################
# Imports
########################################################################################
import sys
import cv2 as cv
import numpy as np
from typing import Any, Tuple, List, Optional
from nptyping import NDArray
from enum import IntEnum
sys.path.insert(0, "../../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
# Sets up the racecar object
rc = racecar_core.create_racecar()

# >> Constants
# The smallest contour we will recognize as a valid contour
MIN_CONTOUR_AREA = 30

# The HSV range for the color orange, stored as (hsv_min, hsv_max) tuples of
# (hue, saturation, value)
ORANGE = ((10, 100, 100), (20, 255, 255))

# >> Variables
speed = 0.0  # The current speed of the car
angle = 0.0  # The current angle of the car's wheels
contour_center = None  # The (pixel row, pixel column) of contour
contour_area = 0  # The area of contour

# Add any global variables here
isParked = False  # Set to true once the car has stopped around 30cm in front of the cone
########################################################################################
# Functions
########################################################################################
class State(IntEnum):
    """Finite states for the cone-parking controller."""
    search = 0    # spin until an orange contour is found
    approach = 1  # drive toward the cone and stop ~30 cm away


# Current controller state; the car starts by searching for the cone.
curState = State.search
def update_contour():
    """
    Finds contours in the current color image and uses them to update contour_center
    and contour_area
    """
    global contour_center
    global contour_area

    image = rc.camera.get_color_image()
    if image is None:
        contour_center = None
        contour_area = 0
        return

    # Largest orange contour (if any) drives the trackers.
    contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1])
    contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)
    if contour is None:
        contour_center = None
        contour_area = 0
        return

    contour_center = rc_utils.get_contour_center(contour)
    contour_area = rc_utils.get_contour_area(contour)
    # Overlay the detection on the image for debugging displays.
    rc_utils.draw_contour(image, contour)
    rc_utils.draw_circle(image, contour_center)
def get_mask(
    image: NDArray[(Any, Any, 3), np.uint8],
    hsv_lower: Tuple[int, int, int],
    hsv_upper: Tuple[int, int, int]
) -> NDArray[Any, Any]:
    """
    Returns a mask containing all of the areas of image which were between hsv_lower and hsv_upper.

    Args:
        image: The image (stored in BGR) from which to create a mask.
        hsv_lower: The lower bound of HSV values to include in the mask.
        hsv_upper: The upper bound of HSV values to include in the mask.
    """
    lower = np.array(hsv_lower)
    upper = np.array(hsv_upper)
    # Convert BGR -> HSV, then keep only pixels inside [lower, upper].
    hsv_image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    return cv.inRange(hsv_image, lower, upper)
def start():
    """
    This function is run once every time the start button is pressed
    """
    rc.drive.stop()  # begin at rest
    print(">> Lab 3B - Depth Camera Cone Parking")
def update():
    """
    After start() is run, this function is run every frame until the back button
    is pressed.  Parks the car 30 cm away from the closest orange cone.
    """
    global speed
    global angle
    global curState
    global isParked  # Bug fix: assigned below but never declared global

    # Search for contours in the current color image
    update_contour()
    print(curState)

    imgX = rc.camera.get_width()
    if contour_center is not None:
        # Steer proportionally to the contour's horizontal offset.
        angle = rc_utils.remap_range(contour_center[1], 0, imgX, -1, 1)
    if contour_center is None:
        # Bug fix: the original wrote `curState == State.search` (a no-op
        # comparison); we must actually re-enter the search state when the
        # cone is lost.
        curState = State.search
        angle = 1

    if curState == State.search:
        angle = 1
        speed = 0.2
        if contour_center is not None:
            curState = State.approach

    # Depth processing: mask the depth image to orange pixels, crop off the
    # bottom fifth of the frame, and take the closest remaining pixel.
    depth_image = rc.camera.get_depth_image()
    depth_image_adjust = (depth_image - 0.01) % 9999
    depth_image_adjust_blur = cv.GaussianBlur(depth_image_adjust, (11, 11), 0)  # currently unused

    image = rc.camera.get_color_image()
    mask = get_mask(image, ORANGE[0], ORANGE[1])
    masked_depth_image = cv.bitwise_and(depth_image, depth_image, mask=mask)

    top_left_inclusive = (0, 0)
    bottom_right_exclusive = ((rc.camera.get_height() * 4 // 5), rc.camera.get_width())
    cropped_image = rc_utils.crop(masked_depth_image, top_left_inclusive, bottom_right_exclusive)
    closest_pixel = rc_utils.get_closest_pixel(cropped_image)
    distance = cropped_image[closest_pixel[0], closest_pixel[1]]
    rc.display.show_depth_image(cropped_image, points=[closest_pixel])

    if curState == State.approach:
        if distance < 29:
            # Too close: back up, faster the closer we are.
            speed = rc_utils.remap_range(distance, 0, 30, -1, 0)
            print("backing")
        elif distance < 30:
            # In the parking window: stop.
            speed = 0
            angle = 0
        elif distance > 30 and distance < 100:
            # Slow approach proportional to remaining distance.
            speed = rc_utils.remap_range(distance, 30, 1000, 0, 1)
        elif distance > 100:
            speed = 0.5

    rc.drive.set_speed_angle(speed, angle)

    # Y resets the parked flag; A and B print debugging info when held.
    if rc.controller.is_down(rc.controller.Button.Y):
        isParked = False
        print("not parke")
    if rc.controller.is_down(rc.controller.Button.A):
        print("Speed:", speed, "Angle:", angle)
    if rc.controller.is_down(rc.controller.Button.B):
        if contour_center is None:
            print("No contour found")
        else:
            print("Center:", contour_center, "Area:", contour_area)
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
rc.set_start_update(start, update, None)
rc.go() | 6,622 | 2,063 |
# -*- coding: utf-8 -*-
#
# This file is part of the SdpSubarrayLeafNode project
#
#
#
# Distributed under the terms of the BSD-3-Clause license.
# See LICENSE.txt for more info.
"""
SDP Subarray Leaf node is to monitor the SDP Subarray and issue control actions during an observation.
It also acts as a SDP contact point for Subarray Node for observation execution.
"""
# PROTECTED REGION ID(sdpsubarrayleafnode.additionnal_import) ENABLED START #
# Third party imports
import os
# PyTango imports
import tango
import threading
from tango import DebugIt, AttrWriteType, ApiUtil
from tango.server import run, command, device_property, attribute
# Additional imports
from ska.base import SKABaseDevice
from ska.base.control_model import HealthState, ObsState
from ska.base.commands import ResultCode
from tmc.common.tango_client import TangoClient
from tmc.common.tango_server_helper import TangoServerHelper
from . import const, release
from .assign_resources_command import AssignResources
from .release_resources_command import ReleaseAllResources
from .configure_command import Configure
from .scan_command import Scan
from .endscan_command import EndScan
from .end_command import End
from .abort_command import Abort
from .restart_command import Restart
from .obsreset_command import ObsReset
from .telescope_on_command import TelescopeOn
from .telescope_off_command import TelescopeOff
from .reset_command import ResetCommand
from .device_data import DeviceData
from .exceptions import InvalidObsStateError
# PROTECTED REGION END # // SdpSubarrayLeafNode.additionnal_import
# Public names exported by this module: the Tango device class, its command
# implementations, and shared constants/release metadata.
__all__ = [
    "SdpSubarrayLeafNode",
    "main",
    "AssignResources",
    "const",
    "release",
    "ReleaseAllResources",
    "TelescopeOn",
    "TelescopeOff",
    "Configure",
    "Abort",
    "Restart",
    "ObsReset",
    "Scan",
    "End",
    "EndScan",
    "ResetCommand"
]
# pylint: disable=unused-argument,unused-variable, implicit-str-concat
class SdpSubarrayLeafNode(SKABaseDevice):
"""
SDP Subarray Leaf node is to monitor the SDP Subarray and issue control actions during an observation.
:Device Properties:
SdpSubarrayFQDN:
FQDN of the SDP Subarray Tango Device Server.
:Device Attributes:
receiveAddresses:
This attribute is used for testing purposes. In the unit test cases
it is used to provide FQDN of receiveAddresses attribute from SDP.
activityMessage:
String providing information about the current activity in SDP Subarray Leaf Node.
activeProcessingBlocks:
This is a attribute from SDP Subarray which depicts the active Processing
Blocks in the SDP Subarray.
"""
# -----------------
# Device Properties
# -----------------
SdpSubarrayFQDN = device_property(
dtype="str", doc="FQDN of the SDP Subarray Tango Device Server."
)
# ----------
# Attributes
# ----------
receiveAddresses = attribute(
dtype="str",
access=AttrWriteType.READ_WRITE,
doc="This attribute is used for testing purposes. In the unit test cases, "
"it is used to provide FQDN of receiveAddresses attribute from SDP.",
)
activityMessage = attribute(
dtype="str",
access=AttrWriteType.READ_WRITE,
doc="String providing information about the current activity in SDP Subarray Leaf Node",
)
activeProcessingBlocks = attribute(
dtype="str",
doc="This is a attribute from SDP Subarray which depicts the active Processing Blocks in "
"the SDP Subarray.",
)
    class InitCommand(SKABaseDevice.InitCommand):
        """
        A class for the TMC SdpSubarrayLeafNode's init_device() method.
        """

        def do(self):
            """
            Initializes the attributes and properties of the SdpSubarrayLeafNode.

            return:
                A tuple containing a return code and a string message indicating status.
                The message is for information purpose only.

            rtype:
                (ResultCode, str)
            """
            super().do()
            device = self.target
            # Singleton helper used throughout the leaf node for attribute access.
            self.this_server = TangoServerHelper.get_instance()
            self.this_server.set_tango_class(device)
            # Backing store for the read/write device attributes.
            device.attr_map = {}
            device.attr_map["receiveAddresses"] = ""
            device.attr_map["activeProcessingBlocks"] = ""
            device.attr_map["activityMessage"] = ""
            # Initialise attributes
            device._sdp_subarray_health_state = HealthState.OK
            device._build_state = "{},{},{}".format(
                release.name, release.version, release.description
            )
            device._version_id = release.version
            # Create DeviceData class instance
            device_data = DeviceData.get_instance()
            device.device_data = device_data
            standalone_mode = os.environ.get("STANDALONE_MODE")
            self.logger.info("Device running in standalone_mode:%s", standalone_mode)
            # Use the push-callback model so asynchronous command replies
            # arrive via callbacks rather than polling.
            ApiUtil.instance().set_asynch_cb_sub_model(tango.cb_sub_model.PUSH_CALLBACK)
            log_msg = f"{const.STR_SETTING_CB_MODEL}{ApiUtil.instance().get_asynch_cb_sub_model()}"
            self.logger.debug(log_msg)
            self.this_server.write_attr(
                "activityMessage", const.STR_SDPSALN_INIT_SUCCESS, False
            )
            # Initialise Device status
            device.set_status(const.STR_SDPSALN_INIT_SUCCESS)
            self.logger.info(const.STR_SDPSALN_INIT_SUCCESS)
            return (ResultCode.OK, const.STR_SDPSALN_INIT_SUCCESS)
# ---------------
# General methods
# ---------------
    def always_executed_hook(self):
        # PROTECTED REGION ID(SdpSubarrayLeafNode.always_executed_hook) ENABLED START #
        """Internal construct of TANGO."""
        # PROTECTED REGION END #    //  SdpSubarrayLeafNode.always_executed_hook

    def delete_device(self):
        # PROTECTED REGION ID(SdpSubarrayLeafNode.delete_device) ENABLED START #
        """Internal construct of TANGO."""
        # PROTECTED REGION END #    //  SdpSubarrayLeafNode.delete_device
# ------------------
# Attributes methods
# ------------------
    def read_receiveAddresses(self):
        # PROTECTED REGION ID(SdpSubarrayLeafNode.receiveAddresses_read) ENABLED START #
        """Internal construct of TANGO. Returns the Receive Addresses.

        receiveAddresses is a forwarded attribute from SDP Master which depicts State of the SDP."""
        # Read straight from the shared attribute map populated at init.
        return self.attr_map["receiveAddresses"]
        # PROTECTED REGION END #    //  SdpSubarrayLeafNode.receiveAddresses_read
def write_receiveAddresses(self, value):
# PROTECTED REGION ID(SdpSubarrayLeafNode.receiveAddresses_read) ENABLED START #
"""Internal construct of TANGO. Sets the Receive Addresses.
receiveAddresses is a forwarded attribute from SDP Master which depicts State of the SDP."""
self.attr_map["receiveAddresses"] = value
# PROTECTED REGION END # // SdpSubarrayLeafNode.receiveAddresses_read
def read_activityMessage(self):
    # PROTECTED REGION ID(SdpSubarrayLeafNode.activityMessage_read) ENABLED START #
    """Internal construct of TANGO. Return the current activity message.

    activityMessage is a string describing the current activity in the
    SDP Subarray Leaf Node.
    """
    attributes = self.attr_map
    return attributes["activityMessage"]
    # PROTECTED REGION END #    //  SdpSubarrayLeafNode.activityMessage_read
def write_activityMessage(self, value):
    # PROTECTED REGION ID(SdpSubarrayLeafNode.activityMessage_write) ENABLED START #
    """Internal construct of TANGO. Set the activity message.

    activityMessage is a string describing the current activity in the
    SDP Subarray Leaf Node; the write goes through the locked map updater.
    """
    message = value
    self.update_attr_map("activityMessage", message)
    # PROTECTED REGION END #    //  SdpSubarrayLeafNode.activityMessage_write
def update_attr_map(self, attr, val):
    # PROTECTED REGION ID(SdpSubarrayLeafNode.update_attr_map) ENABLED START #
    """Thread-safely set ``attr`` to ``val`` in the attribute map.

    Bug fix: the original created a brand-new ``threading.Lock()`` on every
    call, so concurrent callers never contended on the same lock and updates
    were not actually serialised (and the lock was never released if the
    assignment raised).  A single per-instance lock is now created lazily —
    ``dict.setdefault`` is atomic under the GIL — and held via ``with`` so
    it is released even on error.
    """
    lock = self.__dict__.setdefault("_attr_map_lock", threading.Lock())
    with lock:
        self.attr_map[attr] = val
    # PROTECTED REGION END #    //  SdpSubarrayLeafNode.update_attr_map
def read_activeProcessingBlocks(self):
    # PROTECTED REGION ID(SdpSubarrayLeafNode.activeProcessingBlocks_read) ENABLED START #
    """Internal construct of TANGO. Return the Active Processing Blocks.

    activeProcessingBlocks is a forwarded attribute from the SDP Subarray
    which depicts the active Processing Blocks in the SDP Subarray.
    """
    attributes = self.attr_map
    return attributes["activeProcessingBlocks"]
    # PROTECTED REGION END #    //  SdpSubarrayLeafNode.activeProcessingBlocks_read
# --------
# Commands
# --------
def is_telescope_on_allowed(self):
    """
    Check whether TelescopeOn may run in the current device state.

    return: True if this command is allowed to be run in current device state.
    rtype: boolean
    raises: DevFailed if this command is not allowed to be run in current device state.
    """
    return self.get_command_object("TelescopeOn").check_allowed()
@command()
@DebugIt()
def TelescopeOn(self):
    """
    Set the opState to ON via the registered TelescopeOn handler.

    :param argin: None
    :return: None
    """
    on_handler = self.get_command_object("TelescopeOn")
    on_handler()
def is_telescope_off_allowed(self):
    """
    Check whether TelescopeOff may run in the current device state.

    return: True if this command is allowed to be run in current device state.
    rtype: boolean
    raises: DevFailed if this command is not allowed to be run in current device state.
    """
    return self.get_command_object("TelescopeOff").check_allowed()
@command()
@DebugIt()
def TelescopeOff(self):
    """
    Set the opState to Off via the registered TelescopeOff handler.

    :param argin: None
    :return: None
    """
    off_handler = self.get_command_object("TelescopeOff")
    off_handler()
@command()
@DebugIt()
def Abort(self):
    """Invoke Abort on SdpSubarrayLeafNode via the registered handler."""
    abort_handler = self.get_command_object("Abort")
    abort_handler()
def is_Abort_allowed(self):
    """
    Check whether Abort may run in the current device state.

    return: True if this command is allowed to be run in current device state
    rtype: boolean
    raises: DevFailed if this command is not allowed to be run in current device state
    """
    return self.get_command_object("Abort").check_allowed()
@command(
    dtype_in=("str"),
    doc_in="The input JSON string consists of information related to id, max_length, scan_types"
    " and processing_blocks.",
)
@DebugIt()
def AssignResources(self, argin):
    """Assign the resources described by the JSON string *argin* to the SDP subarray.

    The subarray obsState is validated first; an invalid state is reported
    back to TANGO as a DevFailed exception.
    """
    resources_handler = self.get_command_object("AssignResources")
    try:
        self.validate_obs_state()
    except InvalidObsStateError as error:
        self.logger.exception(error)
        tango.Except.throw_exception(
            const.ERR_DEVICE_NOT_IN_EMPTY_IDLE,
            const.ERR_ASSGN_RESOURCES,
            "SdpSubarrayLeafNode.AssignResources()",
            tango.ErrSeverity.ERR,
        )
    resources_handler(argin)
def is_AssignResources_allowed(self):
    """
    Check whether AssignResources may run in the current device state.

    return: True if this command is allowed to be run in current device state
    rtype: boolean
    """
    return self.get_command_object("AssignResources").check_allowed()
def is_Configure_allowed(self):
    """
    Check whether Configure may run in the current device state.

    return: True if this command is allowed to be run in current device state
    rtype: boolean
    """
    return self.get_command_object("Configure").check_allowed()
@command(
    dtype_in=("str"),
    doc_in="The JSON input string consists of scan type.",
)
@DebugIt()
def Configure(self, argin):
    """Invoke Configure on SdpSubarrayLeafNode with the JSON string *argin*."""
    configure_handler = self.get_command_object("Configure")
    configure_handler(argin)
def is_End_allowed(self):
    """
    Check whether End may run in the current device state.

    return: True if this command is allowed to be run in current device state.
    rtype: boolean
    """
    return self.get_command_object("End").check_allowed()
@command()
@DebugIt()
def End(self):
    """Invoke End on the SDP subarray to finish the current Scheduling Block."""
    end_handler = self.get_command_object("End")
    end_handler()
def is_EndScan_allowed(self):
    """
    Check whether EndScan may run in the current device state.

    return: True if this command is allowed to be run in current device state.
    rtype: boolean
    """
    return self.get_command_object("EndScan").check_allowed()
@command()
@DebugIt()
def EndScan(self):
    """Invoke EndScan on SdpSubarrayLeafNode."""
    endscan_handler = self.get_command_object("EndScan")
    endscan_handler()
@command()
@DebugIt()
def ObsReset(self):
    """Invoke ObsReset command on SdpSubarrayLeafNode."""
    obsreset_handler = self.get_command_object("ObsReset")
    obsreset_handler()
def is_ObsReset_allowed(self):
    """
    Check whether ObsReset may run in the current device state.

    return: True if this command is allowed to be run in current device state
    rtype: boolean
    """
    return self.get_command_object("ObsReset").check_allowed()
def is_ReleaseAllResources_allowed(self):
    """
    Check whether ReleaseAllResources may run in the current device state.

    return: True if this command is allowed to be run in current device state
    rtype: boolean
    raises: DevFailed if this command is not allowed to be run in current device state
    """
    return self.get_command_object("ReleaseAllResources").check_allowed()
@command()
@DebugIt()
def ReleaseAllResources(self):
    """Invoke ReleaseAllResources command on SdpSubarrayLeafNode."""
    release_handler = self.get_command_object("ReleaseAllResources")
    release_handler()
@command()
@DebugIt()
def Restart(self):
    """Invoke Restart command on SdpSubarrayLeafNode."""
    restart_handler = self.get_command_object("Restart")
    restart_handler()
def is_Restart_allowed(self):
    """
    Check whether Restart may run in the current device state.

    return: True if this command is allowed to be run in current device state
    rtype: boolean
    raises: DevFailed if this command is not allowed to be run in current device state
    """
    return self.get_command_object("Restart").check_allowed()
def is_Scan_allowed(self):
    """
    Check whether Scan may run in the current device state.

    return: True if this command is allowed to be run in current device state.
    rtype: boolean
    """
    return self.get_command_object("Scan").check_allowed()
@command(
    dtype_in=("str"),
    doc_in="The JSON input string consists of SB ID.",
)
@DebugIt()
def Scan(self, argin):
    """Invoke Scan on the SDP subarray with the JSON string *argin*."""
    scan_handler = self.get_command_object("Scan")
    scan_handler(argin)
def validate_obs_state(self):
    """Raise InvalidObsStateError unless the SDP subarray is in EMPTY or IDLE.

    Reads the SdpSubarrayFQDN device property, queries that subarray's
    obsState attribute and, on an invalid state, records an error message
    on activityMessage before raising.
    """
    self.this_server = TangoServerHelper.get_instance()
    sdp_subarray_fqdn = self.this_server.read_property("SdpSubarrayFQDN")[0]
    sdp_sa_client = TangoClient(sdp_subarray_fqdn)
    if sdp_sa_client.get_attribute("obsState").value in [
        ObsState.EMPTY,
        ObsState.IDLE,
    ]:
        self.logger.info(
            "SDP subarray is in required obstate,Hence resources to SDP can be assign."
        )
    else:
        self.logger.error("Subarray is not in EMPTY obstate")
        log_msg = "Error in device obstate."
        # Surface the failure on the activityMessage attribute before raising
        self.this_server.write_attr("activityMessage", log_msg, False)
        raise InvalidObsStateError("SDP subarray is not in EMPTY obstate.")
def init_command_objects(self):
    """
    Initialises the command handlers for commands supported by this
    device, registering one handler object per TANGO command.
    """
    super().init_command_objects()
    # All handlers share the same (device_data, state_model, logger) context
    device_data = DeviceData.get_instance()
    args = (device_data, self.state_model, self.logger)
    self.register_command_object("AssignResources", AssignResources(*args))
    self.register_command_object("ReleaseAllResources", ReleaseAllResources(*args))
    self.register_command_object("Scan", Scan(*args))
    self.register_command_object("End", End(*args))
    self.register_command_object("Restart", Restart(*args))
    self.register_command_object("Configure", Configure(*args))
    self.register_command_object("EndScan", EndScan(*args))
    self.register_command_object("Abort", Abort(*args))
    self.register_command_object("ObsReset", ObsReset(*args))
    self.register_command_object("TelescopeOff", TelescopeOff(*args))
    self.register_command_object("TelescopeOn", TelescopeOn(*args))
    self.register_command_object("Reset", ResetCommand(*args))
# ----------
# Run server
# ----------
def main(args=None, **kwargs):
    # PROTECTED REGION ID(SdpSubarrayLeafNode.main) ENABLED START #
    """
    Runs the SdpSubarrayLeafNode device server.

    :param args: Arguments internal to TANGO
    :param kwargs: Arguments internal to TANGO
    :return: value returned by the TANGO ``run`` helper
    """
    ret_val = run((SdpSubarrayLeafNode,), args=args, **kwargs)
    return ret_val
    # PROTECTED REGION END #    //  SdpSubarrayLeafNode.main


if __name__ == "__main__":
    main()
| 19,272 | 5,445 |
from aiocqhttp import CQHttp
from datetime import datetime
from sendmsg import SendMsg
from loadData import LoadData
import threading
import time
# Configuration for running this script on a Windows host alongside CoolQ:
# HOST = '127.0.0.1'
# PORT = 7788
# This URL posts to the CoolQ instance inside the docker container.
# Example: with docker flags "-p 3542:9000 -p 15700:5700",
# 9000 is CoolQ's web UI port (mapped to 3542, so the outside world uses 3542),
# and 5700 is the port CoolQ listens on for API calls (what this Python
# service sends), mapped to 15700 — so we send to CoolQ via port 15700.
BASEURL = 'http://127.0.0.1:15700/'
bot = CQHttp(api_root=BASEURL)
# Built-in keyword -> reply dictionary; entries are currently disabled and the
# live dictionary is refreshed from LoadData (see mixin_dict below).
d = {
    # '博客': 'https://blog.csdn.net/qq20004604',
    # 'github': 'https://github.com/qq20004604',
    # 'nginx': 'https://github.com/qq20004604/nginx-demo',
    # 'django': 'https://github.com/qq20004604/Python3_Django_Demo',
    # 'docker': 'https://github.com/qq20004604/docker-learning',
    # 'webpack': 'https://github.com/qq20004604/webpack-study',
    # 'react': 'https://github.com/qq20004604/react-demo',
    # 'vue': 'github: https://github.com/qq20004604/vue-scaffold\n博客专栏(1.x):https://blog.csdn.net/qq20004604/article/category/6381182',
    # '笔记': 'https://github.com/qq20004604/notes',
    # 'demo': 'https://github.com/qq20004604/some_demo',
    # '海外服务器': 'https://manage.hostdare.com/aff.php?aff=939\n这个可以做私人服务器(不需要备案),也可以找群主询问如何架设SS server的方法。',
    # 'QQ 机器人': 'https://github.com/qq20004604/qq-robot',
    # '架构': 'https://juejin.im/post/5cea1f705188250640005472',
    # 'es6': 'https://blog.csdn.net/qq20004604/article/details/78014684',
    # 'vue脚手架': 'https://github.com/qq20004604/Vue-with-webpack',
    # 'react脚手架': 'https://github.com/qq20004604/react-with-webpack',
    # 'Macbook常用软件': 'https://github.com/qq20004604/when-you-get-new-Macbook',
    # 'python的django与mysql交互': 'https://blog.csdn.net/qq20004604/article/details/89934212'
}
ld = LoadData()
def log(context, filename='./log.log'):
    """Append one line describing a CQHTTP message event to *filename*."""
    sender = context['sender']
    line = 'time:%s, sender:%s, message_type:%s, user_id:%s, content:%s\n' % (
        datetime.now(),
        sender['nickname'],
        context['message_type'],
        sender['user_id'],
        context['raw_message'])
    with open(filename, 'a', encoding='utf-8') as fp:
        fp.write(line)
@bot.on_message()
async def handle_msg(context):
    """Answer '#<keyword>' lookups and '#help'; log any message that matched.

    A context dict looks like:
    {'font': ..., 'message': '#help', 'message_type': 'private',
     'sender': {'nickname': ..., 'user_id': ...}, 'raw_message': '#help', ...}
    """
    msg = context['message']
    reply_text = ''
    matched_keyword = False
    matched_help = False
    for keyword, answer in d.items():
        if ('#' + keyword) in msg:
            reply_text += answer + '\n'
            matched_keyword = True
    if '#help' in msg:
        reply_text += '你可以使用以下命令~记得前面带上#喔\n'
        matched_help = True
        for keyword in d:
            reply_text += '#' + keyword + '\n'
    # Dictionary answers always end with a request for a GitHub star
    if matched_keyword is True:
        reply_text += "记得给star!"
    # Log whenever any lookup (keyword or help) happened
    if matched_keyword is True or matched_help is True:
        log(context)
    return {'reply': reply_text}
@bot.on_notice('group_increase')
async def handle_group_increase(context):
    """Greet members who just joined the group, @-mentioning the newcomer."""
    welcome = '欢迎新人~可以输入#help来向我查询所有命令喔'
    await bot.send(context, message=welcome,
                   at_sender=True, auto_escape=True)
@bot.on_request('group', 'friend')
async def handle_request(context):
    """Auto-approve every friend request and group invitation."""
    return {'approve': True}
# Instantiate the SendMsg helper against the CoolQ API root; the instance is
# not retained here — presumably it registers itself on construction (TODO confirm)
SendMsg(BASEURL)
def mixin_dict():
    """Refresh the global keyword dict from LoadData once a minute, forever."""
    global d
    minutes = 0
    while True:
        # One refresh per minute; report progress once per hour
        minutes = minutes + 1
        if minutes % 60 == 0:
            print('%s hours pass' % (minutes / 60))
        d = {**ld.load_search_info()}
        time.sleep(60)
# Background thread that keeps the keyword dictionary fresh
t1 = threading.Thread(target=mixin_dict, name='loop')
t1.start()
# Docker deployment configuration:
HOST = '172.18.0.1'
PORT = 12399
# This is the port on which this Python service receives the events CoolQ
# forwards when a QQ message arrives.
# Inside the CoolQ docker container it is set in
# */coolq/app/io.github.richardchien.coolqhttpapi/config/<qq-number>.ini
# A container cannot reach the host via 127.0.0.1, so run `ip addr show docker0`
# to find the host address as seen from containers (here 172.18.0.1) and set
# post_url = http://172.18.0.1:<port> in that ini file.
# HOST/PORT below must match that address and port.
bot.run(host=HOST, port=PORT)
| 4,255 | 2,202 |
from setuptools import setup
if __name__ == '__main__':
    # Expose the `pystart` console command, dispatching to pystart.cmd:entry_point
    console_scripts = ["pystart = pystart.cmd:entry_point"]
    setup(entry_points=dict(console_scripts=console_scripts))
| 179 | 59 |
"""Sphinx configuration."""
from datetime import datetime
project = "iterage"
author = "Richard Liebscher"
copyright = f"{datetime.now().year}, {author}"
extensions = [
"sphinx.ext.autodoc",
"sphinx_autodoc_typehints",
]
| 231 | 83 |
from inspect import signature
from functools import wraps
def typeassert(*ty_args, **ty_kwargs):
    """Decorator factory enforcing isinstance() checks on selected arguments.

    Expected types are given positionally and/or by keyword and are bound to
    the decorated function's parameter names via its signature.  When Python
    runs in optimized mode (-O), the function is returned unchanged so the
    checks cost nothing.
    """
    def decorate(func):
        if not __debug__:
            return func
        sig = signature(func)
        expected = sig.bind_partial(*ty_args, **ty_kwargs).arguments

        @wraps(func)
        def wrapper(*args, **kwargs):
            for name, value in sig.bind(*args, **kwargs).arguments.items():
                wanted = expected.get(name)
                if wanted is not None and not isinstance(value, wanted):
                    raise TypeError(
                        'Argument {} must be {}'.format(name, wanted)
                    )
            return func(*args, **kwargs)
        return wrapper
    return decorate
# Examples
@typeassert(int, int)
def add(x, y):
    """Return x + y; both arguments must be ints (enforced by typeassert)."""
    return x + y


@typeassert(int, z=int)
def spam(x, y, z=42):
    """Print its arguments; x and z must be ints, y is unchecked."""
    print(x, y, z)
if __name__ == '__main__':
    # Demonstrate the decorator: valid calls succeed, bad types raise TypeError
    print(add(2, 3))
    try:
        add(2, 'hello')
    except TypeError as e:
        print(e)
    spam(1, 2, 3)
    spam(1, 'hello', 3)  # y is unchecked, so a str is accepted here
    try:
        spam(1, 'hello', 'world')  # z must be int -> TypeError
    except TypeError as e:
        print(e)
| 1,359 | 408 |
###################################################################################
#
# Copyright (C) 2017 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
import os
import base64
import logging
from odoo.exceptions import AccessError, ValidationError
from odoo.addons.muk_utils.tests.common import multi_users
from odoo.addons.muk_dms.tests.common import setup_data_function
from odoo.addons.muk_dms.tests.test_file import FileTestCase
_path = os.path.dirname(os.path.dirname(__file__))
_logger = logging.getLogger(__name__)
class FileActionTestCase(FileTestCase):
    """Tests that DMS file actions are offered on the right files."""

    def setUp(self):
        super(FileActionTestCase, self).setUp()
        # sudo'd recordset used as a factory for action records
        self.action = self.env['muk_dms_actions.action'].sudo()

    @multi_users(lambda self: self.multi_users())
    @setup_data_function(setup_func='_setup_test_data')
    def test_available_actions(self):
        """Action availability: unrestricted actions apply everywhere, a
        criteria_directory limits an action to that directory's subtree, and
        is_limited_to_single_file excludes it from multi-file action lists."""
        self.action.create({'name': "Test 01"})
        self.action.create({'name': "Test 02", 'is_limited_to_single_file': True})
        self.action.create({'name': "Test 03", 'criteria_directory': self.new_root_directory.id})
        self.action.create({'name': "Test 04", 'criteria_directory': self.new_sub_directory.id})
        self.assertTrue(len(self.new_file_root_directory.actions) == 3)
        self.assertTrue(len(self.new_file_root_directory.actions_multi) == 2)
        self.assertTrue(len(self.new_file_sub_directory.actions) == 4)
        self.assertTrue(len(self.new_file_sub_directory.actions_multi) == 3)
| 2,230 | 696 |
# Mock method to populate data to a Shopify store
# This is used only for development purpose
import io
import requests
import random
from typing import List
from time import sleep

import shopify
from faker import Faker

# SECURITY: a live-looking Shopify access token is hard-coded below.
# Never commit real credentials; load them from the environment instead.
ACCESS_TOKEN = "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"  # Temporary use only
API_VERSION = "2020-10"
SHOP_URL = "fromairstore.myshopify.com"
class Populate:
    """Development helper that fills a Shopify store with fake data."""

    def __init__(self, access_token: str, shop_url: str, api_version: str):
        """
        Initialize a populate object and activate a Shopify API session.

        Args:
            access_token (str): shopify API access token
            shop_url (str): shopify shop URL
            api_version (str): shopify API version
        """
        self.token = access_token
        self.shop_url = shop_url
        self.api_version = api_version
        random.seed(42)  # deterministic random choices across runs
        session = shopify.Session(shop_url, api_version, access_token)
        shopify.ShopifyResource.activate_session(session)
        # Lazily-populated caches of store contents (None = not fetched yet)
        self.existing_customers = None
        self.existing_products = None

    def get_customers(self) -> List:
        """Return (and cache) the store's customers.

        Bug fix: the cache test is now ``is None`` — with the previous
        truthiness test an empty customer list triggered a refetch on
        every call.
        """
        if self.existing_customers is None:
            self.existing_customers = shopify.Customer.find()
        return self.existing_customers

    def get_products(self) -> List:
        """Return (and cache) the store's products (same ``is None`` fix)."""
        if self.existing_products is None:
            self.existing_products = shopify.Product.find()
        return self.existing_products

    def generate_customer(self):
        """Add one customer with a random fake name and derived email."""
        fake = Faker()
        names = fake.name().split(' ')
        customer = shopify.Customer()
        customer.first_name = names[0]
        # NOTE(review): Faker names may contain honorifics/suffixes, so
        # names[1] can be a middle token rather than the surname — confirm.
        customer.last_name = names[1]
        customer.email = "{0}{1}@gmail.com".format(names[0], names[1])
        customer.save()

    def generate_products(self):
        """Generate fake products (not implemented yet).

        Bug fix: the original body was the bare expression ``NotImplemented``,
        a silent no-op; raising NotImplementedError makes the stub explicit.
        """
        raise NotImplementedError("generate_products is not implemented yet")

    def generate_order(self):
        """Create one fulfilled order pairing a random customer and product."""
        customer = random.choice(self.get_customers())
        product = random.choice(self.get_products())
        order = shopify.Order()
        order.customer = {
            "first_name": customer.first_name,
            "last_name": customer.last_name,
            "email": customer.email
        }
        order.fulfillment_status = "fulfilled"
        order.line_items = [
            {
                "title": product.title,
                "quantity": 1,
                "price": product.price_range()
            }
        ]
        order.save()
if __name__ == "__main__":
populator = Populate(access_token=ACCESS_TOKEN, shop_url=SHOP_URL, api_version=API_VERSION)
# generate 5 fake customers
for _ in range(5):
populator.generate_customer()
sleep(0.5)
# generate 10 fake orders with random customer and product
for _ in range(5):
populator.generate_order()
sleep(1)
| 2,500 | 944 |
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from logging import DEBUG, ERROR, FATAL, INFO, WARN
import pigpio
from VNH5019_driver import VNH5019 as MOTOR
if __name__ == "__main__":
count = 0.0
one_count = 360 * 4 / (64 * 50)
pi = pigpio.pi()
motor0 = MOTOR(
pi,
driver_out1=20,
driver_out2=21,
encoder_in1=5,
encoder_in2=6,
pwm_channel=0,
gear_ratio=150,
logging_level=WARN)
motor1 = MOTOR(
pi,
driver_out1=23,
driver_out2=24,
encoder_in1=27,
encoder_in2=22,
pwm_channel=1,
gear_ratio=50,
logging_level=WARN)
time.sleep(3)
motor0.rotate_motor(pwm_duty_cycle=500, rotation_angle=180)
motor1.rotate_motor(pwm_duty_cycle=500, rotation_angle=180)
#motor0.drive(pwm_duty_cycle=4095)
#motor1.drive(pwm_duty_cycle=4095)
time.sleep(3)
print("-" * 10)
print(motor0.get_current_angle())
print(motor1.get_current_angle())
# メモ I制御を導入して平滑化したい、ゲインにスピードの逆数かけるのやめたい
| 1,067 | 517 |
from idm_lp.commands.members_manager import ignored
from idm_lp.commands.members_manager import ignored_global
from idm_lp.commands.members_manager import muted
from idm_lp.commands.members_manager import trusted
# Tuple of user-command handlers ("blueprint") collected from the
# members_manager package, in registration order.
users_bp = (
    ignored.user,
    ignored_global.user,
    muted.user,
    trusted.user,
)
| 306 | 98 |
from .stock_prediction_logic.analyse import start_analysis
from home.models import stock
def updateStocks():
    """Overwrite the AAPL stock row with placeholder trend/rank and a timestamp.

    Development helper: the real analysis pipeline (start_analysis) is
    currently disabled, so fixed values are written instead.
    """
    import datetime  # kept function-scoped, as in the original mid-body import

    # start_analysis()  # full analysis pipeline, intentionally disabled
    obj = stock.objects.get(stockSymbol="AAPL")
    obj.macd_trend = 'be'  # presumably 'be' = bearish — confirm encoding
    obj.rank = 4  # was the redundant int(4)
    # NOTE(review): volume stores a timestamp string here — confirm intent
    obj.volume = str(datetime.datetime.now())
    obj.save()
| 326 | 109 |
import tkinter as tk
def btn_press():
    """Callback wired to the demo button; just reports the click."""
    pressed_msg = 'Button was pressed'
    print(pressed_msg)
root = tk.Tk()
root.geometry('150x80')  # fixed 150x80-pixel window
# Button face is the built-in 'question' bitmap; parent defaults to root
bt = tk.Button(bitmap='question', command=btn_press)
bt.pack()
root.mainloop()
| 187 | 74 |
from torch.utils.data.dataloader import DataLoader
from dataloader import ConvAIDataset
from utils import combine_contexts
from vocab.text import BPEVocab
# Maximum token length of one training sequence
max_seq_len = 512
# Training data file — presumably ConvAI2 "self persona" dialogues (revised,
# no candidate answers); confirm against the dataset docs
train_data = 'data/train_self_revised_no_cands.txt'
# Byte-pair-encoding vocabulary and merge-codes files
bpe_vocab_path = 'vocab/bpe.vocab'
bpe_codes_path = 'vocab/bpe.code'
# DataLoader settings; combine_contexts collates samples into batches
params = {'batch_size': 64, 'shuffle': True, 'num_workers': 2, 'collate_fn': combine_contexts}
if __name__ == '__main__':
    vocab = BPEVocab.from_files(bpe_vocab_path, bpe_codes_path)
    dataset = ConvAIDataset(filename=train_data,
                            max_seq_len=max_seq_len,
                            bpe_vocab=vocab)
    dataloader = DataLoader(dataset, **params)
    # Smoke test: print only the first batch, then stop immediately
    for i, (contexts, targets) in enumerate(dataloader):
        print(i, contexts, targets)
        exit(0)
| 790 | 306 |
"""
Leetcode No: 102
Title: Binary Tree Level Order Traversal
Description:
Given the root of a binary tree, return the level order traversal of its
nodes' values. (i.e., from left to right, level by level).
Example 1:
Input: root = [3,9,20,null,null,15,7]
Output: [[3],[9,20],[15,7]]
Example 2:
Input: root = [1]
Output: [[1]]
Example 3:
Input: root = []
Output: []
"""
from typing import Optional, List
from collections import deque
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def level_order(self, root: Optional[TreeNode]) -> List[List[int]]:
        """Return node values level by level, left to right (BFS).

        Bug fix: the original returned ``root`` (i.e. ``None``) for an empty
        tree; the stated contract (module docstring, Example 3) requires ``[]``.
        """
        if root is None:
            return []
        level_order_traversal = []
        tree_queue = deque([root])
        while tree_queue:
            tree_levels = []
            # One pass over the nodes currently queued == one tree level
            for _ in range(len(tree_queue)):
                current_node = tree_queue.popleft()
                # Only non-None nodes are ever enqueued, so no None check needed
                tree_levels.append(current_node.val)
                if current_node.left:
                    tree_queue.append(current_node.left)
                if current_node.right:
                    tree_queue.append(current_node.right)
            level_order_traversal.append(tree_levels)
        return level_order_traversal
| 1,415 | 445 |
#!/usr/bin/env python
#A script to test threading concepts/implementations
from __future__ import print_function
import threading,time,sys
#The following used from past bad experience with multithreading in Python
def _cleanup():
    """Placeholder cleanup hook; intentionally does nothing."""
    pass
#Define the "thread" class, of which each thread is a instance
#Define the "thread" class, of which each thread is a instance
class TestThread(threading.Thread):
    """Thread that instantiates a Test() and runs it on start()."""

    def __init__(self, Name, Cpu, **kwargs):
        # Name and Cpu stand in for whatever options Test() needs
        self.Name = Name
        self.Cpu = Cpu
        self.kwargs = kwargs
        threading.Thread.__init__(self)

    def run(self):
        """Executed at start(): build the Test instance and launch it."""
        self.Test = Test(self.Name, self.Cpu, **(self.kwargs))
        # Launch it!
        self.Test.runTest()
        return
#Define the class Test, of which each test (executed in a thread or not) is an instance
#Define the class Test, of which each test (executed in a thread or not) is an instance
class Test:
    """A runnable 'test' carrying a Name and Cpu, usable with or without a thread."""

    def __init__(self, Name="N/A", Cpu="N/A", **kwargs):
        self.Name = Name
        self.Cpu = Cpu
        self.kwargs = kwargs
        # Timestamps collected while the test runs (silly demo payload)
        self.Times = []
        print("Initializing Test() instance, value of Name is %s and valud of Cpu is %s"%(self.Name,self.Cpu))

    def getName(self):
        """Return the Name passed at construction."""
        return self.Name

    def getCpu(self):
        """Return the Cpu passed at construction."""
        return self.Cpu

    def runTest(self):
        """Record one timestamp per second for 10 s, then forward kwargs to test()."""
        print("I am thread Test and I was invoked with arguments Name %s, Cpu %s and optional keyword arguments %s"%(self.Name,self.Cpu,self.kwargs))
        self.time = 0
        while self.time < 10:
            self.Times.append(time.ctime())
            time.sleep(1)
            self.time += 1
        print(self.Times)
        if self.kwargs:
            # Exercise keyword-argument forwarding into the module-level test()
            print("Testing keyword arguments handling with function invocation")
            test(**(self.kwargs))
        return
# Test function for keyword-argument fun
ahi = "AHI!"


def test(cpu='N/A', perfsuitedir=ahi, IgProfEvents='N/A', IgProfCandles='N/A',
         cmsdriverOptions='N/A', stepOptions='N/A', string="IgProf",
         profilers='N/A', bypasshlt='N/A', userInputFile='N/A'):
    """Print a few of its keyword arguments; used to exercise **kwargs passing."""
    print(cpu)
    print(perfsuitedir)
    print(userInputFile)
#Playing with classes for variable scope tests:
class Pippo:
    """Toy class for variable-scope experiments with a nested helper class."""

    def __init__(self):
        self.a = 0
        self.b = 1

    def test1(self, d):
        # Echo whatever the nested class hands back
        print(d)

    def test2(self):
        # Build the nested class with a back-reference to this instance
        self.e = self.Pluto(self)
        self.e.testscope()

    class Pluto:
        """Nested class that calls back into its 'mother' Pippo instance."""

        def __init__(self, mother):
            self.Me = "Pluto"
            self.mother = mother

        def testscope(self):
            self.mother.test1(self.Me)
def main():
    """Drive a fixed pool of 4 'cores', starting one TestThread per free core.

    Bug fix: ``Thread.isAlive()`` was removed in Python 3.9; ``is_alive()``
    (available since 2.6, so still Python-2 compatible with the file's
    ``__future__`` import) is used instead.
    """
    # First set that all 4 cores are available:
    Available = ['0', '1', '2', '3']
    # Work queue: each entry becomes the argument of one TestThread
    TestToDo = ['Pippo', 'Pluto', 'Paperino', 'Minnie', 'Qui', 'Quo', 'Qua', 'Zio Paperone', 'Banda Bassotti', 'Archimede', 'Topolino']
    # cpu -> currently-running thread
    activeThreads = {}
    while True:
        print("Main while loop:")
        print(Available)
        print(TestToDo)
        # Logic based on checking for TestToDo first:
        if TestToDo:
            print("Still folllowing %s tests to do:"%len(TestToDo))
            print(TestToDo)
            # Test available cores:
            if Available:
                print("Hey there is at least one core available!")
                print(Available)
                cpu = Available.pop()
                print("Let's use core %s"%cpu)
                threadArgument = TestToDo.pop()
                print("Let's submit job %s on core %s"%(threadArgument, cpu))
                print("Instantiating thread")
                print("Testing the keyword arguments with:")
                kwargs = {'cpu': 3, 'perfsuitedir': "work", 'userInputFile': 'TTBAR_GEN,FASTSIM.root'}
                print(kwargs)
                threadToDo = TestThread(threadArgument, cpu, **kwargs)
                print("Starting thread %s"%threadToDo)
                threadToDo.start()
                print("Appending thread %s to the list of active threads"%threadToDo)
                activeThreads[cpu] = threadToDo
            else:
                # No core free: fall through to the liveness check below
                pass
        # Reclaim cores whose thread has finished:
        for cpu in activeThreads.keys():
            if activeThreads[cpu].is_alive():
                pass
            elif cpu not in Available:
                print("About to append cpu %s to Available list"%cpu)
                Available.append(cpu)
        # Done when every core is free again and the queue is empty
        if set(Available) == set(['0', '1', '2', '3']) and not TestToDo:
            break
        else:
            print("Sleeping and checking again...")
            time.sleep(1)
    # Check we broke out of the infinite loop!
    print("WHEW! We're done... all TestToDo are done...")
    print(Available)
    print(TestToDo)
#Next: check scenarios
#1-many more TestToDo than Available cores
#Test 1 done successfully.
#2-complicated Test() class that calls other functions with args
#3-What happens on the machine with top
#4-What if they get killed or hang?
| 5,785 | 1,676 |
import os
from unittest import TestCase
from unittest.mock import patch, call
from flask import Flask
from flask_pypendency import Pypendency
class TestLoader(TestCase):
    """Checks that Pypendency loads DI definitions from the expected folders."""

    def setUp(self) -> None:
        # Folder containing this test file; fixture paths are built from it
        self.test_folder = os.path.dirname(os.path.abspath(__file__))

    @patch("flask_pypendency.YamlLoader")
    @patch("flask_pypendency.PyLoader")
    def test_loader_default_values(self, py_loader, yaml_loader):
        """Without config, both loaders scan every autodiscovered
        `_dependency_injection` folder."""
        app = Flask(__name__)
        Pypendency(app)
        py_loader.return_value.load_dir.assert_has_calls([
            call(f"{self.test_folder}/resources/test_loader/autodiscover1/_dependency_injection"),
            call(f"{self.test_folder}/resources/test_loader/autodiscover2/_dependency_injection"),
        ])
        yaml_loader.return_value.load_dir.assert_has_calls([
            call(f"{self.test_folder}/resources/test_loader/autodiscover1/_dependency_injection"),
            call(f"{self.test_folder}/resources/test_loader/autodiscover2/_dependency_injection"),
        ])

    @patch("flask_pypendency.YamlLoader")
    @patch("flask_pypendency.PyLoader")
    def test_loader_configured_di_folder(self, py_loader, yaml_loader):
        """
        Specifying the folder's name loads different routes
        """
        app = Flask(__name__)
        app.config.from_mapping(
            PYPENDENCY_DI_FOLDER_NAME="_di_folder1",
        )
        Pypendency(app)
        py_loader.return_value.load_dir.assert_has_calls([
            call(f"{self.test_folder}/resources/test_loader/autodiscover1/_di_folder1"),
        ])
        yaml_loader.return_value.load_dir.assert_has_calls([
            call(f"{self.test_folder}/resources/test_loader/autodiscover1/_di_folder1"),
        ])

    @patch("flask_pypendency.YamlLoader")
    @patch("flask_pypendency.PyLoader")
    def test_loader_configured_di_discover_paths(self, py_loader, yaml_loader):
        """
        Specifying explicit discover paths restricts loading to those paths
        """
        app = Flask(__name__)
        app.config.from_mapping(
            PYPENDENCY_DISCOVER_PATHS=[f"{self.test_folder}/resources/test_loader/autodiscover2"]
        )
        Pypendency(app)
        py_loader.return_value.load_dir.assert_has_calls([
            call(f"{self.test_folder}/resources/test_loader/autodiscover2/_dependency_injection"),
        ])
        yaml_loader.return_value.load_dir.assert_has_calls([
            call(f"{self.test_folder}/resources/test_loader/autodiscover2/_dependency_injection"),
        ])
| 2,512 | 829 |
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev"
__date__ = "16 Feb 2019"
# !!! SEE CODERULES.TXT !!!
import os
from silx.gui import qt
from silx.gui import plot as splot
from ..core import singletons as csi
class Plot1D(splot.PlotWindow):
    """1-D curve plot window; clicking a curve selects its item in the model."""

    def __init__(self, parent=None, backend=None, position=True):
        super(Plot1D, self).__init__(parent=parent, backend=backend,
                                     resetzoom=True, autoScale=True,
                                     logScale=True, grid=True,
                                     curveStyle=True, colormap=False,
                                     aspectRatio=False, yInverted=False,
                                     copy=True, save=True, print_=True,
                                     control=True, position=position,
                                     roi=False, mask=False, fit=False)
        if parent is None:
            self.setWindowTitle('Plot1D')
        # Keep the fit action in sync with zoom and active-curve changes
        action = self.getFitAction()
        action.setXRangeUpdatedOnZoom(True)
        action.setFittedItemUpdatedFromActiveCurve(True)

    def graphCallback(self, ddict=None):
        """This callback is going to receive all the events from the plot.

        Bug fix: the original indexed ``ddict['event']`` right after
        defaulting ``ddict`` to ``{}``, so a call with no argument (or an
        event dict missing 'event'/'button') raised KeyError; ``dict.get``
        makes those cases a clean no-op.
        """
        if ddict is None:
            ddict = {}
        if ddict.get('event') in ["legendClicked", "curveClicked"]:
            if ddict.get('button') == "left":
                self.activateCurve(ddict['label'])
                qt.QToolTip.showText(self.cursor().pos(), ddict['label'])

    def activateCurve(self, label):
        """Select the loaded item whose alias equals *label* minus its extension."""
        alias = os.path.splitext(label)[0]
        for item in csi.allLoadedItems:
            if item.alias == alias:
                break
        else:
            return  # no matching item: nothing to select
        index = csi.model.indexFromItem(item)
        csi.selectionModel.setCurrentIndex(
            index, qt.QItemSelectionModel.ClearAndSelect |
            qt.QItemSelectionModel.Rows)
class Plot2D(splot.Plot2D):
    """2-D image plot; the silx default behaviour is used unchanged."""
    pass
class Plot3D(splot.StackView):
    """Stack (3-D) view with custom cursor position/value readouts."""

    # (label, callback) pairs; None is callback fn set after instantiation
    posInfo = [
        ('Position', None),
        ('Value', None)]

    def setCustomPosInfo(self):
        """Rebind the position widget's two fields to this class's callbacks."""
        p = self._plot._positionWidget._fields[0]
        self._plot._positionWidget._fields[0] = (p[0], p[1], self._imagePos)
        p = self._plot._positionWidget._fields[1]
        self._plot._positionWidget._fields[1] = (p[0], p[1], self._imageVal)

    def _imageVal(self, x, y):
        """Pixel value under the cursor; '' outside the image, '-' if no image."""
        activeImage = self.getActiveImage()
        if activeImage is not None:
            data = activeImage.getData()
            height, width = data.shape
            x = int(x)
            y = int(y)
            return data[y][x] if 0 <= x < width and 0 <= y < height else ''
        return '-'

    def _imagePos(self, x, y):
        """3-D indices under the cursor for the current slicing perspective."""
        img_idx = self._browser.value()
        if self._perspective == 0:
            dim0, dim1, dim2 = img_idx, int(y), int(x)
        elif self._perspective == 1:
            dim0, dim1, dim2 = int(y), img_idx, int(x)
        elif self._perspective == 2:
            dim0, dim1, dim2 = int(y), int(x), img_idx
        # NOTE(review): assumes _perspective is always 0/1/2; any other value
        # leaves dim0..dim2 unbound (NameError) — confirm the invariant.
        return '{0}, {1}, {2}'.format(dim0, dim1, dim2)
| 3,301 | 999 |
def solveSudoku(board: list) -> None:
    """Solve the sudoku *board* in place.

    board is a 9x9 list of lists of one-character strings, '.' marking an
    empty cell (LeetCode 37 interface).  Delegates to the backtracking
    solver; returns None — the solution is written into *board*.
    """
    # Fixed annotation: the original used 'list()' (a call producing an
    # empty-list *instance*) as the annotation instead of the 'list' type.
    solver(board)
def solver(board) -> bool:
    """Backtracking worker: fill '.' cells; return True when solved.

    Mutates *board* in place.  For the first empty cell found, tries digits
    '1'..'9'; recurses on each valid candidate and undoes the placement when
    the recursion fails.  Returns False when no candidate fits (triggering
    backtracking in the caller) and True when no empty cell remains.
    """
    for i in range(9):
        for j in range(9):
            if board[i][j] == ".":
                # Removed the leftover debug 'print(board)' that ran for
                # every candidate and dominated the runtime.
                for count in range(1, 10):
                    if isValidSudoku(board, str(count), i, j):
                        board[i][j] = str(count)
                        if solver(board):
                            return True
                        else:
                            # Undo and try the next candidate.
                            board[i][j] = "."
                # No digit fits this cell: backtrack.
                return False
    return True
def isValidSudoku(board, nstr, row, col) -> bool:
    """Return True if digit *nstr* may be placed at (row, col).

    Checks the cell's row, column and 3x3 box for an existing occurrence of
    *nstr*; the board itself is not modified.
    """
    # Row check.
    if nstr in board[row]:
        return False
    # Column check.
    if any(board[r][col] == nstr for r in range(9)):
        return False
    # 3x3 box check.
    box_row = 3 * (row // 3)
    box_col = 3 * (col // 3)
    for r in range(box_row, box_row + 3):
        for c in range(box_col, box_col + 3):
            if board[r][c] == nstr:
                return False
    return True
# Demo board (LeetCode 37 example); '.' marks an empty cell.
valid_s = [
    ["5", "3", ".", ".", "7", ".", ".", ".", "."],
    ["6", ".", ".", "1", "9", "5", ".", ".", "."],
    [".", "9", "8", ".", ".", ".", ".", "6", "."],
    ["8", ".", ".", ".", "6", ".", ".", ".", "3"],
    ["4", ".", ".", "8", ".", "3", ".", ".", "1"],
    ["7", ".", ".", ".", "2", ".", ".", ".", "6"],
    [".", "6", ".", ".", ".", ".", "2", "8", "."],
    [".", ".", ".", "4", "1", "9", ".", ".", "5"],
    [".", ".", ".", ".", "8", ".", ".", "7", "9"],
]
# NOTE(review): solveSudoku returns None, so this prints 'None'; the solved
# board is left in valid_s after the call — print valid_s to see it.
print(solveSudoku(valid_s))
| 1,463 | 532 |
from app import app
import json
import socket
import os.path
# The first and the second test validate json structure
def test_get_status_code():
    """GET /request should answer 200 when the app runs in testing mode."""
    app.config["TESTING"] = True
    with app.test_client() as client:
        assert client.get("/request").status_code == 200
def test_get_status_msg():
    """GET /request should return the expected JSON payload.

    (Removed an unused 'host = socket.gethostname()' local left over from an
    earlier version of this test.)
    """
    app.config["TESTING"] = True
    with app.test_client() as client:
        response = client.get("/request")
        assert response.json == {
            'response': "Respeitem o isolamento social!"
        }
import json
# Alpine versions the images are built for; the default one also gets the
# unversioned '-alpine' tag aliases.
DEFAULT_ALPINE_VERSION = '3.15'
ALPINE_VERSIONS = ['3.12', '3.13', '3.14', '3.15']


def generate_tags(key, version):
    """Print Tags/Architectures/Directory stanzas for one JDK release.

    key is the major version string (e.g. '8', '11'); version is the full
    version string from versions.json.  Emits one stanza to stdout for the
    AL2 JDK image, one per Alpine JDK image, and — for JDK 8 only — one per
    Alpine JRE image.
    """

    def emit(tags, architectures, directory):
        # One stanza: tag list, architectures, directory, blank separator.
        print(f"Tags: {', '.join(tags)}")
        print(f"Architectures: {architectures}")
        print(f"Directory: {directory}\n")

    # JDK 8 uses the '8uNNN' numbering scheme; later majors use 'K.0.NNN'.
    if key == '8':
        update = version.split('.')[1]
        expanded_version = f"{key}u{update}"
    else:
        update = version.split('.')[2]
        expanded_version = f"{key}.0.{update}"

    al2_tags = [
        key,
        expanded_version,
        f"{expanded_version}-al2",
        f"{key}-al2-full",
        f"{key}-al2-jdk",
    ]
    if key == '8':
        al2_tags.append('latest')
    emit(al2_tags, "amd64, arm64v8", f"{key}/jdk/al2")

    for alpine_version in ALPINE_VERSIONS:
        alpine_tags = [
            f"{key}-alpine{alpine_version}",
            f"{expanded_version}-alpine{alpine_version}",
            f"{key}-alpine{alpine_version}-full",
            f"{key}-alpine{alpine_version}-jdk",
        ]
        if alpine_version == DEFAULT_ALPINE_VERSION:
            alpine_tags.extend([
                f"{key}-alpine",
                f"{expanded_version}-alpine",
                f"{key}-alpine-full",
                f"{key}-alpine-jdk",
            ])
        emit(alpine_tags, "amd64", f"{key}/jdk/alpine/{alpine_version}")

        if key == '8':
            alpine_jre_tags = [
                f"{key}-alpine{alpine_version}-jre",
                f"{expanded_version}-alpine{alpine_version}-jre",
            ]
            if alpine_version == DEFAULT_ALPINE_VERSION:
                alpine_jre_tags.extend([
                    f"{key}-alpine-jre",
                    f"{expanded_version}-alpine-jre",
                ])
            emit(alpine_jre_tags, "amd64", f"{key}/jre/alpine/{alpine_version}")
def main():
    """Read versions.json and print tag stanzas for every JDK version."""
    with open('versions.json', 'r') as version_file:
        versions = json.load(version_file)
    # NOTE(review): '.tags' is opened for writing but 'tag_file' is never
    # used — generate_tags prints to stdout, so this only truncates the
    # file.  Presumably the output was meant to go into tag_file; confirm.
    with open('.tags', 'w') as tag_file:
        for key in versions:
            generate_tags(key, versions[key])


if __name__ == "__main__":
    main()
# Connection and notification settings for the wireless-LAN-controller
# monitoring script.  All values below are placeholders to be filled in
# locally before use.
# NOTE(review): credentials should be supplied via environment variables or
# a secrets store rather than committed in source.
ssh_username='Wireless-LAN-controller-USERNAME'
ssh_password='Wireless-LAN-controller-PASSWD'
ssh_ip='10.1.1.1'
# Gmail account used to send notification e-mails.
gmail_user='some_user@gmail.com'
gmail_password='PASSWD'
# Domain suffix and recipient list for outgoing notifications.
emdomain='@your_domain.com'
emailAddrs=['IT-team@your_domain.com','Wifi-Admin@your_domain.com']
| 265 | 108 |
# Standard library imports.
import ast
import getopt
import logging
import pprint
import re
import sys
# External imports.
import docx
import pympi
import pyramid.paster as paster
# Project imports.
from lingvodoc.models import (
DBSession,
Dictionary,
)
# Setting up logging, if we are not being run as a script.
if __name__ != '__main__':
    # Module-level logger used throughout this module; when run as a script,
    # 'log' is (re)configured in the __main__ block at the bottom instead.
    log = logging.getLogger(__name__)
    log.debug('module init')
def levenshtein(
    snippet_str,
    snippet_index,
    word_str,
    __debug_levenshtein_flag__ = False):
    """
    Matches word string to the snippet string via adjusted Levenshtein matching, with no penalties for
    snippet string skipping before and after match.

    Returns a (distance, begin_index, end_index) tuple where begin/end index
    the matched span of snippet_str relative to snippet_index.
    """

    # d maps (snippet offset, word offset) -> (edit distance, index in the
    # snippet where the current match begins); 1e256 means "no begin yet".
    d = {(0, j): (j, 1e256)
        for j in range(len(word_str) + 1)}

    # Skipping snippet characters before the match is free.
    for i in range(len(snippet_str) - snippet_index):
        d[(i + 1, 0)] = (0, 1e256)

    minimum_distance = len(word_str)
    minimum_begin_index = 0
    minimum_end_index = 0

    for i in range(1, len(snippet_str) - snippet_index + 1):

        if __debug_levenshtein_flag__:
            log.debug(
                'd[{0}, 0]: {1}'.format(i, d[(i, 0)]))

        for j in range(1, len(word_str) + 1):

            # Matching current characters of the word and snippet strings.
            s_distance, s_begin_index = d[i - 1, j - 1]
            substitution_value = s_distance + (
                0 if snippet_str[snippet_index + i - 1] == word_str[j - 1] else 1)
            substitution_index = min(s_begin_index, i - 1)

            # Skipping current character from the snippet string; free once
            # the whole word has already been matched (skipping after match).
            d_distance, d_begin_index = d[i - 1, j]
            deletion_value = d_distance + (
                1 if j < len(word_str) else 0)
            deletion_index = d_begin_index

            # Skipping current character from the word string.
            i_distance, i_begin_index = d[i, j - 1]
            insertion_value = i_distance + 1
            insertion_index = i_begin_index

            # Getting minimum.
            minimum_value = min(
                substitution_value,
                deletion_value,
                insertion_value)

            if minimum_value == deletion_value:
                operation_index = 1
                minimum_index = deletion_index
            elif minimum_value == insertion_value:
                operation_index = 2
                minimum_index = insertion_index
            else:
                operation_index = 0
                minimum_index = substitution_index

            d[(i, j)] = (minimum_value, minimum_index)

            # Showing edit distance computation details.
            if __debug_levenshtein_flag__:
                log.debug(
                    '\nd[{0}, {1}] (\'{18}\' & \'{14}\'): {4}'
                    '\n d[{5}, {6}] (\'{2}\' & \'{3}\'): {9} + {10}{11} (\'{12}\', \'{13}\')'
                    '\n d[{5}, {1}] (\'{2}\' & \'{14}\'): {15} + {16}{17}'
                    '\n d[{0}, {6}] (\'{18}\' & \'{3}\'): {19} + 1{20}'.format(
                        i, j,
                        snippet_str[snippet_index : snippet_index + i - 1] + '|' +
                        snippet_str[snippet_index + i - 1],
                        word_str[: j - 1] + '|' + word_str[j - 1],
                        d[(i, j)][0],
                        i - 1, j - 1,
                        snippet_str[snippet_index : snippet_index + i - 1] + '|',
                        word_str[: j - 1] + '|',
                        d[(i - 1, j - 1)][0],
                        0 if snippet_str[snippet_index + i - 1] == word_str[j - 1] else 1,
                        '*' if operation_index == 0 else '',
                        snippet_str[snippet_index + i - 1],
                        word_str[j - 1],
                        word_str[:j] + '|',
                        d[(i - 1, j)][0],
                        1 if j < len(word_str) else 0,
                        '*' if operation_index == 1 else '',
                        snippet_str[snippet_index : snippet_index + i] + '|',
                        d[(i, j - 1)][0],
                        '*' if operation_index == 2 else ''))

        # Checking if we have a new best matching.
        if d[i, len(word_str)][0] < minimum_distance:
            minimum_distance, minimum_begin_index = d[i, len(word_str)]
            minimum_end_index = i

        # Perfect match: no better result is possible.
        if minimum_distance == 0:
            break

    return (
        minimum_distance,
        minimum_begin_index,
        minimum_end_index)
def prepare_match_string(cell_str):
    """
    Processes string for matching, finding and marking portions in parentheses to be considered as
    optional during matching.

    Returns a list of (character, optional) pairs with all non-word
    characters stripped; characters inside parenthesized groups are flagged
    True.
    """
    marked = []

    def push(text, optional):
        # Strip non-word characters (this also removes the parentheses
        # themselves) and tag the remaining characters.
        for ch in re.sub(r'\W+', '', text):
            marked.append((ch, optional))

    cursor = 0
    for match in re.finditer(r'\([^()]*?\)', cell_str):
        push(cell_str[cursor:match.start()], False)
        push(match.group(0), True)
        cursor = match.end()
    push(cell_str[cursor:], False)

    return marked
def format_match_string(marked_chr_list):
    """
    Formats list of marked characters as a string.

    Runs of optional characters are wrapped in parentheses, e.g.
    [('a', False), ('b', True), ('c', True)] -> 'a(bc)'.
    """
    pieces = []
    inside_group = False
    for ch, optional in marked_chr_list:
        if optional and not inside_group:
            pieces.append('(')
        elif inside_group and not optional:
            pieces.append(')')
        pieces.append(ch)
        inside_group = optional
    # Close a trailing optional group.
    if inside_group:
        pieces.append(')')
    return ''.join(pieces)
class State(object):
    """
    State of snippet table parsing.

    Tracks one hypothesis of the beam search: the snippet string currently
    being matched, the word rows assigned to it so far, the running row of
    the fuzzy-alignment dynamic programming table, and a linked chain of
    previously completed snippets.
    """

    def __init__(self, snippet_str, cell_list, row_index):
        """
        Initialization with the contents of the first snippet string.
        """
        # Number of snippets completed before this one and a linked chain of
        # their ((rows, row index), previous-link) records.
        self.snippet_count = 0
        self.snippet_chain = None
        # snippet_str / word_str are lists of (character, optional) pairs as
        # produced by prepare_match_string(), despite the '_str' names.
        self.snippet_str = snippet_str
        self.row_index = row_index
        self.row_list = [cell_list]
        # d0 / d1: previous / current DP rows; 0.999 per skipped snippet
        # character keeps skips slightly cheaper than full mismatches.
        self.d0 = []
        self.d1 = [0.999 * i for i in range(len(self.snippet_str) + 1)]
        self.word_list = []
        self.word_str = []
        # total_value: accumulated cost of completed snippets;
        # snippet_value: current cost of the snippet being matched.
        self.total_value = 0
        self.snippet_value = 0

    def process_row(
        self,
        row_str,
        cell_list,
        row_index,
        __debug_flag__ = False):
        """
        Processing another data string, splitting into a state when it's a word string and another state
        when it's a new snippet string.

        A generator: yields a fresh state (new snippet hypothesis) when
        row_str is non-empty, then yields self updated as if the row were a
        word row.  Note that self is mutated in place.
        """

        # First, assuming that this data string is the next snippet string.

        if row_str:

            copy = State(row_str, cell_list, row_index)

            copy.snippet_chain = (
                (tuple(self.row_list), self.row_index),
                self.snippet_chain)

            copy.snippet_count = self.snippet_count + 1
            copy.total_value = self.total_value + self.d1[-1]

            yield copy

        # Second, assuming that this data string is a word string.

        len_prev = len(self.word_str)

        self.word_list.append(row_str)
        self.word_str += row_str
        self.row_list.append(cell_list)

        # Updating Levenshtein alignment of snippet words to the snippet string.

        for i in range(len(row_str)):

            self.d0 = self.d1
            self.d1 = [len_prev + i + 1]

            for j in range(len(self.snippet_str)):

                # Matching current characters of the snippet string and the word string.

                s_cost = 0 if self.snippet_str[j][0] == row_str[i][0] else 1

                # Mismatches involving optional (parenthesized) characters
                # are almost free.
                if s_cost and (self.snippet_str[j][1] or row_str[i][1]):
                    s_cost = 0.001

                s_value = self.d0[j] + s_cost

                # Skipping current character either from the snippet string or from the word string.

                d_value = self.d1[j] + (0.000999 if self.snippet_str[j][1] else 0.999)
                i_value = self.d0[j + 1] + (0.001 if row_str[i][1] else 1)

                self.d1.append(min(s_value, d_value, i_value))

                # Showing debug info, if required.

                if __debug_flag__:

                    log.debug((
                        format_match_string(self.snippet_str[:j]),
                        format_match_string(self.word_str[:len_prev + i]),
                        self.d0[j],
                        self.snippet_str[j][0],
                        row_str[i][0],
                        round(s_value, 6)))

                    log.debug((
                        format_match_string(self.snippet_str[:j]),
                        format_match_string(self.word_str[:len_prev + i + 1]),
                        self.d1[j],
                        self.snippet_str[j][0],
                        round(d_value, 6)))

                    log.debug((
                        format_match_string(self.snippet_str[:j + 1]),
                        format_match_string(self.word_str[:len_prev + i]),
                        self.d0[j + 1],
                        row_str[i][0],
                        round(i_value, 6)))

                    log.debug((
                        format_match_string(self.snippet_str[:j + 1]),
                        format_match_string(self.word_str[:len_prev + i + 1]),
                        round(min(s_value, d_value, i_value), 6)))

                    log.debug(self.d1)

        # Updating alignment value.

        if len(self.word_str) <= 0:
            self.snippet_value = 0

        elif len(self.word_str) > len(self.snippet_str):
            self.snippet_value = self.d1[-1]

        else:
            # Best completion cost over plausible match end positions.
            self.snippet_value = min(
                self.d1[len(self.word_str) : 2 * len(self.word_str)])

        yield self
def beam_search_step(
    state_list,
    cell_str,
    cell_list,
    row_index,
    beam_width,
    __debug_beam_flag__ = False):
    """
    Another step of alignment beam search.

    Extends every current state with the new row (both as a word row and as
    the start of a new snippet), prunes dominated states per snippet-start
    index, and returns at most beam_width best states, best first.
    """

    if not state_list:

        # No states yet: the first row starts the first snippet.
        return [State(
            cell_str, cell_list, row_index)]

    # Sorting parsing states by the snippet they are parsing.

    state_dict = {}

    for state in state_list:
        for state_after in state.process_row(
            cell_str, cell_list, row_index):

            index = state_after.row_index

            # Leaving only states with the best snippet histories.

            if (index not in state_dict or
                state_after.total_value < state_dict[index][0]):
                state_dict[index] = (state_after.total_value, [state_after])

            elif state_after.total_value == state_dict[index][0]:
                # Keep ties: they may diverge later.
                state_dict[index][1].append(state_after)

    state_list = []

    for value, state_after_list in state_dict.values():
        state_list.extend(state_after_list)

    # Showing snippet alignment beam search state, if required.

    if __debug_beam_flag__:

        log.debug('\n' +
            pprint.pformat([(
                round(state.total_value + state.snippet_value, 6),
                state.snippet_count,
                format_match_string(state.snippet_str),
                '|'.join(
                    format_match_string(word_str)
                    for word_str in state.word_list))
                for state in state_list],
                width = 384))

    # Leaving only a number of best states.

    state_list.sort(key = lambda state:
        (state.total_value + state.snippet_value, state.snippet_count))

    return state_list[:beam_width]
def parse_table(
    row_list,
    limit = None,
    __debug_beam_flag__ = False):
    """
    Tries to parse snippet data represented as a table.

    row_list: list of rows, each a list of cell strings; the first row is
    the header.  limit, when set, caps the number of data rows processed.
    Returns the final beam search state list, best state first.
    """

    # Removing any snippet alignment marks, if we have any.

    for cell_list in row_list:
        for i in range(len(cell_list)):

            match = re.match(r'\(__\d+__\)\s*', cell_list[i])

            if match:
                cell_list[i] = cell_list[i][match.end():]

    state_list = []
    beam_width = 32

    # Going through snippet data.

    for row_index, cell_list in enumerate(row_list[1:], 1):

        if limit and row_index > limit:
            break

        # Skip rows whose first three cells are all empty.
        if not any(cell_list[:3]):
            continue

        cell_str = (
            prepare_match_string(
                cell_list[0].lower()))

        # Updating alignment search on another row.

        state_list = (
            beam_search_step(
                state_list,
                cell_str,
                cell_list,
                row_index,
                beam_width,
                __debug_beam_flag__))

    # Returning final parsing search state.

    return state_list
def parse_by_paragraphs(
    row_list,
    limit = None,
    __debug_flag__ = False,
    __debug_beam_flag__ = False):
    """
    Tries to parse snippet data with paragraph separation inside table cells.

    Each cell is split into paragraphs; paragraphs at the same rank across a
    row's cells form 'line rows' which are fed to the beam search.  Returns
    the final beam search state list, best state first.
    """

    # Splitting row texts by paragraphs.

    line_row_list = []
    line_row_count = 0

    for cell_list in row_list[1:]:

        if limit and line_row_count >= limit:
            break

        paragraph_list_list = [
            re.split(r'[^\S\n]*\n\s*', text)
            for text in cell_list]

        how_many = max(
            len(paragraph_list)
            for paragraph_list in paragraph_list_list[:3])

        # Iterating over aligned paragraphs in adjacent cells.

        line_rank_list = []

        for i in range(how_many):

            line_cell_list = []

            for paragraph_list in paragraph_list_list:

                if i < len(paragraph_list):

                    # Removing snippet alignment mark, if there is one present.

                    cell_str = paragraph_list[i]
                    match = re.match(r'\(__\d+__\)\s*', cell_str)

                    line_cell_list.append(
                        cell_str[match.end():] if match else
                        cell_str)

                else:
                    line_cell_list.append('')

            # Another line row, if it is non-empty.

            if any(line_cell_list):

                line_rank_list.append(line_cell_list)
                line_row_count += 1

                if limit and line_row_count >= limit:
                    break

        line_row_list.append(line_rank_list)

    # Showing what we have, if required.

    if __debug_flag__:

        log.debug(
            '\nrow_list:\n{0}'.format(
                pprint.pformat(
                    row_list, width = 196)))

    state_list = []
    beam_width = 32
    line_row_count = 0

    # Going through snippet data.
    #
    # NOTE(review): line_row_count is reset here but never incremented in
    # this loop, so the 'limit' check below never fires — confirm whether
    # the limit was meant to apply to this pass too.

    for row_index, line_rank_list in enumerate(line_row_list):

        if limit and line_row_count >= limit:
            break

        for line_index, line_cell_list in enumerate(line_rank_list):

            line_cell_str = (
                prepare_match_string(
                    line_cell_list[0].lower()))

            # Updating alignment search on another row.

            state_list = (
                beam_search_step(
                    state_list,
                    line_cell_str,
                    line_cell_list,
                    (row_index, line_index),
                    beam_width,
                    __debug_beam_flag__))

    # Returning final parsing search state.

    return state_list
def main_import(args):
    """
    Test import of 5-tier data from a Docx file.

    The first positional argument in *args* is the Docx file path.  Options
    select parsing mode ('--separate-by-paragraphs', '--all-tables',
    '--limit='), debug output ('--debug', '--debug-beam', '--debug-eaf')
    and which artifacts to produce ('--check-file=', '--check-docx-file=',
    '--eaf-file=', '--modify-docx-file').

    Fixes relative to the previous revision: the uneven-table error message
    used a format string mixing automatic '{}' and manual '{1}'/'{2}'/'{3}'
    numbering, which raises ValueError at runtime; and a duplicated
    'translation_position = position' assignment was removed.
    """

    opt_list, arg_list = (
        getopt.gnu_getopt(args, '', [
            'all-tables',
            'check-docx-file=',
            'check-file=',
            'debug',
            'debug-beam',
            'debug-eaf',
            'eaf-file=',
            'limit=',
            'modify-docx-file',
            'no-db',
            'separate-by-paragraphs']))

    opt_dict = dict(opt_list)

    # Parsing command-line options.

    docx_path = arg_list[0]

    check_file_path = opt_dict.get('--check-file')
    check_docx_file_path = opt_dict.get('--check-docx-file')
    eaf_file_path = opt_dict.get('--eaf-file')

    limit = (
        ast.literal_eval(opt_dict['--limit'])
        if '--limit' in opt_dict else None)

    modify_docx_flag = '--modify-docx-file' in opt_dict
    separate_by_paragraphs_flag = '--separate-by-paragraphs' in opt_dict

    __debug_flag__ = '--debug' in opt_dict
    __debug_beam_flag__ = '--debug-beam' in opt_dict
    __debug_eaf_flag__ = '--debug-eaf' in opt_dict

    # Processing specified Docx file.

    log.debug(
        '\ndocx_path: {0}'.format(docx_path))

    document = docx.Document(docx_path)

    if len(document.tables) <= 0:
        raise NotImplementedError

    # Accessing info of the first table, or all tables, depending on the options.
    #
    # Counting only unique cells because apparently some .docx documents can have repeating cells in their
    # structure.

    row_list = []

    table_list = (
        document.tables if '--all-tables' in opt_dict else
        document.tables[:1])

    for table_index, table in enumerate(table_list):

        column_count = len(set(table.rows[0].cells))
        row_count = len(set(table.columns[0].cells))

        table_cell_list = list(table._cells)

        source_cell_list = []
        source_cell_set = set()

        for cell in table_cell_list:

            if cell not in source_cell_set:

                source_cell_list.append(cell)
                source_cell_set.add(cell)

        # Checking for non-uniform rows / columns.

        if len(source_cell_list) != column_count * row_count:

            # Fixed format string: all fields are now explicitly numbered
            # (mixing automatic and manual numbering raises ValueError).

            log.error(
                '\nTable ({0}): rows and / or columns are uneven, '
                '{1} rows, {2} columns, {3} != {1} * {2} cells.'.format(
                    table_index,
                    row_count,
                    column_count,
                    len(source_cell_list)))

            raise NotImplementedError

        row_list.extend(
            [cell.text
                for cell in source_cell_list[
                    i * column_count : (i + 1) * column_count]]
            for i in range(row_count))

        log.debug(
            '\ntable ({}): {} columns, {} rows, {} cells'.format(
                table_index,
                column_count,
                row_count,
                len(source_cell_list)))

    # Processing this info.

    header_list = row_list[0]

    log.debug(
        '\nheader: {0}'.format(header_list))

    if separate_by_paragraphs_flag:

        state_list = parse_by_paragraphs(
            row_list,
            limit,
            __debug_flag__,
            __debug_beam_flag__)

    else:

        state_list = parse_table(
            row_list,
            limit,
            __debug_beam_flag__)

    # Showing final alignment search state, if required.

    if __debug_beam_flag__:

        log.debug('\n' +
            pprint.pformat([(
                round(state.total_value + state.snippet_value, 6),
                state.snippet_count,
                format_match_string(state.snippet_str),
                '|'.join(
                    format_match_string(word_str)
                    for word_str in state.word_list))
                for state in state_list],
                width = 384))

    # Getting all parsed snippets, if we need them.

    if (eaf_file_path is not None or
        check_file_path is not None or
        check_docx_file_path is not None or
        modify_docx_flag):

        if not state_list:
            log.debug('\nno data')
            return

        best_state = state_list[0]

        snippet_chain = (
            (tuple(best_state.row_list), best_state.row_index),
            best_state.snippet_chain)

        snippet_list = []

        # Compiling snippet list, showing it, if required.

        while snippet_chain is not None:

            (row_tuple, row_index), snippet_chain = snippet_chain
            snippet_list.append((list(row_tuple), row_index))

        snippet_list.reverse()

        if __debug_flag__:

            log.debug(
                '\nsnippet_list:\n{0}'.format(
                    pprint.pformat(
                        snippet_list, width = 196)))

    # Saving parsed alignment, if required.

    if check_file_path is not None:

        with open(
            check_file_path, 'w', encoding = 'utf-8') as check_file:

            check_file.write('\n')

            # Showing each parsed snippet.

            for i, (snippet_value_list, snippet_value_index) in enumerate(snippet_list):

                check_file.write(
                    '{0}\n'.format(i + 1))

                value = snippet_value_list[0]

                check_file.write(
                    (value if isinstance(value, str) else value[0]) + '\n')

                for value in snippet_value_list[1:]:

                    check_file.write(' ' +
                        (value if isinstance(value, str) else value[0]) + '\n')

                check_file.write('\n')

    # Saving parsing alignment as Docx file, if required.

    if check_docx_file_path is not None:

        if separate_by_paragraphs_flag:
            raise NotImplementedError

        check_docx = docx.Document()

        # NOTE(review): row_count / column_count below refer to the *last*
        # table processed above; with '--all-tables' this may not match the
        # combined row_list — confirm.

        check_table = check_docx.add_table(
            rows = row_count - 1 + len(snippet_list),
            cols = 3)

        table_cell_list = check_table._cells
        table_cell_index = 0

        # Exporting all parsed snippets with their numbers.

        for i, (snippet_row_list, snippet_row_index) in enumerate(snippet_list):

            table_cell_list[table_cell_index].text = '{0}'.format(i + 1)
            table_cell_index += 3

            for cell_list in snippet_row_list:

                for table_cell, snippet_cell in zip(
                    table_cell_list[table_cell_index : table_cell_index + 3],
                    cell_list):

                    table_cell.text = snippet_cell

                table_cell_index += 3

        check_docx.save(check_docx_file_path)

    # Saving parsed snippets as the standard 5-tier EAF structure.

    if eaf_file_path is not None:

        log.debug('\n' + pprint.pformat(snippet_list, width = 196))

        eaf = pympi.Elan.Eaf()

        eaf.add_linguistic_type('text_top_level')
        eaf.add_linguistic_type('symbolic_association', 'Symbolic_Association', False)
        eaf.add_linguistic_type('word_translation_included_in', 'Included_In')

        eaf.remove_linguistic_type('default-lt')

        # Showing linguistic types info, if required.

        if __debug_eaf_flag__:

            log.debug(
                '\nget_linguistic_type_names(): {0}'.format(eaf.get_linguistic_type_names()))

            log.debug(''.join(
                '\nget_parameters_for_linguistic_type({0}): {1}'.format(
                    repr(name),
                    eaf.get_parameters_for_linguistic_type(name))
                for name in eaf.get_linguistic_type_names()))

        eaf.add_tier('text', 'text_top_level')
        eaf.add_tier('other text', 'symbolic_association', 'text')
        eaf.add_tier('literary translation', 'symbolic_association', 'text')
        eaf.add_tier('translation', 'word_translation_included_in', 'text')
        eaf.add_tier('transcription', 'symbolic_association', 'translation')
        eaf.add_tier('word', 'symbolic_association', 'translation')

        eaf.remove_tier('default')

        # Showing tier info, if required.

        if __debug_eaf_flag__:

            log.debug(
                '\nget_tier_names(): {0}'.format(eaf.get_tier_names()))

            log.debug(''.join(
                '\nget_parameters_for_tier({0}): {1}'.format(
                    repr(name),
                    eaf.get_parameters_for_tier(name))
                for name in eaf.get_tier_names()))

        # Compiling annotation data.  Annotation timing is synthetic: 'step'
        # milliseconds per character plus a step-sized gap between snippets.

        step = 75
        position = step

        for snippet_value_list, snippet_value_index in snippet_list:

            # Snippet base texts.

            text, text_other, text_translation = snippet_value_list[0]

            duration = len(text) * step

            eaf.add_annotation(
                'text', position, position + duration, text)

            eaf.add_ref_annotation(
                'other text', 'text', position, text_other)

            eaf.add_ref_annotation(
                'literary translation', 'text', position, text_translation)

            # Preparing to create annotations for snippet words: total
            # character length of the word-level texts plus one separator
            # per word, used to apportion the snippet duration.

            translation_length = (
                sum(len(text_list[0] or text_list[2] or text_list[1])
                    for text_list in snippet_value_list[1:]) +
                len(snippet_value_list) - 2)

            # NOTE(review): translation_length can be zero for a snippet
            # without word rows, making the division below raise
            # ZeroDivisionError — confirm input invariants.

            translation_position = position
            translation_step = duration // translation_length

            # Snippet words.

            for text_list in snippet_value_list[1:]:

                word, word_other, translation = text_list

                translation_duration = (
                    round(
                        max(len(word or translation or word_other), 1) *
                        translation_step))

                eaf.add_annotation(
                    'translation',
                    translation_position,
                    translation_position + translation_duration,
                    translation)

                eaf.add_ref_annotation(
                    'transcription', 'translation', translation_position, word_other)

                eaf.add_ref_annotation(
                    'word', 'translation', translation_position, word)

                translation_position += (
                    translation_duration + translation_step)

            # Ready to go to the next snippet.

            position += duration + step

        # Showing annotation info, if required.

        if __debug_eaf_flag__:

            log.debug(''.join(
                '\nget_annotation_data_for_tier({0}):\n{1}'.format(
                    repr(name),
                    eaf.get_annotation_data_for_tier(name)[:4])
                for name in eaf.get_tier_names()))

        eaf.header['TIME_UNITS'] = 'milliseconds'
        eaf.to_file(eaf_file_path)

    # Modifying source Docx file with alignment marks, if required.

    if modify_docx_flag:

        if not separate_by_paragraphs_flag:

            for i, (snippet_row_list, snippet_row_index) in enumerate(snippet_list):

                mark_str = '(__{0}__)\n'.format(i + 1)
                cell_index = snippet_row_index * column_count

                for j, cell in enumerate(
                    source_cell_list[cell_index : cell_index + 3]):

                    # Right now can't do something like
                    #
                    # cell.paragraphs[0].insert_paragraph_before(mark_str),
                    #
                    # because, if there is a mark there already, we should delete it, and tracking this
                    # deletion across all possible paragraphs and runs in the cell is too high complexity.

                    cell.text = mark_str + snippet_row_list[0][j]

            document.save(docx_path)

        # When tables are separated by paragraphs.

        else:

            snippet_index = 0
            snippet_row_index, snippet_rank_index = snippet_list[snippet_index][1]

            for row_index, cell_list in enumerate(row_list[1:]):

                # Along the lines of data extraction from such tables, see 'parse_by_paragraphs()' function.

                paragraph_list_list = [
                    re.split(r'([^\S\n]*\n\s*)', text)
                    for text in cell_list]

                for i, paragraph_list in enumerate(paragraph_list_list):

                    paragraph_list.append('')

                    # Pair each paragraph with its trailing separator.
                    paragraph_list_list[i] = list(
                        zip(paragraph_list[::2], paragraph_list[1::2]))

                line_list_list = [[]
                    for text in cell_list]

                how_many = max(
                    len(paragraph_list)
                    for paragraph_list in paragraph_list_list[:3])

                # Iterating over aligned paragraphs in adjacent cells.

                line_rank_count = 0

                for i in range(how_many):

                    line_cell_list = []

                    if (snippet_index is not None and
                        row_index == snippet_row_index and
                        line_rank_count == snippet_rank_index):

                        mark_str = '(__{0}__)\n'.format(snippet_index + 1)

                        for line_list in line_list_list:
                            line_list.append(mark_str)

                        # Next snippet.

                        snippet_index += 1

                        if snippet_index >= len(snippet_list):
                            snippet_index = None

                        else:

                            snippet_row_index, snippet_rank_index = (
                                snippet_list[snippet_index][1])

                    for paragraph_list, line_list in zip(
                        paragraph_list_list, line_list_list):

                        if i < len(paragraph_list):

                            # Removing previous snippet alignment mark, if there is one present.

                            cell_str, separator_str = paragraph_list[i]
                            match = re.match(r'\(__\d+__\)\s*', cell_str)

                            if match:
                                cell_str = cell_str[match.end():]

                            line_cell_list.append(cell_str)
                            line_list.append(cell_str + separator_str)

                    # Another line row, if it is non-empty.

                    if any(line_cell_list):
                        line_rank_count += 1

                    else:

                        for line_list in line_list_list:
                            line_list.pop()

                # Replacing contents of another table cell.

                cell_index = (row_index + 1) * column_count

                for cell, line_list in zip(
                    source_cell_list[cell_index : cell_index + 3],
                    line_list_list):

                    match = re.fullmatch(
                        r'(.*?)[^\S\n]*\n[^\S\n]*', line_list[0], re.DOTALL)

                    cell.text = match.group(1) if match else line_list[0]

                    # Splitting text into distinct paragraphs because otherwise at least LibreOffice writer
                    # starts to take too much time to process resulting documents.

                    for line in line_list[1:]:

                        match = re.fullmatch(
                            r'(.*?)[^\S\n]*\n[^\S\n]*', line, re.DOTALL)

                        cell.add_paragraph(
                            match.group(1) if match else line, 'Normal')

            # Saving Docx file updates.

            document.save(docx_path)
def main_eaf(args):
    """
    Showing structure of a specified Eaf file.

    Each argument is the path of an EAF file; its vocabularies, tiers,
    annotations and per-character timing statistics are dumped to the
    debug log.
    """

    for eaf_path in args:

        log.debug(
            '\neaf_path: {0}'.format(eaf_path))

        eaf = pympi.Elan.Eaf(eaf_path)

        # General file-level info.

        log.debug(
            '\nget_controlled_vocabulary_names(): {0}'.format(eaf.get_controlled_vocabulary_names()))

        log.debug(
            '\nget_external_ref_names(): {0}'.format(eaf.get_external_ref_names()))

        log.debug(
            '\nget_languages(): {0}'.format(eaf.get_languages()))

        log.debug(
            '\nget_lexicon_ref_names(): {0}'.format(eaf.get_lexicon_ref_names()))

        log.debug(
            '\nget_licenses(): {0}'.format(eaf.get_licenses()))

        log.debug(
            '\nget_linguistic_type_names(): {0}'.format(eaf.get_linguistic_type_names()))

        log.debug(
            '\nget_linked_files(): {0}'.format(eaf.get_linked_files()))

        log.debug(
            '\nget_locales(): {0}'.format(eaf.get_locales()))

        log.debug(
            '\nget_properties(): {0}'.format(eaf.get_properties()))

        log.debug(
            '\nget_secondary_linked_files(): {0}'.format(eaf.get_secondary_linked_files()))

        log.debug(
            '\nget_tier_names(): {0}'.format(eaf.get_tier_names()))

        log.debug('\n' +
            pprint.pformat(eaf.linguistic_types, width = 196))

        # L-type and tier parameters.

        log.debug(''.join(
            '\nget_parameters_for_linguistic_type({0}): {1}'.format(
                repr(name),
                eaf.get_parameters_for_linguistic_type(name))
            for name in eaf.get_linguistic_type_names()))

        log.debug(''.join(
            '\nget_tier_ids_for_linguistic_type({0}): {1}'.format(
                repr(name),
                eaf.get_tier_ids_for_linguistic_type(name))
            for name in eaf.get_linguistic_type_names()))

        log.debug(''.join(
            '\nget_parameters_for_tier({0}): {1}'.format(
                repr(name),
                eaf.get_parameters_for_tier(name))
            for name in eaf.get_tier_names()))

        # Select annotations.

        log.debug(''.join(
            '\nget_annotation_data_for_tier({0}):\n{1}'.format(
                repr(name),
                eaf.get_annotation_data_for_tier(name)[:4])
            for name in eaf.get_tier_names()))

        # Average time interval per character.

        total_duration = 0
        total_length = 0

        for name in eaf.get_tier_names():

            tier_duration = 0
            tier_length = 0

            for annotation in eaf.get_annotation_data_for_tier(name):

                begin, end, text = annotation[:3]

                tier_duration += end - begin
                tier_length += len(text)

            # NOTE(review): a tier whose annotation texts are all empty
            # gives tier_length == 0 and the division below raises
            # ZeroDivisionError — confirm inputs.

            log.debug(
                '\ntier {0}: {1:.3f} / {2} -> {3:.3f}'.format(
                    repr(name),
                    tier_duration / 1000.0,
                    tier_length,
                    tier_duration / (tier_length * 1000)))

            total_duration += tier_duration
            total_length += tier_length

        log.debug(
            '\ntotal: {0:.3f} / {1} -> {2:.3f}'.format(
                total_duration / 1000.0,
                total_length,
                total_duration / (total_length * 1000)))
# If we are being run as a script.
if __name__ == '__main__':

    if (len(sys.argv) > 1 and
        sys.argv[1] == '-config'):

        # We have a configuration file; initializing DB, if required, and logging.

        config_path = sys.argv[2]

        # NOTE(review): assumes at least four argv entries when '-config' is
        # given; sys.argv[3] raises IndexError otherwise — confirm intended
        # usage.

        if sys.argv[3] != '-no-db':
            pyramid_env = paster.bootstrap(config_path)
            arg_list = sys.argv[3:]
        else:
            arg_list = sys.argv[4:]

        paster.setup_logging(config_path)
        log = logging.getLogger(__name__)

    else:

        # No config file, so just logging to stdout.

        arg_list = sys.argv[1:]

        log_root = logging.getLogger()
        log_root.setLevel(logging.DEBUG)

        log_handler = logging.StreamHandler(sys.stdout)
        log_handler.setLevel(logging.DEBUG)

        log_formatter = (
            logging.Formatter(
                '%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] '
                '%(pathname)s:%(lineno)d: %(message)s'))

        log_handler.setFormatter(log_formatter)
        log_root.addHandler(log_handler)

        log = logging.getLogger(__name__)

    # Doing what we need.

    if len(arg_list) <= 0:
        log.info(
            '\nPlease specify a command to execute.')

    elif arg_list[0] == 'import':
        main_import(arg_list[1:])

    elif arg_list[0] == 'eaf':
        main_eaf(arg_list[1:])

    else:
        # Logger.warn() is a deprecated alias; use warning().
        log.warning(
            '\nUnknown command \'{0}\'.'.format(arg_list[0]))
| 35,663 | 11,000 |
class Auth:
    """Static container for per-judge login credentials.

    Each judge-specific method returns a fresh dict with 'login' and
    'password' keys; the values are intentionally blank placeholders to be
    filled in locally.
    """

    @staticmethod
    def _credentials():
        # Blank credential template shared by every judge.
        return {'login': '', 'password': ''}

    @staticmethod
    def opencup():
        return Auth._credentials()

    @staticmethod
    def yandexcontest():
        return Auth._credentials()

    @staticmethod
    def atcoder():
        return Auth._credentials()

    @staticmethod
    def hackerrank():
        return Auth._credentials()
from .configuration import Configuration
import vizivault
import openpyxl
def validate_columns(headers, column_config):
    """Check that every column referenced by *column_config* exists.

    column_config is either a column name (str) or a nested dict whose leaf
    values are column names; *headers* is the set of available column names.
    Raises TypeError for the first missing column.
    """
    if type(column_config) is str:
        # Leaf: a single column name.
        if column_config not in headers:
            raise TypeError("Attempting to read from nonexistent column", column_config)
        return
    # Nested config: validate every sub-column recursively.
    for sub_config in column_config.values():
        validate_columns(headers, sub_config)
def validate_all_columns(headers, attributes):
    """Validate the 'columns' entry of every attribute definition.

    Delegates to validate_columns, which raises TypeError for any referenced
    column that is absent from *headers*.
    """
    for attribute_def in attributes:
        validate_columns(headers, attribute_def['columns'])
def get_primitive(attribute_schema, value):
    """Coerce *value* to the primitive type named by *attribute_schema*.

    Supported schema names are 'int', 'boolean' and 'float'; any other name
    falls back to string conversion.

    NOTE(review): bool('False') is True — boolean coercion of textual cell
    values may not behave as expected; confirm cell value types upstream.
    """
    converters = {
        'int': int,
        'boolean': bool,
        'float': float,
    }
    return converters.get(attribute_schema, str)(value)
def assemble_value(attribute_columns, attribute_schema, header_map, cells):
    """Build an attribute value (primitive or nested dict) from one row.

    attribute_columns mirrors attribute_schema: a plain column name yields a
    primitive, a dict yields a dict with each key assembled recursively.
    *cells* is a row of spreadsheet cells indexed through *header_map*.
    """
    if type(attribute_columns) is str:
        # Leaf: read one cell and coerce it to the schema's primitive type.
        cell = cells[header_map[attribute_columns]]
        return get_primitive(attribute_schema, cell.value)
    return {
        key: assemble_value(
            attribute_columns[key], attribute_schema[key], header_map, cells)
        for key in attribute_columns
    }
def load_excel(file_path: str, conf_path: str,
        url: str,
        api_key: str,
        encryption_key_file: str,
        decryption_key_file: str):
    """Load user attribute data from an Excel workbook into a ViziVault.

    file_path: path of the .xlsx workbook; conf_path: path of the column
    configuration file; url / api_key: vault connection parameters;
    encryption_key_file / decryption_key_file: paths of the key files whose
    contents are passed to the vault client.
    """
    configuration = Configuration(conf_path)
    with open(encryption_key_file, 'r') as encryption_file:
        encryption_key = encryption_file.read()
    with open(decryption_key_file, 'r') as decryption_file:
        decryption_key = decryption_file.read()
    vault = vizivault.ViziVault(base_url=url, api_key=api_key, encryption_key=encryption_key, decryption_key=decryption_key)
    # Register every configured attribute definition with the vault first
    # ('columns' is local mapping info, not part of the definition).
    for attribute in configuration.attributes:
        attribute_def = vizivault.AttributeDefinition(**{k:v for k, v in attribute.items() if k not in {'columns'}})
        vault.store_attribute_definition(attribute_definition=attribute_def)
    #TODO Load in parallel and validate data types based on primitive schemas.
    # Could potentially also check if user exists (update) or will be created (insertion)
    workbook = openpyxl.load_workbook(file_path)
    for sheet in workbook.worksheets:
        # use next(sheet.rows) to get the first row of the spreadsheet
        header_map = {cell.value : i for i, cell in enumerate(next(sheet.rows))}
        validate_all_columns(header_map.keys(), configuration.attributes)
        count = 0
        for row_cells in sheet.iter_rows(min_row=2):
            userid = row_cells[header_map[configuration.user_id_column]].value
            # A blank user-id cell terminates the sheet.
            if userid is None:
                break
            new_user = vizivault.User(str(userid))
            valid = True
            for attribute in configuration.attributes:
                try:
                    new_user.add_attribute(attribute=attribute['name'], value=assemble_value(attribute['columns'], attribute['schema'], header_map, row_cells))
                except ValueError as error:
                    # Keep processing remaining attributes so all problems
                    # for this user are reported, but skip saving the user.
                    valid = False
                    print("Invalid value for attribute %s (%s)" % (attribute['name'], " ".join(error.args)))
            if valid:
                vault.save(new_user)
                count += 1
                print("Loaded user %s" % userid)
            else:
                print("User %s has invalid data; skipping" % userid)
        print("Loaded data for %d users" % count)
    #TODO Export the result of the upload as a log file and STDIO. Export should be inserts/updates and errors or warnings.
| 3,580 | 980 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe, random, erpnext
from frappe.desk import query_report
from erpnext.stock.stock_ledger import NegativeStockError
from erpnext.stock.doctype.serial_no.serial_no import SerialNoRequiredError, SerialNoQtyError
from erpnext.stock.doctype.batch.batch import UnableToSelectBatchError
from erpnext.stock.doctype.delivery_note.delivery_note import make_sales_return
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_return
def work():
    """Run one day's worth of demo stock activity as the manufacturing user.

    Each step below creates/submits random stock documents; the order is
    deliberate (receipts before deliveries before reconciliation/returns).
    """
    # Switch the session to the configured demo manufacturing user.
    frappe.set_user(frappe.db.get_global('demo_manufacturing_user'))
    make_purchase_receipt()
    make_delivery_note()
    make_stock_reconciliation()
    submit_draft_stock_entries()
    make_sales_return_records()
    make_purchase_return_records()
def make_purchase_receipt():
    """With 60% probability, create Purchase Receipts for up to 10 random
    open Purchase Orders (demo data generation)."""
    if random.random() < 0.6:
        from erpnext.buying.doctype.purchase_order.purchase_order import make_purchase_receipt
        report = "Purchase Order Items To Be Received"
        # Unique PO names from the report, excluding the "Total" row,
        # limited to a random sample of at most 10.
        po_list =list(set([r[0] for r in query_report.run(report)["result"] if r[0]!="Total"]))[:random.randint(1, 10)]
        for po in po_list:
            pr = frappe.get_doc(make_purchase_receipt(po))
            if pr.is_subcontracted=="Yes":
                pr.supplier_warehouse = "Supplier - WPL"
            pr.posting_date = frappe.flags.current_date
            pr.insert()
            try:
                pr.submit()
            except NegativeStockError:
                # Submission can fail when stock would go negative; the
                # receipt is left in draft and we move on.
                print('Negative stock for {0}'.format(po))
                pass
        frappe.db.commit()
def make_delivery_note():
    """With 60% probability, create Delivery Notes for up to 3 random
    Sales Orders with items still to be delivered (demo data generation)."""
    # make purchase requests
    # make delivery notes (if possible)
    if random.random() < 0.6:
        from erpnext.selling.doctype.sales_order.sales_order import make_delivery_note
        report = "Ordered Items To Be Delivered"
        for so in list(set([r[0] for r in query_report.run(report)["result"]
            if r[0]!="Total"]))[:random.randint(1, 3)]:
            dn = frappe.get_doc(make_delivery_note(so))
            dn.posting_date = frappe.flags.current_date
            # Ensure every row has an expense account, defaulting to the
            # company's Cost of Goods Sold account.
            for d in dn.get("items"):
                if not d.expense_account:
                    d.expense_account = ("Cost of Goods Sold - {0}".format(
                        frappe.get_cached_value('Company', dn.company, 'abbr')))
            try:
                dn.insert()
                dn.submit()
                frappe.db.commit()
            except (NegativeStockError, SerialNoRequiredError, SerialNoQtyError, UnableToSelectBatchError):
                # Any stock/serial/batch problem aborts just this note.
                frappe.db.rollback()
def make_stock_reconciliation():
    """With 40% probability, post a Stock Reconciliation that randomly
    reduces quantities in the Stores warehouse (simulating damaged stock)."""
    # random set some items as damaged
    from erpnext.stock.doctype.stock_reconciliation.stock_reconciliation \
        import OpeningEntryAccountError, EmptyStockReconciliationItemsError
    if random.random() < 0.4:
        stock_reco = frappe.new_doc("Stock Reconciliation")
        stock_reco.posting_date = frappe.flags.current_date
        stock_reco.company = erpnext.get_default_company()
        stock_reco.get_items_for("Stores - WPL")
        if stock_reco.items:
            # Knock a random amount off every item with stock on hand.
            for item in stock_reco.items:
                if item.qty:
                    item.qty = item.qty - round(random.randint(1, item.qty))
            try:
                stock_reco.insert(ignore_permissions=True, ignore_mandatory=True)
                stock_reco.submit()
                frappe.db.commit()
            except OpeningEntryAccountError:
                frappe.db.rollback()
            except EmptyStockReconciliationItemsError:
                frappe.db.rollback()
def submit_draft_stock_entries():
    """Try to submit all draft Stock Entries, re-dating them to the demo's
    current date; roll back any entry that fails validation."""
    from erpnext.stock.doctype.stock_entry.stock_entry import IncorrectValuationRateError, \
        DuplicateEntryForWorkOrderError, OperationsNotCompleteError
    # try posting older drafts (if exists)
    frappe.db.commit()
    for st in frappe.db.get_values("Stock Entry", {"docstatus":0}, "name"):
        try:
            ste = frappe.get_doc("Stock Entry", st[0])
            ste.posting_date = frappe.flags.current_date
            ste.save()
            ste.submit()
            frappe.db.commit()
        except (NegativeStockError, IncorrectValuationRateError, DuplicateEntryForWorkOrderError,
            OperationsNotCompleteError):
            # Leave this draft in place and continue with the next one.
            frappe.db.rollback()
def make_sales_return_records():
    """With 10% probability, create sales returns for a random ~10% of
    submitted Delivery Notes (demo data generation)."""
    if random.random() < 0.1:
        submitted_notes = frappe.get_all('Delivery Note', fields=["name"], filters={"docstatus": 1})
        for note in submitted_notes:
            # Independently pick roughly one note in ten to return.
            if random.random() < 0.1:
                try:
                    return_note = make_sales_return(note.name)
                    return_note.insert()
                    return_note.submit()
                    frappe.db.commit()
                except Exception:
                    # Best-effort demo data: skip any note that cannot be returned.
                    frappe.db.rollback()
def make_purchase_return_records():
    """With 10% probability, create purchase returns for a random ~10% of
    submitted Purchase Receipts (demo data generation)."""
    if random.random() < 0.1:
        submitted_receipts = frappe.get_all('Purchase Receipt', fields=["name"], filters={"docstatus": 1})
        for receipt in submitted_receipts:
            # Independently pick roughly one receipt in ten to return.
            if random.random() < 0.1:
                try:
                    return_receipt = make_purchase_return(receipt.name)
                    return_receipt.insert()
                    return_receipt.submit()
                    frappe.db.commit()
                except Exception:
                    # Best-effort demo data: skip any receipt that cannot be returned.
                    frappe.db.rollback()
| 4,509 | 1,823 |
#
# @file dot2txt.py
# @author Chirag Jain <cjain7@gatech.edu>
# @brief convert dot formatted compressed DBG from splitMEM to our .txt format
# note: splitMEM.cc was modified to print vertex DNA labels as well
# (set OPT_DisplaySeq=1 and OPT_SeqToDisplay= <int>::max() )
# note: After reading code in splitMEM, we realized that it replaces each 'N'
# character with 'A', and it builds the concatenated string by merging
# adjacent fasta records while putting a 'N' character in between, and
# at the very end
# @usage python dot2txt.py "MEM or kmer size" "dot file" > ".txt file"
import sys
import re
# Command-line arguments: MEM/kmer size k and the splitMEM dot file.
k = int(sys.argv[1])
dotFile = sys.argv[2]

vertexLabels = []   # DNA label of each vertex, indexed by vertex id
edges = []          # directed edges as (source, target) vertex-id pairs

#######
# Read all vertex labels and directed edges
######
with open(dotFile) as fp:
    currentVertexId = -1
    for line in fp:
        tokens = line.split()
        if len(tokens) == 2:  # must be a vertex label
            currentVertexId += 1
            assert int(tokens[0]) == currentVertexId
            label = re.sub(r'[^ACGTN\$]', '', tokens[1])  # get rid of ambiguous characters
            label = re.sub(r'[\$]', 'N', label)           # map '$' terminator to 'N'
            vertexLabels.append(label)
        elif len(tokens) == 3:
            if tokens[1] == "->":  # must be an edge
                assert int(tokens[0]) == currentVertexId
                edges.append((int(tokens[0]), int(tokens[2])))

# Remove duplicate edges while preserving first-seen order.
# (The original used list(dict.fromkeys(edges)), which only preserves
# insertion order on Python 3.7+; this version is order-stable everywhere.)
seenEdges = set()
uniqueEdges = []
for e in edges:
    if e not in seenEdges:
        seenEdges.add(e)
        uniqueEdges.append(e)
edges = uniqueEdges

#####
# Remove overlapping prefix from selected vertices
# Algorithm: if a vertex has in-degree > 0, then
#            remove first (k-1) characters from its label
#####
trimVertexLabel = [0] * len(vertexLabels)
for (u, v) in edges:
    assert 0 <= u < len(vertexLabels)
    assert 0 <= v < len(vertexLabels)
    trimVertexLabel[v] = 1
for i in range(len(vertexLabels)):
    if trimVertexLabel[i] == 1:
        vertexLabels[i] = vertexLabels[i][k-1:]

#####
# Compute out-neighbors of each vertex
####
out_index = {}
for (u, v) in edges:
    out_index.setdefault(u, []).append(v)

####
# Finally print the contents
# sys.stdout.write is used instead of the original Python-2-only print
# statements so the script runs under both Python 2 and Python 3.
####
sys.stdout.write("%d\n" % len(vertexLabels))
for u in range(len(vertexLabels)):
    parts = []
    if u in out_index:  # out-neighbors first, then the label
        parts.extend(str(v) for v in out_index[u])
    assert len(vertexLabels[u]) > 0
    parts.append(vertexLabels[u])
    sys.stdout.write(" ".join(parts) + "\n")
| 2,376 | 831 |
from django import template
register = template.Library()
@register.inclusion_tag("common/paginator.html")
def paginator(items):
    """Build the context for a windowed pagination widget.

    *items* is a Django paginator Page; up to 4 page numbers are shown on
    either side of the current page, snapping the window to the first/last
    page when the current page is near an end.
    """
    window = 4
    total = items.paginator.num_pages
    current = items.number

    first_shown = max(current - window, 1)
    if first_shown <= 3:
        # Close enough to the start: extend the window to page 1.
        first_shown = 1
    last_shown = current + window + 1
    if last_shown >= total - 1:
        # Close enough to the end: extend the window to the last page.
        last_shown = total + 1

    pages = [p for p in range(first_shown, last_shown) if 0 < p <= total]
    return {
        "items": items,
        "page_numbers": pages,
        "show_first": 1 not in pages,
        "show_last": total not in pages,
        "num_pages": total,
    }
| 720 | 249 |
"""
Definition of prior distribution
"""
import numpy as np
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from .utils import LogBase
from .dist import Normal
PRIOR_TYPE_NONSPATIAL = "N"
PRIOR_TYPE_SPATIAL_MRF = "M"
def get_prior(param, data_model, **kwargs):
    """
    Factory method to return a vertexwise prior

    :param param: Parameter with ``prior_dist`` and ``prior_type`` attributes
    :param data_model: Data model providing ``n_vertices``
    :return: Prior instance matching the parameter's prior type
    :raises ValueError: If the type/distribution combination is unrecognized
    """
    if isinstance(param.prior_dist, Normal):
        # Map Fabber-style prior type codes onto prior classes.
        prior_classes = {
            "N": NormalPrior,
            "M": MRFSpatialPrior,
            "M2": MRF2SpatialPrior,
            "Mfab": FabberMRFSpatialPrior,
            "A": ARDPrior,
        }
        prior_cls = prior_classes.get(param.prior_type)
        if prior_cls is not None:
            return prior_cls(data_model.n_vertices, param.prior_dist.mean, param.prior_dist.var, **kwargs)
    raise ValueError("Can't create prior type %s for distribution %s - unrecognized combination" % (param.prior_type, param.prior_dist))
class Prior(LogBase):
    """
    Base class for a prior, defining methods that must be implemented
    """

    def mean_log_pdf(self, samples):
        """
        :param samples: A tensor of shape [W, P, S] where W is the number
                        of parameter vertices, P is the number of parameters in the prior
                        (possibly 1) and S is the number of samples
        :return: A tensor of shape [W] where W is the number of parameter vertices
                 containing the mean log PDF of the parameter samples
                 provided
        """
        raise NotImplementedError()

    def log_det_cov(self):
        # Log-determinant of the prior covariance; implemented by subclasses
        # that define a covariance (see FactorisedPrior.log_det_cov)
        raise NotImplementedError()
class NormalPrior(Prior):
    """
    Prior based on a vertexwise univariate normal distribution
    """

    def __init__(self, nvertices, mean, var, **kwargs):
        """
        :param nvertices: Number of parameter vertices W
        :param mean: Prior mean value (scalar, broadcast to all vertices)
        :param var: Prior variance (scalar, broadcast to all vertices)
        """
        Prior.__init__(self)
        self.name = kwargs.get("name", "NormalPrior")
        self.nvertices = nvertices
        # Keep the scalar values for reporting (see __str__)
        self.scalar_mean = mean
        self.scalar_var = var
        # Vertexwise constant tensors of shape [W]
        self.mean = tf.fill([nvertices], mean, name="%s_mean" % self.name)
        self.var = tf.fill([nvertices], var, name="%s_var" % self.name)
        self.std = tf.sqrt(self.var, name="%s_std" % self.name)

    def mean_log_pdf(self, samples):
        """
        Mean log PDF for normal distribution

        Note that ``term1`` is a constant offset when the prior variance is fixed and hence
        in earlier versions of the code this was neglected, along with other constant offsets
        such as factors of pi. However when this code is inherited by spatial priors and ARD
        the variance is no longer fixed and this term must be included.

        :param samples: Tensor of shape [W, 1, N] of parameter samples
        :return: Tensor of shape [W] containing the sample-mean log PDF per vertex
        """
        dx = tf.subtract(samples, tf.reshape(self.mean, [self.nvertices, 1, 1])) # [W, 1, N]
        z = tf.div(tf.square(dx), tf.reshape(self.var, [self.nvertices, 1, 1])) # [W, 1, N]
        term1 = self.log_tf(-0.5*tf.log(tf.reshape(self.var, [self.nvertices, 1, 1])), name="term1")
        term2 = self.log_tf(-0.5*z, name="term2")
        log_pdf = term1 + term2 # [W, 1, N]
        mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]
        return mean_log_pdf

    def __str__(self):
        return "Non-spatial prior (%f, %f)" % (self.scalar_mean, self.scalar_var)
class FabberMRFSpatialPrior(NormalPrior):
    """
    Prior designed to mimic the 'M' type spatial prior in Fabber.

    Note that this uses update equations for ak which is not in the spirit of the stochastic
    method. 'Native' SVB MRF spatial priors are also defined which simply treat the spatial
    precision parameter as an inference variable.

    This code has been verified to generate the same ak estimate given the same input as
    Fabber, however in practice it does not optimize to the same value. We don't yet know
    why.
    """

    def __init__(self, nvertices, mean, var, idx=None, post=None, nn=None, n2=None, **kwargs):
        """
        :param nvertices: Number of parameter vertices W
        :param mean: Prior mean value (scalar)
        :param var: Prior variance (scalar)
        :param idx: Index of this parameter within the posterior mean/covariance
        :param post: Posterior instance
        :param nn: Sparse tensor of shape [W, W] containing nearest neighbour lists
        :param n2: Sparse tensor of shape [W, W] containing second nearest neighbour lists
        """
        NormalPrior.__init__(self, nvertices, mean, var, name="FabberMRFSpatialPrior")
        self.idx = idx

        # Save the original vertexwise mean and variance - the actual prior mean/var
        # will be calculated from these and also the spatial variation in neighbour vertices
        self.fixed_mean = self.mean
        self.fixed_var = self.var

        # nn and n2 are sparse tensors of shape [W, W]. If nn[A, B] = 1 then A is
        # a nearest neighbour of B, and similarly for n2 and second nearest neighbours
        self.nn = nn
        self.n2 = n2

        # Set up spatial smoothing parameter calculation from posterior and neighbour lists
        self._setup_ak(post, nn, n2)

        # Set up prior mean/variance
        self._setup_mean_var(post, nn, n2)

    def __str__(self):
        return "Spatial MRF prior (%f, %f)" % (self.scalar_mean, self.scalar_var)

    def _setup_ak(self, post, nn, n2):
        # This is the equivalent of CalculateAk in Fabber
        #
        # Some of this could probably be better done using linalg
        # operations but bear in mind this is one parameter only

        # Posterior variance and mean for this parameter at each vertex
        self.sigmaK = self.log_tf(tf.matrix_diag_part(post.cov)[:, self.idx], name="sigmak") # [W]
        self.wK = self.log_tf(post.mean[:, self.idx], name="wk") # [W]
        self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]

        # Sum over vertices of parameter variance multiplied by number of
        # nearest neighbours for each vertex
        trace_term = self.log_tf(tf.reduce_sum(self.sigmaK * self.num_nn), name="trace") # [1]

        # Sum of nearest and next-nearest neighbour mean values
        self.sum_means_nn = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.nn, tf.reshape(self.wK, (-1, 1))), (-1,)), name="wksum") # [W]
        self.sum_means_n2 = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.n2, tf.reshape(self.wK, (-1, 1))), (-1,)), name="contrib8") # [W]

        # vertex parameter mean multipled by number of nearest neighbours
        wknn = self.log_tf(self.wK * self.num_nn, name="wknn") # [W]

        swk = self.log_tf(wknn - self.sum_means_nn, name="swk") # [W]

        term2 = self.log_tf(tf.reduce_sum(swk * self.wK), name="term2") # [1]

        # ak update equation (gk/hk are the Gamma distribution parameters)
        gk = 1 / (0.5 * trace_term + 0.5 * term2 + 0.1)
        hk = tf.multiply(tf.to_float(self.nvertices), 0.5) + 1.0
        self.ak = self.log_tf(tf.identity(gk * hk, name="ak"))

    def _setup_mean_var(self, post, nn, n2):
        # This is the equivalent of ApplyToMVN in Fabber
        contrib_nn = self.log_tf(8*self.sum_means_nn, name="contrib_nn") # [W]
        contrib_n2 = self.log_tf(-self.sum_means_n2, name="contrib_n2") # [W]

        spatial_mean = self.log_tf(contrib_nn / (8*self.num_nn), name="spatial_mean")
        spatial_prec = self.log_tf(self.num_nn * self.ak, name="spatial_prec")

        # Combine the fixed prior with the spatially-derived precision/mean
        self.var = self.log_tf(1 / (1/self.fixed_var + spatial_prec), name="%s_var" % self.name)
        #self.var = self.fixed_var
        self.mean = self.log_tf(self.var * spatial_prec * spatial_mean, name="%s_mean" % self.name)
        #self.mean = self.fixed_mean + self.ak
class MRFSpatialPrior(Prior):
    """
    Prior which performs adaptive spatial regularization based on the
    contents of neighbouring vertices using the Markov Random Field method

    This uses the same formalism as the Fabber 'M' type spatial prior but treats the ak
    as a parameter of the optimization.
    """

    def __init__(self, nvertices, mean, var, idx=None, post=None, nn=None, n2=None, **kwargs):
        """
        :param nvertices: Number of parameter vertices W
        :param mean: Prior mean value (scalar)
        :param var: Prior variance (scalar)
        :param nn: Sparse tensor of shape [W, W] containing nearest neighbour lists
        """
        Prior.__init__(self)
        self.name = kwargs.get("name", "MRFSpatialPrior")
        self.nvertices = nvertices
        self.mean = tf.fill([nvertices], mean, name="%s_mean" % self.name)
        self.var = tf.fill([nvertices], var, name="%s_var" % self.name)
        self.std = tf.sqrt(self.var, name="%s_std" % self.name)

        # nn is a sparse tensor of shape [W, W]. If nn[A, B] = 1 then A is
        # a nearest neighbour of B
        self.nn = nn

        # Set up spatial smoothing parameter calculation from posterior and neighbour lists
        # We infer the log of ak.
        self.logak = tf.Variable(-5.0, name="log_ak", dtype=tf.float32)
        self.ak = self.log_tf(tf.exp(self.logak, name="ak"))

    def mean_log_pdf(self, samples):
        r"""
        mean log PDF for the MRF spatial prior.

        This is calculating:

        :math:`\log P = \frac{1}{2} \log \phi - \frac{\phi}{2}\underline{x^T} D \underline{x}`
        """
        samples = tf.reshape(samples, (self.nvertices, -1)) # [W, N]
        self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]
        # D x computed as (diagonal part) - (off-diagonal neighbour sums)
        dx_diag = self.log_tf(tf.reshape(self.num_nn, (self.nvertices, 1)) * samples, name="dx_diag") # [W, N]
        dx_offdiag = self.log_tf(tf.sparse_tensor_dense_matmul(self.nn, samples), name="dx_offdiag") # [W, N]
        self.dx = self.log_tf(dx_diag - dx_offdiag, name="dx") # [W, N]
        self.xdx = self.log_tf(samples * self.dx, name="xdx") # [W, N]
        term1 = tf.identity(0.5*self.logak, name="term1")
        term2 = tf.identity(-0.5*self.ak*self.xdx, name="term2")
        log_pdf = term1 + term2  # [W, N]
        mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]

        # Gamma prior if we care
        #q1, q2 = 1, 100
        #mean_log_pdf += (q1-1) * self.logak - self.ak / q2

        return mean_log_pdf

    def __str__(self):
        return "MRF spatial prior"
class ARDPrior(NormalPrior):
    """
    Automatic Relevance Determination prior
    """

    def __init__(self, nvertices, mean, var, **kwargs):
        """
        :param nvertices: Number of parameter vertices W
        :param mean: Prior mean value (scalar)
        :param var: Initial prior variance (scalar) - the actual variance is
                    inferred via the precision parameter phi below
        """
        NormalPrior.__init__(self, nvertices, mean, var, **kwargs)
        self.name = kwargs.get("name", "ARDPrior")
        self.fixed_var = self.var

        # Set up inferred precision parameter phi (inferred in log space so
        # it stays positive); the prior variance becomes 1/phi
        self.logphi = tf.Variable(tf.log(1/self.fixed_var), name="log_phi", dtype=tf.float32)
        self.phi = self.log_tf(tf.exp(self.logphi, name="phi"))
        self.var = 1/self.phi
        self.std = tf.sqrt(self.var, name="%s_std" % self.name)

    def __str__(self):
        return "ARD prior"
class MRF2SpatialPrior(Prior):
    """
    Prior which performs adaptive spatial regularization based on the
    contents of neighbouring vertices using the Markov Random Field method

    This uses the same formalism as the Fabber 'M' type spatial prior but treats the ak
    as a parameter of the optimization. It differs from MRFSpatialPrior by using the
    PDF formulation of the PDF rather than the matrix formulation (the two are equivalent
    but currently we keep both around for checking that they really are!)

    FIXME currently this does not work unless sample size=1
    """

    def __init__(self, nvertices, mean, var, idx=None, post=None, nn=None, n2=None, **kwargs):
        """
        :param nvertices: Number of parameter vertices W
        :param mean: Prior mean value (scalar)
        :param var: Prior variance (scalar)
        :param nn: Sparse tensor of shape [W, W] containing nearest neighbour lists
        """
        Prior.__init__(self)
        self.name = kwargs.get("name", "MRF2SpatialPrior")
        self.nvertices = nvertices
        self.mean = tf.fill([nvertices], mean, name="%s_mean" % self.name)
        self.var = tf.fill([nvertices], var, name="%s_var" % self.name)
        self.std = tf.sqrt(self.var, name="%s_std" % self.name)

        # nn is a sparse tensor of shape [W, W]. If nn[A, B] = 1 then A is
        # a nearest neighbour of B
        self.nn = nn

        # We need the number of samples to implement the log PDF function
        self.sample_size = kwargs.get("sample_size", 5)

        # Set up spatial smoothing parameter calculation from posterior and neighbour lists
        # ak is inferred in log space so it stays positive
        self.logak = tf.Variable(-5.0, name="log_ak", dtype=tf.float32)
        self.ak = self.log_tf(tf.exp(self.logak, name="ak"))

    def mean_log_pdf(self, samples):
        """
        :param samples: Tensor reshaped to [W, N] of parameter samples
        :return: Tensor of shape [W] containing the sample-mean log PDF per vertex
        """
        samples = tf.reshape(samples, (self.nvertices, -1)) # [W, N]
        self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]

        # Replicate the neighbour structure across the sample dimension, then
        # form pairwise squared differences between neighbouring vertices
        expanded_nn = tf.sparse_concat(2, [tf.sparse.reshape(self.nn, (self.nvertices, self.nvertices, 1))] * self.sample_size)
        xj = expanded_nn * tf.reshape(samples, (self.nvertices, 1, -1))
        #xi = tf.reshape(tf.sparse.to_dense(tf.sparse.reorder(self.nn)), (self.nvertices, self.nvertices, 1)) * tf.reshape(samples, (1, self.nvertices, -1))
        xi = expanded_nn * tf.reshape(samples, (1, self.nvertices, -1))
        #xi = tf.sparse.transpose(xj, perm=(1, 0, 2))
        neg_xi = tf.SparseTensor(xi.indices, -xi.values, dense_shape=xi.dense_shape )
        dx2 = tf.square(tf.sparse.add(xj, neg_xi), name="dx2")
        sdx = tf.sparse.reduce_sum(dx2, axis=0) # [W, N]
        term1 = tf.identity(0.5*self.logak, name="term1")
        term2 = tf.identity(-self.ak * sdx / 4, name="term2")
        log_pdf = term1 + term2  # [W, N]
        mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]
        return mean_log_pdf

    def __str__(self):
        return "MRF2 spatial prior"
class ConstantMRFSpatialPrior(NormalPrior):
    """
    Prior which performs adaptive spatial regularization based on the
    contents of neighbouring vertices using the Markov Random Field method

    This is equivalent to the Fabber 'M' type spatial prior

    Unlike the other spatial priors in this module, ``update_ak`` here works
    on NumPy arrays (posterior mean/covariance evaluated outside the graph).
    """

    def __init__(self, nvertices, mean, var, idx=None, nn=None, n2=None, **kwargs):
        """
        :param nvertices: Number of parameter vertices W
        :param mean: Prior mean value (scalar)
        :param var: Prior variance (scalar)
        :param idx: Index of this parameter within the posterior mean/covariance
        :param nn: Sparse tensor of shape [W, W] containing nearest neighbour lists
        :param n2: Sparse tensor of shape [W, W] containing second nearest neighbour lists
        """
        NormalPrior.__init__(self, nvertices, mean, var, name="MRFSpatialPrior")
        self.idx = idx

        # Save the original vertexwise mean and variance - the actual prior mean/var
        # will be calculated from these and also the spatial variation in neighbour vertices
        self.fixed_mean = self.mean
        self.fixed_var = self.var

        # nn and n2 are sparse tensors of shape [W, W]. If nn[A, B] = 1 then A is
        # a nearest neighbour of B, and similarly for n2 and second nearest neighbours
        self.nn = nn
        self.n2 = n2

    def __str__(self):
        return "Spatial MRF prior (%f, %f) - const" % (self.scalar_mean, self.scalar_var)

    def update_ak(self, post_mean, post_cov):
        # This is the equivalent of CalculateAk in Fabber
        #
        # Some of this could probably be better done using linalg
        # operations but bear in mind this is one parameter only

        # Posterior variance and mean for this parameter at each vertex
        self.sigmaK = post_cov[:, self.idx, self.idx] # [W]
        self.wK = post_mean[:, self.idx] # [W]
        self.num_nn = np.sum(self.nn, axis=1) # [W]

        # Sum over vertices of parameter variance multiplied by number of
        # nearest neighbours for each vertex
        trace_term = np.sum(self.sigmaK * self.num_nn) # [1]

        # Sum of nearest and next-nearest neighbour mean values
        self.sum_means_nn = np.matmul(self.nn, np.reshape(self.wK, (-1, 1))) # [W]
        # BUGFIX: this previously used tf.reshape, creating a TF op inside
        # otherwise pure-NumPy arithmetic - np.reshape is intended here
        # (compare with the nn line above).
        self.sum_means_n2 = np.matmul(self.n2, np.reshape(self.wK, (-1, 1))) # [W]

        # vertex parameter mean multipled by number of nearest neighbours
        wknn = self.wK * self.num_nn # [W]

        swk = wknn - self.sum_means_nn # [W]

        term2 = np.sum(swk * self.wK) # [1]

        # ak update equation (gk/hk are the Gamma distribution parameters)
        gk = 1 / (0.5 * trace_term + 0.5 * term2 + 0.1)
        hk = float(self.nvertices) * 0.5 + 1.0
        self.ak = gk * hk
        self.log.info("%s: ak=%f", self.name, self.ak)

    def _setup_mean_var(self, post_mean, post_cov):
        # This is the equivalent of ApplyToMVN in Fabber
        contrib_nn = self.log_tf(8*self.sum_means_nn, name="contrib_nn") # [W]
        contrib_n2 = self.log_tf(-self.sum_means_n2, name="contrib_n2") # [W]

        spatial_mean = self.log_tf(contrib_nn / (8*self.num_nn), name="spatial_mean")
        spatial_prec = self.log_tf(self.num_nn * self.ak, name="spatial_prec")

        # Combine the fixed prior with the spatially-derived precision/mean
        self.var = self.log_tf(1 / (1/self.fixed_var + spatial_prec), name="%s_var" % self.name)
        #self.var = self.fixed_var
        self.mean = self.log_tf(self.var * spatial_prec * spatial_mean, name="%s_mean" % self.name)
        #self.mean = self.fixed_mean + self.ak
class FactorisedPrior(Prior):
    """
    Prior for a collection of parameters where there is no prior covariance

    In this case the mean log PDF can be summed from the contributions of each
    parameter
    """

    def __init__(self, priors, **kwargs):
        """
        :param priors: Sequence of Prior instances, one per parameter
        """
        Prior.__init__(self)
        self.priors = priors
        self.name = kwargs.get("name", "FactPrior")
        self.nparams = len(priors)

        # Stack per-parameter means/variances into [W, P] tensors
        means = [prior.mean for prior in self.priors]
        variances = [prior.var for prior in self.priors]
        self.mean = self.log_tf(tf.stack(means, axis=-1, name="%s_mean" % self.name))
        self.var = self.log_tf(tf.stack(variances, axis=-1, name="%s_var" % self.name))
        self.std = tf.sqrt(self.var, name="%s_std" % self.name)
        self.nvertices = priors[0].nvertices

        # Define a diagonal covariance matrix for convenience
        self.cov = tf.matrix_diag(self.var, name='%s_cov' % self.name)

    def mean_log_pdf(self, samples):
        """
        :param samples: Tensor of shape [W, P, S] of parameter samples
        :return: Tensor of shape [W] - sum over parameters of each prior's
                 mean log PDF (valid because there is no prior covariance)
        """
        nvertices = tf.shape(samples)[0]

        mean_log_pdf = tf.zeros([nvertices], dtype=tf.float32)
        for idx, prior in enumerate(self.priors):
            param_samples = tf.slice(samples, [0, idx, 0], [-1, 1, -1])
            param_logpdf = prior.mean_log_pdf(param_samples)
            mean_log_pdf = tf.add(mean_log_pdf, param_logpdf)
        return mean_log_pdf

    def log_det_cov(self):
        """
        Determinant of diagonal matrix is product of diagonal entries
        """
        return tf.reduce_sum(tf.log(self.var), axis=1, name='%s_log_det_cov' % self.name)
| 18,784 | 6,346 |
import sys
if sys.version_info >= (3, 8):
from typing import Optional, Literal
else:
from typing import Optional
from typing_extensions import Literal
from pydantic import BaseModel
class NumSctpStreams(BaseModel):
    """SCTP stream counts negotiated for an association."""
    # Initially requested number of outgoing SCTP streams.
    OS: int
    # Maximum number of incoming SCTP streams.
    MIS: int
class SctpCapabilities(BaseModel):
    """SCTP capabilities advertised by an endpoint."""
    # Requested/maximum SCTP stream counts.
    numStreams: NumSctpStreams
class SctpParameters(BaseModel):
    """Parameters of an SCTP association."""
    # Must always equal 5000.
    port: int
    # Initially requested number of outgoing SCTP streams.
    OS: int
    # Maximum number of incoming SCTP streams.
    MIS: int
    # Maximum allowed size for SCTP messages.
    maxMessageSize: int
# SCTP stream parameters describe the reliability of a certain SCTP stream.
# If ordered is True then maxPacketLifeTime and maxRetransmits must be
# False.
# If ordered if False, only one of maxPacketLifeTime or maxRetransmits
# can be True.
class SctpStreamParameters(BaseModel):
    """Reliability parameters for a single SCTP stream.

    If ``ordered`` is True then ``maxPacketLifeTime`` and ``maxRetransmits``
    must not be set. If ``ordered`` is False, at most one of
    ``maxPacketLifeTime`` or ``maxRetransmits`` may be set.
    """
    # SCTP stream id.
    streamId: Optional[int]
    # Whether data messages must be received in order. if True the messages will
    # be sent reliably. Default True.
    ordered: Optional[bool] = True
    # When ordered is False indicates the time (in milliseconds) after which a
    # SCTP packet will stop being retransmitted.
    maxPacketLifeTime: Optional[int]
    # When ordered is False indicates the maximum number of times a packet will
    # be retransmitted.
    maxRetransmits: Optional[int]
    # DataChannel priority.
    priority: Optional[Literal['very-low','low','medium','high']]
    # A label which can be used to distinguish this DataChannel from others.
    label: Optional[str]
    # Name of the sub-protocol used by this DataChannel.
    protocol: Optional[str]
from setuptools import setup, find_packages
# Read the long description for the package index from the README.
with open('README.rst') as f:
    long_description = f.read()

# Package metadata and build configuration for ocdsextensionsdatacollector.
setup(
    name='ocdsextensionsdatacollector',
    version='0.0.1',
    author='Open Contracting Partnership, Open Data Services',
    author_email='data@open-contracting.org',
    url='https://github.com/open-contracting/extensions-data-collector',
    description='Collects data about OCDS extensions into a machine-readable format',
    license='BSD',
    packages=find_packages(),
    long_description=long_description,
    install_requires=[
        'Babel',
        'ocds-babel>=0.0.3',
        'ocdsextensionregistry>=0.0.5',
        'polib',
        'requests',
        # NOTE(review): Sphinx is pinned exactly - confirm this pin is still
        # required before upgrading.
        'Sphinx==1.5.1',
    ],
    extras_require={
        'test': [
            'coveralls',
            'pytest',
            'pytest-cov',
        ],
    },
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.6',
    ],
    # Console entry point exposing the CLI as `ocdsextensionsdatacollector`.
    entry_points={
        'console_scripts': [
            'ocdsextensionsdatacollector = ocdsextensionsdatacollector.cli.__main__:main',
        ],
    },
)
| 1,123 | 362 |
import numpy as np
import matplotlib.pyplot as plt
import cv2
def detect_body(frame):
    """Return a copy of *frame* with detected full bodies outlined.

    Runs the Haar cascade "haarcascade_fullbody.xml" (expected in the
    working directory) on a grayscale version of the frame and draws a
    thick rectangle in color (255, 0, 0) around each detection.
    """
    annotated = frame.copy()
    classifier = cv2.CascadeClassifier("haarcascade_fullbody.xml")
    grayscale = cv2.cvtColor(annotated, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in classifier.detectMultiScale(grayscale):
        cv2.rectangle(annotated, (x, y), (x + w, y + h), (255, 0, 0), 8)
    return annotated
| 423 | 177 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes
import ctypes.util
import sys
# Cached byte-length of the process's original argv block; computed once on
# the first call to set_command_line().
_CACHED_CMDLINE_LENGTH = None


def set_command_line(cmdline):
    """Replaces the commandline of this process as seen by ps."""
    # NOTE(review): this concatenates str with '\0' and passes it to
    # ctypes.c_char_p, which only works on Python 2 (Python 3 would require
    # bytes) - confirm the target interpreter.

    # Get the current commandline.
    argc = ctypes.c_int()
    argv = ctypes.POINTER(ctypes.c_char_p)()
    ctypes.pythonapi.Py_GetArgcArgv(ctypes.byref(argc), ctypes.byref(argv))

    global _CACHED_CMDLINE_LENGTH
    if _CACHED_CMDLINE_LENGTH is None:
        # Each argument is terminated by a null-byte, so the length of the whole
        # thing in memory is the sum of all the argument byte-lengths, plus 1 null
        # byte for each.
        _CACHED_CMDLINE_LENGTH = sum(
            len(argv[i]) for i in range(0, argc.value)) + argc.value

    # Pad the cmdline string to the required length. If it's longer than the
    # current commandline, truncate it.
    if len(cmdline) >= _CACHED_CMDLINE_LENGTH:
        new_cmdline = ctypes.c_char_p(cmdline[:_CACHED_CMDLINE_LENGTH-1] + '\0')
    else:
        new_cmdline = ctypes.c_char_p(cmdline.ljust(_CACHED_CMDLINE_LENGTH, '\0'))

    # Replace the old commandline by overwriting the argv memory in place.
    libc = ctypes.CDLL(ctypes.util.find_library('c'))
    libc.memcpy(argv.contents, new_cmdline, _CACHED_CMDLINE_LENGTH)
| 1,333 | 502 |
# Build and print a list of the numbers 1 through 10.
lista = list(range(1, 11))
print(lista)

lista2 = [11, 12, 13]
print(lista2)

# Concatenating with + produces a new combined list.
lista_completa = lista + lista2
print(lista_completa)

# Or:
listaA = list(range(1, 11))
listaB = [11, 12, 13]
# copy() makes an independent copy of listaB; rebinding listaA to it
# replaces listaA's previous contents entirely.
listaA = listaB.copy()
print(listaA)
| 231 | 134 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.model_selection import StratifiedKFold
from scipy.stats import binom
def discriminatePlot(X, y, cVal, titleStr='', figdir='.', Xcolname = None, plotFig = False, removeTickLabels = False, testInd = None):
    """Frederic's Robust Wrapper for discriminant analysis. Performs LDA, QDA
    and RF after error checking, generates plots and returns cross-validated
    performance, one-sided binomial p-values and class count.

    Parameters
    ----------
    X : np.array, n rows x p parameters.
    y : group labels, n rows.
    cVal : rgb color code for each data point - should be the same for each
        data point belonging to the same group.
    titleStr : title for plots.
    figdir : directory name (folder name) for figures.
    Xcolname : np.array or list of strings with column names for printout display.
    plotFig : if True, draw the LDA/QDA/RF decision-surface figure.
    removeTickLabels : if True, blank out the axis tick labels on each panel.
    testInd : optional boolean mask selecting a fixed test set instead of
        stratified k-fold cross-validation.

    Returns
    -------
    (ldaYes, qdaYes, rfYes, cvCount, ldaP, qdaP, rfP, nClasses, weights), or
    nine -1 values when fewer than two classes survive the MINCOUNT filter.
    """
    # Global Parameters
    CVFOLDS = 10
    MINCOUNT = 10
    MINCOUNTTRAINING = 5
    # figdir = '/Users/frederictheunissen/Documents/Data/Julie/Acoustical Analysis/Figures Voice'
    # Initialize Variables and clean up data: keep only classes with at least
    # MINCOUNT samples.
    classes, classesCount = np.unique(y, return_counts = True)  # Classes to be discriminated should be same as ldaMod.classes_
    goodIndClasses = np.array([n >= MINCOUNT for n in classesCount])
    goodInd = np.array([b in classes[goodIndClasses] for b in y])
    if testInd is not None:
        # Check for goodInd - should be an np.array of dtype=bool
        # Transform testInd into an index inside xGood and yGood
        testIndx = testInd.nonzero()[0]
        goodIndx = goodInd.nonzero()[0]
        testInd = np.hstack([ np.where(goodIndx == testval)[0] for testval in testIndx])
        trainInd = np.asarray([i for i in range(len(goodIndx)) if i not in testInd])
    yGood = y[goodInd]
    XGood = X[goodInd]
    cValGood = cVal[goodInd]
    classes, classesCount = np.unique(yGood, return_counts = True)
    nClasses = classes.size  # Number of classes or groups
    # Do we have enough data?
    if (nClasses < 2):
        print ('Error in ldaPLot: Insufficient classes with minimun data (%d) for discrimination analysis' % (MINCOUNT))
        return -1, -1, -1, -1 , -1, -1, -1, -1, -1
    if testInd is None:
        # Cap the fold count by the smallest class so stratification is possible.
        cvFolds = min(min(classesCount), CVFOLDS)
        if (cvFolds < CVFOLDS):
            print ('Warning in ldaPlot: Cross-validation performed with %d folds (instead of %d)' % (cvFolds, CVFOLDS))
    else:
        cvFolds = 1
    # Data size and color values
    nD = XGood.shape[1]  # number of features in X
    nX = XGood.shape[0]  # number of data points in X
    cClasses = []  # Color code for each class (first point's color, alpha=1)
    for cl in classes:
        icl = (yGood == cl).nonzero()[0][0]
        cClasses.append(np.append(cValGood[icl],1.0))
    cClasses = np.asarray(cClasses)
    # Use a uniform prior
    myPrior = np.ones(nClasses)*(1.0/nClasses)
    # Perform a PCA for dimensionality reduction so that the covariance matrix can be fitted.
    nDmax = int(np.fix(np.sqrt(nX//5)))
    if nDmax < nD:
        print ('Warning: Insufficient data for', nD, 'parameters. PCA projection to', nDmax, 'dimensions.' )
    nDmax = min(nD, nDmax)
    pca = PCA(n_components=nDmax)
    Xr = pca.fit_transform(XGood)
    print ('Variance explained is %.2f%%' % (sum(pca.explained_variance_ratio_)*100.0))
    # Initialise Classifiers
    ldaMod = LDA(n_components = min(nDmax,nClasses-1), priors = myPrior, shrinkage = None, solver = 'svd')
    qdaMod = QDA(priors = myPrior)
    rfMod = RF()  # by default assumes equal weights
    # Perform CVFOLDS fold cross-validation to get performance of classifiers.
    ldaYes = 0
    qdaYes = 0
    rfYes = 0
    cvCount = 0
    if testInd is None:
        skf = StratifiedKFold(n_splits = cvFolds)
        skfList = skf.split(Xr, yGood)
    else:
        # Single "fold": the caller-supplied fixed train/test split.
        skfList = [(trainInd,testInd)]
    for train, test in skfList:
        # Enforce the MINCOUNT in each class for Training
        trainClasses, trainCount = np.unique(yGood[train], return_counts=True)
        goodIndClasses = np.array([n >= MINCOUNTTRAINING for n in trainCount])
        goodIndTrain = np.array([b in trainClasses[goodIndClasses] for b in yGood[train]])
        # Specity the training data set, the number of groups and priors
        yTrain = yGood[train[goodIndTrain]]
        XrTrain = Xr[train[goodIndTrain]]
        trainClasses, trainCount = np.unique(yTrain, return_counts=True)
        ntrainClasses = trainClasses.size
        # Skip this cross-validation fold because of insufficient data
        if ntrainClasses < 2:
            continue
        # Restrict the test set to classes actually seen in training.
        goodInd = np.array([b in trainClasses for b in yGood[test]])
        if (goodInd.size == 0):
            continue
        # Fit the data with a uniform prior over the surviving training classes.
        trainPriors = np.ones(ntrainClasses)*(1.0/ntrainClasses)
        ldaMod.priors = trainPriors
        qdaMod.priors = trainPriors
        ldaMod.fit(XrTrain, yTrain)
        qdaMod.fit(XrTrain, yTrain)
        rfMod.fit(XrTrain, yTrain)
        # Accumulate correct-classification counts (accuracy * fold size).
        ldaYes += np.around((ldaMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
        qdaYes += np.around((qdaMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
        rfYes += np.around((rfMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
        cvCount += goodInd.size
    # Refit with all the data for the plots
    ldaMod.priors = myPrior
    qdaMod.priors = myPrior
    Xrr = ldaMod.fit_transform(Xr, yGood)
    # Check labels
    for a, b in zip(classes, ldaMod.classes_):
        if a != b:
            print ('Error in ldaPlot: labels do not match')
    # Check the within-group covariance in the rotated space
    # covs = []
    # for group in classes:
    #     Xg = Xrr[yGood == group, :]
    #     covs.append(np.atleast_2d(np.cov(Xg,rowvar=False)))
    # withinCov = np.average(covs, axis=0, weights=myPrior)
    # Print the five largest coefficients of first 3 DFA
    MAXCOMP = 3  # Maximum number of DFA componnents
    MAXWEIGHT = 5  # Maximum number of weights printed for each componnent
    ncomp = min(MAXCOMP, nClasses-1)
    nweight = min(MAXWEIGHT, nD)
    # The scalings_ has the eigenvectors of the LDA in columns and the pca.componnents has the eigenvectors of PCA in columns
    weights = np.dot(ldaMod.scalings_[:,0:ncomp].T, pca.components_)
    print('LDA Weights:')
    for ic in range(ncomp):
        # Sort features by absolute weight, largest first.
        idmax = np.argsort(np.abs(weights[ic,:]))[::-1]
        print('DFA %d: '%ic, end = '')
        for iw in range(nweight):
            if Xcolname is None:
                colstr = 'C%d' % idmax[iw]
            else:
                colstr = Xcolname[idmax[iw]]
            print('%s %.3f; ' % (colstr, float(weights[ic, idmax[iw]]) ), end='')
        print()
    if plotFig:
        dimVal = 0.8  # Overall diming of background so that points can be seen
        # Obtain fits in this rotated space for display purposes
        ldaMod.fit(Xrr, yGood)
        qdaMod.fit(Xrr, yGood)
        rfMod.fit(Xrr, yGood)
        XrrMean = Xrr.mean(0)
        # Make a mesh for plotting
        x1, x2 = np.meshgrid(np.arange(-6.0, 6.0, 0.1), np.arange(-6.0, 6.0, 0.1))
        xm1 = np.reshape(x1, -1)
        xm2 = np.reshape(x2, -1)
        nxm = np.size(xm1)
        Xm = np.zeros((nxm, Xrr.shape[1]))
        Xm[:,0] = xm1
        if Xrr.shape[1] > 1 :
            Xm[:,1] = xm2
        # Remaining DFA dimensions are held at their mean value.
        for ix in range(2,Xrr.shape[1]):
            Xm[:,ix] = np.squeeze(np.ones((nxm,1)))*XrrMean[ix]
        XmcLDA = np.zeros((nxm, 4))  # RGBA values for color for LDA
        XmcQDA = np.zeros((nxm, 4))  # RGBA values for color for QDA
        XmcRF = np.zeros((nxm, 4))  # RGBA values for color for RF
        # Predict values on mesh for plotting based on the first two DFs
        yPredLDA = ldaMod.predict_proba(Xm)
        yPredQDA = qdaMod.predict_proba(Xm)
        yPredRF = rfMod.predict_proba(Xm)
        # Transform the predictions in color codes
        maxLDA = yPredLDA.max()
        for ix in range(nxm) :
            cWeight = yPredLDA[ix,:]  # Prob for all classes
            cWinner = ((cWeight == cWeight.max()).astype('float'))  # Winner takes all
            # XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses
            XmcLDA[ix,:] = np.dot(cWinner*cWeight, cClasses)
            XmcLDA[ix,3] = (cWeight.max()/maxLDA)*dimVal
        # Plot the surface of probability
        plt.figure(facecolor='white', figsize=(10,4))
        plt.subplot(131)
        Zplot = XmcLDA.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
        plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
        if nClasses > 2:
            plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
        else:
            # 1-D projection: jitter the y-coordinate so points are visible.
            plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
        plt.title('%s: LDA %d/%d' % (titleStr, ldaYes, cvCount))
        plt.axis('square')
        plt.xlim((-6, 6))
        plt.ylim((-6, 6))
        plt.xlabel('DFA 1')
        plt.ylabel('DFA 2')
        if removeTickLabels:
            ax = plt.gca()
            labels = [item.get_text() for item in ax.get_xticklabels()]
            empty_string_labels = ['']*len(labels)
            ax.set_xticklabels(empty_string_labels)
            labels = [item.get_text() for item in ax.get_yticklabels()]
            empty_string_labels = ['']*len(labels)
            ax.set_yticklabels(empty_string_labels)
        # Transform the predictions in color codes
        maxQDA = yPredQDA.max()
        for ix in range(nxm) :
            cWeight = yPredQDA[ix,:]  # Prob for all classes
            cWinner = ((cWeight == cWeight.max()).astype('float'))  # Winner takes all
            # XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses
            XmcQDA[ix,:] = np.dot(cWinner*cWeight, cClasses)
            XmcQDA[ix,3] = (cWeight.max()/maxQDA)*dimVal
        # Plot the surface of probability
        plt.subplot(132)
        Zplot = XmcQDA.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
        plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
        if nClasses > 2:
            plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
        else:
            plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
        plt.title('%s: QDA %d/%d' % (titleStr, qdaYes, cvCount))
        plt.xlabel('DFA 1')
        plt.ylabel('DFA 2')
        plt.axis('square')
        plt.xlim((-6, 6))
        plt.ylim((-6, 6))
        if removeTickLabels:
            ax = plt.gca()
            labels = [item.get_text() for item in ax.get_xticklabels()]
            empty_string_labels = ['']*len(labels)
            ax.set_xticklabels(empty_string_labels)
            labels = [item.get_text() for item in ax.get_yticklabels()]
            empty_string_labels = ['']*len(labels)
            ax.set_yticklabels(empty_string_labels)
        # Transform the predictions in color codes
        maxRF = yPredRF.max()
        for ix in range(nxm) :
            cWeight = yPredRF[ix,:]  # Prob for all classes
            cWinner = ((cWeight == cWeight.max()).astype('float'))  # Winner takes all
            # XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses  # Weighted colors does not work
            XmcRF[ix,:] = np.dot(cWinner*cWeight, cClasses)
            XmcRF[ix,3] = (cWeight.max()/maxRF)*dimVal
        # Plot the surface of probability
        plt.subplot(133)
        Zplot = XmcRF.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
        plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
        if nClasses > 2:
            plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
        else:
            plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
        plt.title('%s: RF %d/%d' % (titleStr, rfYes, cvCount))
        plt.xlabel('DFA 1')
        plt.ylabel('DFA 2')
        plt.axis('square')
        plt.xlim((-6, 6))
        plt.ylim((-6, 6))
        if removeTickLabels:
            ax = plt.gca()
            labels = [item.get_text() for item in ax.get_xticklabels()]
            empty_string_labels = ['']*len(labels)
            ax.set_xticklabels(empty_string_labels)
            labels = [item.get_text() for item in ax.get_yticklabels()]
            empty_string_labels = ['']*len(labels)
            ax.set_yticklabels(empty_string_labels)
        plt.show()
        plt.savefig('%s/%s.png' % (figdir,titleStr), format='png', dpi=1000)
    # Results: one-sided binomial p-value for scoring at least this many
    # correct under chance (p = 1/nClasses).
    ldaYes = int(ldaYes)
    qdaYes = int(qdaYes)
    rfYes = int(rfYes)
    p = 1.0/nClasses
    ldaP = 0
    qdaP = 0
    rfP = 0
    for k in range(ldaYes, cvCount+1):
        ldaP += binom.pmf(k, cvCount, p)
    for k in range(qdaYes, cvCount+1):
        qdaP += binom.pmf(k, cvCount, p)
    for k in range(rfYes, cvCount+1):
        rfP += binom.pmf(k, cvCount, p)
    print ("Number of classes %d. Chance level %.2f %%" % (nClasses, 100.0/nClasses))
    print ("%s LDA: %.2f %% (%d/%d p=%.4f)" % (titleStr, 100.0*ldaYes/cvCount, ldaYes, cvCount, ldaP))
    print ("%s QDA: %.2f %% (%d/%d p=%.4f)" % (titleStr, 100.0*qdaYes/cvCount, qdaYes, cvCount, qdaP))
    print ("%s RF: %.2f %% (%d/%d p=%.4f)" % (titleStr, 100.0*rfYes/cvCount, rfYes, cvCount, rfP))
    return ldaYes, qdaYes, rfYes, cvCount, ldaP, qdaP, rfP, nClasses, weights
| 14,240 | 5,129 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.http import *
from django.contrib.auth import *
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.exceptions import APIException
#from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .models import *
from .serializers import *
from itertools import chain
# Create your views here.
class RecipeDetails(APIView):
    """Read-only endpoint returning a single Recipe by primary key."""
    #permission_classes = (IsAuthenticatedOrReadOnly,)

    def get_object(self, pk):
        """Fetch the Recipe or raise 404 when it does not exist."""
        try:
            return Recipe.objects.get(pk=pk)
        except Recipe.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """Serialize and return the requested recipe."""
        return Response(RecipeSerializer(self.get_object(pk)).data)
# TODO: image handling is still missing
class RecipeRegister(APIView):
    """Create a new Recipe from POSTed form data."""
    #permission_classes = (IsAuthenticatedOrReadOnly,)

    def post(self, request, format=None):
        """Validate and persist the recipe; 201 on success, 400 otherwise."""
        serializer = RecipeSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class UserDuplicationError(APIException):
    """DRF exception raised on signup when the username is already taken."""
    status_code = status.HTTP_409_CONFLICT
    default_detail = u'Duplicate user'
class ProfileSignUp(APIView):
    """Creates a Django auth User plus its Profile from a signup request."""
    #permission_classes = (IsAuthenticatedOrReadOnly,)

    #see if User already exists
    def get_object(self, data):
        """Raise UserDuplicationError (409) if a user with this username exists.

        Bug fix: the original called User.objects.get via .filter(), but
        filter() returns a (possibly empty) queryset and never raises
        User.DoesNotExist — so the except branch was dead code and EVERY
        signup, including for brand-new users, raised UserDuplicationError.
        An explicit exists() check implements the intended duplicate test.
        """
        if User.objects.filter(username=data).exists():
            raise UserDuplicationError()
        return True

    def post(self, request, format=None):
        # Reject duplicates before creating anything.
        self.get_object(request.data['email'])
        # The email doubles as the username; no separate email field is stored.
        createdUser = User.objects.create_user(request.data['email'], None, request.data['password'])
        # Strip the credentials from the payload and link the profile to the
        # freshly created auth user.
        request.data.pop('email', None)
        request.data.pop('password', None)
        request.data['user'] = createdUser.pk
        serializer = CreateProfileSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ProfileLogin(APIView):
    """Authenticates email/password credentials and returns the Profile."""
    #permission_classes = (IsAuthenticatedOrReadOnly,)

    def get_object(self, data):
        """Return the Profile for valid credentials; raise Http404 otherwise.

        Bug fix: the original caught only User.DoesNotExist, but the lookup
        that can actually fail here is Profile.objects.get, which raises
        Profile.DoesNotExist — that escaped as a 500. Catching the shared
        base ObjectDoesNotExist covers both models. (authenticate() itself
        returns None on failure; it does not raise.)
        """
        try:
            retrievedUser = authenticate(username=data['email'], password=data['password'])
            if retrievedUser is not None:
                user = Profile.objects.get(user = retrievedUser)
                print("achou usuario" + retrievedUser.username)
                return user
            else:
                print("NAO achou usuario")
                raise Http404
        except ObjectDoesNotExist:
            raise Http404

    def post(self, request, format=None):
        # SECURITY NOTE(review): this logs the plaintext password; kept for
        # behavior parity but it should be removed before production.
        print("request body: " +request.data['email'] + " " + request.data['password'])
        profile = self.get_object(request.data)
        serializer = ProfileSerializer(profile)
        return Response(serializer.data)
class MostThreeRecentRecipeFromEveryCategory(APIView):
    """Lists up to three recipes per category, flattened into one response."""
    #permission_classes = (IsAuthenticatedOrReadOnly,)

    def get(self, request, format=None):
        # Take the first three recipes of each category, then flatten the
        # per-category querysets into one plain list.
        per_category = [Recipe.objects.filter(category = cat)[:3]
                        for cat in Category.objects.all()]
        flattened = list(chain.from_iterable(per_category))
        serializer = ThreeRecentSerializer(flattened, many=True)
        return Response(serializer.data)
class RecipesFromCategory(APIView):
    """Lists every Recipe belonging to the Category identified by pk."""
    #permission_classes = (IsAuthenticatedOrReadOnly,)

    def get_object(self, pk):
        """Return the category's recipe queryset, or 404 for an unknown pk."""
        try:
            owning_category = Category.objects.get(pk=pk)
        except Category.DoesNotExist:
            raise Http404
        return Recipe.objects.filter(category = owning_category)

    def get(self, request, pk, format=None):
        print("PK: " +pk)
        queryset = self.get_object(pk)
        serialized = CategorySerializer(queryset, many=True)
        print(serialized.data)
        return Response(serialized.data)
| 4,477 | 1,283 |
# coding=utf-8
import os
import unittest
import vcr
import requests
# Cassette file shared by every test class below; each tearDown deletes it so
# the next test records from scratch.
TEST_CASSETTE_FILE = 'cassettes/test_req.yaml'
class TestRequestsGet(unittest.TestCase):
    """A GET replayed from a vcr cassette must match a live GET."""

    def setUp(self):
        # One live request outside the cassette, then two inside it: the
        # first records, the second replays the recording.
        self.live = requests.get('http://httpbin.org/')
        with vcr.use_cassette(TEST_CASSETTE_FILE):
            self.recorded = requests.get('http://httpbin.org/')
            self.replayed = requests.get('http://httpbin.org/')

    def tearDown(self):
        # Delete the cassette; ignore the case where it was never written.
        try:
            os.remove(TEST_CASSETTE_FILE)
        except OSError:
            pass

    def test_initial_response_code(self):
        self.assertEqual(self.live.status_code, self.recorded.status_code)

    def test_cached_response_code(self):
        self.assertEqual(self.live.status_code, self.replayed.status_code)

    def test_initial_response_headers(self):
        self.assertEqual(self.live.headers['content-type'], self.recorded.headers['content-type'])

    def test_cached_response_headers(self):
        self.assertEqual(self.live.headers['content-type'], self.replayed.headers['content-type'])

    def test_initial_response_text(self):
        self.assertEqual(self.live.text, self.recorded.text)

    def test_cached_response_text(self):
        self.assertEqual(self.live.text, self.replayed.text)
class TestRequestsAuth(unittest.TestCase):
    """Basic-auth GETs must round-trip through the cassette; wrong
    credentials must not reproduce the recorded success."""

    AUTH_URL = 'https://httpbin.org/basic-auth/user/passwd'

    def setUp(self):
        credentials = ('user', 'passwd')
        self.live = requests.get(self.AUTH_URL, auth=credentials)
        with vcr.use_cassette(TEST_CASSETTE_FILE):
            self.recorded = requests.get(self.AUTH_URL, auth=credentials)
            self.replayed = requests.get(self.AUTH_URL, auth=credentials)

    def tearDown(self):
        try:
            os.remove(TEST_CASSETTE_FILE)
        except OSError:
            pass

    def test_initial_response_code(self):
        self.assertEqual(self.live.status_code, self.recorded.status_code)

    def test_cached_response_code(self):
        self.assertEqual(self.live.status_code, self.replayed.status_code)

    def test_cached_response_auth_can_fail(self):
        # A request with a wrong password must yield a different status code.
        bad_auth_response = requests.get(self.AUTH_URL, auth=('user', 'passwdzzz'))
        self.assertNotEqual(self.live.status_code, bad_auth_response.status_code)
class TestRequestsPost(unittest.TestCase):
    """POST bodies echoed by httpbin must match between live and cassette runs."""

    def setUp(self):
        form_data = {'key1': 'value1', 'key2': 'value2'}
        self.live = requests.post('http://httpbin.org/post', form_data)
        with vcr.use_cassette(TEST_CASSETTE_FILE):
            self.recorded = requests.post('http://httpbin.org/post', form_data)
            self.replayed = requests.post('http://httpbin.org/post', form_data)

    def tearDown(self):
        try:
            os.remove(TEST_CASSETTE_FILE)
        except OSError:
            pass

    def test_initial_post_response_text(self):
        self.assertEqual(self.live.text, self.recorded.text)

    def test_cached_post_response_text(self):
        self.assertEqual(self.live.text, self.replayed.text)
class TestRequestsHTTPS(unittest.TestCase):
    """HTTPS GET bodies must match between live and cassette runs."""

    # Show full diffs when the (long) bodies disagree.
    maxDiff = None

    def setUp(self):
        self.live = requests.get('https://httpbin.org/get')
        with vcr.use_cassette(TEST_CASSETTE_FILE):
            self.recorded = requests.get('https://httpbin.org/get')
            self.replayed = requests.get('https://httpbin.org/get')

    def tearDown(self):
        try:
            os.remove(TEST_CASSETTE_FILE)
        except OSError:
            pass

    def test_initial_https_response_text(self):
        self.assertEqual(self.live.text, self.recorded.text)

    def test_cached_https_response_text(self):
        self.assertEqual(self.live.text, self.replayed.text)
| 4,133 | 1,345 |
import joblib
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
def predict(data):
    """Evaluate a fitted classifier on held-out data and print metrics.

    Args:
        data: 3-tuple (clf, X_test, y_test) — a fitted estimator with a
            .predict method, the test features, and the true labels.

    Bug fix: the original printed the precision value under the
    "Recall Score" label and the recall value under "Precision Score";
    each label now matches the metric actually computed.
    """
    clf, X_test, y_test = data
    y_pred = clf.predict(X_test)
    print(f'Accuracy: {round(accuracy_score(y_test, y_pred) * 100, 2)}%')
    print(f'F1 Score: {round(f1_score(y_test, y_pred) * 100, 2)}%')
    print(f'Recall Score: {round(recall_score(y_test, y_pred) * 100, 2)}%')
    print(f'Precision Score: {round(precision_score(y_test, y_pred) * 100, 2)}%')
def main(params):
    """Entry point: forward `params` straight to predict() and return its result."""
    evaluation = predict(params)
    return evaluation
if __name__ == '__main__':
    # Load the persisted model and held-out test split from ../models, then
    # run the evaluation.
    model = joblib.load('../models/model.pkl')
    S_test = joblib.load('../models/S_test.pkl')
    y_test = joblib.load('../models/y_test.pkl')
    main((model, S_test, y_test))
| 736 | 318 |
# Purpose: open example files with big polyface models
# Created: 23.04.2014
# Copyright (c) 2014-2020, Manfred Moitzi
# License: MIT License
import time
from pathlib import Path
import ezdxf
from ezdxf.render import MeshVertexMerger
# Input folder holding the CADKit sample drawings (Windows path).
SRCDIR = Path(r'D:\Source\dxftest\CADKitSamples')
# Output folder on the user's desktop; '~' is expanded at import time.
OUTDIR = Path('~/Desktop/Outbox').expanduser()
def optimize_polyfaces(polyfaces):
    """Run optimize() on every polyface, then report the total number of
    vertices removed and the accumulated optimization time."""
    processed = 0
    total_seconds = 0
    vertices_removed = 0
    print("start optimizing...")
    for mesh in polyfaces:
        processed += 1
        before = len(mesh)
        t0 = time.time()
        mesh.optimize()
        total_seconds += time.time() - t0
        vertices_removed += before - len(mesh)
    print(f"removed {vertices_removed} vertices in {total_seconds:.2f} seconds.")
def optimize(name: str):
    """Read SRCDIR/name, optimize its polyface meshes in place, and save the
    result to OUTDIR as 'optimized_<name>'.

    Fix: the progress message printed the literal text "(unknown)" instead of
    the path actually being opened.
    """
    filename = SRCDIR / name
    new_filename = OUTDIR / ('optimized_' + name)
    print(f'opening DXF file: {filename}')
    start_time = time.time()
    doc = ezdxf.readfile(filename)
    msp = doc.modelspace()
    end_time = time.time()
    print(f'time for reading: {end_time - start_time:.1f} seconds')
    print(f"DXF version: {doc.dxfversion}")
    print(f"Database contains {len(doc.entitydb)} entities.")
    # Only POLYLINE entities flagged as polyface meshes are optimizable.
    polyfaces = (polyline for polyline in msp.query('POLYLINE') if polyline.is_poly_face_mesh)
    optimize_polyfaces(polyfaces)
    print(f'saving DXF file: {new_filename}')
    start_time = time.time()
    doc.saveas(new_filename)
    end_time = time.time()
    print(f'time for saving: {end_time - start_time:.1f} seconds')
def save_as(name):
    """Read SRCDIR/name and write its polyface meshes into two new documents:
    one as MESH entities ('mesh_<name>') and one as recreated polyfaces
    ('recreated_polyface_<name>'), both saved to OUTDIR.

    Fix: the progress message printed the literal text "(unknown)" instead of
    the path actually being opened.
    """
    filename = SRCDIR / name
    print(f'opening DXF file: {filename}')
    start_time = time.time()
    doc = ezdxf.readfile(filename)
    msp = doc.modelspace()
    end_time = time.time()
    print(f'time for reading: {end_time - start_time:.1f} seconds')
    print(f"DXF version: {doc.dxfversion}")
    print(f"Database contains {len(doc.entitydb)} entities.")
    polyfaces = (polyline for polyline in msp.query('POLYLINE') if polyline.is_poly_face_mesh)
    # create a new documents
    doc1 = ezdxf.new()
    msp1 = doc1.modelspace()
    doc2 = ezdxf.new()
    msp2 = doc2.modelspace()
    # Merge each polyface's vertices, then render into both target documents,
    # carrying over layer and color.
    for polyface in polyfaces:
        b = MeshVertexMerger.from_polyface(polyface)
        b.render(msp1, dxfattribs={
            'layer': polyface.dxf.layer,
            'color': polyface.dxf.color,
        })
        b.render_polyface(msp2, dxfattribs={
            'layer': polyface.dxf.layer,
            'color': polyface.dxf.color,
        })
    new_filename = OUTDIR / ('mesh_' + name)
    print(f'saving as mesh DXF file: {new_filename}')
    start_time = time.time()
    doc1.saveas(new_filename)
    end_time = time.time()
    print(f'time for saving: {end_time - start_time:.1f} seconds')
    new_filename = OUTDIR / ('recreated_polyface_' + name)
    print(f'saving as polyface DXF file: {new_filename}')
    start_time = time.time()
    doc2.saveas(new_filename)
    end_time = time.time()
    print(f'time for saving: {end_time - start_time:.1f} seconds')
if __name__ == '__main__':
    # Optimize two CADKit sample drawings, then rebuild one of them both as a
    # mesh document and as a recreated polyface document.
    optimize('fanuc-430-arm.dxf')
    optimize('cnc machine.dxf')
    save_as('fanuc-430-arm.dxf')
# adapted from http://code.cmlenz.net/diva/browser/trunk/diva/ext/firephp.py
# (c) 2008 C. M. Lenz, Glashammer Developers
from time import time
from logging import Handler
from simplejson import dumps
from glashammer.utils import local
from glashammer.utils.log import add_log_handler
# Map Python logging level names to FirePHP console levels; levels not listed
# here (e.g. INFO) map to None via LEVEL_MAP.get in emit().
LEVEL_MAP = {'DEBUG': 'LOG', 'WARNING': 'WARN', 'CRITICAL': 'ERROR'}
# Header-name prefix the FirePHP browser extension scans for on responses.
PREFIX = 'X-FirePHP-Data-'
def init_firephp():
    # one-time initialisation per request: reset the request-local log buffer
    # that emit() appends to and inject_firephp_headers() drains.
    local.firephp_log = []
def inject_firephp_headers(response):
    """Serialize the request-local FirePHP log into X-FirePHP-Data-* headers."""
    if not hasattr(response, 'headers'):
        # An httpexception or some other weird response: nothing to inject.
        return
    for index, entry in enumerate(local.firephp_log):
        if index == 0:
            # Envelope headers framing the JSON document FirePHP reassembles.
            response.headers[PREFIX + '100000000001'] = '{'
            response.headers[PREFIX + '300000000001'] = '"FirePHP.Firebug.Console":['
            response.headers[PREFIX + '399999999999'] = ',["__SKIP__"]],'
            response.headers[PREFIX + '999999999999'] = '"__SKIP__":"__SKIP__"}'
        # Header id: '3' + last three digits of the current time + a running
        # counter, so records sort between the envelope markers.
        timestamp_tail = str(int(time()))[-3:]
        header_id = '3' + timestamp_tail + ('%08d' % (index + 2))
        payload = dumps(entry)
        if index != 0:
            payload = ',' + payload
        response.headers[PREFIX + header_id] = payload
def emit(level, record):
    """Append a (FirePHP level, record) entry to the request-local log."""
    try:
        local.firephp_log.append((LEVEL_MAP.get(level.upper()), record))
    except AttributeError:
        # No request context (local.firephp_log unset): silently drop the record.
        pass
def setup_firephp(app):
    """Wire FirePHP logging into a Glashammer app's event lifecycle."""
    app.connect_event('wsgi-call', init_firephp)
    app.connect_event('response-start', inject_firephp_headers)
    app.connect_event('log', emit)

# Conventional Glashammer module entry-point alias.
setup_app = setup_firephp
| 1,576 | 566 |
"""
** deeplean-ai.com **
created by :: GauravBh1010tt
contact :: gauravbhatt.deeplearn@gmail.com
"""
from __future__ import unicode_literals, print_function, division
import math
import re
import os
import numpy as np
import torch
import random
import warnings
from io import open
import unicodedata
import matplotlib.pyplot as plt
from torch.autograd import Variable
import time
def asMinutes(s):
    """Format a duration in seconds as 'Mm SSs' with zero-padded seconds."""
    minutes, seconds = divmod(s, 60)
    return '%dm %02ds' % (minutes, seconds)
def timeSince(since, percent):
    """Return 'elapsed (- remaining)' for a start time and progress fraction."""
    elapsed = time.time() - since
    projected_total = elapsed / (percent)
    remaining = projected_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
# Global run configuration: silence warnings, fix the figure size, and seed
# every RNG for reproducibility.
warnings.simplefilter('ignore')
plt.rcParams['figure.figsize'] = (8, 8)
np.random.seed(42)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
use_cuda = torch.cuda.is_available()

# Unpack the bundled corpus ('data.zip') into the working directory at import
# time; readLangs() later reads from the extracted data/ folder.
import zipfile
zip_ref = zipfile.ZipFile('data.zip', 'r')
zip_ref.extractall()
zip_ref.close()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# torch.cuda.set_device(1)

# Reserved vocabulary indices for the start- and end-of-sentence markers.
SOS_token = 0
EOS_token = 1
class Lang:
    """Vocabulary for one language: word<->index maps plus usage counts.

    Indices 0 and 1 are reserved for the SOS and EOS markers.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        """Register every space-separated token of `sentence`."""
        for token in sentence.split(' '):
            self.addWord(token)

    def addWord(self, word):
        """Add `word` to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip diacritics: NFD-decompose, then drop combining marks (category Mn)."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
    """Lowercase/trim, pad . ! ? with a leading space, and collapse every
    other non-letter run into a single space."""
    text = unicodeToAscii(s.lower().strip())
    text = re.sub(r"([.!?])", r" \1", text)
    text = re.sub(r"[^a-zA-Z.!?]+", r" ", text)
    return text
def readLangs(lang1, lang2, reverse=False):
    """Read data/<lang1>-<lang2>.txt and build the language pair.

    Each line holds tab-separated translations; every field is normalized
    with normalizeString. With reverse=True the pairs are flipped and the
    Lang roles are swapped accordingly.

    Returns (input_lang, output_lang, pairs).

    Fix: the corpus file is now opened with a context manager so the handle
    is closed; the original leaked it.
    """
    print("Reading lines...")
    # Read the file and split into lines
    with open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8') as corpus:
        lines = corpus.read().strip().split('\n')
    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
    # Reverse pairs, make Lang instances
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)
    return input_lang, output_lang, pairs
# Keep only sentence pairs whose sides are shorter than MAX_LENGTH tokens.
MAX_LENGTH = 10

# English sentence openings retained by filterPair. Apostrophes were collapsed
# to spaces by normalizeString, hence variants like "i m " for "i'm".
eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)
def filterPair(p, reverse):
    """Keep a pair iff both sides are under MAX_LENGTH tokens and the English
    side (selected by `reverse`, 0 or 1) starts with an allowed prefix."""
    short_enough = (len(p[0].split(' ')) < MAX_LENGTH
                    and len(p[1].split(' ')) < MAX_LENGTH)
    return short_enough and p[reverse].startswith(eng_prefixes)
def filterPairs(pairs, reverse):
    """Filter `pairs` through filterPair; a truthy `reverse` means the English
    side sits at index 1, otherwise index 0."""
    english_index = 1 if reverse else 0
    return [pair for pair in pairs if filterPair(pair, english_index)]
def prepareData(lang1, lang2, reverse=False):
    """Load the corpus, filter it, and index every surviving sentence.

    Returns (input_lang, output_lang, pairs) with both vocabularies populated.
    """
    input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs, reverse)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    # Feed each side of every pair into its vocabulary.
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs
def indexesFromSentence(lang, sentence):
    """Map each space-separated token of `sentence` to its vocabulary index."""
    indexes = []
    for token in sentence.split(' '):
        indexes.append(lang.word2index[token])
    return indexes
def tensorFromSentence(lang, sentence):
    """Index a sentence, append EOS, and return a (len, 1) long tensor on `device`."""
    token_ids = indexesFromSentence(lang, sentence) + [EOS_token]
    return torch.tensor(token_ids, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair, input_lang, output_lang):
    """Convert a (source, target) sentence pair into a pair of index tensors."""
    return (tensorFromSentence(input_lang, pair[0]),
            tensorFromSentence(output_lang, pair[1]))
def as_minutes(s):
    """Format a duration in seconds as 'Mm Ss' (no zero padding)."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def time_since(since, percent):
    """Return 'elapsed (- remaining)' for a start time and progress fraction."""
    elapsed = time.time() - since
    projected_total = elapsed / (percent)
    remaining = projected_total - elapsed
    return '%s (- %s)' % (as_minutes(elapsed), as_minutes(remaining))
def indexes_from_sentence(lang, sentence):
    """Map each space-separated token of `sentence` to its vocabulary index."""
    return [lang.word2index[token] for token in sentence.split(' ')]
def variable_from_sentence(lang, sentence):
    """Wrap the indexed sentence plus EOS in a (len, 1) LongTensor Variable,
    moving it to the GPU when use_cuda is set."""
    token_ids = indexes_from_sentence(lang, sentence)
    token_ids.append(EOS_token)
    var = Variable(torch.LongTensor(token_ids).view(-1, 1))
    if use_cuda:
        var = var.cuda()
    return var
def variables_from_pair(pair, input_lang, output_lang):
    """Convert a (source, target) sentence pair into a pair of Variables."""
    return (variable_from_sentence(input_lang, pair[0]),
            variable_from_sentence(output_lang, pair[1]))
def save_checkpoint(epoch, model, optimizer, directory, \
filename='best.pt'):
checkpoint=({'epoch': epoch+1,
'model': model.state_dict(),
'optimizer' : optimizer.state_dict()
})
try:
torch.save(checkpoint, os.path.join(directory, filename))
except:
os.mkdir(directory)
torch.save(checkpoint, os.path.join(directory, filename)) | 5,667 | 2,024 |
import codecs
import two1.bitcoin as bitcoin
import two1.bitcoin.utils as utils
import two1.channels.server as server
import two1.channels.blockchain as blockchain
import two1.channels.statemachine as statemachine
class MockTwo1Wallet:
    """Mock Two1 Wallet interface for unit testing. See two1.wallet.Two1Wallet for API."""
    # Fixed, deterministic customer key so signatures are reproducible.
    PRIVATE_KEY = bitcoin.PrivateKey.from_bytes(
        codecs.decode("83407377a24a5cef75dedb0445d2da3a5389ed34c0f0c57266b1ed0a5ebb30c1", 'hex_codec'))
    "Customer private key."
    # P2PKH script paying the customer key; serves as the mock utxo's scriptPubKey.
    MOCK_UTXO_SCRIPT_PUBKEY = bitcoin.Script.build_p2pkh(PRIVATE_KEY.public_key.hash160())
    MOCK_UTXO = bitcoin.Hash("3d3834fb69654cea89f9b086642b867c4cb9c86cc0a4cc1972924370dd54de19")
    MOCK_UTXO_INDEX = 1
    "Mock utxo to make deposit transaction."

    def get_change_public_key(self):
        # Always hand back the single fixed customer key as the change key.
        return self.PRIVATE_KEY.public_key

    def build_signed_transaction(
            self, addresses_and_amounts, use_unconfirmed=False, insert_into_cache=False, fees=None, expiration=0):
        # Build a one-input, one-output P2SH transaction spending the mock
        # utxo to the first (address, amount) entry, signed with the customer
        # key. The extra keyword arguments mirror the real wallet API and are
        # ignored here.
        address = list(addresses_and_amounts.keys())[0]
        amount = addresses_and_amounts[address]
        inputs = [bitcoin.TransactionInput(self.MOCK_UTXO, self.MOCK_UTXO_INDEX, bitcoin.Script(), 0xffffffff)]
        outputs = [bitcoin.TransactionOutput(amount, bitcoin.Script.build_p2sh(utils.address_to_key_hash(address)[1]))]
        tx = bitcoin.Transaction(bitcoin.Transaction.DEFAULT_TRANSACTION_VERSION, inputs, outputs, 0x0)
        tx.sign_input(0, bitcoin.Transaction.SIG_HASH_ALL, self.PRIVATE_KEY, self.MOCK_UTXO_SCRIPT_PUBKEY)
        return [tx]

    def get_private_for_public(self, public_key):
        # Only the single fixed keypair is supported by this mock.
        assert bytes(public_key) == bytes(self.PRIVATE_KEY.public_key)
        return self.PRIVATE_KEY

    def broadcast_transaction(self, transaction):
        # NOTE(review): delegates to MockBlockchain (defined elsewhere in this
        # file) with the class itself passed as `self` — unconventional but
        # intentional for the mock.
        return MockBlockchain.broadcast_tx(MockBlockchain, transaction)

    @property
    def testnet(self):
        # Mock wallet always reports mainnet.
        return False
class MockPaymentChannelServer(server.PaymentChannelServerBase):
    """Mock Payment Channel Server interface for unit testing.

    NOTE(review): validation is done with `assert`, which is stripped under
    `python -O`; acceptable here because this class is a test mock only.
    """

    # Fixed merchant key — a test fixture, not a production secret.
    PRIVATE_KEY = bitcoin.PrivateKey.from_bytes(
        codecs.decode("9d1ad8f765996474ff478ef65692a95dba0af2e24cd9e2cb6dfeee52ce2d38e8", 'hex_codec'))
    "Merchant private key."

    blockchain = None
    "Merchant blockchain interface."

    # Class-level dict so channel state survives re-instantiation.
    channels = {}
    "Retained server-side channels state across instantiations of this payment channel server \"client\"."

    def __init__(self, url=None):
        """Instantiate a Mock Payment Channel Server interface for the
        specified URL.

        Args:
            url (str): URL of Mock server.

        Returns:
            MockPaymentChannelServer: instance of MockPaymentChannelServer.
        """
        super().__init__()
        self._url = url

    def get_info(self):
        """Return the merchant's compressed public key as a hex string."""
        return {'public_key': codecs.encode(self.PRIVATE_KEY.public_key.compressed_bytes, 'hex_codec').decode('utf-8')}

    def open(self, deposit_tx, redeem_script):
        """Validate a deposit tx / redeem script pair and register the channel,
        keyed by the deposit txid."""
        # Deserialize deposit tx and redeem script
        deposit_tx = bitcoin.Transaction.from_hex(deposit_tx)
        deposit_txid = str(deposit_tx.hash)
        redeem_script = statemachine.PaymentChannelRedeemScript.from_bytes(codecs.decode(redeem_script, 'hex_codec'))

        # Validate redeem_script: the merchant key inside it must be ours.
        assert redeem_script.merchant_public_key.compressed_bytes == self.PRIVATE_KEY.public_key.compressed_bytes

        # Validate deposit tx: exactly one output, a P2SH paying to the redeem script.
        assert len(deposit_tx.outputs) == 1, "Invalid deposit tx outputs."
        output_index = deposit_tx.output_index_for_address(redeem_script.hash160())
        assert output_index is not None, "Missing deposit tx P2SH output."
        assert deposit_tx.outputs[output_index].script.is_p2sh(), "Invalid deposit tx output P2SH script."
        assert deposit_tx.outputs[output_index].script.get_hash160() == redeem_script.hash160(), "Invalid deposit tx output script P2SH address."  # nopep8

        self.channels[deposit_txid] = {'deposit_tx': deposit_tx, 'redeem_script': redeem_script, 'payment_tx': None}

    def pay(self, deposit_txid, payment_tx):
        """Validate and co-sign a customer payment tx; returns its txid."""
        # Deserialize payment tx
        payment_tx = bitcoin.Transaction.from_hex(payment_tx)

        # Validate payment tx shape and embedded redeem script.
        redeem_script = self.channels[deposit_txid]['redeem_script']
        assert len(payment_tx.inputs) == 1, "Invalid payment tx inputs."
        assert len(payment_tx.outputs) == 2, "Invalid payment tx outputs."
        assert bytes(payment_tx.inputs[0].script[-1]) == bytes(self.channels[deposit_txid]['redeem_script']), "Invalid payment tx redeem script."  # nopep8

        # Validate payment is greater than the last one (monotonically increasing).
        if self.channels[deposit_txid]['payment_tx']:
            output_index = payment_tx.output_index_for_address(self.PRIVATE_KEY.public_key.hash160())
            assert output_index is not None, "Invalid payment tx output."
            assert payment_tx.outputs[output_index].value > self.channels[deposit_txid]['payment_tx'].outputs[output_index].value, "Invalid payment tx output value."  # nopep8

        # Sign payment tx with the merchant key.
        assert redeem_script.merchant_public_key.compressed_bytes == self.PRIVATE_KEY.public_key.compressed_bytes, "Public key mismatch."  # nopep8
        sig = payment_tx.get_signature_for_input(0, bitcoin.Transaction.SIG_HASH_ALL, self.PRIVATE_KEY, redeem_script)[0]  # nopep8

        # Update input script sig: merchant signature is inserted at position 1.
        payment_tx.inputs[0].script.insert(1, sig.to_der() + bitcoin.utils.pack_compact_int(bitcoin.Transaction.SIG_HASH_ALL))  # nopep8

        # Verify the fully-signed input against the deposit output script.
        output_index = self.channels[deposit_txid]['deposit_tx'].output_index_for_address(redeem_script.hash160())
        assert payment_tx.verify_input_signature(0, self.channels[deposit_txid]['deposit_tx'].outputs[output_index].script), "Payment tx input script verification failed."  # nopep8

        # Save payment tx
        self.channels[deposit_txid]['payment_tx'] = payment_tx

        # Return payment txid
        return str(payment_tx.hash)

    def status(self, deposit_txid):
        """Status lookup is not modelled by this mock; always empty."""
        return {}

    def close(self, deposit_txid, deposit_txid_signature):
        """Verify the customer's signature over the deposit txid, broadcast the
        latest payment tx, and return that payment tx's txid."""
        # Assert a payment has been made to this channel
        assert self.channels[deposit_txid]['payment_tx'], "No payment tx exists."

        # Verify deposit txid signature (proves the customer authorized the close).
        public_key = self.channels[deposit_txid]['redeem_script'].customer_public_key
        assert public_key.verify(deposit_txid.encode(), bitcoin.Signature.from_der(deposit_txid_signature)), "Invalid deposit txid signature."  # nopep8

        # Broadcast to blockchain
        self.blockchain.broadcast_tx(self.channels[deposit_txid]['payment_tx'].to_hex())

        # Return payment txid
        return str(self.channels[deposit_txid]['payment_tx'].hash)
class MockBlockchain(blockchain.BlockchainBase):
    """Mock Blockchain interface for unit testing.

    Transactions are stored in a class-level dict shared by all mock objects:
        { "<txid>": {"tx": <serialized tx>,
                     "confirmations": <int>,
                     "outputs_spent": [<spending txid> or None, ...]}, ... }
    """

    _blockchain = {}
    """Global blockchain state accessible by other mock objects."""

    def __init__(self):
        """Instantiate a Mock blockchain interface.

        Returns:
            MockBlockchain: instance of MockBlockchain.
        """
        # Reset the shared state in place so references held by other mock
        # objects keep observing the same dict.
        MockBlockchain._blockchain.clear()

    def mock_confirm(self, txid, num_confirmations=1):
        """Test hook: force `txid` to report `num_confirmations` confirmations."""
        self._blockchain[txid]['confirmations'] = num_confirmations

    def check_confirmed(self, txid, num_confirmations=1):
        """Return True if `txid` is known with at least `num_confirmations`."""
        if txid not in self._blockchain:
            return False
        return self._blockchain[txid]['confirmations'] >= num_confirmations

    def lookup_spend_txid(self, txid, output_index):
        """Return the txid spending `txid:output_index`, None if unspent/unknown.

        Raises:
            IndexError: if `output_index` is out of range for a known tx.
        """
        if txid not in self._blockchain:
            return None
        if output_index >= len(self._blockchain[txid]['outputs_spent']):
            raise IndexError('Output index out of bounds.')
        return self._blockchain[txid]['outputs_spent'][output_index]

    def lookup_tx(self, txid):
        """Return the serialized transaction for `txid`, or None if unknown."""
        if txid not in self._blockchain:
            return None
        return self._blockchain[txid]['tx']

    def broadcast_tx(self, tx):
        """Record a serialized transaction and return its txid (idempotent)."""
        txobj = bitcoin.Transaction.from_hex(tx)
        txid = str(txobj.hash)
        if txid in self._blockchain:
            return txid
        self._blockchain[txid] = {"tx": tx, "confirmations": 0, "outputs_spent": [None] * len(txobj.outputs)}
        # Mark spent outputs in other blockchain transactions.
        for other_txid in self._blockchain:
            for txinput in txobj.inputs:
                if str(txinput.outpoint) == other_txid:
                    self._blockchain[other_txid]['outputs_spent'][txinput.outpoint_index] = txid
        # BUG FIX: previously only the duplicate-broadcast branch returned the
        # txid; first-time broadcasts returned None, so callers such as
        # broadcast_transaction() received None for new transactions.
        return txid
| 9,041 | 2,983 |
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision$
from __future__ import print_function
from cloud_gem_load_test.service_api_call import ServiceApiCall
from data_generator import DataGenerator
import metric_constant as c
#
# Load Test Transaction Handler registration
#
def add_transaction_handlers(handler_context, transaction_handlers):
    """Register the load-test transaction handlers against the gem's service API."""
    service_api_name = c.RES_GEM_NAME + '.ServiceApi'
    mapping = handler_context.mappings.get(service_api_name, {})
    base_url = mapping.get('PhysicalResourceId')
    if not base_url:
        raise RuntimeError('Missing PhysicalResourceId for ' + service_api_name)
    for handler in (ServiceStatus(base_url), ProduceMessage(base_url)):
        transaction_handlers.append(handler)
transaction_handlers.append(ProduceMessage(base_url))
#
# Check for the service status of Cloud Gem Under Test
#
class ServiceStatus(ServiceApiCall):
    """Load-test transaction that GETs the gem's /service/status endpoint."""

    def __init__(self, base_url):
        ServiceApiCall.__init__(self, name=c.RES_GEM_NAME + '.ServiceStatus', method='get', base_url=base_url,
                                path='/service/status')
#
# Produce Metric Messages
#
class ProduceMessage(ServiceApiCall):
    """Load-test transaction that POSTs a generated metric event to the producer."""

    def __init__(self, base_url):
        ServiceApiCall.__init__(self, name=c.RES_GEM_NAME + '.ProduceMessage', method='post', base_url=base_url,
                                path='/producer/produce/message?compression_mode=NoCompression&sensitivity_type=Insensitive&payload_type=JSON')

    def build_request(self):
        """Extend the base request with a freshly generated metric payload body."""
        request = ServiceApiCall.build_request(self)
        request['body'] = {
            'data': build_metric_data()
        }
        return request
#
# Build the metric data object needed for the metric producer request body
#
def build_metric_data():
    """Generate a single metric event payload for the producer request body."""
    print('Building metric event data')
    return DataGenerator().json(1)
| 2,256 | 659 |
from __future__ import annotations
import re
import pandas as pd
from enum import Enum
from typing import Dict
class ColumnType(Enum):
    """Canonical SQL column types, inferable from pandas Series or SQL type strings."""

    integer = 'bigint'
    decimal = 'double precision'
    date = 'date'
    short_text = 'character varying(256)'
    medium_text = 'character varying(8192)'
    long_text = 'character varying(32768)'
    boolean = 'boolean'

    @classmethod
    def from_pd_column(cls, pd_column: any) -> ColumnType:
        """Infer a ColumnType from a pandas Series by dtype and content.

        Object columns are bucketed by the longest string representation;
        short object columns that look like dates become ``date``.
        """
        type_name = str(pd_column.dtype)
        if type_name == 'int64':
            return cls.integer
        elif type_name == 'float64':
            return cls.decimal
        elif type_name == 'bool':
            return cls.boolean
        elif type_name == 'object':
            # Longest rendered value decides the text width bucket.
            length = pd_column.apply(str).apply(len).max()
            if length > 2048:
                return cls.long_text
            if length < 64:
                if cls._pd_column_is_date(pd_column):
                    return cls.date
                return cls.short_text
            return cls.medium_text
        else:
            # Any other dtype (e.g. datetime64) falls back to the widest text.
            return cls.long_text

    @classmethod
    def from_query_result(cls, result: str) -> ColumnType:
        """Map a SQL information-schema type string back to a ColumnType.

        Raises:
            ValueError: if the string matches no known type family.
        """
        try:
            return cls(result)
        except ValueError as e:
            if result.find('character varying') == 0 or result.find('USER-DEFINED') == 0:
                return cls.long_text
            elif result.find('integer') == 0:
                return cls.integer
            elif result.find('numeric') == 0:
                return cls.decimal
            elif result.find('timestamp') == 0:
                return cls.date
            raise e

    @classmethod
    def _pd_column_is_date(cls, pd_column: any) -> bool:
        """Return True if the column is non-empty and every non-null value
        looks like a date (d/m/y or y/m/d with '-' or '/' separators)."""
        empty = True
        for value in pd_column:
            if not value or pd.isna(value):
                continue
            empty = False
            # BUG FIX: object columns may hold non-strings; re.match raised
            # TypeError on them. Coerce like the length check above does.
            text = str(value)
            if not re.match(r'[0-9]{1,2}[/-][0-9]{1,2}[/-][0-9]{2,4}', text) and not re.match(r'[0-9]{2,4}[/-][0-9]{1,2}[/-][0-9]{1,2}', text):
                return False
        return not empty

    @property
    def pd_type(self) -> any:
        """Return the pandas/numpy dtype to use when loading this column type."""
        if self is ColumnType.integer:
            return pd.Int64Dtype()
        elif self is ColumnType.decimal:
            return float
        elif self is ColumnType.boolean:
            return bool
        elif self is ColumnType.date:
            return 'datetime64[ns]'
        else:
            return 'object'
def sanitized_relation_name(name: str) -> str:
    """Lowercase *name* and replace every character outside [a-z0-9_] with '_'."""
    lowered = name.lower()
    return re.sub(r'[^a-z0-9_]', '_', lowered)
def sanitized_column_name(name: str) -> str:
    """Lowercase *name*; keep [a-z0-9_ ] (spaces allowed) and underscore the rest."""
    lowered = name.lower()
    return re.sub(r'[^a-z0-9_ ]', '_', lowered)
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import (static, )
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls', namespace="rest_framework")),
    path('api/v1/', include('superhero.urls', namespace="superhero")),
    path('api/v1/', include('movie.urls', namespace="movie")),
]

if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns

# BUG FIX: django.conf.urls.static.static() expects the keyword
# 'document_root'; the original passed 'document', so media files were
# never served from MEDIA_ROOT.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if 'silk' in settings.INSTALLED_APPS:
    urlpatterns += [path('silk/', include('silk.urls', namespace='silk'))]
"""
paper: Exploit Camera Raw Data for Video Super-Resolution via Hidden Markov Model Inference
file: config.py
author: Xiaohong Liu
date: 17/09/19
"""
def get_config(args):
    """Build the train/test/network/dataset option dictionary for RawVD.

    Args:
        args: parsed CLI arguments providing ``scale_ratio`` (2 or 4) and
            ``save_image`` (bool).

    Returns:
        dict: nested options under 'train', 'test', 'network' and 'dataset'.

    Raises:
        ValueError: if ``args.scale_ratio`` is not 2 or 4.
    """
    scale = args.scale_ratio
    save_tag = args.save_image
    if scale not in [2, 4]:
        # ValueError is the precise exception for a bad argument value and is
        # still caught by any caller handling Exception.
        raise ValueError('scale {} is not supported!'.format(scale))
    opt = {'train': {'dataroot_GT': './dataset/train/1080p_gt_rgb',
                     'dataroot_LQ': './dataset/train/1080p_lr_d_raw_{}'.format(scale),
                     'lr': 2e-4,
                     'num_epochs': 100,
                     'N_frames': 7,
                     'n_workers': 12,
                     # x4 training fits a larger batch than x2.
                     'batch_size': 24 if scale == 4 else 8,
                     'GT_size': 256,
                     'LQ_size': 256 // scale,
                     'scale': scale,
                     'phase': 'train',
                     },
           'test': {'dataroot_GT': './dataset/test/1080p_gt_rgb',
                    'dataroot_LQ': './dataset/test/1080p_lr_d_raw_{}'.format(scale),
                    'N_frames': 7,
                    'n_workers': 12,
                    'batch_size': 2,
                    'phase': 'test',
                    'save_image': save_tag,
                    },
           'network': {'nf': 64,
                       'nframes': 7,
                       'groups': 8,
                       'back_RBs': 4},
           'dataset': {'dataset_name': 'RawVD'
                       }
           }
    return opt
| 1,492 | 496 |
### TODO: Add unit tests for this module.
| 29 | 12 |
import collections
class LRUCache:
    """Fixed-capacity least-recently-used cache backed by an OrderedDict.

    The OrderedDict's insertion order doubles as the recency order: the
    front holds the least recently used entry, the back the most recent.
    """

    def __init__(self, capacity: int):
        self.cache = collections.OrderedDict()
        self.capacity = capacity

    def get(self, key: int) -> int:
        """Return the value for key (marking it most recent), or -1 if absent."""
        try:
            value = self.cache[key]
        except KeyError:
            return -1
        self.cache.move_to_end(key)
        return value

    def put(self, key: int, value: int) -> None:
        """Insert or overwrite key, evicting the LRU entry when over capacity."""
        self.cache.pop(key, None)       # drop any stale position
        self.cache[key] = value         # re-insert as most recent
        while len(self.cache) > self.capacity:
            self.cache.popitem(last=False)
if __name__ == '__main__':
    # Smoke test 1: basic LRU eviction order.
    lru = LRUCache(2)
    lru.put(1, 1)
    lru.put(2, 2)
    assert lru.get(1) == 1
    lru.put(3, 3)           # evicts key 2 (least recently used)
    assert lru.get(2) == -1
    lru.put(4, 4)           # evicts key 1
    assert lru.get(1) == -1
    assert lru.get(3) == 3
    assert lru.get(4) == 4

    # Smoke test 2: updating an existing key refreshes its recency.
    lru = LRUCache(2)
    lru.put(2, 1)
    lru.put(1, 1)
    lru.put(2, 3)           # key 2 becomes most recent
    lru.put(4, 1)           # evicts key 1
    assert lru.get(1) == -1
    assert lru.get(2) == 3
| 965 | 374 |
import warnings
from django.conf import settings
def static(request):
    """
    Adds static-related context variables to the context.
    """
    return dict(STATIC_URL=settings.STATIC_URL)
def static_url(request):
    """Deprecated alias for :func:`static`; kept for backward compatibility."""
    warnings.warn(
        "The context processor 'staticfiles.context_processors.static_url' "
        "was renamed to 'staticfiles.context_processors.static'.",
        DeprecationWarning,
        # stacklevel=2 attributes the warning to the caller of this shim,
        # not to this line, so users see where to update their code.
        stacklevel=2)
    return static(request)
| 436 | 120 |
from torch.utils.data import Subset
from PIL import Image
from torchvision.datasets import EMNIST
from base.torchvision_dataset import TorchvisionDataset
from PIL.ImageFilter import GaussianBlur
import numpy as np
import torch
import torchvision.transforms as transforms
import random
class EMNIST_Dataset(TorchvisionDataset):
    """EMNIST wrapper for semi-supervised anomaly detection.

    One class of the chosen split is treated as "normal" and the remaining
    classes as outliers. In outlier-exposure mode, a random subset of classes
    is sampled as known outliers and their samples receive semi-supervised
    label -1; no test set is built in that mode.
    """

    def __init__(self, root: str, split: str = 'letters', normal_class: int = 1, outlier_exposure: bool = False,
                 oe_n_classes: int = 26, blur_oe: bool = False, blur_std: float = 1.0, seed: int = 0):
        super().__init__(root)

        self.image_size = (1, 28, 28)  # single-channel 28x28 images
        self.n_classes = 2  # 0: normal, 1: outlier
        self.shuffle = True
        self.split = split

        random.seed(seed)  # set seed

        if outlier_exposure:
            # Sample which of the classes (labels 1..26 here) act as the
            # "known" outliers used for exposure training.
            self.normal_classes = None
            self.outlier_classes = list(range(1, 27))
            self.known_outlier_classes = tuple(random.sample(self.outlier_classes, oe_n_classes))
        else:
            # Define normal and outlier classes
            self.normal_classes = tuple([normal_class])
            self.outlier_classes = list(range(1, 27))
            self.outlier_classes.remove(normal_class)
            self.outlier_classes = tuple(self.outlier_classes)

        # EMNIST preprocessing: feature scaling to [0, 1]
        transform = []
        if blur_oe:
            # Optionally blur outlier-exposure images (applied on the PIL image).
            transform += [transforms.Lambda(lambda x: x.filter(GaussianBlur(radius=blur_std)))]
        transform += [transforms.ToTensor()]
        transform = transforms.Compose(transform)
        # Binary target: 1 if the original label is an outlier class, else 0.
        target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes))

        # Get train set
        train_set = MyEMNIST(root=self.root, split=self.split, train=True, transform=transform,
                             target_transform=target_transform, download=True)

        if outlier_exposure:
            # Keep only samples of the known outlier classes and mark them -1.
            idx = np.argwhere(np.isin(train_set.targets.cpu().data.numpy(), self.known_outlier_classes))
            idx = idx.flatten().tolist()
            train_set.semi_targets[idx] = -1 * torch.ones(len(idx)).long()  # set outlier exposure labels

            # Subset train_set to selected classes
            self.train_set = Subset(train_set, idx)
            self.train_set.shuffle_idxs = False
            self.test_set = None
        else:
            # Subset train_set to normal_classes
            idx = np.argwhere(np.isin(train_set.targets.cpu().data.numpy(), self.normal_classes))
            idx = idx.flatten().tolist()
            train_set.semi_targets[idx] = torch.zeros(len(idx)).long()
            self.train_set = Subset(train_set, idx)

            # Get test set
            self.test_set = MyEMNIST(root=self.root, split=self.split, train=False, transform=transform,
                                     target_transform=target_transform, download=True)
class MyEMNIST(EMNIST):
    """
    Torchvision EMNIST class with additional targets for the outlier exposure setting and patch of __getitem__ method
    to also return the outlier exposure target as well as the index of a data sample.
    """

    def __init__(self, *args, **kwargs):
        super(MyEMNIST, self).__init__(*args, **kwargs)

        # Semi-supervised targets, one per sample; initialized to 0 and set to
        # -1 for known outliers by EMNIST_Dataset.
        self.semi_targets = torch.zeros_like(self.targets)
        self.shuffle_idxs = False

    def __getitem__(self, index):
        """Override the original method of the EMNIST class.

        Args:
            index (int): Index

        Returns:
            tuple: (image, target, semi_target, index)
        """
        img, target, semi_target = self.data[index], int(self.targets[index]), int(self.semi_targets[index])

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img.numpy(), mode='L')

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target, semi_target, index
| 4,018 | 1,219 |
from collections import defaultdict as ddict
from operator import attrgetter, methodcaller
from string import punctuation
from graphviz import Digraph
from dfdone.enums import (
Profile,
Role,
)
# Component-category keys; also used as CSS class-name prefixes in the tables.
ASSUMPTION = 'assumption'
DATA = 'data'
MEASURE = 'measure'
THREAT = 'threat'
def table_from_list(class_name, table_headers, table_rows):
    """Render an HTML table with the given CSS class, header cells, and body rows."""
    markup = ['<thead>']
    markup += [F"<th>{header}</th>" for header in table_headers]
    markup.append('</thead>')
    markup.append('<tbody>')
    markup += list(table_rows)
    markup.append('</tbody>')
    table_body = '\n'.join(markup)
    return F'\n\n<table class="{class_name}">\n{table_body}\n</table>'
# Translation table: spaces become hyphens, punctuation is stripped.
slugify = str.maketrans(' ', '-', punctuation)


def id_format(label):
    """Turn a display label into a slug suitable for an HTML id / URL fragment."""
    normalized = label.lower().replace('-', ' ')
    return normalized.translate(slugify)
def build_table_rows(class_prefix, component_list):
    """Build the <tr> markup for one component table.

    ``class_prefix`` (ASSUMPTION/DATA/MEASURE/THREAT) selects the CSS style
    class of each label and which cross-reference column, if any, is emitted.
    """
    table_rows = list()
    for i, c in enumerate(component_list):
        table_rows.append('<tr>')
        # Column 1: row number.
        table_rows.append('<td>')
        table_rows.append(
            F'<div class="row-number {class_prefix}-number">{i + 1}</div>'
        )
        table_rows.append('</td>')
        # Category-specific extra style class for the label cell.
        style_class = ''
        if class_prefix == DATA:
            style_class = F" classification-{c.classification.name.lower()}"
        elif class_prefix == ASSUMPTION or class_prefix == THREAT:
            style_class = F" risk-{c.calculate_risk().name.lower()}"
        elif class_prefix == MEASURE:
            style_class = F" capability-{c.capability.name.lower()}"
        # Column 2: the component label, anchored by its slugified id.
        table_rows.append('<td>')
        table_rows.append((
            F'<div id="{id_format(c.id)}" '
            F'class="label {class_prefix}-label{style_class}">'
            F"{c.label}</div>"
        ))
        table_rows.append('</td>')
        # Threat rows get a column linking each applicable measure...
        if class_prefix == THREAT:
            table_rows.append('<td>')
            for m in c.measures:
                table_rows.append((
                    '<div class="label measure-label '
                    F'capability-{m.capability.name.lower()}">'
                    F'<a href="#{id_format(m.id)}">{m.label}</a></div>'
                ))
            table_rows.append('</td>')
        # ...and measure rows link back to their mitigable threats.
        if class_prefix == MEASURE:
            table_rows.append('<td>')
            for t in c.threats:
                table_rows.append((
                    '<div class="label threat-label '
                    F'risk-{t.calculate_risk().name.lower()}">'
                    F'<a href="#{id_format(t.id)}">{t.label}</a></div>'
                ))
            table_rows.append('</td>')
        # Final column: description, or a dash placeholder when empty.
        table_rows.append('<td>')
        table_rows.append('<div class="{}">{}</div>'.format(
            F"description {class_prefix}-description" if c.description
            else 'dash',
            c.description or '-'
        ))
        table_rows.append('</td>')
        table_rows.append('</tr>')
    return table_rows
def build_assumption_table(assumptions):
    """Render all assumptions as an HTML table."""
    return table_from_list(
        'assumption-table',
        ['#', 'Disprove', 'Description'],
        build_table_rows(ASSUMPTION, assumptions),
    )
def build_data_table(data):
    """Render all data as an HTML table, highest classification first.

    Two stable sorts: alphabetical by label, then classification descending.
    """
    ordered = sorted(
        sorted(data, key=attrgetter('label')),
        key=attrgetter('classification'),
        reverse=True,
    )
    return table_from_list(
        'data-table',
        ['#', 'Data', 'Description'],
        build_table_rows(DATA, ordered),
    )
def build_threat_table(threats):
    """Render all active threats as an HTML table, highest risk first.

    Two stable sorts: alphabetical by label, then calculated risk descending.
    """
    ordered = sorted(
        sorted(threats, key=attrgetter('label')),
        key=methodcaller('calculate_risk'),
        reverse=True,
    )
    return table_from_list(
        'threat-table',
        ['#', 'Active Threat', 'Applicable Measures', 'Description'],
        build_table_rows(THREAT, ordered),
    )
def build_measure_table(measures):
    """Render all security measures as an HTML table, strongest capability first.

    Two stable sorts: alphabetical by label, then capability descending.
    """
    ordered = sorted(
        sorted(measures, key=attrgetter('label')),
        key=attrgetter('capability'),
        reverse=True,
    )
    return table_from_list(
        'measure-table',
        ['#', 'Security Measure', 'Mitigable Threats', 'Description'],
        build_table_rows(MEASURE, ordered),
    )
def organize_elements(graph, elements):
    """Pin the largest profile group of elements onto invisible row nodes so the
    rendered diagram stays compact (roughly two elements per row)."""
    # Pick whichever profile (black/grey/white) contributes the most elements.
    central_elements = max([
        [e for e in elements if e.profile == Profile.BLACK],
        [e for e in elements if e.profile == Profile.GREY],
        [e for e in elements if e.profile == Profile.WHITE],
    ], key=lambda l: len(l))
    if not central_elements:
        return
    row_count = max(2, len(central_elements) // 2)
    # An invisible chain of numbered nodes forces a vertical row ordering.
    row_subgraph = Digraph(name='rows')
    for i in range(1, row_count):
        row_subgraph.edge(F"{i}", F"{i+1}", style='invis')
    row_subgraph.node_attr.update(style='invis', shape='plain')
    graph.subgraph(row_subgraph)
    # Assign every row_count-th element to the same rank as its row node.
    for i in range(row_count):
        rank_subgraph = Digraph()
        rank_subgraph.attr(rank='same')
        for e in central_elements[i::row_count]:
            rank_subgraph.node(F"{i+1}")
            rank_subgraph.node(e.id)
        graph.subgraph(rank_subgraph)
def build_diagram(elements, interactions):
    """Render the data-flow diagram as inline SVG wrapped in a styled div."""
    elements = list(elements)  # to be able to iterate more than once.
    dot = Digraph(format='svg')
    dot.attr(rankdir='TB', newrank='false')
    organize_elements(dot, elements)
    # Cluster elements by their declared group; ungrouped ones go straight in.
    groups = ddict(list)
    for e in elements:
        if e.group:
            groups[e.group].append(e)
        else:
            add_node(dot, e)
    for group, group_elements in groups.items():
        # Graphviz requirement: name must start with 'cluster'.
        sub = Digraph(name=F"cluster_{group}")
        sub.attr(label=group, style='filled', color='lightgrey')
        for e in group_elements:
            add_node(sub, e)
        dot.subgraph(sub)
    # Edges are numbered in creation order so they match the interaction table.
    _interactions = sorted(interactions, key=attrgetter('created'))
    for i_index, interaction in enumerate(_interactions):
        dot.edge(
            interaction.source.id,
            interaction.target.id,
            label=F" {i_index + 1} ",
            decorate='true',
            constraint=interaction.laterally
        )
    # Return the SVG source:
    return (
        '\n\n<div class="diagram">\n'
        F"{dot.pipe().decode('utf-8').strip()}\n"
        '</div>'
    )
def add_node(graph, element):
    """Add one diagram node: the role picks the shape, the profile the colors."""
    role_shapes = {
        Role.SERVICE: 'oval',
        Role.STORAGE: 'box3d',
    }
    profile_palette = {
        Profile.BLACK: ('black', 'white'),
        Profile.GREY: ('dimgrey', 'white'),
    }
    node_shape = role_shapes.get(element.role, 'box')
    # Background fill paired with a contrasting text color.
    fillcolor, fontcolor = profile_palette.get(element.profile, ('white', 'black'))
    graph.node(
        element.id,
        label=element.label,
        shape=node_shape,
        style='filled',
        color='black',
        fontcolor=fontcolor,
        fillcolor=fillcolor
    )
def build_threats_cell(threats, classification, interaction_table, rowspan=1):
    """Append one <td> (spanning ``rowspan`` rows) listing each threat and its
    active mitigations to ``interaction_table`` in place."""
    interaction_table.append(F"<td rowspan={rowspan}>")
    for t in threats:
        # Risk is evaluated against the classification of the data involved.
        risk_level = t.calculate_risk(classification).name.lower()
        interaction_table.append((
            F'<div class="label threat-label risk-{risk_level}">'
            F'<a href="#{id_format(t.id)}">{t.label}</a></div>'
        ))
        for m in t.measures:
            if not m.active:
                continue  # only active measures are listed
            interaction_table.append((
                '<div class="label mitigation-label '
                F"imperative-{m.imperative.name.lower()} "
                F"capability-{m.capability.name.lower()} "
                F'status-{m.status.name.lower()}">'
                F'<a href="#{id_format(m.id)}">{m.label}</a></div>'
            ))
    interaction_table.append('</td>')
def build_interaction_table(interactions):
    """Render all interactions as an HTML table.

    Each interaction spans one row per datum it carries (via rowspan); the
    interaction-wide cells (number, interaction threats, notes) are emitted
    only on the first of those rows.
    """
    interaction_table = list()
    headers = ['#', 'Data', 'Data Threats', 'Interaction Threats', 'Notes']
    _interactions = sorted(interactions, key=attrgetter('created'))
    for i_index, interaction in enumerate(_interactions):
        # One <tr> per datum; the shared cells span all of them.
        interaction_rowspan = len(interaction.data_threats.values())
        interaction_table.append('<tr>')
        interaction_table.append((
            F'<td rowspan="{interaction_rowspan}">'
            '<div class="row-number interaction-number">'
            F"{i_index + 1}</div></td>"
        ))
        di = 0
        for datum, threats in interaction.data_threats.items():
            if di > 0:
                # Subsequent data rows open their own <tr>.
                interaction_table.append('<tr>')
            interaction_table.append((
                F'<td><div class="label data-label '
                F'classification-{datum.classification.name.lower()}">'
                F'<a href="#{id_format(datum.id)}">{datum.label}</a>'
                '</div></td>'
            ))
            if not threats:
                interaction_table.append('<td><div class="dash">-</div></td>')
            else:
                build_threats_cell(
                    threats,
                    datum.classification,
                    interaction_table
                )
            if di == 0:
                # First data row also carries the interaction-wide threats cell...
                if not interaction.broad_threats:
                    interaction_table.append((
                        F'<td rowspan="{interaction_rowspan}">'
                        '<div class="dash">-</div></td>'
                    ))
                else:
                    build_threats_cell(
                        interaction.broad_threats,
                        interaction.highest_classification,
                        interaction_table,
                        rowspan=interaction_rowspan
                    )
                # ...and the notes cell (dash placeholder when empty).
                interaction_table.append(
                    F'<td rowspan="{interaction_rowspan}">'
                )
                interaction_table.append('<div class="{}">{}</div>'.format(
                    'interaction-notes' if interaction.notes
                    else 'dash',
                    interaction.notes or '-'
                ))
                interaction_table.append('</td>')
            interaction_table.append('</tr>')
            di += 1
    return table_from_list('interaction-table', headers, interaction_table)
| 10,037 | 3,088 |
import os
import requests
from pyquery import PyQuery as pq
def get_download_url(movie_url):
    """Scrape the movie page for .mp4 links and download the last-listed one.

    Args:
        movie_url (str): URL of the movie detail page.

    Returns:
        str: local path of the downloaded file (from download_movie).

    Raises:
        ValueError: if the page contains no .mp4 download links.
    """
    get_request = requests.get(movie_url)
    get_request_str = str(get_request.content, 'utf-8')
    pq_obj_items = pq(get_request_str)('div.download-list.d-hidden').eq(0)('div')('a').items()
    download_urls = []
    for pq_item in pq_obj_items:
        if '.mp4' in str(pq_item):
            download_urls.append(pq_item('a').attr('href'))
    if not download_urls:
        # BUG FIX: the original indexed [-1] unconditionally, raising a bare
        # IndexError on pages without any .mp4 link.
        raise ValueError('no .mp4 download links found at {}'.format(movie_url))
    # Last link is taken as the best quality, matching the original behavior.
    best_quality_download_url = download_urls[-1]
    return download_movie(best_quality_download_url)
def download_movie(download_url):
    """Download the video at ``download_url`` into ./videos/ and return its path.

    Args:
        download_url (str): direct URL of the .mp4 file.

    Returns:
        str: relative path of the written file, e.g. 'videos/<name>.mp4'.
    """
    # BUG FIX: the original used download_url.split(maxsplit=1)[1], which
    # splits on whitespace; URLs contain none, so it always raised IndexError.
    # Derive the name from the last URL path segment instead.
    last_segment = download_url.rstrip('/').rsplit('/', 1)[-1]
    file_name = os.path.splitext(last_segment)[0]
    file_dir = 'videos/{}.mp4'.format(file_name)
    if not os.path.exists('videos'):
        os.mkdir('videos')
    get_video = requests.get(download_url, allow_redirects=True)
    with open(file_dir, "wb") as file_stream:
        file_stream.write(get_video.content)
    return file_dir
# Example usage: scrape the page and download the movie.
# NOTE(review): this runs at import time as well; consider wrapping it in an
# `if __name__ == '__main__':` guard.
url = 'http://asilmedia.net/11773-tepalikda-ajratish-olim-yaqin-emas-uzbek-tilida-2018-ozbekcha-tarjima-kino-hd.html'
get_download_url(url)
| 1,159 | 431 |
from flask_sqlalchemy import SQLAlchemy
# NOTE(review): `app` is never defined in this module; a Flask application
# instance (e.g. `app = Flask(__name__)`) must exist before this runs.
# SECURITY(review): credentials are hard-coded here; move them to environment
# variables or a config file outside version control.
# NOTE(review): the username repeats the hostname and the database name starts
# with a leading space -- both look misconfigured; confirm the real values.
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(username="donalmaher067.mysql.pythonanywhere-services.com",password="Hollyroco@9552",hostname="donalmaher067.mysql.pythonanywhere-services.com",databasename=" donalmaher067$datarepresentation",)
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_POOL_RECYCLE"] = 299
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

# BUG FIX: start the development server only after configuration is applied.
# The original called app.run() (which blocks) before any config assignment,
# so none of the settings above ever took effect for the running server.
if __name__ == "__main__":
    app.run(debug=True)
# Copyright (c) 2014 Yaron Shani <yaron.shani@gmail.com>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# This software is provided ``as is'' and any express or implied
# warranties, including, but not limited to, the implied warranties of
# merchantability and fitness for a particular purpose are
# disclaimed. In no event shall author or contributors be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even if
# advised of the possibility of such damage.
import SocketServer
import struct
import types
from bitstring import Bits
# Hey StackOverflow !
class bcolors:
    # ANSI terminal escape sequences for colored/styled console output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class BaseStucture:
    """Declarative binary structure base class ('Stucture' [sic] is the public
    name other code subclasses, so it is kept).

    Subclasses describe their wire layout in ``_fields_`` as tuples of
    ``(name, struct_format_or_nested_structure[, default])``. Packing is
    big-endian by default; a format string starting with '<' marks a field
    that is byte-swapped to little-endian on unpack. The pseudo-format 'si'
    packs the attribute as a single char ('c').

    NOTE(review): relies on ``types.InstanceType``, which only exists for
    Python 2 old-style classes -- this module is Python 2 only.
    """
    _fields_ = []

    def __init__(self, **kwargs):
        self.init_from_dict(**kwargs)
        # Apply declared defaults for any field not supplied by the caller.
        for field in self._fields_:
            if len(field) > 2:
                if not hasattr(self, field[0]):
                    setattr(self, field[0], field[2])

    def init_from_dict(self, **kwargs):
        # Bulk-assign keyword arguments as instance attributes.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def size(self):
        # Total packed size in bytes, derived from the struct format string.
        return struct.calcsize(self.format())

    def format(self):
        """Build the big-endian struct format string for every field."""
        pack_format = '>'
        for field in self._fields_:
            if type(field[1]) is types.InstanceType:
                # Nested structure: embedded as a fixed-size byte string.
                if BaseStucture in field[1].__class__.__bases__:
                    pack_format += str(field[1].size()) + 's'
            elif 'si' == field[1]:
                pack_format += 'c'
            elif '<' in field[1]:
                # Strip the little-endian marker here; unpack() byte-swaps.
                pack_format += field[1][1:]
            else:
                pack_format += field[1]
        return pack_format

    def formatDevicesList(self, devicesCount):
        """Like format(), but stop after the 2 fixed fields + devicesCount
        device entries (used for variable-length device-list replies)."""
        pack_format = '>'
        i = 0
        for field in self._fields_:
            if (i == devicesCount + 2):
                break
            if type(field[1]) is types.InstanceType:
                if BaseStucture in field[1].__class__.__bases__:
                    pack_format += str(field[1].size()) + 's'
            elif 'si' == field[1]:
                pack_format += 'c'
            elif '<' in field[1]:
                pack_format += field[1][1:]
            else:
                pack_format += field[1]
            i += 1
        return pack_format

    def pack(self):
        """Serialize all fields (nested structures pack recursively)."""
        values = []
        for field in self._fields_:
            if type(field[1]) is types.InstanceType:
                if BaseStucture in field[1].__class__.__bases__:
                    values.append(getattr(self, field[0], 0).pack())
            else:
                if 'si' == field[1]:
                    # 'si' fields hold an int attribute packed as one char.
                    values.append(chr(getattr(self, field[0], 0)))
                else:
                    values.append(getattr(self, field[0], 0))
        return struct.pack(self.format(), *values)

    def packDevicesList(self, devicesCount):
        """Like pack(), but only the 2 fixed fields + devicesCount devices."""
        values = []
        i = 0
        for field in self._fields_:
            if (i == devicesCount + 2):
                break
            if type(field[1]) is types.InstanceType:
                if BaseStucture in field[1].__class__.__bases__:
                    values.append(getattr(self, field[0], 0).pack())
            else:
                if 'si' == field[1]:
                    values.append(chr(getattr(self, field[0], 0)))
                else:
                    values.append(getattr(self, field[0], 0))
            i += 1
        return struct.pack(self.formatDevicesList(devicesCount), *values)

    def unpack(self, buf):
        """Deserialize ``buf`` into attributes, byte-swapping '<' fields."""
        values = struct.unpack(self.format(), buf)
        i=0
        keys_vals = {}
        for val in values:
            if '<' in self._fields_[i][1][0]:
                # Field was declared little-endian: re-interpret the value.
                val = struct.unpack('<' +self._fields_[i][1][1], struct.pack('>' + self._fields_[i][1][1], val))[0]
            keys_vals[self._fields_[i][0]]=val
            i+=1
        self.init_from_dict(**keys_vals)
def int_to_hex_string(val):
    """Pack an integer into 8 raw bytes (hex digits zero-padded to 16).

    Python 2 only: relies on str.decode('hex').
    """
    padded = format(val, 'x').rjust(16, '0')
    return padded.decode('hex')
class USBIPHeader(BaseStucture):
    # Common USBIP operation header preceding every op request/reply.
    _fields_ = [
        ('version', 'H', 273),  # 273 == 0x0111 (protocol version)
        ('command', 'H'),
        ('status', 'I')
    ]
class USBInterface(BaseStucture):
    # Interface triple (class/subclass/protocol) plus one padding byte.
    _fields_ = [
        ('bInterfaceClass', 'B'),
        ('bInterfaceSubClass', 'B'),
        ('bInterfaceProtocol', 'B'),
        ('align', 'B', 0)  # padding to a 4-byte boundary
    ]
class USBIPDevice(BaseStucture):
    # Exported-device entry as it appears in a device-list reply.
    _fields_ = [
        ('usbPath', '256s'),
        ('busID', '32s'),
        ('busnum', 'I'),
        ('devnum', 'I'),
        ('speed', 'I'),
        ('idVendor', 'H'),
        ('idProduct', 'H'),
        ('bcdDevice', 'H'),
        ('bDeviceClass', 'B'),
        ('bDeviceSubClass', 'B'),
        ('bDeviceProtocol', 'B'),
        ('bConfigurationValue', 'B'),
        ('bNumConfigurations', 'B'),
        ('bNumInterfaces', 'B'),
        ('interfaces', USBInterface())  # single trailing interface descriptor
    ]
class OPREPDevList(BaseStucture):
    # OP_REP_DEVLIST reply: fixed header + device count, followed by a
    # variable number of device entries supplied via `dictArg`.
    def __init__(self, dictArg, count):
        # _fields_ is built per instance so device entries follow the fixed
        # fields in declaration order.
        self._fields_ = [
            ('base', USBIPHeader(), USBIPHeader(command=5,status=0)), # Declare this here to make sure it's in the right order
            ('nExportedDevice', 'I', count) # Same for this guy
        ]
        # NOTE(review): dict.iteritems() is Python 2 only.
        for key, value in dictArg.iteritems():
            field = (str(key), value[0], value[1])
            self._fields_.append(field)
        # Apply defaults, mirroring BaseStucture.__init__ (not called here).
        for field in self._fields_:
            if len(field) > 2:
                if not hasattr(self, field[0]):
                    setattr(self, field[0], field[2])
class OPREPImport(BaseStucture):
    # OP_REP_IMPORT reply: header + device description (no interface list).
    _fields_ = [
        ('base', USBIPHeader()),
        ('usbPath', '256s'),
        ('busID', '32s'),
        ('busnum', 'I'),
        ('devnum', 'I'),
        ('speed', 'I'),
        ('idVendor', 'H'),
        ('idProduct', 'H'),
        ('bcdDevice', 'H'),
        ('bDeviceClass', 'B'),
        ('bDeviceSubClass', 'B'),
        ('bDeviceProtocol', 'B'),
        ('bConfigurationValue', 'B'),
        ('bNumConfigurations', 'B'),
        ('bNumInterfaces', 'B')
    ]
class USBIPRETSubmit(BaseStucture):
    # USBIP_RET_SUBMIT reply header; pack() appends the raw URB payload.
    _fields_ = [
        ('command', 'I'),
        ('seqnum', 'I'),
        ('devid', 'I'),
        ('direction', 'I'),
        ('ep', 'I'),
        ('status', 'I'),
        ('actual_length', 'I'),
        ('start_frame', 'I'),
        ('number_of_packets', 'I'),
        ('error_count', 'I'),
        ('setup', 'Q')
    ]

    def pack(self):
        # Header first, then the payload; the caller must set `self.data`.
        packed_data = BaseStucture.pack(self)
        packed_data += self.data
        return packed_data
class USBIPCMDUnlink(BaseStucture):
    # USBIP_CMD_UNLINK body. seqnum2 presumably identifies the submit to
    # cancel -- TODO confirm against the USBIP protocol spec.
    _fields_ = [
        ('seqnum', 'I'),
        ('devid', 'I'),
        ('direction', 'I'),
        ('ep', 'I'),
        ('seqnum2', 'I'),
    ]
class USBIPCMDSubmit(BaseStucture):
    # USBIP_CMD_SUBMIT body (URB submission from the client).
    _fields_ = [
        ('seqnum', 'I'),
        ('devid', 'I'),
        ('direction', 'I'),
        ('ep', 'I'),
        ('transfer_flags', 'I'),
        ('transfer_buffer_length', 'I'),
        ('start_frame', 'I'),
        ('number_of_packets', 'I'),
        ('interval', 'I'),
        ('setup', 'Q')  # 8-byte SETUP packet for control transfers
    ]
class USBIPUnlinkReq(BaseStucture):
    # Unlink request with command/devid defaults pre-set to 0x2.
    _fields_ = [
        ('command', 'I', 0x2),
        ('seqnum', 'I'),
        ('devid', 'I', 0x2),
        ('direction', 'I'),
        ('ep', 'I'),
        ('transfer_flags', 'I'),
        ('transfer_buffer_length', 'I'),
        ('start_frame', 'I'),
        ('number_of_packets', 'I'),
        ('interval', 'I'),
        ('setup', 'Q')
    ]
class StandardDeviceRequest(BaseStucture):
    # 8-byte USB SETUP packet; wLength is little-endian on the wire ('<H').
    _fields_ = [
        ('bmRequestType', 'B'),
        ('bRequest', 'B'),
        ('wValue', 'H'),
        ('wIndex', 'H'),
        ('wLength', '<H')
    ]
class DeviceDescriptor(BaseStucture):
    # Standard USB device descriptor (18 bytes, descriptor type 1).
    _fields_ = [
        ('bLength', 'B', 18),
        ('bDescriptorType', 'B', 1),
        ('bcdUSB', 'H', 0x1001),
        ('bDeviceClass', 'B'),
        ('bDeviceSubClass', 'B'),
        ('bDeviceProtocol', 'B'),
        ('bMaxPacketSize0', 'B'),
        ('idVendor', 'H'),
        ('idProduct', 'H'),
        ('bcdDevice', 'H'),
        ('iManufacturer', 'B'),
        ('iProduct', 'B'),
        ('iSerialNumber', 'B'),
        ('bNumConfigurations', 'B')
    ]
class DeviceConfigurations(BaseStucture):
    # Standard USB configuration descriptor (9 bytes, descriptor type 2).
    _fields_ = [
        ('bLength', 'B', 9),
        ('bDescriptorType', 'B', 2),
        ('wTotalLength', 'H', 0x2200),
        ('bNumInterfaces', 'B', 1),
        ('bConfigurationValue', 'B', 1),
        ('iConfiguration', 'B', 0),
        ('bmAttributes', 'B', 0x80),  # bus-powered
        ('bMaxPower', 'B', 0x32)
    ]
class InterfaceDescriptor(BaseStucture):
    # Standard USB interface descriptor (9 bytes, descriptor type 4).
    _fields_ = [
        ('bLength', 'B', 9),
        ('bDescriptorType', 'B', 4),
        ('bInterfaceNumber', 'B', 0),
        ('bAlternateSetting', 'B', 0),
        ('bNumEndpoints', 'B', 1),
        ('bInterfaceClass', 'B', 3),
        ('bInterfaceSubClass', 'B', 1),
        ('bInterfaceProtocol', 'B', 2),
        ('iInterface', 'B', 0)
    ]
class EndPoint(BaseStucture):
    # Standard USB endpoint descriptor (7 bytes, descriptor type 5).
    _fields_ = [
        ('bLength', 'B', 7),
        ('bDescriptorType', 'B', 0x5),
        ('bEndpointAddress', 'B', 0x81),  # IN endpoint 1
        ('bmAttributes', 'B', 0x3),
        ('wMaxPacketSize', 'H', 0x8000),
        ('bInterval', 'B', 0x0A)
    ]
class USBRequest():
    """Simple attribute bag holding the fields of a parsed USBIP request."""

    def __init__(self, **kwargs):
        # Equivalent to setattr-ing every kwarg on a plain instance.
        self.__dict__.update(kwargs)
class USBDevice():
    """Base class for an emulated USB device served over USB/IP.

    Concrete subclasses must provide the device-identity attributes read
    here and by USBContainer (vendorID, productID, bcdDevice, bDeviceClass,
    bDeviceSubClass, bDeviceProtocol, bNumConfigurations,
    bConfigurationValue, bNumInterfaces, speed) together with a
    ``configurations`` list of packable descriptor objects, and must
    implement ``handle_data(usb_req)`` and
    ``handle_unknown_control(control_req, usb_req)`` for traffic this base
    class does not answer itself.
    """

    def __init__(self, container):
        self.generate_raw_configuration()
        self.usb_container = container

    def generate_raw_configuration(self):
        """Pre-pack the full GET_DESCRIPTOR(Configuration) response blob.

        Concatenates configuration + interface + class-specific (e.g. HID)
        + endpoint descriptors in the order a real device returns them.
        """
        # Local renamed so it no longer shadows the builtin ``str``.
        raw = self.configurations[0].pack()
        raw += self.configurations[0].interfaces[0].pack()
        raw += self.configurations[0].interfaces[0].descriptions[0].pack()
        raw += self.configurations[0].interfaces[0].endpoints[0].pack()
        self.all_configurations = raw

    def send_usb_req(self, usb_req, usb_res, usb_len, status=0):
        """Send a USBIP_RET_SUBMIT reply for *usb_req* carrying *usb_res*.

        ``self.connection`` is the live client socket, assigned by
        USBIPConnection before each request is dispatched.
        """
        self.connection.sendall(USBIPRETSubmit(command=0x3,
                                               seqnum=usb_req.seqnum,
                                               ep=0,
                                               status=status,
                                               actual_length=usb_len,
                                               start_frame=0x0,
                                               number_of_packets=0x0,
                                               interval=0x0,
                                               data=usb_res).pack())

    def handle_get_descriptor(self, control_req, usb_req):
        """Answer GET_DESCRIPTOR control requests.

        Returns True when the request was answered, False otherwise so the
        caller can fall back to handle_unknown_control().
        """
        handled = False
        if control_req.wValue == 0x1:  # device descriptor
            handled = True
            ret = DeviceDescriptor(bDeviceClass=self.bDeviceClass,
                                   bDeviceSubClass=self.bDeviceSubClass,
                                   bDeviceProtocol=self.bDeviceProtocol,
                                   bMaxPacketSize0=0x8,
                                   idVendor=self.vendorID,
                                   idProduct=self.productID,
                                   bcdDevice=self.bcdDevice,
                                   iManufacturer=0,
                                   iProduct=0,
                                   iSerialNumber=0,
                                   bNumConfigurations=1).pack()
            self.send_usb_req(usb_req, ret, len(ret))
        elif control_req.wValue == 0x2:  # configuration descriptor
            handled = True
            # The host may ask for only a prefix of the blob; honour wLength.
            ret = self.all_configurations[:control_req.wLength]
            self.send_usb_req(usb_req, ret, len(ret))
        elif control_req.wValue == 0xA:
            # Unclear request seen from hosts (original comment read
            # "config status ???"); reply with error status 1 and no data.
            handled = True
            self.send_usb_req(usb_req, '', 0, 1)
        return handled

    def handle_set_configuration(self, control_req, usb_req):
        """Acknowledge SET_CONFIGURATION; the emulated device keeps no state."""
        self.send_usb_req(usb_req, '', 0, 0)
        return True

    def handle_usb_control(self, usb_req):
        """Decode the 8-byte SETUP block of *usb_req* and dispatch it."""
        control_req = StandardDeviceRequest()
        control_req.unpack(int_to_hex_string(usb_req.setup))
        handled = False
        if control_req.bmRequestType == 0x80:  # device-to-host standard request
            if control_req.bRequest == 0x06:  # GET_DESCRIPTOR
                handled = self.handle_get_descriptor(control_req, usb_req)
            if control_req.bRequest == 0x00:  # GET_STATUS
                # Two-byte device status; 0x01 presumably flags
                # self-powered -- TODO(review): confirm byte order.
                self.send_usb_req(usb_req, "\x01\x00", 2)
                handled = True
        if control_req.bmRequestType == 0x00:  # host-to-device standard request
            if control_req.bRequest == 0x09:  # SET_CONFIGURATION
                handled = self.handle_set_configuration(control_req, usb_req)
        if not handled:
            self.handle_unknown_control(control_req, usb_req)

    def handle_usb_request(self, usb_req):
        """Route a USBIP_CMD_SUBMIT: endpoint 0 is control, the rest is data."""
        if usb_req.ep == 0:
            self.handle_usb_control(usb_req)
        else:
            self.handle_data(usb_req)
class USBContainer:
    """Registry of emulated USB devices plus the USB/IP TCP server front-end.

    Devices are keyed by a generated bus id of the form '1-1.<n>'.
    """

    def __init__(self):
        # Previously these were class-level dicts shared by every instance;
        # per-instance state prevents cross-container leakage.
        self.usb_devices = {}
        self.attached_devices = {}
        self.devices_count = 0

    def add_usb_device(self, usb_device):
        """Register *usb_device* under the next bus id, initially detached."""
        self.devices_count += 1
        busID = '1-1.' + str(self.devices_count)
        self.usb_devices[busID] = usb_device
        self.attached_devices[busID] = False

    def remove_usb_device(self, usb_device):
        """Unregister *usb_device*; a no-op if it was never registered."""
        for busid, dev in self.usb_devices.items():
            if dev == usb_device:
                del self.attached_devices[busid]
                del self.usb_devices[busid]
                # Only decrement when something was actually removed
                # (the original decremented unconditionally).
                self.devices_count -= 1
                break

    def detach_all(self):
        """Forget every registered device and reset the counter."""
        self.attached_devices = {}
        self.usb_devices = {}
        self.devices_count = 0

    def handle_attach(self, busid):
        """Build the OP_REP_IMPORT reply for an attach request on *busid*.

        Returns None when the slot holds None. NOTE(review): an unknown
        busid raises KeyError here, matching the original behaviour.
        """
        if self.usb_devices[busid] is not None:
            dev = self.usb_devices[busid]
            busnum = int(busid[4:])  # numeric suffix of '1-1.<n>'
            return OPREPImport(base=USBIPHeader(command=3, status=0),
                               usbPath='/sys/devices/pci0000:00/0000:00:01.2/usb1/' + busid,
                               busID=busid,
                               busnum=busnum,
                               devnum=2,
                               speed=2,
                               idVendor=dev.vendorID,
                               idProduct=dev.productID,
                               bcdDevice=dev.bcdDevice,
                               bDeviceClass=dev.bDeviceClass,
                               bDeviceSubClass=dev.bDeviceSubClass,
                               bDeviceProtocol=dev.bDeviceProtocol,
                               bNumConfigurations=dev.bNumConfigurations,
                               bConfigurationValue=dev.bConfigurationValue,
                               bNumInterfaces=dev.bNumInterfaces)

    def handle_device_list(self):
        """Build the OP_REP_DEVLIST reply describing every registered device."""
        devices = {}
        i = 0
        for busid, usb_dev in self.usb_devices.items():
            i += 1
            # NOTE(review): each entry pairs an empty USBIPDevice with the
            # populated one, preserving the original wire-format quirk.
            devices['device' + str(i)] = [USBIPDevice(), USBIPDevice(
                usbPath='/sys/devices/pci0000:00/0000:00:01.2/usb1/' + busid,
                busID=busid,
                busnum=i,
                devnum=2,
                speed=2,
                idVendor=usb_dev.vendorID,
                idProduct=usb_dev.productID,
                bcdDevice=usb_dev.bcdDevice,
                bDeviceClass=usb_dev.bDeviceClass,
                bDeviceSubClass=usb_dev.bDeviceSubClass,
                bDeviceProtocol=usb_dev.bDeviceProtocol,
                bNumConfigurations=usb_dev.bNumConfigurations,
                bConfigurationValue=usb_dev.bConfigurationValue,
                bNumInterfaces=usb_dev.bNumInterfaces,
                interfaces=USBInterface(bInterfaceClass=usb_dev.configurations[0].interfaces[0].bInterfaceClass,
                                        bInterfaceSubClass=usb_dev.configurations[0].interfaces[0].bInterfaceSubClass,
                                        bInterfaceProtocol=usb_dev.configurations[0].interfaces[0].bInterfaceProtocol)
            )]
        return OPREPDevList(devices, len(self.usb_devices))

    def run(self, ip='0.0.0.0', port=3240):
        """Serve USB/IP clients forever on (*ip*, *port*); 3240 is the standard port."""
        self.server = SocketServer.ThreadingTCPServer((ip, port), USBIPConnection)
        self.server.usbcontainer = self
        self.server.serve_forever()
class USBIPConnection(SocketServer.BaseRequestHandler):
    """Per-client handler for the USB/IP TCP protocol (Python 2).

    Serves the discovery phase (OP_REQ_DEVLIST 0x8005, OP_REQ_IMPORT
    0x8003) and, once a device is attached, relays USBIP_CMD_SUBMIT
    packets to the attached emulated device.
    """
    # Connection state: whether an OP_REQ_IMPORT attached a device,
    # and which bus id it was attached to.
    attached = False
    attachedBusID = ''

    def handle(self):
        """Main per-connection loop: discovery first, then URB relaying."""
        print '[' + bcolors.OKBLUE + 'USBIP' + bcolors.ENDC + '] New connection from', self.client_address
        req = USBIPHeader()
        while 1:
            if not self.attached:
                # Discovery phase: read the 8-byte USB/IP operation header.
                data = self.request.recv(8)
                if not data:
                    break
                req.unpack(data)
                print '[' + bcolors.OKBLUE + 'USBIP' + bcolors.ENDC + '] Header packet is valid'
                print '[' + bcolors.OKBLUE + 'USBIP' + bcolors.ENDC + '] Command is', hex(req.command)
                if req.command == 0x8005:  # OP_REQ_DEVLIST
                    print '[' + bcolors.OKBLUE + 'USBIP' + bcolors.ENDC + '] Querying device list'
                    self.request.sendall(self.server.usbcontainer.handle_device_list().pack())
                elif req.command == 0x8003:  # OP_REQ_IMPORT (attach)
                    # Only 5 bytes of the busid field are read (ids look
                    # like '1-1.N'); the next recv(27) discards the rest.
                    # NOTE(review): assumes single-digit device numbers --
                    # confirm the field is 32 bytes on the wire.
                    busid = self.request.recv(5).strip()  # receive bus id
                    print '[' + bcolors.OKBLUE + 'USBIP' + bcolors.ENDC + '] Attaching to device with busid', busid
                    self.request.recv(27)
                    self.request.sendall(self.server.usbcontainer.handle_attach(str(busid)).pack())
                    self.attached = True
                    self.attachedBusID = busid
            else:
                # Attached phase: relay URBs until the device disappears.
                if (not self.attachedBusID in self.server.usbcontainer.usb_devices):
                    self.request.close()
                    break
                else:
                    # First 4 bytes of each packet carry the USB/IP command.
                    command = self.request.recv(4)
                    # NOTE(review): recv() returns a byte string, so comparing
                    # it with the integer 0x00000003 can never be True in
                    # Python 2 -- UNLINK packets presumably always fall
                    # through to the submit branch below. Confirm against the
                    # USB/IP wire protocol before relying on the unlink path.
                    if (command == 0x00000003):  # USBIP_CMD_UNLINK
                        cmd = USBIPCMDUnlink()
                        data = self.request.recv(cmd.size())
                        cmd.unpack(data)
                        print '[' + bcolors.OKBLUE + 'USBIP' + bcolors.ENDC + '] Detaching device with seqnum', cmd.seqnum
                        # We probably don't even need to handle that, the windows client doesn't even send this packet
                    else :
                        # Anything else is treated as USBIP_CMD_SUBMIT.
                        cmd = USBIPCMDSubmit()
                        data = self.request.recv(cmd.size())
                        cmd.unpack(data)
                        usb_req = USBRequest(seqnum=cmd.seqnum,
                                             devid=cmd.devid,
                                             direction=cmd.direction,
                                             ep=cmd.ep,
                                             flags=cmd.transfer_flags,
                                             numberOfPackets=cmd.number_of_packets,
                                             interval=cmd.interval,
                                             setup=cmd.setup,
                                             data=data)
                        # Hand the live socket to the device so it can reply
                        # directly via send_usb_req().
                        self.server.usbcontainer.usb_devices[self.attachedBusID].connection = self.request
                        try:
                            self.server.usbcontainer.usb_devices[self.attachedBusID].handle_usb_request(usb_req)
                        except:
                            # NOTE(review): bare except -- any device-side bug
                            # surfaces only as a dropped connection.
                            print '[' + bcolors.FAIL + 'USBIP' + bcolors.ENDC + '] Connection with client ' + str(self.client_address) + ' ended'
                            break
        self.request.close()
| 22,470 | 6,843 |
from hashlib import sha256
from typing import Union
from eth_typing import Hash32
def hash_eth2(data: Union[bytes, bytearray]) -> Hash32:
"""
Return SHA-256 hash of ``data``.
Note: it's a placeholder and we aim to migrate to a S[T/N]ARK-friendly hash function in
a future Ethereum 2.0 deployment phase.
"""
return Hash32(sha256(data).digest())
| 371 | 133 |
"""Contains functions to annotate macromolecular entities."""
from cogent.core.entity import HIERARCHY
__author__ = "Marcin Cieslik"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Marcin Cieslik"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Marcin Cieslik"
__email__ = "mpc4p@virginia.edu"
__status__ = "Development"
def xtradata(data, entity):
"""Annotates an entity with data from a ``{full_id:data}`` dictionary. The
``data`` should also be a dictionary.
Arguments:
- data: a dictionary, which is a mapping of full_id's (keys) and data
dictionaries.
- entity: top-level entity, which contains the entities which will hold
the data."""
for full_id, data in data.iteritems():
sub_entity = entity
strip_full_id = [i for i in full_id if i is not None]
for short_id in strip_full_id:
sub_entity = sub_entity[(short_id,)]
sub_entity.xtra.update(data)
| 1,011 | 341 |
# Copyright (c) 2019, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from typing import List
class KafkaOffsetsHandler:
def store_or_update_Kafka_offset(self, topic_partition: str, offset_start: str, offset_until: str)->bool:
"""
Store or Update kafka topic offsets. Offsets are used to track what messages have been processed.
Args:
topic (str): name of the kafka topic
offset_start (str): starting of offset
offset_until (str): last processed offset
Raises:
ValueError: All params are required.
Exception: Cannot add/update kafka offsets because ERROR-MESSAGE
Returns:
bool: returns True if offsets are add/updated or throws an exception.
"""
if not topic_partition and not offset_start and not offset_until:
raise ValueError("All params are required.")
try:
qry = "REPLACE INTO " + self.kafkaOffsetsTable + " (topic, topic_partition, offset_start, offset_until) VALUES(%s, %s, %s, %s)"
vals = str(self.study_name), str(topic_partition), str(offset_start), json.dumps(offset_until)
self.execute(qry, vals, commit=True)
return True
except Exception as e:
raise Exception("Cannot add/update kafka offsets because "+str(e))
def get_kafka_offsets(self) -> List[dict]:
"""
Get last stored kafka offsets
Returns:
list[dict]: list of kafka offsets. This method will return empty list if topic does not exist and/or no offset is stored for the topic.
Raises:
ValueError: Topic name cannot be empty/None
Examples:
>>> CC = CerebralCortex("/directory/path/of/configs/")
>>> CC.get_kafka_offsets("live-data")
>>> [{"id","topic", "topic_partition", "offset_start", "offset_until", "offset_update_time"}]
"""
results = []
qry = "SELECT * from " + self.kafkaOffsetsTable + " where topic = %(topic)s order by id DESC"
vals = {'topic': str(self.study_name)}
rows = self.execute(qry, vals)
if rows:
for row in rows:
results.append(row)
return results
else:
return []
| 3,618 | 1,114 |
# -*- coding: utf-8 -*-
from celery.task import task
from webservice.mail import send_reply_mail
@task
def send_email_task(receiver, title, content, did, username, diary_title):
send_reply_mail(receiver, title, content, did, username, diary_title)
| 255 | 90 |
import os
import tensorflow as tf
import numpy as np
import rejection_network
class RejectionSystem():
def __init__(self):
self.dir_path = os.path.dirname(os.path.abspath(__file__))
self._train_dir = os.path.join(self.dir_path, "Data/Train/")
self._valid_dir = os.path.join(self.dir_path, "Data/Valid/")
# training setting
self._training_epoches = 100
self._number_of_minibatches = 20
self._rejection_net = rejection_network.Network()
self._initialize_training = True
self._debug = False
def load_data(self):
train_images = np.load(self._train_dir + "train_images.npy")
train_targets = np.load(self._train_dir + "train_targets.npy")
valid_images = np.load(self._valid_dir + "valid_images.npy")
valid_targets = np.load(self._valid_dir + "valid_targets.npy")
return train_images, train_targets, valid_images, valid_targets
def prepare_training_batches(self, inputs, targets):
data_amount = np.shape(targets)[0]
perm = np.arange(data_amount)
np.random.shuffle(perm)
inputs = inputs[perm]
targets = targets[perm]
inputs_batches = np.split(inputs, self._number_of_minibatches)
targets_batches = np.split(targets, self._number_of_minibatches)
return inputs_batches, targets_batches
def train_model(self, train_images, train_targets, valid_images, valid_targets):
TFgraph, images_placeholder, targets_placeholder, whether_training_placeholder, safety_scores, loss, train_step = self._rejection_net.build_rejection_network()
model_loc = self._rejection_net.model_loc
with TFgraph.as_default():
with tf.Session() as sess:
saver = tf.train.Saver()
if(self._initialize_training):
print("Initialize parameters and train from scratch...")
sess.run(tf.global_variables_initializer())
else:
print("Resume training on model loaded from {}...".format(model_loc))
saver.restore(sess, model_loc)
for i in range(1, self._training_epoches+1):
train_images_batches, train_targets_batches = self.prepare_training_batches(train_images, train_targets)
train_loss_avg = 0
for j in range(self._number_of_minibatches):
_, train_loss, train_scores = sess.run([train_step, loss, safety_scores], feed_dict={
images_placeholder: train_images_batches[j],
targets_placeholder: train_targets_batches[j],
whether_training_placeholder: True
})
train_loss_avg += train_loss/self._number_of_minibatches
valid_loss, valid_scores = sess.run([loss, safety_scores], feed_dict={
images_placeholder: valid_images,
targets_placeholder: valid_targets,
whether_training_placeholder: False
})
if(self._debug):
print(valid_scores)
print("{}/{} Epoch. Avg CE: Train {} | Valid {}".format(i, self._training_epoches, train_loss_avg, valid_loss))
saver.save(sess, model_loc)
print("Trained model saved at {}!".format(model_loc))
return
if(__name__=="__main__"):
rejection_system = RejectionSystem()
train_images, train_targets, valid_images, valid_targets = rejection_system.load_data()
print("Data Loading Completed!")
rejection_system.train_model(train_images, train_targets, valid_images, valid_targets)
| 3,790 | 1,097 |
import requests
import inflect
#create a file called secrets.py and place your googleAPI key in a var called youtube_api_key DO NOT POSTS THIS TO GITHUB
from lifxlan import *
# from random import randint, betavariate
from time import sleep
from examples.secrets import youtube_api_key
from pylifxtiles import actions
from pylifxtiles import objects
from pylifxtiles.alphanum import nums
from pylifxtiles import colors
channel_name = 'UCQHfJyIROQhDFUOJKVBiLog'
my_tile = 'T1'
def main():
target_tilechain = my_tile
lan = LifxLAN()
tilechain_lights = lan.get_tilechain_lights()
print(len(tilechain_lights))
if len(tilechain_lights) != 0:
for tile in tilechain_lights:
if tile.get_label() == target_tilechain:
print(tile.label)
# if tile.get_label() == 'TEST':
target_tilechain = tile
duration_ms = 1000
try:
# original_colors = reset_tiles(T1)
run = 0
target_color_map = actions.reset_tiles(target_tilechain)
original_colors = [actions.blank_tile()] * 5
objects.draw_youtube(target_tilechain, 0)
while (True):
# T1.set_tile_colors(0,youtube,rapid=True)
subs = get_subs(channel_name, youtube_api_key)
tile = 1
for number in subs:
blank_tile = actions.blank_tile()
print(number)
for led in nums[number]:
target_color_map[tile][led] = (colors.dblue, 65535, colors.fourty, 4900)
target_tilechain.set_tile_colors(tile, target_color_map[tile])
print(tile)
tile += 1
run += 1
print('This is run ' + str(run) + ' with ' + str(subs) + ' subscribers')
# sleeps for 1/2h
sleep(1200)
except KeyboardInterrupt:
print("Done.")
else:
print("No TileChain lights found.")
def get_subs(channel_name, api_key):
num_of_subs = []
data = requests.get(
"https://www.googleapis.com/youtube/v3/channels?part=statistics&id=" + channel_name + "&key=" + api_key)
subs = data.json()['items'][0]['statistics']['subscriberCount']
for i in subs:
p = inflect.engine()
num_of_subs.append(p.number_to_words(int(i)))
return num_of_subs
if __name__ == "__main__":
main() | 2,370 | 807 |
import socket
import boto3
from botocore.exceptions import ClientError
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def delete_route(client, vpn_endpoint, subnet, cidr):
client.delete_client_vpn_route(
ClientVpnEndpointId=vpn_endpoint,
TargetVpcSubnetId=subnet,
DestinationCidrBlock=cidr,
)
def create_route(client, event, cidr):
client.create_client_vpn_route(
ClientVpnEndpointId=event['ClientVpnEndpointId'],
DestinationCidrBlock=cidr,
TargetVpcSubnetId=event['TargetSubnet'],
Description=f"cfnvpn auto generated route for endpoint {event['Record']}. {event['Description']}"
)
def revoke_route_auth(client, event, cidr, group = None):
args = {
'ClientVpnEndpointId': event['ClientVpnEndpointId'],
'TargetNetworkCidr': cidr
}
if group is None:
args['RevokeAllGroups'] = True
else:
args['AccessGroupId'] = group
client.revoke_client_vpn_ingress(**args)
def authorize_route(client, event, cidr, group = None):
args = {
'ClientVpnEndpointId': event['ClientVpnEndpointId'],
'TargetNetworkCidr': cidr,
'Description': f"cfnvpn auto generated authorization for endpoint {event['Record']}. {event['Description']}"
}
if group is None:
args['AuthorizeAllGroups'] = True
else:
args['AccessGroupId'] = group
client.authorize_client_vpn_ingress(**args)
def get_routes(client, event):
response = client.describe_client_vpn_routes(
ClientVpnEndpointId=event['ClientVpnEndpointId'],
Filters=[
{
'Name': 'origin',
'Values': ['add-route']
}
]
)
routes = [route for route in response['Routes'] if event['Record'] in route['Description']]
logger.info(f"found {len(routes)} exisiting routes for {event['Record']}")
return routes
def get_rules(client, vpn_endpoint, cidr):
response = client.describe_client_vpn_authorization_rules(
ClientVpnEndpointId=vpn_endpoint,
Filters=[
{
'Name': 'destination-cidr',
'Values': [cidr]
}
]
)
return response['AuthorizationRules']
def handler(event,context):
# DNS lookup on the dns record and return all IPS for the endpoint
try:
cidrs = [ ip + "/32" for ip in socket.gethostbyname_ex(event['Record'])[-1]]
logger.info(f"resolved endpoint {event['Record']} to {cidrs}")
except socket.gaierror as e:
logger.exception(f"failed to resolve record {event['Record']}")
return 'KO'
client = boto3.client('ec2')
routes = get_routes(client, event)
for cidr in cidrs:
route = next((route for route in routes if route['DestinationCidr'] == cidr), None)
# if there are no existing routes for the endpoint cidr create a new route
if route is None:
try:
create_route(client, event, cidr)
if 'Groups' in event:
for group in event['Groups']:
authorize_route(client, event, cidr, group)
else:
authorize_route(client, event, cidr)
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidClientVpnDuplicateRoute':
logger.error(f"route for CIDR {cidr} already exists with a different endpoint")
continue
raise e
# if the route already exists
else:
logger.info(f"route for cidr {cidr} is already in place")
# if the target subnet has changed in the payload, recreate the routes to use the new subnet
if route['TargetSubnet'] != event['TargetSubnet']:
logger.info(f"target subnet for route for {cidr} has changed, recreating the route")
delete_route(client, event['ClientVpnEndpointId'], route['TargetSubnet'], cidr)
create_route(client, event, cidr)
logger.info(f"checking authorization rules for the route")
# check the rules match the payload
rules = get_rules(client, event['ClientVpnEndpointId'], cidr)
existing_groups = [rule['GroupId'] for rule in rules]
if 'Groups' in event:
# remove expired rules not defined in the payload anymore
expired_rules = [rule for rule in rules if rule['GroupId'] not in event['Groups']]
for rule in expired_rules:
logger.info(f"removing expired authorization rule for group {rule['GroupId']} for route {cidr}")
revoke_route_auth(client, event, cidr, rule['GroupId'])
# add new rules defined in the payload
new_rules = [group for group in event['Groups'] if group not in existing_groups]
for group in new_rules:
logger.info(f"creating new authorization rule for group {rule['GroupId']} for route {cidr}")
authorize_route(client, event, cidr, group)
else:
# if amount of rules for the cidr is greater than 1 when no groups are specified in the payload
# we'll assume that all groups have been removed from the payload so we'll remove all existing rules and add a rule for allow all
if len(rules) > 1:
logger.info(f"creating an allow all rule for route {cidr}")
revoke_route_auth(client, event, cidr)
authorize_route(client, event, cidr)
# clean up any expired routes when the ips for an endpoint change
expired_routes = [route for route in routes if route['DestinationCidr'] not in cidrs]
for route in expired_routes:
logger.info(f"removing expired route {route['DestinationCidr']} for endpoint {event['Record']}")
try:
revoke_route_auth(client, event, route['DestinationCidr'])
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidClientVpnEndpointAuthorizationRuleNotFound':
pass
else:
raise e
try:
delete_route(client, event['ClientVpnEndpointId'], route['TargetSubnet'], route['DestinationCidr'])
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidClientVpnRouteNotFound':
pass
else:
raise e
return 'OK' | 6,031 | 1,843 |