| id (string, length 3–8) | content (string, length 100–981k) |
|---|---|
108442
|
import os
import json
from numbers import Number
from collections.abc import Iterable, Mapping  # collections.abc: required on Python 3.10+
from operator import itemgetter
from .config import set_class_path, JavaSettingsConstructorParams
set_class_path()
from jnius import autoclass, MetaJavaClass
# Java DataTypes
jMap = autoclass('java.util.HashMap')
jArrayList = autoclass('java.util.ArrayList')
jList = autoclass('java.util.List')
jInt = autoclass('java.lang.Integer')
jLong = autoclass('java.lang.Long')
jFloat = autoclass('java.lang.Float')
jDouble = autoclass('java.lang.Double')
jString = autoclass('java.lang.String')
jBoolean = autoclass('java.lang.Boolean')
# Custom Java Classes
Settings = autoclass('ai.digamma.entities.Settings')
Service = autoclass('ai.digamma.service.DateTimeExtractor')
SettingsBuilder = autoclass('ai.digamma.utils.SettingsBuilder')
class JavaPrimitive(object):
'''
Convert primitives to their corresponding Java-types based on size
'''
def __return_value(self, javaObj, isValue, attr):
if isValue:
return getattr(javaObj, attr)()
else:
return javaObj
def __call__(self, obj, isValue = False):
if isinstance(obj, int):
if isinstance(obj, bool):
return self.__return_value(jBoolean(obj), isValue, 'booleanValue')
            if jInt.MIN_VALUE <= obj <= jInt.MAX_VALUE:  # fits in a 32-bit Integer
return self.__return_value(jInt(obj), isValue, 'intValue')
else:
return self.__return_value(jLong(obj), isValue, 'longValue')
elif isinstance(obj, float):
            if abs(obj) <= jFloat.MAX_VALUE:  # fits in a 32-bit Float
return self.__return_value(jFloat(obj), isValue, 'floatValue')
else:
return self.__return_value(jDouble(obj), isValue, 'doubleValue')
elif isinstance(obj, str):
return jString(obj)
class JavaComposite(object):
def __init__(self):
self.primitives = (Number, str)
self.primitiveConverter = JavaPrimitive()
def __call__(self, obj, isValue = False):
'''
Recursively convert Python objects to composite Java objects (e.g. Java Map<String, Object>)
:param obj: Python object
'''
try:
if isinstance(obj, self.primitives):
return self.primitiveConverter(obj, isValue)
elif isinstance(obj.__class__, MetaJavaClass):
return obj
elif isinstance(obj, Mapping):
HashMap = jMap()
for key, value in obj.items():
hashMapKey = self(key, isValue)
hashMapValue = self(value, isValue = False)
HashMap.put(hashMapKey, hashMapValue)
return HashMap
elif isinstance(obj, Iterable):
JavaArrayList = jArrayList()
for element in obj:
temp = self(element)
JavaArrayList.add(temp)
return JavaArrayList
else:
return jString(str(obj))
except Exception as e:
print(repr(e))
raise e
class PySettings(object):
JavaSettings = Settings
Converter = JavaComposite()
def __init__(self, **kwargs):
self.kwargs = kwargs
self.build_java_settings_obj()
def build_java_settings_obj(self):
if self.kwargs:
temp = dict()
for param, value in self.kwargs.items():
temp[param] = self.Converter(value, isValue = True)
JavaParams = itemgetter(*JavaSettingsConstructorParams)(temp)
self.javaSettingsObj = self.JavaSettings(*JavaParams)
else:
self.javaSettingsObj = self.JavaSettings()
def __call__(self):
return self.javaSettingsObj
class PySettingsBuilder(object):
JavaSettingsBuilder = SettingsBuilder
Converter = JavaComposite()
def __init__(self, javaBuilderObj=None):
self.javaBuilderObj = javaBuilderObj if javaBuilderObj else self.JavaSettingsBuilder()
def __set_java_builder(self, newJavaBuilderObj):
self.javaBuilderObj = newJavaBuilderObj
return self
def build(self):
pySettings = PySettings()
pySettings.javaSettingsObj = self.javaBuilderObj.build()
return pySettings
def __getattr__(self, attr):
if hasattr(self.javaBuilderObj, attr):
def wrapper(*args, **kwargs):
args = [self.Converter(arg, isValue=True) for arg in args]
for key, value in kwargs.items():
kwargs[key] = self.Converter(value, isValue=True)
rez = getattr(self.javaBuilderObj, attr)(*args, **kwargs)
return self.__set_java_builder(rez)
return wrapper
raise AttributeError(attr)
class ExtractionService(object):
JavaService = Service
Converter = JavaComposite()
@classmethod
def extract(cls, text, settings = None):
if not isinstance(text, (str, jString)):
raise TypeError('Text argument should be of type str or java.lang.String. Got {0} instead'.format(type(text)))
if settings:
if not isinstance(settings, (PySettings, Settings)):
raise TypeError('Settings argument should be of type PySettings or ai.digamma.entities.Settings. Got {0} instead'.format(type(settings)))
elif isinstance(settings, PySettings):
settings = settings()
ServiceParams = (cls.Converter(text), cls.Converter(settings))
else:
ServiceParams = (cls.Converter(text),)
rez = cls.JavaService.extractJSON(*ServiceParams)
return json.loads(rez)
@classmethod
def extractFromCsv(cls, csvPath, outputPath, settings, separator = ','):
if not isinstance(settings, (PySettings, Settings)):
raise TypeError('Settings argument should be of type PySettings or ai.digamma.entities.Settings. Got {0} instead'.format(type(settings)))
elif isinstance(settings, PySettings):
settings = settings()
rez = cls.JavaService.extractJSONFromCsv(csvPath, separator, outputPath, settings)
return json.loads(rez)
if __name__=='__main__':
settings = (PySettingsBuilder()
.addRulesGroup('DurationGroup')
.excludeRules("holidaysRule")
.addUserDate("2017-10-23T18:40:40.931Z")
.addTimeZoneOffset("100")
.includeOnlyLatestDates(True)
.build()
)
text = "10-15 month"
rez = ExtractionService.extract(text, settings)
print(rez)
|
108444
|
from django.urls import path
from project_first_app.views import *
urlpatterns = [
    path('getowners/<int:ow_id>', detail, name='detail'),
    path('allowners', show_owners, name='showowners'),
    path('allcars', Show_cars.as_view(template_name="cars_list.html")),
    path('createowners', createowner, name='createowner'),
path('createcars/', Car_create.as_view()),
]
|
108512
|
import FWCore.ParameterSet.Config as cms
ecalSCDynamicDPhiParametersESProducer = cms.ESProducer("EcalSCDynamicDPhiParametersESProducer",
# Parameters from the analysis by <NAME> [https://indico.cern.ch/event/949294/contributions/3988389/attachments/2091573/3514649/2020_08_26_Clustering.pdf]
# dynamic dPhi parameters depending on cluster energy and seed crystal eta
dynamicDPhiParameterSets = cms.VPSet(
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(2.),
yoffset = cms.double(0.0928887),
scale = cms.double(1.22321),
xoffset = cms.double(-0.260256),
width = cms.double(0.345852),
saturation = cms.double(0.12),
cutoff = cms.double(0.3)
),
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(1.75),
yoffset = cms.double(0.05643),
scale = cms.double(1.60429),
xoffset = cms.double(-0.642352),
width = cms.double(0.458106),
saturation = cms.double(0.12),
cutoff = cms.double(0.45)
),
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(1.479),
yoffset = cms.double(0.0497038),
scale = cms.double(0.975707),
xoffset = cms.double(-0.18149),
width = cms.double(0.431729),
saturation = cms.double(0.14),
cutoff = cms.double(0.55)
),
cms.PSet(
eMin = cms.double(0.),
etaMin = cms.double(0.),
yoffset = cms.double(0.0280506),
scale = cms.double(0.946048),
xoffset = cms.double(-0.101172),
width = cms.double(0.432767),
saturation = cms.double(0.14),
cutoff = cms.double(0.6)
)
)
)
|
108532
|
import unittest
import penmon as pm
class Test(unittest.TestCase):
def test_daylight_hours(self):
station = pm.Station(41.42, 109)
day = station.day_entry(135)
day.temp_min = 19.5
day.temp_max = 28
        self.assertEqual(day.daylight_hours(), 14.3, "daylight_hours")
if __name__ == "__main__":
unittest.main()
|
108574
|
from pymol.cgo import *
from pymol import cmd
from random import random, seed
from chempy import cpv
# CGO cones
# first draw some walls
obj = [
COLOR, 1.0, 1.0, 1.0,
BEGIN, TRIANGLE_STRIP,
NORMAL, 0.0, 0.0, 1.0,
VERTEX, 0.0, 0.0, 0.0,
VERTEX, 10.0, 0.0, 0.0,
VERTEX, 0.0, 10.0, 0.0,
VERTEX, 10.0, 10.0, 0.0,
END,
BEGIN, TRIANGLE_STRIP,
NORMAL, 1.0, 0.0, 0.0,
VERTEX, 0.0, 0.0, 0.0,
VERTEX, 0.0, 10.0, 0.0,
VERTEX, 0.0, 0.0, 10.0,
VERTEX, 0.0, 10.0, 10.0,
END,
BEGIN, TRIANGLE_STRIP,
NORMAL, 0.0, 1.0, 0.0,
VERTEX, 0.0, 0.0, 0.0,
VERTEX, 0.0, 0.0, 10.0,
VERTEX, 10.0, 0.0, 0.0,
VERTEX, 10.0, 0.0, 10.0,
END
]
seed(0x1)
def random_conic(box, size, min_axis):
    # return a random CGO cone record of the form:
    # [ CONE, x0, y0, z0, x1, y1, z1, r1, r2, red0, green0, blue0, red1, green1, blue1, cap1, cap2 ]
    # where both endpoints lie inside the box and the radii are scaled by size.
box = box - size
tmp0 = [ size + random() * box, size + random() * box, size + random() * box ]
tmp1 = cpv.random_vector()
tmp2 = cpv.scale(tmp1,box/10)
tmp1 = cpv.add(tmp2,tmp0)
return [ CONE,
tmp0[0], tmp0[1], tmp0[2], # coordinates
tmp1[0], tmp1[1], tmp1[2],
(abs(random())*0.4+0.2) * size, # radii
(abs(random())*0.1+0.01) * size,
random(), random(), random(), # colors
random(), random(), random(),
1.0, 1.0 ]
for count in range(50):
obj.extend( random_conic(10.0, 1.5, 0.2) )
# then we load it into PyMOL
cmd.load_cgo(obj,'cgo08')
# rotate the view
cmd.turn('y',-45)
cmd.turn('x',30)
# zoom out a bit
cmd.zoom('all', 2)
# move the rear clipping plane back a bit to brighten things up
cmd.clip('far',-5)
|
108651
|
import pandas as pd
import glob
import os
import copy
'''
BankStatementAnalyzer --> To instantiate this class, the following parameters are mandatory:
statementfolder --> Local file system folder where your bank statements are present. The folder can contain
multiple files. All the files should have the same header, and the first line of each file should be the header.
Files can have a .csv, .txt or any other plain-text extension.
statementname --> A name for this bank statement. Example values: creditcard, debit, savingaccount
columnmap --> The user needs to provide a mapping for the following 3 columns of the csv, as these are the important
columns in a bank statement and they are used to analyze the data.
If your bank statement contains these 3 columns in its header:
Posted Date, Payee, Amount
then you need to provide the following json as columnmap:
{"Posted Date": "posted_date", "Payee": "payee", "Amount": "amount"}
dateformat --> The format of the data in the posted_date column
outputfolder --> The folder where the analyzed monthly split-up will be saved by the program. The user running the python script
should have access to this folder, and the folder should already exist.
'''
class BankStatementAnalyzer:
def __init__(self, statementfolder, statementname, columnmap, dateformat, outputfolder):
self.statementfolder = statementfolder
self.columnmap = columnmap
self.outputfolder = outputfolder
self.statementname = statementname
self.dateformat = dateformat
'''
This function gets the list of files present on the folder provided by the user
'''
    def __getfilelist(self, statementfolder):
        if os.path.isdir(statementfolder):
            return [f for f in glob.glob(statementfolder + "*") if os.path.isfile(f)]
        else:
            raise NotADirectoryError("Path provided is not a folder; check that the path name ends with a /")
    '''
This function gets the list of all files and creates a single dataframe consisting of data from all files.
'''
def __createdataframe(self):
transdataframe = None
filelist = self.__getfilelist(self.statementfolder)
        for ind, itm in enumerate(filelist):
            if ind == 0:
                transdataframe = pd.read_csv(itm, parse_dates=True, infer_datetime_format=True)
                transdataframe.rename(index=str, columns=self.columnmap, inplace=True)
            else:
                tempdataframe = pd.read_csv(itm)
                tempdataframe.rename(index=str, columns=self.columnmap, inplace=True)
                transdataframe = pd.concat([transdataframe, tempdataframe], ignore_index=True)
transdataframe = transdataframe.assign(
mon_year=(lambda x: pd.to_datetime(x.posted_date, format= self.dateformat).dt.year * 100 +
pd.to_datetime(x.posted_date, format= self.dateformat).dt.month))
return transdataframe
    '''
    This function splits the input data into months and stores it as csv files in the output folder
    '''
    def writeoutput(self):
        if os.path.isdir(self.outputfolder):
            transdataframe = self.__createdataframe()
            for i in transdataframe.mon_year.unique():
                mon_year_mask = transdataframe['mon_year'].map(lambda x: x == i)
                ((transdataframe[mon_year_mask])[['posted_date', 'payee', 'amount']]).to_csv(
                    self.outputfolder + str(i) + "_" + self.statementname + ".csv", index=False)
        else:
            raise NotADirectoryError("Path provided is not a folder; check that the path name ends with a /")
    '''
    This function returns all the transactions that have positive value
    '''
def grouptransbyposval(self):
transdataframe = self.__createdataframe()
pos_amt_mask = transdataframe['amount'].map(lambda x: x > 0)
posvaldataframe = (transdataframe[pos_amt_mask]).groupby(['mon_year'],
as_index=False)[["amount"]].sum()
posvaldataframe.set_index("mon_year", inplace=True)
return posvaldataframe
    '''
    This function returns all the transactions that have negative value
    '''
def grouptransbynegval(self):
transdataframe = self.__createdataframe()
neg_amt_mask = transdataframe['amount'].map(lambda x: x < 0)
negvaldataframe = (transdataframe[neg_amt_mask]).groupby(['mon_year'], as_index=False)[["amount"]].sum()
negvaldataframe.set_index("mon_year", inplace=True)
return negvaldataframe
    '''
    This function groups transactions by payee
    '''
def grouptransbypayee(self):
transdataframe = self.__createdataframe()
payeegroupbydf = ((transdataframe).groupby(['payee', 'mon_year'], as_index=False)[["amount"]]).sum()
return payeegroupbydf
    '''
    This static function merges the grouped result dataframes of 2 or more statements
    '''
@staticmethod
def mergestatement(*statements):
returnstatement = None
for stmnt in statements:
if returnstatement is None:
returnstatement = copy.deepcopy(stmnt)
else:
returnstatement = returnstatement.add(stmnt, fill_value=0)
return returnstatement
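# A minimal usage sketch (the paths, statement name and date format below are
# hypothetical placeholders, not from the original source).
if __name__ == '__main__':
    columnmap = {"Posted Date": "posted_date", "Payee": "payee", "Amount": "amount"}
    analyzer = BankStatementAnalyzer(
        statementfolder="/tmp/statements/", statementname="creditcard",
        columnmap=columnmap, dateformat="%m/%d/%Y", outputfolder="/tmp/monthly/")
    analyzer.writeoutput()
    print(analyzer.grouptransbypayee())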
|
108686
|
import os
import tempfile
import zipfile
from datetime import datetime, timedelta, timezone
import boto3
import pytz
from busshaming.models import Feed, FeedTimetable
from busshaming.data_processing import upsert_timetable_data
S3_BUCKET_NAME = os.environ.get('S3_BUCKET_NAME', 'busshaming-timetable-dumps')
FEED_TIMEZONE = pytz.timezone('Australia/Sydney')
def download_zip(timetable_feed, temp_dir):
client = boto3.client('s3')
file_prefix = f'{timetable_feed.feed.slug}/{timetable_feed.id}/'
last_processed_file = timetable_feed.last_processed_zip
if last_processed_file is not None:
response = client.list_objects_v2(Bucket=S3_BUCKET_NAME, Prefix=file_prefix, StartAfter=last_processed_file)
else:
response = client.list_objects_v2(Bucket=S3_BUCKET_NAME, Prefix=file_prefix)
if response['KeyCount'] != 0:
print(f'{response["KeyCount"]} new timetable data for {timetable_feed}')
key = response['Contents'][0]['Key']
print(f'Downloading file: {key}')
s3 = boto3.resource('s3')
tmp_path = os.path.join(temp_dir, key.split('/')[-1])
s3.Object(S3_BUCKET_NAME, key).download_file(tmp_path)
return tmp_path, key
print(f'No new timetable data for {timetable_feed}')
return None, None
def fill_tripdate_gap(feed, timetable_feed, until_time, temp_dir):
if not timetable_feed.last_processed_zip:
return
if not timetable_feed.processed_watermark:
timetable_feed.processed_watermark = datetime_from_s3_key(timetable_feed.last_processed_zip) + timedelta(days=13)
# If it's been a long time since the last timetable, we need to update the trip dates
# since we only fill them 2 weeks in advance
if timetable_feed.processed_watermark < until_time:
s3 = boto3.resource('s3')
tmp_path = os.path.join(temp_dir, timetable_feed.last_processed_zip.split('/')[-1])
s3.Object(S3_BUCKET_NAME, timetable_feed.last_processed_zip).download_file(tmp_path)
        while timetable_feed.processed_watermark < until_time:
            new_fetchtime = timetable_feed.processed_watermark
            print(f'Updating tripdate gap from time: {new_fetchtime} using file {timetable_feed.last_processed_zip}')
            success, limit = process_zip(feed, tmp_path, new_fetchtime)
            if not success:
                # bail out rather than loop forever on a zip that fails to process
                break
            timetable_feed.processed_watermark = limit
            timetable_feed.save()
            print(f'Updated up to {limit} now.')
def datetime_from_s3_key(obj_key):
datestr = os.path.split(obj_key)[1].rstrip('.zip')
fetchtime = datetime.strptime(datestr, '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=timezone.utc)
return fetchtime
def fetch_and_process_timetables():
feed = Feed.objects.get(slug='nsw-buses')
timetable_feeds = FeedTimetable.objects.filter(feed=feed, active=True).order_by('id').prefetch_related('feed')
for timetable_feed in timetable_feeds:
print(f'Looking at {timetable_feed}')
with tempfile.TemporaryDirectory() as temp_dir:
tmp_path, obj_key = download_zip(timetable_feed, temp_dir)
if tmp_path is not None:
# Fill potential trip date gap:
fill_tripdate_gap(feed, timetable_feed, datetime_from_s3_key(obj_key), temp_dir)
# Get datetime from filename
fetchtime = datetime_from_s3_key(obj_key)
success, limit = process_zip(feed, tmp_path, fetchtime)
if success:
timetable_feed.processed_watermark = limit
timetable_feed.last_processed_zip = obj_key
timetable_feed.save()
os.remove(tmp_path)
            else:
                # If there are no updates, we need to make sure we keep filling out the tripdates.
                # We need the timetable to be filled out up to 7 days from now.
                next_week = datetime.utcnow().replace(tzinfo=timezone.utc) + timedelta(days=7)
                fill_tripdate_gap(feed, timetable_feed, next_week, temp_dir)
def process_zip(feed, tmp_path, fetchtime):
    with zipfile.ZipFile(tmp_path) as zfile:
        return upsert_timetable_data.process_zip(feed, zfile, fetchtime)
|
108723
|
from unittest import mock
from nimoy.specification import Specification
class FeatureBlockRuleEnforcerSpec(Specification):
def where_function_is_called_before_feature(self):
with expect:
class SomeSpec(Specification):
def test_something(self, where_visited=False):
where_visited == True
def test_something_where(self, data_to_inject):
isinstance(data_to_inject, dict) == True
data_to_inject['where_visited'] = [True]
SomeSpec().test_something()
def normal_feature_is_called(self):
with expect:
class SomeSpec(Specification):
def test_something(self, where_visited=False):
where_visited == False
SomeSpec().test_something()
class InternalSpecificationMethodsSpec(Specification):
# By default the unittest mock fails when a mocked method begins with the string "assert" or "assret"
@staticmethod
def _mark_mock_as_unsafe(m):
m.return_value._mock_unsafe = True
def feature_block_context_is_returned(self):
with given:
class SomeSpec(Specification):
pass
spec = SomeSpec()
with when:
feature_block = spec._feature_block_context('jimbob')
with then:
feature_block.block_type == 'jimbob'
feature_block.thrown_exceptions == spec.thrown_exceptions
@mock.patch('nimoy.specification.Compare')
def internal_comparison_is_called(self, compare_mock):
with given:
class SomeSpec(Specification):
pass
with when:
SomeSpec()._compare('a', 'b', 'some_name')
with then:
1 * compare_mock.return_value.compare('a', 'b', 'some_name')
@mock.patch('nimoy.specification.PowerAssertions')
def internal_power_assertion_is_called(self, power_assert_mock):
with given:
InternalSpecificationMethodsSpec._mark_mock_as_unsafe(power_assert_mock)
class SomeSpec(Specification):
pass
with when:
SomeSpec()._power_assert({'a': 'b'})
with then:
1 * power_assert_mock.return_value.assert_and_raise({'a': 'b'})
@mock.patch('nimoy.specification.MockAssertions')
def internal_mock_assertion_is_performed(self, mock_assertions_mock):
with given:
InternalSpecificationMethodsSpec._mark_mock_as_unsafe(mock_assertions_mock)
class SomeSpec(Specification):
pass
some_mock = mock.Mock()
args = ['a', 'b']
with when:
SomeSpec()._assert_mock(1, some_mock, 'some_method', args)
with then:
1 * mock_assertions_mock.return_value.assert_mock(1, some_mock, 'some_method', args)
@mock.patch('nimoy.specification.ExceptionAssertions')
def internal_exception_assertion_is_performed(self, exception_assertions_mock):
with given:
InternalSpecificationMethodsSpec._mark_mock_as_unsafe(exception_assertions_mock)
class SomeSpec(Specification):
pass
spec = SomeSpec()
with when:
spec._exception_thrown(ArithmeticError)
with then:
1 * exception_assertions_mock.return_value.assert_exception(spec.thrown_exceptions, ArithmeticError)
|
108738
|
import requests
import Models.network
LEADERBOARD_HEROKU = 'https://lmtservice.herokuapp.com/leaderboard/'
HISTORY_HEROKU = 'https://lormaster.herokuapp.com/history/'
SEARCH_HEROKU = 'https://lormaster.herokuapp.com/search/'
TAG_HEROKU = 'https://lormaster.herokuapp.com/tag/'
class Heroku():
def __init__(self, leaderboard) -> None:
self.leaderboard = leaderboard
self.session = requests.Session()
def getTag(self, server, name):
tagLink = TAG_HEROKU + server + '/' + name
try:
tagRequest = self.session.get(
tagLink, proxies=Models.network.getProxy())
except requests.exceptions.RequestException as e:
            print('getTag error: ', e)
return None
if tagRequest.ok:
return tagRequest.json()
print(tagRequest.headers)
print(tagRequest.status_code)
return None
def getHistory(self, server, name, id):
leaderboardLink = HISTORY_HEROKU + server + '/' + name + '/' + id
try:
historyRequest = self.session.get(
leaderboardLink, proxies=Models.network.getProxy())
except requests.exceptions.RequestException as e:
            print('getHistory error: ', e)
return None
if historyRequest.ok:
return historyRequest.json()
print(historyRequest.headers)
print(historyRequest.status_code)
return None
def getSearch(self, server, name, id):
detailLink = SEARCH_HEROKU + server + '/' + name + '/' + id
try:
detailRequest = self.session.get(
detailLink, proxies=Models.network.getProxy())
except requests.exceptions.RequestException as e:
            print('getSearch error: ', e)
return None
if detailRequest.ok:
details = detailRequest.json()
for detail in details:
self.addPlayerInfo(detail, server)
return details
print(detailRequest.headers)
print(detailRequest.status_code)
return None
def addPlayerInfo(self, detail, server):
try:
playerNames = detail['playernames']
except Exception as e:
            print('addPlayerInfo error', e)
return detail
playernames = []
player_info = []
for name in playerNames:
fullName = name.split('#', 1)
name, tag = fullName[0], fullName[1]
rank, lp = self.leaderboard.checkRank(name, server)
playernames.append(name + '#' + tag)
player_info.append(
{'name': name, 'tag': tag, 'rank': rank, 'lp': lp})
        detail['player_info'] = player_info
        return detail
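# A minimal usage sketch with a stub leaderboard (the stub and the server/player
# names below are hypothetical, not from the original source).
if __name__ == '__main__':
    class StubLeaderboard:
        def checkRank(self, name, server):
            return None, None
    heroku = Heroku(StubLeaderboard())
    print(heroku.getTag('europe', 'SomePlayer'))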
|
108764
|
from pyradioconfig.parts.bobcat.calculators.calc_aox import Calc_AoX_Bobcat
class calc_aox_viper(Calc_AoX_Bobcat):
pass
|
108774
|
from fastecdsa.curve import Curve
from fastecdsa.point import Point
from starkware.crypto.signature import (
ALPHA, BETA, CONSTANT_POINTS, EC_ORDER, FIELD_PRIME, N_ELEMENT_BITS_HASH, SHIFT_POINT)
curve = Curve(
'Curve0',
FIELD_PRIME,
ALPHA,
BETA,
EC_ORDER,
*SHIFT_POINT)
LOW_PART_BITS = 248
LOW_PART_MASK = 2**248 - 1
HASH_SHIFT_POINT = Point(*SHIFT_POINT, curve=curve)
P_0 = Point(*CONSTANT_POINTS[2], curve=curve)
P_1 = Point(*CONSTANT_POINTS[2 + LOW_PART_BITS], curve=curve)
P_2 = Point(*CONSTANT_POINTS[2 + N_ELEMENT_BITS_HASH], curve=curve)
P_3 = Point(*CONSTANT_POINTS[2 + N_ELEMENT_BITS_HASH + LOW_PART_BITS], curve=curve)
def process_single_element(element: bytes, p1, p2) -> Point:
assert len(element) == 32, 'Unexpected element length'
val = int.from_bytes(element, 'big', signed=False)
assert val < EC_ORDER, 'Element int value >= EC_ORDER'
high_nibble = val >> LOW_PART_BITS
low_part = val & LOW_PART_MASK
return low_part * p1 + high_nibble * p2
def pedersen_hash_func(x: bytes, y: bytes) -> bytes:
"""
Computes the Starkware version of the Pedersen hash of x and y.
The hash is defined by:
        shift_point + x_low * P_0 + x_high * P_1 + y_low * P_2 + y_high * P_3
where x_low is the 248 low bits of x, x_high is the 4 high bits of x and similarly for y.
shift_point, P_0, P_1, P_2, P_3 are constant points generated from the digits of pi.
"""
return (HASH_SHIFT_POINT + process_single_element(x, P_0, P_1) +
process_single_element(y, P_2, P_3)).x.to_bytes(32, 'big')
async def async_pedersen_hash_func(x: bytes, y: bytes) -> bytes:
"""
Async variant of pedersen_hash_func.
"""
return pedersen_hash_func(x, y)
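# A minimal usage sketch: hash two field elements given as 32-byte big-endian
# values (the inputs below are arbitrary illustrative values).
if __name__ == '__main__':
    x = (1).to_bytes(32, 'big')
    y = (2).to_bytes(32, 'big')
    print(pedersen_hash_func(x, y).hex())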
|
108780
|
import logging
import os
import random
import socket
from typing import Tuple
import magic
import yaml
import utils
from validators import port_validation, check_port_open
LOGGER_FILE = "./logs/server.log"
# Logging configuration
logging.basicConfig(
format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
handlers=[logging.FileHandler(LOGGER_FILE)],
level=logging.INFO,
)
logger = logging.getLogger(__name__)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
def read_config() -> dict:
"""Чтение настроек из файла yaml"""
with open("settings.yml", "r") as file:
return yaml.safe_load(file)
class BrowserRequest:
"""Экземпляр запроса браузера"""
def __init__(self, data: bytes):
lines = []
        # Strip whitespace from each line of the browser request
for d in data.decode("utf8", "replace").split("\n"):
line = d.strip()
if line:
lines.append(line)
self.method, self.path, self.http_version = lines.pop(0).split(" ")
        self.info = {k: v for k, v in (line.split(": ", 1) for line in lines)}
def __repr__(self) -> str:
return f"<BrowserRequest {self.method} {self.path} {self.http_version}>"
def __getattr__(self, name: str):
try:
return self.info["-".join([n.capitalize() for n in name.split("_")])]
        except KeyError:
raise AttributeError(name)
class LocaleSocket:
"""Класс для работы с сокетами"""
def __init__(self, host="", port=80, buffer_size=1024, max_queued_connections=5):
self._connection = None
self._socket = None
self.host = host
self.port = port
self.buffer_size = buffer_size
self.max_queued_connections = max_queued_connections
def __repr__(self) -> str:
status = "closed" if self._socket is None else "open"
return f"<{status} ServerSocket {self.host}:{self.port}>"
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def open(self):
        assert self._socket is None, "ServerSocket is already open"
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            self._socket.bind((self.host, self.port))
        except Exception:
            self.close()
            raise
def close(self):
        assert self._socket is not None, "This ServerSocket has already been closed"
if self._connection:
self._connection.close()
self._connection = None
self._socket.close()
self._socket = None
def listen(self) -> Tuple[BrowserRequest, str]:
        assert (self._socket is not None), "ServerSocket must be open to receive data"
self._socket.listen(self.max_queued_connections)
self._connection, address = self._socket.accept()
data = self._connection.recv(self.buffer_size)
return BrowserRequest(data), address[0]
def send(self, data: bytes):
        assert self._socket is not None, "ServerSocket must be open to send a response"
self._connection.send(data)
self._connection.close()
class WebServer:
"""Класс сервера"""
STATUSES = {
200: "Ok",
404: "File not found",
403: "Forbidden"
}
def __init__(self, config: dict, port: int = 80):
"""
Инициализирует сервер
port -- порт, на котором разворачивается
homedir -- домашняя директория
"""
self.socket = LocaleSocket(port=port, buffer_size=config["buffer_size"])
self.homedir = os.path.abspath(config["homedir"])
def start(self):
"""Запуск web-сервера"""
self.socket.open()
logger.info(f"Запустили web-сервер на порту {self.socket.host}:{self.socket.port}, директория {self.homedir}")
while True:
self.new_client_request()
def stop(self):
"""Приостановка работы web-сервера"""
self.socket.close()
def router(self, path: str) -> Tuple[bytes, int, str]:
"""Роутер для ассоциации между путями и файлами"""
allowed_extensions = ["js", "html", "css", "png", "jpg"]
router_dict = {
"/": "index.html",
"/index.html": "index.html",
"/index": "index.html",
"/test": "cat.meow",
"/image": "image.jpg"
}
        # If such a mapping actually exists
        if path in router_dict:
            # Name of the requested file
            file_name = router_dict[path]
            # If the file extension is allowed
            if file_name.split(".")[-1] in allowed_extensions:
path_str = os.path.join(self.homedir, file_name)
mime = magic.Magic(mime=True)
mime_str = mime.from_file(path_str)
with open(path_str, "rb") as f:
return f.read(), 200, mime_str
            # 403 error
else:
with open(os.path.join(self.homedir, "403.html"), "rb") as f:
return f.read(), 403, "text/html"
        # If there is no such mapping, return 404
else:
with open(os.path.join(self.homedir, "404.html"), "rb") as f:
return f.read(), 404, "text/html"
def new_client_request(self):
""""Обработка запроса клиента"""
cli_request, ip_addr = self.socket.listen()
path = cli_request.path
        # Get the routing result (body, status code, mime type) from the router
body, status_code, mime = self.router(path)
header = self.get_header(status_code, body, mime)
self.socket.send(header.encode() + body)
logger.info(
f"{utils.get_date()} -> {ip_addr}, {path} {status_code} - {cli_request.method} {cli_request.user_agent}")
def get_header(self, status_code: int, body: bytes, mime: str):
"""Получает заголовок для ответа сервера"""
return "\n".join(
[
f"HTTP/1.1 {status_code} {self.STATUSES[status_code]}",
f"Content-Type: {mime}",
f"Date: {utils.get_date()}",
f"Content-length: {len(body)}",
"Connection: close"
"Server: MyServer" "\n\n",
]
)
def main():
    # Read the server configuration
config = read_config()
default_port = config["default_port"]
    port_input = input("Enter a port number for the server -> ")
    # Check whether the requested port is already in use
port_flag = port_validation(port_input, check_open=True)
if not port_flag:
port_input = default_port
        # If the default port is already taken, search for a free one
if not check_port_open(default_port):
            logger.info(
                f"Default port {default_port} is already taken! Picking a random port.."
            )
stop_flag = False
current_port = None
while not stop_flag:
current_port = random.randint(49152, 65535)
logger.info(f"Сгенерировали рандомный порт {current_port}")
stop_flag = check_port_open(current_port)
port_input = current_port
logger.info(f"Выставили порт {port_input} по умолчанию")
web_server = WebServer(config=config, port=int(port_input))
web_server.start()
web_server.stop()
if __name__ == "__main__":
main()
|
108842
|
def poly(a, x):
    """Evaluate the polynomial a[0] + a[1]*x + ... + a[n]*x**n at x via Horner's method."""
    val = 0
    for ai in reversed(a):
        val *= x
        val += ai
    return val
def diff(a):
    """Return the coefficients of the derivative of the polynomial a."""
    return [a[i + 1] * (i + 1) for i in range(len(a) - 1)]
def divroot(a, x0):
    """Divide the polynomial a by (x - x0) in place via synthetic division,
    assuming x0 is a root; returns the quotient coefficients."""
    b, a[-1] = a[-1], 0
    for i in reversed(range(len(a) - 1)):
        a[i], b = a[i + 1] * x0 + b, a[i]
    a.pop()
    return a
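# A minimal usage sketch of the helpers above (coefficient order: a[0] is the
# constant term, a[-1] the leading coefficient).
if __name__ == "__main__":
    p = [-1, 0, 1]              # x**2 - 1
    print(poly(p, 3))           # 8
    print(diff(p))              # [0, 2], i.e. the derivative 2x
    print(divroot(p[:], 1.0))   # [1.0, 1.0], i.e. the quotient x + 1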
|
108875
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score  # sklearn.metrics.classification was private and has been removed
def prediction_score(train_X, train_y, test_X, test_y, metric, model):
# if the train labels are always the same
values_train = set(train_y)
if len(values_train) == 1:
# predict always that value
only_value_train = list(values_train)[0]
test_pred = np.ones_like(test_y) * only_value_train
# if the train labels have different values
else:
# create the model
if model == "random_forest_classifier":
m = RandomForestClassifier(n_estimators=10)
elif model == "logistic_regression":
m = LogisticRegression()
else:
raise Exception("Invalid model name.")
# fit and predict
m.fit(train_X, train_y)
test_pred = m.predict(test_X)
# calculate the score
if metric == "f1":
return f1_score(test_y, test_pred)
elif metric == "accuracy":
return accuracy_score(test_y, test_pred)
else:
raise Exception("Invalid metric name.")
|
108939
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplateSystemPackagedVav
log = logging.getLogger(__name__)
class TestHvactemplateSystemPackagedVav(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_hvactemplatesystempackagedvav(self):
pyidf.validation_level = ValidationLevel.error
obj = HvactemplateSystemPackagedVav()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_system_availability_schedule_name = "object-list|System Availability Schedule Name"
obj.system_availability_schedule_name = var_system_availability_schedule_name
# real
var_supply_fan_maximum_flow_rate = 0.0001
obj.supply_fan_maximum_flow_rate = var_supply_fan_maximum_flow_rate
# real
var_supply_fan_minimum_flow_rate = 0.0
obj.supply_fan_minimum_flow_rate = var_supply_fan_minimum_flow_rate
# alpha
var_supply_fan_placement = "DrawThrough"
obj.supply_fan_placement = var_supply_fan_placement
# real
var_supply_fan_total_efficiency = 0.50005
obj.supply_fan_total_efficiency = var_supply_fan_total_efficiency
# real
var_supply_fan_delta_pressure = 0.0
obj.supply_fan_delta_pressure = var_supply_fan_delta_pressure
# real
var_supply_fan_motor_efficiency = 0.50005
obj.supply_fan_motor_efficiency = var_supply_fan_motor_efficiency
# real
var_supply_fan_motor_in_air_stream_fraction = 0.5
obj.supply_fan_motor_in_air_stream_fraction = var_supply_fan_motor_in_air_stream_fraction
# alpha
var_cooling_coil_type = "TwoSpeedDX"
obj.cooling_coil_type = var_cooling_coil_type
# object-list
var_cooling_coil_availability_schedule_name = "object-list|Cooling Coil Availability Schedule Name"
obj.cooling_coil_availability_schedule_name = var_cooling_coil_availability_schedule_name
# object-list
var_cooling_coil_setpoint_schedule_name = "object-list|Cooling Coil Setpoint Schedule Name"
obj.cooling_coil_setpoint_schedule_name = var_cooling_coil_setpoint_schedule_name
# real
var_cooling_coil_design_setpoint = 13.13
obj.cooling_coil_design_setpoint = var_cooling_coil_design_setpoint
# real
var_cooling_coil_gross_rated_total_capacity = 14.14
obj.cooling_coil_gross_rated_total_capacity = var_cooling_coil_gross_rated_total_capacity
# real
var_cooling_coil_gross_rated_sensible_heat_ratio = 0.75
obj.cooling_coil_gross_rated_sensible_heat_ratio = var_cooling_coil_gross_rated_sensible_heat_ratio
# real
var_cooling_coil_gross_rated_cop = 0.0001
obj.cooling_coil_gross_rated_cop = var_cooling_coil_gross_rated_cop
# alpha
var_heating_coil_type = "HotWater"
obj.heating_coil_type = var_heating_coil_type
# object-list
var_heating_coil_availability_schedule_name = "object-list|Heating Coil Availability Schedule Name"
obj.heating_coil_availability_schedule_name = var_heating_coil_availability_schedule_name
# object-list
var_heating_coil_setpoint_schedule_name = "object-list|Heating Coil Setpoint Schedule Name"
obj.heating_coil_setpoint_schedule_name = var_heating_coil_setpoint_schedule_name
# real
var_heating_coil_design_setpoint = 20.2
obj.heating_coil_design_setpoint = var_heating_coil_design_setpoint
# real
var_heating_coil_capacity = 21.21
obj.heating_coil_capacity = var_heating_coil_capacity
# real
var_gas_heating_coil_efficiency = 0.5
obj.gas_heating_coil_efficiency = var_gas_heating_coil_efficiency
# real
var_gas_heating_coil_parasitic_electric_load = 0.0
obj.gas_heating_coil_parasitic_electric_load = var_gas_heating_coil_parasitic_electric_load
# real
var_maximum_outdoor_air_flow_rate = 0.0
obj.maximum_outdoor_air_flow_rate = var_maximum_outdoor_air_flow_rate
# real
var_minimum_outdoor_air_flow_rate = 0.0
obj.minimum_outdoor_air_flow_rate = var_minimum_outdoor_air_flow_rate
# alpha
var_minimum_outdoor_air_control_type = "FixedMinimum"
obj.minimum_outdoor_air_control_type = var_minimum_outdoor_air_control_type
# object-list
var_minimum_outdoor_air_schedule_name = "object-list|Minimum Outdoor Air Schedule Name"
obj.minimum_outdoor_air_schedule_name = var_minimum_outdoor_air_schedule_name
# alpha
var_economizer_type = "FixedDryBulb"
obj.economizer_type = var_economizer_type
# alpha
var_economizer_lockout = "NoLockout"
obj.economizer_lockout = var_economizer_lockout
# real
var_economizer_maximum_limit_drybulb_temperature = 30.3
obj.economizer_maximum_limit_drybulb_temperature = var_economizer_maximum_limit_drybulb_temperature
# real
var_economizer_maximum_limit_enthalpy = 31.31
obj.economizer_maximum_limit_enthalpy = var_economizer_maximum_limit_enthalpy
# real
var_economizer_maximum_limit_dewpoint_temperature = 32.32
obj.economizer_maximum_limit_dewpoint_temperature = var_economizer_maximum_limit_dewpoint_temperature
# real
var_economizer_minimum_limit_drybulb_temperature = 33.33
obj.economizer_minimum_limit_drybulb_temperature = var_economizer_minimum_limit_drybulb_temperature
# object-list
var_supply_plenum_name = "object-list|Supply Plenum Name"
obj.supply_plenum_name = var_supply_plenum_name
# object-list
var_return_plenum_name = "object-list|Return Plenum Name"
obj.return_plenum_name = var_return_plenum_name
# alpha
var_supply_fan_partload_power_coefficients = "InletVaneDampers"
obj.supply_fan_partload_power_coefficients = var_supply_fan_partload_power_coefficients
# alpha
var_night_cycle_control = "StayOff"
obj.night_cycle_control = var_night_cycle_control
# object-list
var_night_cycle_control_zone_name = "object-list|Night Cycle Control Zone Name"
obj.night_cycle_control_zone_name = var_night_cycle_control_zone_name
# alpha
var_heat_recovery_type = "None"
obj.heat_recovery_type = var_heat_recovery_type
# real
var_sensible_heat_recovery_effectiveness = 0.5
obj.sensible_heat_recovery_effectiveness = var_sensible_heat_recovery_effectiveness
# real
var_latent_heat_recovery_effectiveness = 0.5
obj.latent_heat_recovery_effectiveness = var_latent_heat_recovery_effectiveness
# alpha
var_cooling_coil_setpoint_reset_type = "None"
obj.cooling_coil_setpoint_reset_type = var_cooling_coil_setpoint_reset_type
# alpha
var_heating_coil_setpoint_reset_type = "None"
obj.heating_coil_setpoint_reset_type = var_heating_coil_setpoint_reset_type
# alpha
var_dehumidification_control_type = "None"
obj.dehumidification_control_type = var_dehumidification_control_type
# object-list
var_dehumidification_control_zone_name = "object-list|Dehumidification Control Zone Name"
obj.dehumidification_control_zone_name = var_dehumidification_control_zone_name
# real
var_dehumidification_setpoint = 50.0
obj.dehumidification_setpoint = var_dehumidification_setpoint
# alpha
var_humidifier_type = "None"
obj.humidifier_type = var_humidifier_type
# object-list
var_humidifier_availability_schedule_name = "object-list|Humidifier Availability Schedule Name"
obj.humidifier_availability_schedule_name = var_humidifier_availability_schedule_name
# real
var_humidifier_rated_capacity = 0.0
obj.humidifier_rated_capacity = var_humidifier_rated_capacity
# real
var_humidifier_rated_electric_power = 0.0
obj.humidifier_rated_electric_power = var_humidifier_rated_electric_power
# object-list
var_humidifier_control_zone_name = "object-list|Humidifier Control Zone Name"
obj.humidifier_control_zone_name = var_humidifier_control_zone_name
# real
var_humidifier_setpoint = 50.0
obj.humidifier_setpoint = var_humidifier_setpoint
# alpha
var_sizing_option = "Coincident"
obj.sizing_option = var_sizing_option
# alpha
var_return_fan = "Yes"
obj.return_fan = var_return_fan
# real
var_return_fan_total_efficiency = 0.50005
obj.return_fan_total_efficiency = var_return_fan_total_efficiency
# real
var_return_fan_delta_pressure = 0.0
obj.return_fan_delta_pressure = var_return_fan_delta_pressure
# real
var_return_fan_motor_efficiency = 0.50005
obj.return_fan_motor_efficiency = var_return_fan_motor_efficiency
# real
var_return_fan_motor_in_air_stream_fraction = 0.5
obj.return_fan_motor_in_air_stream_fraction = var_return_fan_motor_in_air_stream_fraction
# alpha
var_return_fan_partload_power_coefficients = "InletVaneDampers"
obj.return_fan_partload_power_coefficients = var_return_fan_partload_power_coefficients
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].name, var_name)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].system_availability_schedule_name, var_system_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_maximum_flow_rate, var_supply_fan_maximum_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_minimum_flow_rate, var_supply_fan_minimum_flow_rate)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_placement, var_supply_fan_placement)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_total_efficiency, var_supply_fan_total_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_delta_pressure, var_supply_fan_delta_pressure)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_motor_efficiency, var_supply_fan_motor_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_motor_in_air_stream_fraction, var_supply_fan_motor_in_air_stream_fraction)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_type, var_cooling_coil_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_availability_schedule_name, var_cooling_coil_availability_schedule_name)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_setpoint_schedule_name, var_cooling_coil_setpoint_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_design_setpoint, var_cooling_coil_design_setpoint)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_gross_rated_total_capacity, var_cooling_coil_gross_rated_total_capacity)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_gross_rated_sensible_heat_ratio, var_cooling_coil_gross_rated_sensible_heat_ratio)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_gross_rated_cop, var_cooling_coil_gross_rated_cop)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].heating_coil_type, var_heating_coil_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].heating_coil_availability_schedule_name, var_heating_coil_availability_schedule_name)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].heating_coil_setpoint_schedule_name, var_heating_coil_setpoint_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].heating_coil_design_setpoint, var_heating_coil_design_setpoint)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].heating_coil_capacity, var_heating_coil_capacity)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].gas_heating_coil_efficiency, var_gas_heating_coil_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].gas_heating_coil_parasitic_electric_load, var_gas_heating_coil_parasitic_electric_load)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].maximum_outdoor_air_flow_rate, var_maximum_outdoor_air_flow_rate)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].minimum_outdoor_air_flow_rate, var_minimum_outdoor_air_flow_rate)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].minimum_outdoor_air_control_type, var_minimum_outdoor_air_control_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].minimum_outdoor_air_schedule_name, var_minimum_outdoor_air_schedule_name)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].economizer_type, var_economizer_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].economizer_lockout, var_economizer_lockout)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].economizer_maximum_limit_drybulb_temperature, var_economizer_maximum_limit_drybulb_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].economizer_maximum_limit_enthalpy, var_economizer_maximum_limit_enthalpy)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].economizer_maximum_limit_dewpoint_temperature, var_economizer_maximum_limit_dewpoint_temperature)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].economizer_minimum_limit_drybulb_temperature, var_economizer_minimum_limit_drybulb_temperature)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].supply_plenum_name, var_supply_plenum_name)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].return_plenum_name, var_return_plenum_name)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].supply_fan_partload_power_coefficients, var_supply_fan_partload_power_coefficients)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].night_cycle_control, var_night_cycle_control)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].night_cycle_control_zone_name, var_night_cycle_control_zone_name)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].heat_recovery_type, var_heat_recovery_type)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].sensible_heat_recovery_effectiveness, var_sensible_heat_recovery_effectiveness)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].latent_heat_recovery_effectiveness, var_latent_heat_recovery_effectiveness)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].cooling_coil_setpoint_reset_type, var_cooling_coil_setpoint_reset_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].heating_coil_setpoint_reset_type, var_heating_coil_setpoint_reset_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].dehumidification_control_type, var_dehumidification_control_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].dehumidification_control_zone_name, var_dehumidification_control_zone_name)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].dehumidification_setpoint, var_dehumidification_setpoint)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].humidifier_type, var_humidifier_type)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].humidifier_availability_schedule_name, var_humidifier_availability_schedule_name)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].humidifier_rated_capacity, var_humidifier_rated_capacity)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].humidifier_rated_electric_power, var_humidifier_rated_electric_power)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].humidifier_control_zone_name, var_humidifier_control_zone_name)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].humidifier_setpoint, var_humidifier_setpoint)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].sizing_option, var_sizing_option)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].return_fan, var_return_fan)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].return_fan_total_efficiency, var_return_fan_total_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].return_fan_delta_pressure, var_return_fan_delta_pressure)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].return_fan_motor_efficiency, var_return_fan_motor_efficiency)
self.assertAlmostEqual(idf2.hvactemplatesystempackagedvavs[0].return_fan_motor_in_air_stream_fraction, var_return_fan_motor_in_air_stream_fraction)
self.assertEqual(idf2.hvactemplatesystempackagedvavs[0].return_fan_partload_power_coefficients, var_return_fan_partload_power_coefficients)
|
108986
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .Model import Model
from .TransE import TransE
from .TransD import TransD
from .TransR import TransR
from .TransH import TransH
from .DistMult import DistMult
from .ComplEx import ComplEx
from .RESCAL import RESCAL
from .Analogy import Analogy
from .SimplE import SimplE
from .RotatE import RotatE
__all__ = [
'Model',
'TransE',
'TransD',
'TransR',
'TransH',
'DistMult',
'ComplEx',
'RESCAL',
'Analogy',
'SimplE',
'RotatE'
]
|
108992
|
class NCBaseError(Exception):
def __init__(self, message) -> None:
super(NCBaseError, self).__init__(message)
class DataTypeMismatchError(Exception):
def __init__(self, provided_data, place:str=None, required_data_type:str=None) -> None:
message = f"{provided_data} datatype isn't supported for {place}.\nRequired datatype is: {required_data_type}, got: {str(type(provided_data).__name__)}"
super(DataTypeMismatchError, self).__init__(message)
class InsufficientArgumentsError(Exception):
def __init__(self, message):
message = f"Insufficient arguments.\n{message}"
super(InsufficientArgumentsError, self).__init__(message)
class InvalidArgumentsError(Exception):
def __init__(self, message:str) -> None:
super(InvalidArgumentsError, self).__init__(message)
class DirectoryAlreadyExistsError(Exception):
def __init__(self,project_dir):
message = f"{project_dir} already exists at the location."
super(DirectoryAlreadyExistsError, self).__init__(message)
class ImportNameNotFoundError(Exception):
def __init__(self, location) -> None:
message = f"import_name notm provided for the sister app at {location}"
super(ImportNameNotFoundError, self).__init__(message)
class ConfigurationError(Exception):
def __init__(self, message) -> None:
super(ConfigurationError, self).__init__(message)
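# A minimal usage sketch (the raise below is hypothetical, purely to show the
# message formatting).
if __name__ == "__main__":
    try:
        raise DataTypeMismatchError([1, 2], place="a config key", required_data_type="str")
    except DataTypeMismatchError as e:
        print(e)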
|
108994
|
from accessify.access import (
accessify,
private,
protected,
)
from accessify.interfaces import (
implements,
throws,
)
|
109029
|
import json
import os
import subprocess
import base64
import sys
from tree.node import Node
from util.config_parser import parse_configs
from util.file_iterator import process_directory
from util.variable_resolver import resolve_variables
from util.should_run import should_run
from values.array import handle_array_arg
from values.fixed import handle_fixed_arg
from values.none import handle_none_arg
from values.steps import handle_step_arg
value_handlers = {
"steps": handle_step_arg,
"array": handle_array_arg,
"none": handle_none_arg,
"fixed": handle_fixed_arg
}
config_files = []
process_directory(os.path.abspath("./tests"), config_files)
configs = parse_configs(config_files)
jar_root_dir = configs["base"]['jarRootDir']
t2_bin = configs["base"]['t2Bin']
jdk_dir = configs["base"]["jdk"]
partial_run = len(sys.argv) > 1
tests_to_run = []
if partial_run:
tests_to_run = str(sys.argv[1]).split(",")
for test in configs['tests']:
test_id = test['id']
if partial_run and not should_run(test_id, configs['tests_map'], configs['parents_map'],
tests_to_run):
print("skipping.." + test_id)
continue
jar_dir = jar_root_dir
if test['directory']['relativeToRoot']:
jar_dir += test['directory']['path']
else:
jar_dir = test['directory']['path']
jar = os.path.join(jar_dir, test['jar'])
class_name = test['className']
meta = {
"id": test_id,
"resultsFile": test['resultsFile'],
"args": []
} # benchmark metadata
root_node = Node()
for arg in test['args']:
if "omitInCSV" not in arg:
meta["args"].append({
"arg": arg['id'], # arg value can be read in java Config with this id
"column": arg['name']
})
value_type = arg['values']['type']
if value_type in value_handlers:
value_handlers[value_type](arg, root_node)
root_node.finalize_iteration()
leaf_nodes = []
root_node.collect_leaf_nodes(leaf_nodes)
existing_env = os.environ.copy()
if jdk_dir:
existing_env["JAVA_HOME"] = jdk_dir
command = ""
for leaf_node in leaf_nodes:
command = leaf_node.get_code("")
command = resolve_variables(leaf_node, command)
args = [t2_bin, "submit", "standalone", "jar", jar, class_name]
args.extend(command.strip().split(" "))
args.extend(["-bmeta", base64.b64encode(json.dumps(meta).encode("utf-8"))])
print("\nRunning twister2 job with following args...")
print(args)
# subprocess.run(args, env=existing_env)
p = subprocess.Popen(args, stdout=subprocess.PIPE, bufsize=1, env=existing_env)
for line in iter(p.stdout.readline, b''):
print(line)
p.stdout.close()
p.wait()
|
109041
|
import tkinter as tk
from ttkbootstrap import Style
from random import choice
root = tk.Tk()
root.minsize(500, 500)
style = Style('superhero')
def new_theme():
theme = choice(style.theme_names())
print(theme)
style.theme_use(theme)
btn = tk.Button(root, text='Primary')
btn.configure(command=new_theme)
btn.pack(padx=10, pady=10, fill=tk.BOTH, expand=tk.YES)
label = tk.Label(text="Hello world!")
label.pack(padx=10, pady=10)
text = tk.Text()
text.pack(padx=10, pady=10)
text.insert(tk.END, 'This is a demo of themes applied to regular tk widgets.')
frame = tk.Frame()
frame.pack(padx=10, pady=10, fill=tk.X)
cb1 = tk.Checkbutton(frame, text="Check 1")
cb1.pack(padx=10, pady=10, side=tk.LEFT)
cb1.invoke()
cb2 = tk.Checkbutton(frame, text="Check 2")
cb2.pack(padx=10, pady=10, side=tk.LEFT)
rb_var = tk.Variable(value=1)
rb1 = tk.Radiobutton(frame, text='Radio 1', value=1, variable=rb_var)
rb1.pack(padx=10, pady=10, side=tk.LEFT)
rb2 = tk.Radiobutton(frame, text='Radio 2', value=2, variable=rb_var)
rb2.pack(padx=10, pady=10, side=tk.LEFT)
frame2 = tk.LabelFrame(text="Items")
frame2.pack(padx=10, pady=10, fill=tk.X)
entry = tk.Entry(frame2)
entry.pack(padx=10, pady=10, side=tk.LEFT)
scale = tk.Scale(frame2, orient=tk.HORIZONTAL)
scale.set(25)
scale.pack(padx=10, pady=10, side=tk.LEFT)
sb = tk.Spinbox(frame2)
sb.pack(padx=10, pady=10, side=tk.LEFT)
lb = tk.Listbox(height=3)
lb.insert(tk.END, 'one', 'two', 'three')
lb.pack(padx=10, pady=10)
mb = tk.Menubutton(frame2, text="Hello world")
menu = tk.Menu(mb)
menu.add_checkbutton(label="Option 1")
menu.add_checkbutton(label="Option 2")
mb['menu'] = menu
mb.pack(padx=10, pady=10)
root.mainloop()
|
109054
|
from config.redfish1_0_config import config
from config.auth import *
from config.settings import *
from logger import Log
from json import loads, dumps
import pexpect
import pxssh
import subprocess
LOG = Log(__name__)
class Auth(object):
"""
Class to abstract python authentication functionality
"""
@staticmethod
def get_auth_token():
""" call /SessionService/Sessions to get auth_token """
resource_path = '/redfish/v1/SessionService/Sessions'
method = 'POST'
body_params = {
'UserName': 'admin',
'Password': '<PASSWORD>'
}
config.api_client.host = config.host_authed
config.api_client.call_api(resource_path, method, body=body_params)
return config.api_client.last_response.getheader('X-Auth-Token')
    @staticmethod
    def enable():
        """ update config to enable auth """
        if config.auth_enabled:
            LOG.info('auth already enabled.')
            return
        config.api_client.default_headers['X-Auth-Token'] = Auth.get_auth_token()
        config.api_client.host = config.host_authed + config.api_root
        config.auth_enabled = True
        LOG.info('Enable auth successfully.')
    @staticmethod
    def disable():
        """ update config to disable auth """
        if not config.auth_enabled:
            LOG.info('auth already disabled.')
            return
        del config.api_client.default_headers['X-Auth-Token']
        config.api_client.host = config.host + config.api_root
        config.auth_enabled = False
        LOG.info('Disable auth successfully.')
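# A minimal usage sketch (assumes config and the API client have been
# initialized elsewhere).
if __name__ == '__main__':
    Auth.enable()
    # ... make authenticated calls through config.api_client here ...
    Auth.disable()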
|
109091
|
from yacv.grammar import Production
from yacv.constants import YACV_EPSILON
from pprint import pformat
class AbstractSyntaxTree(object):
    def __init__(self, *args):
        # root/desc default to an empty tree; prod_id and node_id are always
        # initialized so later passes can set them safely
        self.root = None
        self.desc = []
        self.prod_id = None
        self.node_id = None
        if len(args) == 1:
            if isinstance(args[0], Production):
                self.root = args[0].lhs
                self.desc = [AbstractSyntaxTree(symbol) for symbol in args[0].rhs]
            elif isinstance(args[0], str):
                self.root = args[0]
                self.desc = []
def __str__(self):
return '{}->{}'.format(self.root, pformat(self.desc))
def __repr__(self):
return str(self)
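# A minimal usage sketch using the plain-string constructor (the tree shape
# below is illustrative, not from the original source).
if __name__ == '__main__':
    tree = AbstractSyntaxTree('E')
    tree.desc = [AbstractSyntaxTree('E'), AbstractSyntaxTree('+'), AbstractSyntaxTree('T')]
    print(tree)  # prints something like: E->[E->[], +->[], T->[]]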
|
109096
|
from ext import parent
class A(parent):
def fn(self):
self.parent_fn()
a = A()
a.fn()
|
109120
|
import pictureobject_functions as pic_functions
class Picture_object():
"""Gives an interface to access the colors of a picture."""
def __init__(self, filepath, conf):
self.conf = conf
self.filepath = filepath
self.is_changed = False
self.filename = pic_functions.get_filename_from_path(filepath)
self.size = pic_functions.get_pixelcount(filepath)
self.unmutable_list = pic_functions.get_list_from_file(filepath)
self.reset_list()
def reset_list(self):
# it takes the unmutable_list and calls
# the necessary functions depending on the configuration
settings = self.conf.get_picture_import_settings()
if settings["reduce_colors"] is False:
colors = pic_functions.all_colors_to_palette(self.unmutable_list)
elif settings["maxcolors"][0] is True:
maximal_colors = settings["maxcolors"][1]
colors = pic_functions.maxcolors(
self.unmutable_list, maximal_colors)
elif settings["threshold"][0] is True:
percent = settings["threshold"][1]
colors = pic_functions.percent_threshold(
self.unmutable_list, percent, self.size)
elif settings["quantize"][0] is True:
maximal_colors = settings["quantize"][1]
colors = pic_functions.get_quantize(self.filepath, maximal_colors)
# add more checks if you want to add new options
self.mutable_list = colors
def get_colors(self):
# a getter for other classes
return self.mutable_list
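# A minimal usage sketch with a stub configuration object (the stub and the
# file name below are hypothetical, not from the original source).
if __name__ == '__main__':
    class StubConf:
        def get_picture_import_settings(self):
            return {"reduce_colors": False, "maxcolors": (False, 0),
                    "threshold": (False, 0), "quantize": (False, 0)}
    picture = Picture_object("example.png", StubConf())
    print(picture.get_colors())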
|
109135
|
from . import basis
from . import prior
from . import lik
from . import inf
from .core import model
from .predictor import predictor
__all__ = ["basis", "prior", "lik", "inf", "model", "predictor"]
|
109147
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import (
CONF_COLOR_TEMPERATURE,
CONF_GAIN,
CONF_ID,
CONF_ILLUMINANCE,
CONF_GLASS_ATTENUATION_FACTOR,
CONF_INTEGRATION_TIME,
DEVICE_CLASS_ILLUMINANCE,
ICON_LIGHTBULB,
STATE_CLASS_MEASUREMENT,
UNIT_PERCENT,
ICON_THERMOMETER,
UNIT_KELVIN,
UNIT_LUX,
)
DEPENDENCIES = ["i2c"]
CONF_RED_CHANNEL = "red_channel"
CONF_GREEN_CHANNEL = "green_channel"
CONF_BLUE_CHANNEL = "blue_channel"
CONF_CLEAR_CHANNEL = "clear_channel"
tcs34725_ns = cg.esphome_ns.namespace("tcs34725")
TCS34725Component = tcs34725_ns.class_(
"TCS34725Component", cg.PollingComponent, i2c.I2CDevice
)
TCS34725IntegrationTime = tcs34725_ns.enum("TCS34725IntegrationTime")
TCS34725_INTEGRATION_TIMES = {
"auto": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_AUTO,
"2.4ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_2_4MS,
"24ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_24MS,
"50ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_50MS,
"101ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_101MS,
"120ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_120MS,
"154ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_154MS,
"180ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_180MS,
"199ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_199MS,
"240ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_240MS,
"300ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_300MS,
"360ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_360MS,
"401ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_401MS,
"420ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_420MS,
"480ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_480MS,
"499ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_499MS,
"540ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_540MS,
"600ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_600MS,
"614ms": TCS34725IntegrationTime.TCS34725_INTEGRATION_TIME_614MS,
}
TCS34725Gain = tcs34725_ns.enum("TCS34725Gain")
TCS34725_GAINS = {
"1X": TCS34725Gain.TCS34725_GAIN_1X,
"4X": TCS34725Gain.TCS34725_GAIN_4X,
"16X": TCS34725Gain.TCS34725_GAIN_16X,
"60X": TCS34725Gain.TCS34725_GAIN_60X,
}
color_channel_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_PERCENT,
icon=ICON_LIGHTBULB,
accuracy_decimals=1,
state_class=STATE_CLASS_MEASUREMENT,
)
color_temperature_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_KELVIN,
icon=ICON_THERMOMETER,
accuracy_decimals=1,
state_class=STATE_CLASS_MEASUREMENT,
)
illuminance_schema = sensor.sensor_schema(
unit_of_measurement=UNIT_LUX,
accuracy_decimals=1,
device_class=DEVICE_CLASS_ILLUMINANCE,
state_class=STATE_CLASS_MEASUREMENT,
)
CONFIG_SCHEMA = (
cv.Schema(
{
cv.GenerateID(): cv.declare_id(TCS34725Component),
cv.Optional(CONF_RED_CHANNEL): color_channel_schema,
cv.Optional(CONF_GREEN_CHANNEL): color_channel_schema,
cv.Optional(CONF_BLUE_CHANNEL): color_channel_schema,
cv.Optional(CONF_CLEAR_CHANNEL): color_channel_schema,
cv.Optional(CONF_ILLUMINANCE): illuminance_schema,
cv.Optional(CONF_COLOR_TEMPERATURE): color_temperature_schema,
cv.Optional(CONF_INTEGRATION_TIME, default="auto"): cv.enum(
TCS34725_INTEGRATION_TIMES, lower=True
),
cv.Optional(CONF_GAIN, default="1X"): cv.enum(TCS34725_GAINS, upper=True),
cv.Optional(CONF_GLASS_ATTENUATION_FACTOR, default=1.0): cv.float_range(
min=1.0
),
}
)
.extend(cv.polling_component_schema("60s"))
.extend(i2c.i2c_device_schema(0x29))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await i2c.register_i2c_device(var, config)
cg.add(var.set_integration_time(config[CONF_INTEGRATION_TIME]))
cg.add(var.set_gain(config[CONF_GAIN]))
cg.add(var.set_glass_attenuation_factor(config[CONF_GLASS_ATTENUATION_FACTOR]))
if CONF_RED_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_RED_CHANNEL])
cg.add(var.set_red_sensor(sens))
if CONF_GREEN_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_GREEN_CHANNEL])
cg.add(var.set_green_sensor(sens))
if CONF_BLUE_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_BLUE_CHANNEL])
cg.add(var.set_blue_sensor(sens))
if CONF_CLEAR_CHANNEL in config:
sens = await sensor.new_sensor(config[CONF_CLEAR_CHANNEL])
cg.add(var.set_clear_sensor(sens))
if CONF_ILLUMINANCE in config:
sens = await sensor.new_sensor(config[CONF_ILLUMINANCE])
cg.add(var.set_illuminance_sensor(sens))
if CONF_COLOR_TEMPERATURE in config:
sens = await sensor.new_sensor(config[CONF_COLOR_TEMPERATURE])
cg.add(var.set_color_temperature_sensor(sens))
|
109204
|
from functools import wraps
from typing import Any, Optional
from pedantic.type_checking_logic.check_docstring import _check_docstring
from pedantic.constants import ReturnType, F
from pedantic.models.decorated_function import DecoratedFunction
from pedantic.models.function_call import FunctionCall
from pedantic.env_var_logic import is_enabled
def pedantic(func: Optional[F] = None, require_docstring: bool = False) -> F:
"""
A PedanticException is raised if one of the following happened:
- The decorated function is called with positional arguments.
- The function has no type annotation for their return type or one or more parameters do not have type
annotations.
- A type annotation is incorrect.
- A type annotation misses type arguments, e.g. typing.List instead of typing.List[int].
- The documented arguments do not match the argument list or their type annotations.
Example:
>>> @pedantic
... def my_function(a: int, b: float, c: str) -> bool:
... return float(a) == b and str(b) == c
>>> my_function(a=42.0, b=14.0, c='hi')
Traceback (most recent call last):
...
pedantic.exceptions.PedanticTypeCheckException: In function my_function:
Type hint is incorrect: Argument a=42.0 of type <class 'float'> does not match expected type <class 'int'>.
>>> my_function(a=42, b=None, c='hi')
Traceback (most recent call last):
...
pedantic.exceptions.PedanticTypeCheckException: In function my_function:
Type hint is incorrect: Argument b=None of type <class 'NoneType'> does not match expected type <class 'float'>.
>>> my_function(a=42, b=42, c='hi')
Traceback (most recent call last):
...
pedantic.exceptions.PedanticTypeCheckException: In function my_function:
Type hint is incorrect: Argument b=42 of type <class 'int'> does not match expected type <class 'float'>.
>>> my_function(5, 4.0, 'hi')
Traceback (most recent call last):
...
pedantic.exceptions.PedanticCallWithArgsException: In function my_function:
Use kwargs when you call function my_function. Args: (5, 4.0, 'hi')
"""
def decorator(f: F) -> F:
if not is_enabled():
return f
decorated_func = DecoratedFunction(func=f)
if require_docstring or len(decorated_func.docstring.params) > 0:
_check_docstring(decorated_func=decorated_func)
@wraps(f)
def wrapper(*args: Any, **kwargs: Any) -> ReturnType:
call = FunctionCall(func=decorated_func, args=args, kwargs=kwargs)
call.assert_uses_kwargs()
return call.check_types()
        @wraps(f)
        async def async_wrapper(*args: Any, **kwargs: Any) -> ReturnType:
call = FunctionCall(func=decorated_func, args=args, kwargs=kwargs)
call.assert_uses_kwargs()
return await call.async_check_types()
if decorated_func.is_coroutine:
return async_wrapper
else:
return wrapper
return decorator if func is None else decorator(f=func)
def pedantic_require_docstring(func: Optional[F] = None) -> F:
"""Shortcut for @pedantic(require_docstring=True) """
return pedantic(func=func, require_docstring=True)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False, optionflags=doctest.ELLIPSIS)
|
109214
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
from config import params, data, w2v
class RNN(nn.Module):
def __init__(self, params, data):
super(RNN, self).__init__()
self.params = params
self.data = data
self.BATCH_SIZE = params["BATCH_SIZE"]
self.SELECTION_SIZE = params["SELECTION_SIZE"]
self.MAX_SENT_LEN = params["MAX_SENT_LEN"]
self.WORD_DIM = params["WORD_DIM"]
self.VOCAB_SIZE = params["VOCAB_SIZE"]
self.CLASS_SIZE = params["CLASS_SIZE"]
self.FILTERS = params["FILTERS"]
self.FILTER_NUM = params["FILTER_NUM"]
self.DROPOUT_EMBED_PROB = params["DROPOUT_EMBED"]
self.DROPOUT_MODEL_PROB = params["DROPOUT_MODEL"]
self.EMBEDDING = params["EMBEDDING"]
self.input_size = self.WORD_DIM
self.hidden_size = params["HIDDEN_SIZE"]
self.hidden_layers = params["HIDDEN_LAYERS"]
self.output_size = params["CLASS_SIZE"]
self.NUM_EMBEDDINGS = self.VOCAB_SIZE + 2
assert (len(self.FILTERS) == len(self.FILTER_NUM))
if self.EMBEDDING != "random":
self.wv_matrix = w2v["w2v"]
self.init_model()
def init_model(self):
self.embed = nn.Embedding(self.NUM_EMBEDDINGS, self.WORD_DIM, padding_idx=self.VOCAB_SIZE + 1)
if self.EMBEDDING != "random":
self.embed.weight.data.copy_(torch.from_numpy(self.wv_matrix))
self.bigru = nn.GRU(self.WORD_DIM, self.hidden_size, dropout=self.DROPOUT_MODEL_PROB, num_layers=self.hidden_layers, bidirectional=True)
self.hidden2label = nn.Linear(self.hidden_size * 2, self.CLASS_SIZE)
self.dropout = nn.Dropout(self.DROPOUT_EMBED_PROB)
if self.params["CUDA"]:
self.cuda()
def forward(self, input):
if len(input.size()) == 1:
input = input.unsqueeze(0)
hidden = self.init_hidden(self.hidden_layers, len(input))
# print(hidden)
input = input.transpose(0, 1)
embed = self.embed(input)
# print(embed)
        embed = self.dropout(embed)  # note: applying dropout here was observed to reduce accuracy
input = embed.view(len(input), embed.size(1), -1)
gru_out, hidden = self.bigru(input, hidden)
# gru_out = (59 x 25 x 2400)
gru_out = gru_out.permute(1, 2, 0)
# gru_out = (25 x 2400 x 59)
gru_out = F.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
# gru_out = (25 x 2400)
gru_out = F.relu(gru_out)
y = self.hidden2label(gru_out)
return y
def init_hidden(self, num_layers, batch_size):
hidden = Variable(torch.zeros(num_layers * 2, batch_size, self.hidden_size))
if self.params["CUDA"]:
hidden = hidden.cuda()
return hidden
"""
load word2vec pre trained vectors
"""
def load_word2vec(self):
print("loading word2vec...")
word_vectors = KeyedVectors.load_word2vec_format(
"GoogleNews-vectors-negative300.bin", binary=True)
wv_matrix = []
for word in self.data["vocab"]:
if word in word_vectors.vocab:
wv_matrix.append(word_vectors.word_vec(word))
else:
wv_matrix.append(
np.random.uniform(-0.01, 0.01, 300).astype("float32"))
# one for UNK and one for zero padding
wv_matrix.append(np.random.uniform(-0.01, 0.01, 300).astype("float32"))
wv_matrix.append(np.zeros(300).astype("float32"))
wv_matrix = np.array(wv_matrix)
return wv_matrix
|
109265
|
from dataviva.api.hedu.models import Ybu, Ybc_hedu, Yu, Yuc, Yc_hedu, Ybuc
from dataviva.api.attrs.models import University as uni, Course_hedu, Bra
from dataviva import db
from sqlalchemy.sql.expression import func, desc, not_
class University:
def __init__(self, university_id):
self._hedu = None
self._hedu_sorted_by_enrolled = None
self._hedu_sorted_by_entrants = None
self._hedu_sorted_by_graduates = None
self.university_id = university_id
if university_id is None:
self.max_year_query = db.session.query(func.max(Yu.year))
self.hedu_query = Yu.query.filter(Yu.year == self.max_year_query)
else:
self.max_year_query = db.session.query(
func.max(Yu.year)).filter_by(university_id=university_id)
self.hedu_query = Yu.query.filter(
Yu.university_id == self.university_id,
Yu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.first_or_404()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def __hedu_sorted_by_entrants__(self):
if not self._hedu_sorted_by_entrants:
self._hedu_sorted_by_entrants = self.__hedu_list__()
self._hedu_sorted_by_entrants.sort(
key=lambda hedu: hedu.entrants, reverse=True)
return self._hedu_sorted_by_entrants
def __hedu_sorted_by_graduates__(self):
if not self._hedu_sorted_by_graduates:
self._hedu_sorted_by_graduates = self.__hedu_list__()
self._hedu_sorted_by_graduates.sort(
key=lambda hedu: hedu.graduates, reverse=True)
return self._hedu_sorted_by_graduates
def name(self):
return self.__hedu__().university.name()
def university_type(self):
return self.__hedu__().university.school_type()
def enrolled(self):
return self.__hedu__().enrolled
def entrants(self):
return self.__hedu__().entrants
def graduates(self):
return self.__hedu__().graduates
def profile(self):
return self.__hedu__().university.desc_pt
def year(self):
return self.max_year_query.first()[0]
def highest_enrolled_number(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.enrolled
def highest_entrants_number(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.entrants
def highest_graduates_number(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.graduates
def highest_enrolled_by_university(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_university_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.university.name()
else:
return None
class UniversityMajors(University):
def __init__(self, university_id):
University.__init__(self, university_id)
self.max_year_query = db.session.query(func.max(Yuc.year))
self.hedu_query = Yuc.query.filter(
Yuc.university_id == self.university_id,
Yuc.year == self.max_year_query,
func.length(Yuc.course_hedu_id) == 6)
def major_with_more_enrollments(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.course_hedu.name()
def major_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.course_hedu.name()
def major_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.course_hedu.name()
class Major:
def __init__(self, course_hedu_id, bra_id):
self._hedu = None
self._hedu_sorted_by_enrolled = None
self._hedu_sorted_by_entrants = None
self._hedu_sorted_by_graduates = None
self._hedu_major_rank = None
self.course_hedu_id = course_hedu_id
self.bra_id = bra_id
if course_hedu_id is None and bra_id is None:
self.max_year_query = db.session.query(func.max(Yc_hedu.year))
self.hedu_query = Ybc_hedu.query.filter(Ybc_hedu.year == self.max_year_query)
else:
self.max_year_query = db.session.query(
func.max(Yc_hedu.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id != '':
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.bra_id == self.bra_id,
Ybc_hedu.year == self.max_year_query)
else:
self.hedu_query = Yc_hedu.query.filter(
Yc_hedu.course_hedu_id == self.course_hedu_id,
Yc_hedu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.first_or_404()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def __hedu_sorted_by_entrants__(self):
if not self._hedu_sorted_by_entrants:
self._hedu_sorted_by_entrants = self.__hedu_list__()
self._hedu_sorted_by_entrants.sort(
key=lambda hedu: hedu.entrants, reverse=True)
return self._hedu_sorted_by_entrants
def __hedu_sorted_by_graduates__(self):
if not self._hedu_sorted_by_graduates:
self._hedu_sorted_by_graduates = self.__hedu_list__()
self._hedu_sorted_by_graduates.sort(
key=lambda hedu: hedu.graduates, reverse=True)
return self._hedu_sorted_by_graduates
def name(self):
return self.__hedu__().course_hedu.name()
def enrolled(self):
return self.__hedu__().enrolled
def entrants(self):
return self.__hedu__().entrants
def graduates(self):
return self.__hedu__().graduates
def profile(self):
return self.__hedu__().course_hedu.desc_pt
def year(self):
return self.__hedu__().year
def highest_enrolled_number(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.enrolled
def highest_entrants_number(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.entrants
def highest_graduates_number(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.graduates
def location_name(self):
return Bra.query.filter(Bra.id == self.bra_id).first().name()
def highest_enrolled_by_major(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_major_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.course_hedu.name()
else:
return None
class MajorUniversities(Major):
def __init__(self, course_hedu_id, bra_id):
Major.__init__(self, course_hedu_id, bra_id)
self.course_hedu_id = course_hedu_id
self.max_year_query = db.session.query(
func.max(Yuc.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id == '':
self.hedu_query = Yuc.query.filter(
Yuc.course_hedu_id == self.course_hedu_id,
Yuc.year == self.max_year_query)
else:
self.hedu_query = Ybuc.query.filter(
Ybuc.course_hedu_id == self.course_hedu_id,
Ybuc.bra_id == self.bra_id,
Ybuc.year == self.max_year_query)
def university_with_more_enrolled(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.university.name()
def university_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.university.name()
def university_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.university.name()
class MajorMunicipalities(Major):
def __init__(self, course_hedu_id, bra_id):
Major.__init__(self, course_hedu_id, bra_id)
self.course_hedu_id = course_hedu_id
self.max_year_query = db.session.query(
func.max(Ybc_hedu.year)).filter_by(course_hedu_id=course_hedu_id)
if bra_id == '':
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.year == self.max_year_query,
not_(Ybc_hedu.bra_id.like('0xx%')),
func.length(Ybc_hedu.bra_id) == 9)
else:
self.hedu_query = Ybc_hedu.query.filter(
Ybc_hedu.course_hedu_id == self.course_hedu_id,
Ybc_hedu.year == self.max_year_query,
Ybc_hedu.bra_id.like(self.bra_id+'%'),
not_(Ybc_hedu.bra_id.like('0xx%')),
func.length(Ybc_hedu.bra_id) == 9)
def municipality_with_more_enrolled(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.bra.name()
def municipality_with_more_enrolled_state(self):
hedu = self.__hedu_sorted_by_enrolled__()[0]
return hedu.bra.abbreviation
def municipality_with_more_entrants(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.bra.name()
def municipality_with_more_entrants_state(self):
hedu = self.__hedu_sorted_by_entrants__()[0]
return hedu.bra.abbreviation
def municipality_with_more_graduates(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.bra.name()
def municipality_with_more_graduates_state(self):
hedu = self.__hedu_sorted_by_graduates__()[0]
return hedu.bra.abbreviation
class LocationUniversity:
def __init__(self, bra_id):
self._hedu_sorted_by_enrolled = None
self._hedu = None
self.bra_id = bra_id
self.max_year_query = db.session.query(
func.max(Ybu.year)).filter_by(bra_id=bra_id)
self.hedu_query = Ybu.query.join(uni).filter(
Ybu.bra_id == self.bra_id,
Ybu.year == self.max_year_query)
def __hedu__(self):
if not self._hedu:
hedu_data = self.hedu_query.one()
self._hedu = hedu_data
return self._hedu
def __hedu_list__(self):
if not self._hedu:
hedu_data = self.hedu_query.all()
self._hedu = hedu_data
return self._hedu
def __hedu_sorted_by_enrolled__(self):
if not self._hedu_sorted_by_enrolled:
self._hedu_sorted_by_enrolled = self.__hedu_list__()
self._hedu_sorted_by_enrolled.sort(
key=lambda hedu: hedu.enrolled, reverse=True)
return self._hedu_sorted_by_enrolled
def year(self):
return self.max_year_query.first()[0]
def highest_enrolled_by_university(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_university_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.university.name()
else:
return None
class LocationMajor(LocationUniversity):
def __init__(self, bra_id):
LocationUniversity.__init__(self, bra_id)
self._hedu = None
self.bra_id = bra_id
self.max_year_query = db.session.query(
func.max(Ybc_hedu.year)).filter_by(bra_id=bra_id)
self.hedu_query = Ybc_hedu.query.join(Course_hedu).filter(
Ybc_hedu.bra_id == self.bra_id,
Ybc_hedu.course_hedu_id_len == 6,
Ybc_hedu.year == self.max_year_query)
def highest_enrolled_by_major(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.enrolled
else:
return None
def highest_enrolled_by_major_name(self):
hedu_list = self.__hedu_sorted_by_enrolled__()
if len(hedu_list) != 0:
hedu = hedu_list[0]
return hedu.course_hedu.name()
else:
return None
|
109299
|
from json import dumps
from .utils import debug_coro
from aiohttp import ClientSession
from ..logger import get_logger
logger = get_logger("LPBv2.Caller")
class Caller:
def __init__(self):
self.headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
@debug_coro
async def get(self, url):
try:
async with ClientSession(headers=self.headers) as session:
response = await session.get(url)
response_json = await response.json()
return response_json
except Exception as e:
logger.error(e)
@debug_coro
async def post(self, url, payload):
try:
async with ClientSession(headers=self.headers) as session:
response = await session.post(
url=url,
json=payload,
)
response_json = await response.json()
return response_json
except Exception as e:
logger.error(e)
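# Usage sketch (illustrative; URL and payload are placeholders):
#   caller = Caller()
#   data = await caller.get("https://127.0.0.1:2999/example")
#   resp = await caller.post("https://127.0.0.1:2999/example", {"key": "value"})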
|
109330
|
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.standard.hid import HIDService
from kmk.hid import AbstractHID
BLE_APPEARANCE_HID_KEYBOARD = 961
# Hardcoded in CPy
MAX_CONNECTIONS = 2
class BLEHID(AbstractHID):
def post_init(self, ble_name='KMK Keyboard', **kwargs):
self.conn_id = -1
self.ble = BLERadio()
self.ble.name = ble_name
self.hid = HIDService()
self.hid.protocol_mode = 0 # Boot protocol
        # Security-wise this is not right. While you're away, someone can turn
        # on your keyboard, pair with it cleanly, and then listen to your
        # keystrokes.
        # On the other hand, we don't have LESC, so it's like shouting your
        # keystrokes into the air anyway.
if not self.ble.connected or not self.hid.devices:
self.start_advertising()
self.conn_id = 0
@property
def devices(self):
'''Search through the provided list of devices to find the ones with the
send_report attribute.'''
if not self.ble.connected:
return []
result = []
# Security issue:
# This introduces a race condition. Let's say you have 2 active
# connections: Alice and Bob - Alice is connection 1 and Bob 2.
# Now Chuck who has already paired with the device in the past
# (this assumption is needed only in the case of LESC)
# wants to gather the keystrokes you send to Alice. You have
# selected right now to talk to Alice (1) and you're typing a secret.
        # If Chuck kicks Alice off and is quick enough to connect to you
        # (quicker than the polling interval of this function), he'll appear
        # earlier in `self.hid.devices` and so take over the selected
        # position 1 in the resulting array.
        # If no LESC is in place, Chuck can sniff the keystrokes anyway.
for device in self.hid.devices:
if hasattr(device, 'send_report'):
result.append(device)
return result
def _check_connection(self):
devices = self.devices
if not devices:
return False
if self.conn_id >= len(devices):
self.conn_id = len(devices) - 1
if self.conn_id < 0:
return False
if not devices[self.conn_id]:
return False
return True
def hid_send(self, evt):
if not self._check_connection():
return
device = self.devices[self.conn_id]
while len(evt) < len(device._characteristic.value) + 1:
evt.append(0)
return device.send_report(evt[1:])
def clear_bonds(self):
import _bleio
_bleio.adapter.erase_bonding()
def next_connection(self):
self.conn_id = (self.conn_id + 1) % len(self.devices)
def previous_connection(self):
self.conn_id = (self.conn_id - 1) % len(self.devices)
def start_advertising(self):
advertisement = ProvideServicesAdvertisement(self.hid)
advertisement.appearance = BLE_APPEARANCE_HID_KEYBOARD
self.ble.start_advertising(advertisement)
def stop_advertising(self):
self.ble.stop_advertising()
|
109352
|
from setuptools import setup
from sapversion import version
setup(
name = 'sapling',
version = version(),
author = '<NAME>',
author_email = '<EMAIL>',
description = 'A git porcelain to manage bidirectional subtree syncing with foreign git '
'repositories',
license = 'Apache License Version 2.0',
url = 'http://github.com/jsirois/sapling',
  provides = [ 'sapling' ],
install_requires = (
'gitdb >= 0.5.1',
'GitPython > 0.2, < 0.4',
),
packages = [ 'saplib', 'sapversion' ],
package_data = { 'sapversion': [ 'version.txt' ] },
scripts = [ 'sapling.py' ],
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
# TODO(jsirois): the sapling.py --install action is actually unix/symlink dependant -
# perhaps detect windows and just copy the sapling.py script to git-core/git-sap ?
'Operating System :: OS Independent',
'Topic :: Software Development :: Version Control'
],
)
|
109418
|
from django.db.models.query import QuerySet
from odata_query.grammar import ODataLexer, ODataParser # type: ignore
from .django_q import AstToDjangoQVisitor
def apply_odata_query(queryset: QuerySet, odata_query: str) -> QuerySet:
"""
Shorthand for applying an OData query to a Django QuerySet.
Args:
queryset: Django QuerySet to apply the OData query to.
odata_query: OData query string.
Returns:
QuerySet: The modified QuerySet
"""
lexer = ODataLexer()
parser = ODataParser()
model = queryset.model
ast = parser.parse(lexer.tokenize(odata_query))
transformer = AstToDjangoQVisitor(model)
where_clause = transformer.visit(ast)
if transformer.queryset_annotations:
queryset = queryset.annotate(**transformer.queryset_annotations)
return queryset.filter(where_clause)
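# Example (illustrative; ``Book`` is a hypothetical Django model, not part of
# this module):
#   qs = apply_odata_query(Book.objects.all(), "title eq 'Dune' and year gt 1960")
#   # roughly equivalent to Book.objects.filter(Q(title='Dune') & Q(year__gt=1960))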
|
109437
|
import unittest
import pyrtl
import pyrtl.corecircuits
from pyrtl.rtllib import aes, testingutils
class TestAESDecrypt(unittest.TestCase):
"""
Test vectors are retrieved from:
http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
"""
def setUp(self):
pyrtl.reset_working_block()
self.aes_decrypt = aes.AES()
self.in_vector = pyrtl.Input(bitwidth=128, name='in_vector')
self.out_vector = pyrtl.Output(bitwidth=128, name='out_vector')
def test_inv_shift_rows(self):
self.out_vector <<= self.aes_decrypt._inv_shift_rows(self.in_vector)
in_vals = [0x3e1c22c0b6fcbf768da85067f6170495, 0x2d6d7ef03f33e334093602dd5bfb12c7]
true_result = [0x3e175076b61c04678dfc2295f6a8bfc0, 0x2dfb02343f6d12dd09337ec75b36e3f0]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, true_result)
def test_inv_sub_bytes(self):
self.out_vector <<= self.aes_decrypt._sub_bytes(self.in_vector, True)
in_vals = [0x3e175076b61c04678dfc2295f6a8bfc0, 0x2dfb02343f6d12dd09337ec75b36e3f0]
true_result = [0xd1876c0f79c4300ab45594add66ff41f, 0xfa636a2825b339c940668a3157244d17]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, true_result)
def test_inv_mix_columns(self):
self.out_vector <<= self.aes_decrypt._mix_columns(self.in_vector, True)
in_vals = [0xe9f74eec023020f61bf2ccf2353c21c7, 0xbaa03de7a1f9b56ed5512cba5f414d23]
real_res = [0x54d990a16ba09ab596bbf40ea111702f, 0x3e1c22c0b6fcbf768da85067f6170495]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, real_res)
    @unittest.skip("not correct yet; needs to be completely rewritten")
def test_key_expansion(self):
# This is not at all correct. Needs to be completely rewritten
self.out_vector <<=\
pyrtl.corecircuits.concat_list(self.aes_decrypt._key_gen(self.in_vector))
in_vals = [0xd1876c0f79c4300ab45594add66ff41f, 0xfa636a2825b339c940668a3157244d17]
true_result = [0x3e175076b61c04678dfc2295f6a8bfc0, 0x2dfb02343f6d12dd09337ec75b36e3f0]
calculated_result = testingutils.sim_and_ret_out(self.out_vector,
(self.in_vector,), (in_vals,))
self.assertEqual(calculated_result, true_result)
def test_aes_full(self):
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
self.out_vector <<= self.aes_decrypt.decryption(self.in_vector, aes_key)
ciphers = [0x3ad77bb40d7a3660a89ecaf32466ef97, 0x66e94bd4ef8a2c3b884cfa59ca342b2e]
keys = [0x2b7e151628aed2a6abf7158809cf4f3c, 0x0]
plain_text = [0x6bc1bee22e409f96e93d7e117393172a, 0x0]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector, aes_key),
(ciphers, keys))
self.assertEqual(calculated_result, plain_text)
def test_aes_state_machine(self):
# self.longMessage = True
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
reset = pyrtl.Input(1)
ready = pyrtl.Output(1, name='ready')
decrypt_ready, decrypt_out =\
self.aes_decrypt.decryption_statem(self.in_vector, aes_key, reset)
self.out_vector <<= decrypt_out
ready <<= decrypt_ready
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({
self.in_vector: 0x69c4e0d86a7b0430d8cdb78070b4c55a,
aes_key: 0x000102030405060708090a0b0c0d0e0f,
reset: 1
})
true_vals = [0x69c4e0d86a7b0430d8cdb78070b4c55a, 0x7ad5fda789ef4e272bca100b3d9ff59f,
0x54d990a16ba09ab596bbf40ea111702f, 0x3e1c22c0b6fcbf768da85067f6170495,
0xb458124c68b68a014b99f82e5f15554c, 0xe8dab6901477d4653ff7f5e2e747dd4f,
0x36339d50f9b539269f2c092dc4406d23, 0x2d6d7ef03f33e334093602dd5bfb12c7,
0x3bd92268fc74fb735767cbe0c0590e2d, 0xa7be1a6997ad739bd8c9ca451f618b61,
0x6353e08c0960e104cd70b751bacad0e7, 0x00112233445566778899aabbccddeeff,
0x00112233445566778899aabbccddeeff, ]
for cycle in range(1, 13): # Bogus data for while the state machine churns
sim.step({
self.in_vector: 0x0, aes_key: 0x1, reset: 0
})
circuit_out = sim_trace.trace[self.out_vector][cycle]
self.assertEqual(circuit_out, true_vals[cycle], "\nAssertion failed on cycle: "
+ str(cycle) + " Gotten value: " + hex(circuit_out))
        for ready_signal in sim_trace.trace[ready][:11]:
            self.assertEqual(ready_signal, 0)
        for ready_signal in sim_trace.trace[ready][11:]:
            self.assertEqual(ready_signal, 1)
class TestAESEncrypt(unittest.TestCase):
"""
Test vectors are retrieved from:
http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf
"""
def setUp(self):
pyrtl.reset_working_block()
self.aes_encrypt = aes.AES()
self.in_vector = pyrtl.Input(bitwidth=128, name='in_vector')
self.out_vector = pyrtl.Output(bitwidth=128, name='out_vector')
def test_shift_rows(self):
self.out_vector <<= self.aes_encrypt._shift_rows(self.in_vector)
in_vals = [0x3b59cb73fcd90ee05774222dc067fb68, 0xb415f8016858552e4bb6124c5f998a4c]
true_result = [0x3bd92268fc74fb735767cbe0c0590e2d, 0xb458124c68b68a014b99f82e5f15554c]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, true_result)
def test_sub_bytes(self):
self.out_vector <<= self.aes_encrypt._sub_bytes(self.in_vector)
in_vals = [0x4915598f55e5d7a0daca94fa1f0a63f7, 0xc62fe109f75eedc3cc79395d84f9cf5d]
true_result = [0x3b59cb73fcd90ee05774222dc067fb68, 0xb415f8016858552e4bb6124c5f998a4c]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, true_result)
def test_mix_columns(self):
self.out_vector <<= self.aes_encrypt._mix_columns(self.in_vector)
in_vals = [0x6353e08c0960e104cd70b751bacad0e7, 0xa7be1a6997ad739bd8c9ca451f618b61]
real_res = [0x5f72641557f5bc92f7be3b291db9f91a, 0xff87968431d86a51645151fa773ad009]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, real_res)
    @unittest.skip("not correct yet; needs to be completely rewritten")
def test_key_expansion(self):
# This is not at all correct. Needs to be completely rewritten
self.out_vector <<= pyrtl.concat_list(self.aes_encrypt._key_gen(self.in_vector))
in_vals = [0x4c9c1e66f771f0762c3f868e534df256, 0xc57e1c159a9bd286f05f4be098c63439]
true_result = [0x3bd92268fc74fb735767cbe0c0590e2d, 0xb458124c68b68a014b99f82e5f15554c]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector,),
(in_vals,))
self.assertEqual(calculated_result, true_result)
def test_aes_full(self):
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
self.out_vector <<= self.aes_encrypt.encryption(self.in_vector, aes_key)
plain_text = [0x00112233445566778899aabbccddeeff, 0x0]
keys = [<KEY>, 0x0]
ciphers = [0x69c4e0d86a7b0430d8cdb78070b4c55a, 0x66e94bd4ef8a2c3b884cfa59ca342b2e]
calculated_result = testingutils.sim_and_ret_out(self.out_vector, (self.in_vector, aes_key),
(plain_text, keys))
self.assertEqual(calculated_result, ciphers)
def test_aes_state_machine(self):
# self.longMessage = True
aes_key = pyrtl.Input(bitwidth=128, name='aes_key')
reset = pyrtl.Input(1)
ready = pyrtl.Output(1, name='ready')
encrypt_ready, encrypt_out = self.aes_encrypt.encrypt_state_m(self.in_vector, aes_key,
reset)
self.out_vector <<= encrypt_out
ready <<= encrypt_ready
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
sim.step({
self.in_vector: 0x00112233445566778899aabbccddeeff,
aes_key: <KEY>,
reset: 1
})
true_vals = [0x00112233445566778899aabbccddeeff, 0x00102030405060708090a0b0c0d0e0f0,
0x89d810e8855ace682d1843d8cb128fe4, 0x4915598f55e5d7a0daca94fa1f0a63f7,
0xfa636a2825b339c940668a3157244d17, 0x247240236966b3fa6ed2753288425b6c,
0xc81677bc9b7ac93b25027992b0261996, 0xc62fe109f75eedc3cc79395d84f9cf5d,
0xd1876c0f79c4300ab45594add66ff41f, 0xfde3bad205e5d0d73547964ef1fe37f1,
0xbd6e7c3df2b5779e0b61216e8b10b689, 0x69c4e0d86a7b0430d8cdb78070b4c55a,
0x69c4e0d86a7b0430d8cdb78070b4c55a, ]
for cycle in range(1, 13): # Bogus data for while the state machine churns
sim.step({
self.in_vector: 0x0, aes_key: 0x1, reset: 0
})
circuit_out = sim_trace.trace[self.out_vector][cycle]
# sim_trace.render_trace(symbol_len=40)
self.assertEqual(circuit_out, true_vals[cycle], "\nAssertion failed on cycle: "
+ str(cycle) + " Gotten value: " + hex(circuit_out))
        for ready_signal in sim_trace.trace[ready][:11]:
            self.assertEqual(ready_signal, 0)
        for ready_signal in sim_trace.trace[ready][11:]:
            self.assertEqual(ready_signal, 1)
|
109462
|
import caffe.proto.caffe_pb2 as caffe_pb2
from os.path import join
from caffe_config import CaffeConfig
from to_string import to_string
class SolverConfig(CaffeConfig):
def __init__(self, base_lr=0.001, lr_policy="step", solver_type="SGD", step_size=10000, display=20, momentum=0.9, gamma=0.1, weight_decay=5e-4):
CaffeConfig.__init__(self)
self.base_lr=base_lr
self.solver_type=solver_type
self.step_size=step_size
self.display=display
self.momentum=momentum
self.gamma=gamma
self.weight_decay=weight_decay
self.lr_policy=lr_policy
self.snapshot_prefix=''
def path(self):
self.dir_exists_or_create()
return join(self.scen_dir, 'stage%s_%s_solver.prototxt' % (self.stage, self.net_type))
def generate(self, net):
        if net is None:
raise Exception("Net not provided!")
self.setScenario(net.scenarios_dir, net.scenario)
self.stage=net.stage
self.net_type=net.network_type
s = caffe_pb2.SolverParameter()
# Specify locations of the train and (maybe) test networks.
s.train_net = net.path()
s.lr_policy=self.lr_policy
# The number of iterations over which to average the gradient.
# Effectively boosts the training batch size by the given factor, without
# affecting memory utilization.
s.iter_size = 1
# Solve using the stochastic gradient descent (SGD) algorithm.
# Other choices include 'Adam' and 'RMSProp'.
s.type = self.solver_type
# Set the initial learning rate for SGD.
s.base_lr = self.base_lr
# Set `lr_policy` to define how the learning rate changes during training.
# Here, we 'step' the learning rate by multiplying it by a factor `gamma`
# every `stepsize` iterations.
s.gamma = self.gamma
s.stepsize = self.step_size
# Set other SGD hyperparameters. Setting a non-zero `momentum` takes a
# weighted average of the current gradient and previous gradients to make
# learning more stable. L2 weight decay regularizes learning, to help prevent
# the model from overfitting.
s.momentum = self.momentum
s.weight_decay = self.weight_decay
# Display the current training loss and accuracy every 1000 iterations.
s.display = self.display
# Snapshots are files used to store networks we've trained. Here, we'll
# snapshot every 10K iterations -- ten times during training.
s.snapshot = 0
# Train on the GPU. Using the CPU to train large networks is very slow.
s.solver_mode = caffe_pb2.SolverParameter.GPU
s.snapshot_prefix=self.snapshot_prefix
return self.save(s)
def __repr__(self):
return to_string(self)
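# Usage sketch (illustrative; `net` must expose scenarios_dir, scenario, stage,
# network_type and path(), as generate() expects):
#   solver = SolverConfig(base_lr=0.01, step_size=20000)
#   prototxt_path = solver.generate(net)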
def generate(solver_config):
    solver_config.stage = 1
    solver_config.generate()
run = 0
|
109465
|
import torch
try:
from torch.utils.data import IterableDataset
except ImportError:
class IterableDataset:
pass
class BatchChecker:
def __init__(self, data, init_counter=0):
self.counter = init_counter
self.data = data
self.true_batch = None
def check(self, batch):
self.true_batch = self.data[self.counter % len(self.data)]
self.counter += 1
res = self.true_batch == batch
return res.all() if not isinstance(res, bool) else res
class IterationCounter:
def __init__(self, start_value=1):
self.current_iteration_count = start_value
def __call__(self, engine):
assert engine.state.iteration == self.current_iteration_count
self.current_iteration_count += 1
class EpochCounter:
def __init__(self, start_value=1):
self.current_epoch_count = start_value
def __call__(self, engine):
assert engine.state.epoch == self.current_epoch_count
self.current_epoch_count += 1
def setup_sampler(sampler_type, num_iters, batch_size):
if sampler_type is None:
return None, batch_size
if sampler_type == "weighted":
from torch.utils.data.sampler import WeightedRandomSampler
w = torch.ones(num_iters * batch_size, dtype=torch.float)
for i in range(num_iters):
w[batch_size * i : batch_size * (i + 1)] += i * 1.0
return WeightedRandomSampler(w, num_samples=num_iters * batch_size, replacement=True), batch_size
if sampler_type == "distributed":
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
num_replicas = 1
rank = 0
if dist.is_available() and dist.is_initialized():
num_replicas = dist.get_world_size()
rank = dist.get_rank()
dataset = torch.zeros(num_iters * batch_size)
return DistributedSampler(dataset, num_replicas=num_replicas, rank=rank), batch_size // num_replicas
class MyIterableDataset(IterableDataset):
def __init__(self, start, end):
        super(MyIterableDataset, self).__init__()
        assert end > start, "this example code only works with end > start"
self.start = start
self.end = end
def __iter__(self):
return iter(range(self.start, self.end))
def get_iterable_dataset(*args, **kwargs):
return MyIterableDataset(*args, **kwargs)
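# Usage sketch (illustrative; requires a torch build that provides
# torch.utils.data.IterableDataset):
#   ds = get_iterable_dataset(3, 7)
#   list(torch.utils.data.DataLoader(ds, batch_size=2))
#   # -> [tensor([3, 4]), tensor([5, 6])]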
|
109471
|
import vessel_scoring.add_measures
import six
class BaseModel(object):
# def train_on_messages(self, messages):
# messages = AddMeasures(messages, self.windows)
# y_train = utils.is_fishy(train_data)
# model.fit(train_data, y_train)
# return model
def predict_messages(self, messages):
for msg in vessel_scoring.add_measures.AddMeasures(messages, self.windows):
if (msg.get('timestamp', None) is not None and
msg.get('speed', None) is not None and
msg.get('course', None) is not None):
msg['measure_new_score'] = float(self.predict_proba({
key: [value]
for key, value in six.iteritems(msg)
})[0][1])
yield msg
def dump_arg_dict(self):
return None
def dump_dict(self):
args = self.dump_arg_dict()
if args is None:
return None
model_class = type(self)
return {'model': "%s.%s" % (model_class.__module__, model_class.__name__),
'args': args,
}
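# Usage sketch (illustrative; a concrete subclass must provide `windows` and a
# fitted `predict_proba`, e.g. an sklearn-style classifier):
#   for msg in model.predict_messages(messages):
#       print(msg.get('measure_new_score'))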
|
109496
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class User(AbstractUser, models.Model):
    email = models.EmailField(unique=True)
class Choices(models.Model):
    choice = models.CharField(max_length=5000)
    is_answer = models.BooleanField(default=False)
class Questions(models.Model):
    question = models.CharField(max_length=10000)
    question_type = models.CharField(max_length=20)
    required = models.BooleanField(default=False)
    answer_key = models.CharField(max_length=5000, blank=True)
    score = models.IntegerField(blank=True, default=0)
    feedback = models.CharField(max_length=5000, null=True)
    choices = models.ManyToManyField(Choices, related_name="choices")
class Answer(models.Model):
    answer = models.CharField(max_length=5000)
    answer_to = models.ForeignKey(Questions, on_delete=models.CASCADE, related_name="answer_to")
class Form(models.Model):
    code = models.CharField(max_length=30)
    title = models.CharField(max_length=200)
    description = models.CharField(max_length=10000, blank=True)
    creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name="creator")
    background_color = models.CharField(max_length=20, default="#d9efed")
    text_color = models.CharField(max_length=20, default="#272124")
    collect_email = models.BooleanField(default=False)
    authenticated_responder = models.BooleanField(default=False)
    edit_after_submit = models.BooleanField(default=False)
    confirmation_message = models.CharField(max_length=10000, default="Your response has been recorded.")
    is_quiz = models.BooleanField(default=False)
    allow_view_score = models.BooleanField(default=True)
    createdAt = models.DateTimeField(auto_now_add=True)
    updatedAt = models.DateTimeField(auto_now=True)
    questions = models.ManyToManyField(Questions, related_name="questions")
class Responses(models.Model):
    response_code = models.CharField(max_length=20)
    response_to = models.ForeignKey(Form, on_delete=models.CASCADE, related_name="response_to")
    responder_ip = models.CharField(max_length=30)
    responder = models.ForeignKey(User, on_delete=models.CASCADE, related_name="responder", blank=True, null=True)
    responder_email = models.EmailField(blank=True)
    response = models.ManyToManyField(Answer, related_name="response")
|
109507
|
import hashlib
import hmac
from Crypto.Protocol.KDF import PBKDF2
def mnemonics_to_seed(seed, passphrase=b""):
salt = b"mn<PASSWORD>" + passphrase
def prf(p, s):
hx = hmac.new(p, msg=s, digestmod=hashlib.sha512)
return hx.digest()
res = PBKDF2(password=seed, salt=salt, dkLen=64, prf=prf, count=2048)
return res
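# Usage sketch (illustrative; the salt prefix above is redacted, so only the
# call shape is shown): derives a 64-byte binary seed via PBKDF2-HMAC-SHA512
# with 2048 iterations, as in BIP39-style schemes.
#   seed = mnemonics_to_seed(b"abandon abandon ... about", passphrase=b"TREZOR")
#   assert len(seed) == 64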
|
109514
|
from tir import Webapp
import unittest
class CTBA211(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.oHelper = Webapp()
        cls.oHelper.Setup("SIGACTB", "30/06/2015", "T1", "M PR 02 ", "34")
        cls.oHelper.Program("CTBA211")
    ###########################################################################################
    #                 Test case 001 - Include apportionment ("Incluir Rateio")                #
    #                                      29/08/2019                                         #
    ###########################################################################################
def test_CTBA211_001(self):
self.oHelper.ClickTree("Apuracao de Lucros / Perdas > Perguntas")
        # Parameter prompts (Perguntas)
self.oHelper.SetValue("mv_par01", "01062015")
self.oHelper.SetValue("mv_par02", "30062015")
self.oHelper.SetValue("mv_par03", "APUR15")
self.oHelper.SetValue("mv_par04", "001")
self.oHelper.SetValue("mv_par05", "000001")
self.oHelper.SetValue("mv_par06", "001")
self.oHelper.SetValue("mv_par07", "CTB211ELC")
self.oHelper.SetValue("mv_par08", "CTB211ELD")
self.oHelper.SetValue("Moedas ?", "Todas") #Todas / Específica
self.oHelper.SetValue("mv_par10", "01")
self.oHelper.SetValue("Considera Ent.Ponte ?", "Sim")
self.oHelper.SetValue("mv_par12", "1")
self.oHelper.SetValue("Considera Entidades ?", "Rotina de Apur.")
self.oHelper.SetValue("mv_par14", "CTB211CP")
self.oHelper.SetValue("mv_par15", "CTB211CA")
self.oHelper.SetValue("mv_par16", "")
self.oHelper.SetValue("mv_par17", "")
self.oHelper.SetValue("mv_par18", "")
self.oHelper.SetValue("mv_par19", "")
self.oHelper.SetValue("mv_par20", "")
self.oHelper.SetValue("mv_par21", "")
self.oHelper.SetValue("mv_par22", "")
self.oHelper.SetValue("mv_par23", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par24", "")
self.oHelper.SetValue("mv_par25", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par26", "")
self.oHelper.SetValue("mv_par27", "ZZZZZZZZZ")
self.oHelper.SetValue("Reproces. Saldos ?", "Sim")#
self.oHelper.SetValue("Seleciona Filiais ?", "Sim")#
self.oHelper.SetValue("mv_par30", "M PR 02")
self.oHelper.SetValue("mv_par31", "M PR 02")
self.oHelper.ClickIcon("Iniciar Execução")
self.oHelper.SetButton("Sim")
self.oHelper.SetButton("Sim")
self.oHelper.SetButton("Sim")
self.oHelper.Program("CTBA211")
self.oHelper.AssertTrue()
def test_CTBA211_002(self):
self.oHelper.ClickTree("Apuracao de Lucros / Perdas > Perguntas")
        # Parameter prompts (Perguntas)
self.oHelper.SetValue("mv_par01", "01052014")
self.oHelper.SetValue("mv_par02", "31052014")
self.oHelper.SetValue("mv_par03", "")
self.oHelper.SetValue("mv_par04", "")
self.oHelper.SetValue("mv_par05", "")
self.oHelper.SetValue("mv_par06", "006")
self.oHelper.SetValue("mv_par07", "")
self.oHelper.SetValue("mv_par08", "")
self.oHelper.SetValue("Moedas ?", "Todas") #Todas / Específica
self.oHelper.SetValue("mv_par10", "")
self.oHelper.SetValue("Considera Ent.Ponte ?", "Sim")
self.oHelper.SetValue("mv_par12", "")
self.oHelper.SetValue("Considera Entidades ?", "Rotina de Apur.")
self.oHelper.SetValue("mv_par14", "")
self.oHelper.SetValue("mv_par15", "")
self.oHelper.SetValue("mv_par16", "")
self.oHelper.SetValue("mv_par17", "")
self.oHelper.SetValue("mv_par18", "")
self.oHelper.SetValue("mv_par19", "")
self.oHelper.SetValue("mv_par20", "")
self.oHelper.SetValue("mv_par21", "")
self.oHelper.SetValue("mv_par22", "")
self.oHelper.SetValue("mv_par23", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par24", "")
self.oHelper.SetValue("mv_par25", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par26", "")
self.oHelper.SetValue("mv_par27", "ZZZZZZZZZ")
self.oHelper.SetValue("Reproces. Saldos ?", "Sim")#
self.oHelper.SetValue("Seleciona Filiais ?", "Sim")#
self.oHelper.SetValue("mv_par30", "M PR 02")
self.oHelper.SetValue("mv_par31", "M PR 02")
self.oHelper.ClickIcon("Iniciar Execução")
self.oHelper.CheckHelp(text_help="CT210NOHP", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCT210LOT", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCTSUBLOT", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCT210DOC", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCT210CT", button="Fechar")
self.oHelper.CheckHelp(text_help="NO210TPSLD", button="Fechar")
self.oHelper.SetButton("Fechar")
self.oHelper.Program("CTBA211")
self.oHelper.AssertTrue()
def test_CTBA211_003(self):
self.oHelper.ClickTree("Apuracao de Lucros / Perdas > Perguntas")
        # Parameter prompts (Perguntas)
self.oHelper.SetValue("mv_par01", "")
self.oHelper.SetValue("mv_par02", "31052014")
self.oHelper.SetValue("mv_par03", "")
self.oHelper.SetValue("mv_par04", "")
self.oHelper.SetValue("mv_par05", "")
self.oHelper.SetValue("mv_par06", "")
self.oHelper.SetValue("mv_par07", "")
self.oHelper.SetValue("mv_par08", "")
self.oHelper.SetValue("Moedas ?", "Específica") #Todas / Específica
self.oHelper.SetValue("mv_par10", "01")
self.oHelper.SetValue("Considera Ent.Ponte ?", "Não")
self.oHelper.SetValue("mv_par12", "")
self.oHelper.SetValue("Considera Entidades ?", "Rotina de Apur.") #Cadastros
self.oHelper.SetValue("mv_par14", "")
self.oHelper.SetValue("mv_par15", "")
self.oHelper.SetValue("mv_par16", "")
self.oHelper.SetValue("mv_par17", "")
self.oHelper.SetValue("mv_par18", "")
self.oHelper.SetValue("mv_par19", "")
self.oHelper.SetValue("mv_par20", "")
self.oHelper.SetValue("mv_par21", "")
self.oHelper.SetValue("mv_par22", "")
self.oHelper.SetValue("mv_par23", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par24", "")
self.oHelper.SetValue("mv_par25", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par26", "")
self.oHelper.SetValue("mv_par27", "ZZZZZZZZZ")
self.oHelper.SetValue("Reproces. Saldos ?", "Sim")#
self.oHelper.SetValue("Seleciona Filiais ?", "Sim")#
self.oHelper.SetValue("mv_par30", "M PR 02")
self.oHelper.SetValue("mv_par31", "M PR 02")
self.oHelper.ClickIcon("Iniciar Execução")
self.oHelper.CheckHelp(text_help="CTHPVAZIO", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCT210LOT", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCTSUBLOT", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCT210DOC", button="Fechar")
self.oHelper.CheckHelp(text_help="NOCT210CT", button="Fechar")
self.oHelper.CheckHelp(text_help="NO210TPSLD", button="Fechar")
self.oHelper.SetButton("Fechar")
self.oHelper.Program("CTBA211")
self.oHelper.AssertTrue()
def test_CTBA211_004(self):
self.oHelper.ClickTree("Apuracao de Lucros / Perdas > Perguntas")
        # Parameter prompts (Perguntas)
self.oHelper.SetValue("mv_par01", "01052014")
self.oHelper.SetValue("mv_par02", "31052014")
self.oHelper.SetValue("mv_par03", "")
self.oHelper.SetValue("mv_par04", "")
self.oHelper.SetValue("mv_par05", "")
self.oHelper.SetValue("mv_par06", "")
self.oHelper.SetValue("mv_par07", "")
self.oHelper.SetValue("mv_par08", "")
self.oHelper.SetValue("Moedas ?", "Específica") #Todas / Específica
self.oHelper.SetValue("mv_par10", "")
self.oHelper.SetValue("Considera Ent.Ponte ?", "Não")
self.oHelper.SetValue("mv_par12", "")
self.oHelper.SetValue("Considera Entidades ?", "Rotina de Apur.") #Cadastros
self.oHelper.SetValue("mv_par14", "")
self.oHelper.SetValue("mv_par15", "")
self.oHelper.SetValue("mv_par16", "")
self.oHelper.SetValue("mv_par17", "")
self.oHelper.SetValue("mv_par18", "")
self.oHelper.SetValue("mv_par19", "")
self.oHelper.SetValue("mv_par20", "")
self.oHelper.SetValue("mv_par21", "")
self.oHelper.SetValue("mv_par22", "")
self.oHelper.SetValue("mv_par23", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par24", "")
self.oHelper.SetValue("mv_par25", "ZZZZZZZZZ")
self.oHelper.SetValue("mv_par26", "")
self.oHelper.SetValue("mv_par27", "ZZZZZZZZZ")
self.oHelper.SetValue("Reproces. Saldos ?", "Sim")#
self.oHelper.SetValue("Seleciona Filiais ?", "Sim")#
self.oHelper.SetValue("mv_par30", "M PR 02")
self.oHelper.SetValue("mv_par31", "M PR 02")
self.oHelper.ClickIcon("Iniciar Execução")
self.oHelper.AssertTrue()
self.oHelper.WaitShow("NOMOEDA")
# self.oHelper.SetButton("Fechar")
self.oHelper.CheckHelp(text_help="NOMOEDA",button="Fechar")
# self.oHelper.CheckHelp(text_problem="Moeda não preenchida e/ou não existente.",button="Fechar")
self.oHelper.WaitShow("TOTVS")
self.oHelper.AssertTrue()
    @classmethod
    def tearDownClass(cls):
        cls.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
|
109515
|
import mimetypes
import os
import boto3
BUCKET = "hc-flask-assets"
def write_to_s3(bucket_name: str, from_file: str, to_file: str, mimetype) -> None:
s3 = boto3.resource("s3")
s3.Bucket(bucket_name).upload_file(
from_file, to_file, ExtraArgs={"ACL": "public-read", "ContentType": mimetype}
)
def get_mimetype(file):
mimetype, _ = mimetypes.guess_type(file)
if mimetype is None:
raise Exception("Failed to guess mimetype")
return mimetype
def load_assets():
    for root, dirs, files in os.walk("static"):
        # build keys relative to the static/ prefix so files at the top level
        # of static/ land at the bucket root rather than under "static/"
        s3_folder = os.path.relpath(root, "static")
        for file in files:
            key = file if s3_folder == "." else f"{s3_folder}/{file}"
            mimetype = get_mimetype(file)
            write_to_s3(BUCKET, f"{root}/{file}", key, mimetype)
if __name__ == "__main__":
load_assets()
|
109549
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def python_dependencies_early():
http_archive(
name = "rules_python",
url = "https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
)
|
109565
|
import math
def buildSparseTable(arr, n):
    # lookup[i][j] stores the minimum of arr[i : i + 2**j]
    for i in range(0, n):
        lookup[i][0] = arr[i]
    # combine two halves of length 2**(j - 1) into ranges of length 2**j
    j = 1
    while (1 << j) <= n:
        i = 0
        while (i + (1 << j) - 1) < n:
            if (lookup[i][j - 1] <
                    lookup[i + (1 << (j - 1))][j - 1]):
                lookup[i][j] = lookup[i][j - 1]
            else:
                lookup[i][j] = lookup[i + (1 << (j - 1))][j - 1]
            i += 1
        j += 1
def query(L, R):
    # cover [L, R] with two (possibly overlapping) blocks of length 2**j
    j = int(math.log2(R - L + 1))
    if lookup[L][j] <= lookup[R - (1 << j) + 1][j]:
        return lookup[L][j]
    else:
        return lookup[R - (1 << j) + 1][j]
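# Worked example of the idea (illustrative): lookup[i][j] holds
# min(arr[i : i + 2**j]), so a range-minimum query over [L, R] is answered by
# two overlapping power-of-two blocks; for a = [7, 2, 3, 0, 5, ...],
# query(0, 4) = min(lookup[0][2], lookup[1][2]) = min(0, 0) = 0.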
if __name__ == "__main__":
a = [7, 2, 3, 0, 5, 10, 3, 12, 18]
n = len(a)
MAX = 500
lookup = [[0 for i in range(MAX)] for j in range(MAX)]
buildSparseTable(a, n)
print(query(0, 4))
print(query(4, 7))
print(query(7, 8))
|
109574
|
from datetime import datetime
from django.conf import settings
import requests
import olympia.core.logger
from olympia.devhub.models import BlogPost
log = olympia.core.logger.getLogger('z.cron')
def update_blog_posts():
"""Update the blog post cache."""
items = requests.get(settings.DEVELOPER_BLOG_URL, timeout=10).json()
if not items:
return
BlogPost.objects.all().delete()
for item in items[:5]:
BlogPost.objects.create(
title=item['title']['rendered'],
date_posted=datetime.strptime(item['date'], '%Y-%m-%dT%H:%M:%S'),
permalink=item['link'],
)
log.info(f'Adding {BlogPost.objects.count():d} blog posts.')
|
109577
|
import daiquiri.core.env as env
FILES_BASE_PATH = env.get_abspath('FILES_BASE_PATH')
FILES_BASE_URL = env.get('FILES_BASE_URL')
|
109588
|
import sys
import django
django.setup()
from busshaming.data_processing import realtime_validator
if __name__ == '__main__':
if len(sys.argv) != 2:
print(f'Usage: {sys.argv[0]} <route_id>')
sys.exit(1)
realtime_validator.validate_route(sys.argv[1])
|
109623
|
from clang.cindex import TranslationUnit
from tests.cindex.util import get_cursor
def test_comment():
files = [('fake.c', """
/// Aaa.
int test1;
/// Bbb.
/// x
void test2(void);
void f() {
}
""")]
# make a comment-aware TU
tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
test1 = get_cursor(tu, 'test1')
assert test1 is not None, "Could not find test1."
assert test1.type.is_pod()
raw = test1.raw_comment
brief = test1.brief_comment
assert raw == """/// Aaa."""
assert brief == """Aaa."""
test2 = get_cursor(tu, 'test2')
raw = test2.raw_comment
brief = test2.brief_comment
assert raw == """/// Bbb.\n/// x"""
assert brief == """Bbb. x"""
f = get_cursor(tu, 'f')
raw = f.raw_comment
brief = f.brief_comment
assert raw is None
assert brief is None
|
109638
|
import os
import numpy as np
from numpy.lib.stride_tricks import as_strided
import nibabel as nib
def nib_load(file_name):
proxy = nib.load(file_name)
data = proxy.get_data().astype('float32')
proxy.uncache()
return data
def crop(x, ksize, stride=3):
    # number of sliding windows along each spatial axis (must be integral)
    shape = (np.array(x.shape[:3]) - ksize) // stride + 1
    shape = tuple(shape) + (ksize, )*3 + (x.shape[3], )
    # step `stride` voxels between windows; the window itself reuses x's strides
    strides = np.array(x.strides[:3]) * stride
    strides = tuple(strides) + x.strides
    x = as_strided(x, shape=shape, strides=strides)
    return x
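# Shape sketch (illustrative): for x of shape (240, 240, 156, 4), crop(x, 9)
# returns a (78, 78, 50, 9, 9, 9, 4) strided view, i.e. (dim - 9) // 3 + 1
# windows per axis of 9**3 voxels each, without copying any data.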
modalities = ('flair', 't1ce', 't1', 't2')
root = '/home/thuyen/Data/brats17/Brats17TrainingData/'
file_list = root + 'file_list.txt'
subjects = open(file_list).read().splitlines()
subj = subjects[0]
name = subj.split('/')[-1]
path = os.path.join(root, subj, name + '_')
x0 = np.stack([
nib_load(path + modal + '.nii.gz') \
for modal in modalities], 3)
y0 = nib_load(path + 'seg.nii.gz')[..., None]
x0 = np.pad(x0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')
y0 = np.pad(y0, ((0, 0), (0, 0), (0, 1), (0, 0)), mode='constant')
x1 = crop(x0, 9)
x2 = crop(np.pad(x0, ((8, 8), (8, 8), (8, 8), (0, 0)), mode='constant'), 25)
x3 = crop(np.pad(x0, ((24, 24), (24, 24), (24, 24), (0, 0)), mode='constant'), 57)
y1 = crop(y0, 9)
m = x1.reshape(x1.shape[:3] + (-1, )).sum(3) > 0
x1 = x1[m]
x2 = x2[m]
x3 = x3[m]
y1 = y1[m]
print(x1.shape)
print(x2.shape)
print(x3.shape)
print(y1.shape)
|
109672
|
import os
import tuned.logs
from . import base
from tuned.utils.commands import commands
class strip(base.Function):
"""
Makes string from all arguments and strip it
"""
def __init__(self):
# unlimited number of arguments, min 1 argument
super(strip, self).__init__("strip", 0, 1)
def execute(self, args):
if not super(strip, self).execute(args):
return None
return "".join(args).strip()
|
109714
|
from enum import Enum
class ItemProperty(Enum):
"""
See crawl wiki for lists of these:
weapons: http://crawl.chaosforge.org/Brand
armour: http://crawl.chaosforge.org/Ego
"""
NO_PROPERTY = 0
# Melee Weapon Brands
Antimagic_Brand = 1
Chaos_Brand = 2
Disruption_Brand = 3
Distortion_Brand = 4
Dragon_slaying_Brand = 5
Draining_Brand = 6
Electrocution_Brand = 7
Flaming_Brand = 8
Freezing_Brand = 9
Holywrath_Brand = 10
Pain_Brand = 11
Necromancy_Brand = 12
Protection_Brand = 13
Reaping_Brand = 14
Speed_Brand = 15
Vampiricism_Brand = 16
Venom_Brand = 17
Vorpal_Brand = 18
# Thrown weapon brands
Dispersal_Brand = 19
Exploding_Brand = 20
Penetration_Brand = 21
Poisoned_Brand = 22
Returning_Brand = 23
Silver_Brand = 24
Steel_Brand = 25
# Needles
Confusion_Brand = 26
Curare_Brand = 27
Frenzy_Brand = 28
Paralysis_Brand = 29
Sleeping_Brand = 30
# Armour Properties (Egos)
Resistance_Ego = 31
Fire_Resistance_Ego = 32
Cold_Resistance_Ego = 33
Poison_Resistance_Ego = 34
Positive_Energy_Ego = 35
Protection_Ego = 36
Invisibility_Ego = 37
Magic_Resistance_Ego = 38
Strength_Ego = 39
Dexterity_Ego = 40
Intelligence_Ego = 41
Running_Ego = 42
Flight_Ego = 43
Stealth_Ego = 44
See_Invisible_Ego = 45
Archmagi_Ego = 46
Ponderousness_Ego = 47
Reflection_Ego = 48
Spirit_Shield_Ego = 49
Archery_Ego = 50
|
109721
|
import logging
import json
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
try:
req_body = req.get_json()
request_as_text = json.dumps(req_body, default=lambda o: o.__dict__)
logging.info(request_as_text)
    except Exception as e:
logging.exception(e)
return func.HttpResponse(
"An error occurred.",
status_code=500
)
    return func.HttpResponse("Success")
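# Local-invocation sketch (illustrative; the URL is hypothetical):
# req = func.HttpRequest(method='POST', url='/api/handler', body=b'{"a": 1}')
# resp = main(req)
# assert resp.status_code == 200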
|
109732
|
from app.tests.utilities import selenium_utility
from selenium.webdriver.common.action_chains import ActionChains
class SelectTracks(selenium_utility.SeleniumUtility):
_first_playlist = '//li[@data-toggle="collapse"][1]'
_track = '(//li[contains(@class, "track")])[1]'
_next_btn = '//button[@id="next-btn"]'
    def __init__(self, driver):
        self.driver = driver
        self.first_playlists = None
        self.actions = ActionChains(driver)
        super().__init__(driver)
def click_next_without_any_selection(self):
self.scroll_to_element(self._next_btn)
self.get_element(self._next_btn).click()
def get_alert_text(self):
        alert = self.driver.switch_to.alert  # switch_to_alert() was removed in Selenium 4
alert_text = alert.text
alert.accept()
return alert_text
def get_first_playlist(self):
self.first_playlists = self.wait_for_element(self._first_playlist)
def check_if_expanded(self):
self.get_first_playlist()
is_expanded = self.first_playlists.get_attribute('aria-expanded')
        return is_expanded == 'true'
def expand_first_playlists(self):
self.first_playlists.click()
def click_on_first_track(self):
"""
clicking on the first track
"""
self.scroll_to_element(self._track)
track = self.get_element(self._track)
track.click()
def get_current_track_state(self):
"""
:return: True if track is selected else False
"""
track = self.get_element(self._track)
att = track.get_attribute('class')
        return 'selected' in att
def click_next(self):
self.scroll_to_element(self._next_btn)
next_btn = self.get_element(self._next_btn)
next_btn.click()
|
109760
|
from unittest import TestCase
import unittest
from equadratures import *
import numpy as np
from copy import deepcopy
def model(x):
return x[0]**2 + x[1]**3 - x[0]*x[1]**2
class TestF(TestCase):
def test_tensor_grid_with_nans(self):
# Without Nans!
param = Parameter(distribution='uniform', lower=-1., upper=1., order=4)
basis = Basis('tensor-grid')
poly = Poly(parameters=[param, param], basis=basis, method='numerical-integration')
pts, wts = poly.get_points_and_weights()
model_evals = evaluate_model(pts, model)
poly.set_model(model_evals)
mean, variance = poly.get_mean_and_variance()
# With Nans!
model_evals_with_NaNs = deepcopy(model_evals)
indices_to_set_to_NaN = np.asarray([1, 3, 9, 13])
model_evals_with_NaNs[indices_to_set_to_NaN] = np.nan * indices_to_set_to_NaN.reshape(len(indices_to_set_to_NaN),1)
basis2 = Basis('tensor-grid')
poly2 = Poly(parameters=[param, param], basis=basis2, method='numerical-integration')
poly2.set_model(model_evals_with_NaNs)
mean_with_nans, variance_with_nans = poly2.get_mean_and_variance()
# Verify!
np.testing.assert_almost_equal(mean, mean_with_nans, decimal=7, err_msg='Problem!')
np.testing.assert_almost_equal(variance, variance_with_nans, decimal=7, err_msg='Problem!')
if __name__ == '__main__':
unittest.main()
|
109779
|
import unittest
from rdkit import Chem
from reinvent_chemistry.library_design import BondMaker, AttachmentPoints
from reaction_filters.reaction_filter_enum import ReactionFiltersEnum
from reaction_filters.reaction_filter import ReactionFilter
from running_modes.configurations import ReactionFilterConfiguration
from tests.unit_tests.fixtures.compounds import REACTION_SUZUKI, DECORATION_SUZUKI, SCAFFOLD_SUZUKI, SCAFFOLD_NO_SUZUKI, \
DECORATION_NO_SUZUKI
class TestNonSelectiveReactionFilters(unittest.TestCase):
def setUp(self):
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._enum = ReactionFiltersEnum()
reactions = {"0": [REACTION_SUZUKI]}
configuration = ReactionFilterConfiguration(type=self._enum.NON_SELECTIVE, reactions=reactions)
self.reaction_filter = ReactionFilter(configuration)
def test_with_suzuki_reagents(self):
scaffold = SCAFFOLD_SUZUKI
decoration = DECORATION_SUZUKI
scaffold = self._attachment_points.add_attachment_point_numbers(scaffold, canonicalize=False)
molecule: Chem.Mol = self._bond_maker.join_scaffolds_and_decorations(scaffold, decoration)
score = self.reaction_filter.evaluate(molecule)
self.assertEqual(1.0, score)
def test_with_non_suzuki_reagents(self):
scaffold = SCAFFOLD_NO_SUZUKI
decoration = DECORATION_NO_SUZUKI
scaffold = self._attachment_points.add_attachment_point_numbers(scaffold, canonicalize=False)
molecule: Chem.Mol = self._bond_maker.join_scaffolds_and_decorations(scaffold, decoration)
score = self.reaction_filter.evaluate(molecule)
self.assertEqual(0.0, score)
class TestNonSelectiveReactionFiltersNoReaction(unittest.TestCase):
def setUp(self):
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._enum = ReactionFiltersEnum()
reactions = {"1": []}
configuration = ReactionFilterConfiguration(type=self._enum.NON_SELECTIVE, reactions=reactions)
self.reaction_filter = ReactionFilter(configuration)
def test_with_suzuki_reagents(self):
scaffold = SCAFFOLD_SUZUKI
decoration = DECORATION_SUZUKI
scaffold = self._attachment_points.add_attachment_point_numbers(scaffold, canonicalize=False)
molecule: Chem.Mol = self._bond_maker.join_scaffolds_and_decorations(scaffold, decoration)
score = self.reaction_filter.evaluate(molecule)
self.assertEqual(1.0, score)
def test_with_any_reagents(self):
scaffold = SCAFFOLD_NO_SUZUKI
decoration = DECORATION_NO_SUZUKI
scaffold = self._attachment_points.add_attachment_point_numbers(scaffold, canonicalize=False)
molecule: Chem.Mol = self._bond_maker.join_scaffolds_and_decorations(scaffold, decoration)
score = self.reaction_filter.evaluate(molecule)
self.assertEqual(1.0, score)
|
109863
|
from pathlib import Path
current_dir = Path(__file__).parent.absolute()
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
# [2021-06-30] TD: Somehow I get segfault if I import pl_bolts *after* torchvision
from pl_bolts.datamodules import CIFAR10DataModule
from torchvision import transforms, datasets
from src.utils.utils import get_logger
from src.utils.tuples import to_2tuple
# From https://github.com/PyTorchLightning/lightning-bolts/blob/bd392ad858039290c72c20cc3f10df39384e90b9/pl_bolts/transforms/dataset_normalizations.py#L20
def cifar10_normalization():
return transforms.Normalize(
mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]],
)
def cifar10_grayscale_normalization():
return transforms.Normalize(mean=122.6 / 255.0, std=61.0 / 255.0)
def cifar100_normalization():
return transforms.Normalize(
mean=[x / 255.0 for x in [129.3, 124.1, 112.4]],
std=[x / 255.0 for x in [68.2, 65.4, 70.4]],
)
def cifar100_grayscale_normalization():
return transforms.Normalize(mean=124.3 / 255.0, std=63.9 / 255.0)
# Adapted from https://github.com/PyTorchLightning/lightning-bolts/blob/master/pl_bolts/datamodules/cifar10_datamodule.py
class CIFAR10(CIFAR10DataModule):
default_image_size = (32, 32)
def __init__(self, data_dir=current_dir, sequential=False, grayscale=False,
data_augmentation=None, image_size=32, to_int=False, **kwargs):
super().__init__(data_dir, **kwargs)
self.data_augmentation = data_augmentation
self.grayscale = grayscale
self.sequential = sequential
self.to_int = to_int
self.image_size = to_2tuple(image_size)
logger = get_logger()
logger.info(f'Datamodule {self.__class__}: normalize={self.normalize}')
if to_int:
assert not self.normalize, 'to_int option is not compatible with normalize option'
        self._set_augmentation(data_augmentation)
self.dims = self._calculate_dimensions()
if to_int and grayscale:
self.vocab_size = 256
def default_transforms(self):
transform_list = [] if not self.grayscale else [transforms.Grayscale()]
transform_list.append(transforms.ToTensor())
if self.normalize:
transform_list.append(self.normalize_fn())
if self.to_int:
transform_list.append(transforms.Lambda(lambda x: (x * 255).long()))
if self.sequential:
# If grayscale and to_int, it makes more sense to get rid of the channel dimension
transform_list.append(Rearrange('1 h w -> (h w)') if self.grayscale and self.to_int
else Rearrange('c h w -> (h w) c'))
return transforms.Compose(transform_list)
def normalize_fn(self):
return cifar10_normalization() if not self.grayscale else cifar10_grayscale_normalization()
def _set_augmentation(self, data_augmentation=None):
assert data_augmentation in [None, 'standard', 'autoaugment']
augment_list = []
if self.image_size != self.default_image_size:
augment_list.append(transforms.Resize(self.image_size))
self.val_transforms = self.test_transforms = transforms.Compose(
augment_list + self.default_transforms().transforms
)
if data_augmentation is not None:
if data_augmentation == 'standard':
augment_list += [
transforms.RandomCrop(self.image_size, padding=4),
transforms.RandomHorizontalFlip(),
]
elif data_augmentation == 'autoaugment':
from src.utils.autoaug import CIFAR10Policy
augment_list += [CIFAR10Policy()]
# By default it only converts to Tensor and normalizes
self.train_transforms = transforms.Compose(augment_list
+ self.default_transforms().transforms)
def _calculate_dimensions(self):
nchannels = 3 if not self.grayscale else 1
if not self.sequential:
return (nchannels, self.image_size[0], self.image_size[1])
else:
length = self.image_size[0] * self.image_size[1]
return (length, nchannels) if not (self.grayscale and self.to_int) else (length,)
class CIFAR100(CIFAR10):
dataset_cls = datasets.CIFAR100
@property
def num_classes(self):
return 100
def normalize_fn(self):
return (cifar100_normalization() if not self.grayscale
else cifar100_grayscale_normalization())
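# Illustrative usage (a sketch; the data directory is hypothetical):
# dm = CIFAR10(data_dir='./data', grayscale=True, to_int=True, sequential=True)
# dm.prepare_data(); dm.setup()
# assert dm.dims == (32 * 32,)  # grayscale + to_int + sequential -> (length,)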
|
109947
|
import enolib
def test_querying_a_missing_field_on_the_document_when_all_elements_are_required_raises_the_expected_validationerror():
error = None
input = ("")
try:
document = enolib.parse(input)
document.all_elements_required()
document.field('field')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("The field 'field' is missing - in case it has been specified look for typos and also check for correct capitalization.")
assert error.text == text
def test_querying_a_missing_fieldset_on_the_document_when_all_elements_are_required_raises_the_expected_validationerror():
error = None
input = ("")
try:
document = enolib.parse(input)
document.all_elements_required()
document.fieldset('fieldset')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("The fieldset 'fieldset' is missing - in case it has been specified look for typos and also check for correct capitalization.")
assert error.text == text
def test_querying_a_missing_list_on_the_document_when_all_elements_are_required_raises_the_expected_validationerror():
error = None
input = ("")
try:
document = enolib.parse(input)
document.all_elements_required()
document.list('list')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("The list 'list' is missing - in case it has been specified look for typos and also check for correct capitalization.")
assert error.text == text
def test_querying_a_missing_section_on_the_document_when_all_elements_are_required_raises_the_expected_validationerror():
error = None
input = ("")
try:
document = enolib.parse(input)
document.all_elements_required()
document.section('section')
except enolib.ValidationError as _error:
if isinstance(_error, enolib.ValidationError):
error = _error
else:
raise _error
assert type(error) is enolib.ValidationError
text = ("The section 'section' is missing - in case it has been specified look for typos and also check for correct capitalization.")
assert error.text == text
def test_querying_a_missing_field_on_the_document_when_requiring_all_elements_is_explicitly_disabled_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required(False)
document.field('field')
assert bool('it passes') is True
def test_querying_a_missing_field_on_the_document_when_requiring_all_elements_is_enabled_and_disabled_again_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required(True)
document.all_elements_required(False)
document.field('field')
assert bool('it passes') is True
def test_querying_a_missing_but_explicitly_optional_element_on_the_document_when_requiring_all_elements_is_enabled_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required()
document.optional_element('element')
assert bool('it passes') is True
def test_querying_a_missing_but_explicitly_optional_empty_on_the_document_when_requiring_all_elements_is_enabled_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required()
document.optional_empty('empty')
assert bool('it passes') is True
def test_querying_a_missing_but_explicitly_optional_field_on_the_document_when_requiring_all_elements_is_enabled_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required()
document.optional_field('field')
assert bool('it passes') is True
def test_querying_a_missing_but_explicitly_optional_fieldset_on_the_document_when_requiring_all_elements_is_enabled_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required()
document.optional_fieldset('fieldset')
assert bool('it passes') is True
def test_querying_a_missing_but_explicitly_optional_list_on_the_document_when_requiring_all_elements_is_enabled_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required()
document.optional_list('list')
assert bool('it passes') is True
def test_querying_a_missing_but_explicitly_optional_section_on_the_document_when_requiring_all_elements_is_enabled_produces_the_expected_result():
input = ("")
document = enolib.parse(input)
document.all_elements_required()
document.optional_section('section')
assert bool('it passes') is True
|
109958
|
import torch
import torch.nn as nn
import torchvision
import torch.backends.cudnn as cudnn
import torch.optim
import os
import sys
import argparse
import time
import DCE.dce_model
import numpy as np
from torchvision import transforms
from PIL import Image
import glob
from tqdm import tqdm
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def lowlight(DCE_net, image_path):
scale_factor = 12
data_lowlight = Image.open(image_path)
data_lowlight = (np.asarray(data_lowlight)/255.0)
data_lowlight = torch.from_numpy(data_lowlight).float()
h=(data_lowlight.shape[0]//scale_factor)*scale_factor
w=(data_lowlight.shape[1]//scale_factor)*scale_factor
data_lowlight = data_lowlight[0:h,0:w,:]
data_lowlight = data_lowlight.permute(2,0,1)
data_lowlight = data_lowlight.cuda().unsqueeze(0)
enhanced_image,params_maps = DCE_net(data_lowlight)
    image_path = image_path.replace('train_clip', 'train_clip_enhanced')
    result_path = image_path
    # create the output directory (derived from the image path) if needed
    out_dir = os.path.dirname(result_path)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
torchvision.utils.save_image(enhanced_image, result_path)
if __name__ == '__main__':
with torch.no_grad():
filePath = '/Your/PATH/NAT2021-train/train_clip/' # the path of original imgs
file_list = os.listdir(filePath)
file_list.sort()
scale_factor = 12
DCE_net = DCE.dce_model.enhance_net_nopool(scale_factor).cuda()
DCE_net.eval()
DCE_net.load_state_dict(torch.load('DCE/Epoch99.pth'))
for file_name in tqdm(file_list):
test_list = glob.glob(filePath+file_name+"/*")
for image in test_list:
if not os.path.exists(image.replace('train_clip','train_clip_enhanced')):
lowlight(DCE_net, image)
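# Expected directory layout (inferred from the path handling above):
#   <filePath>/<sequence>/<frame>  ->  the same path with 'train_clip'
#   replaced by 'train_clip_enhanced'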
|
109981
|
import argparse
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
import structuring
def plot_frame(data_path, frame_index):
"""Show landmarks of a frame into a graph.
Arguments:
csv {str} -- path to the .csv file
frame_index {int} -- index of the frame
"""
frame_content = structuring.get_row(data_path, frame_index)
x, y = frame_content.T
plt.scatter(x, y)
plt.show()
def plot_correlation_matrix(data_path):
"""Show correlation matrix between variables.
Arguments:
data {str} -- path to the .csv file
"""
rcParams['figure.figsize'] = 15, 20
fig = plt.figure()
data = pd.read_csv(data_path)
sns.heatmap(data.corr(), annot=True, fmt=".2f")
fig.savefig('correlation.png')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plot frame into a graph.")
parser.add_argument("-i", "--input_dataset_path",
help="csv file path.",
required=True)
parser.add_argument("-f", "--frame",
help="Frame index (same as row index).",
type=int)
arguments = parser.parse_args()
input_data_path = arguments.input_dataset_path
frame = arguments.frame
if frame is not None:
plot_frame(input_data_path, frame)
else:
plot_correlation_matrix(input_data_path)
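# Example invocations (illustrative; the script name is hypothetical):
#   python plot.py -i landmarks.csv -f 10   # scatter-plot the landmarks of frame 10
#   python plot.py -i landmarks.csv         # write the correlation heatmap to correlation.png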
|
109989
|
import logging
from flask import Blueprint
import ckan.plugins.toolkit as tk
from ckanext.hdx_users.controller_logic.dashboard_dataset_logic import DashboardDatasetLogic
log = logging.getLogger(__name__)
render = tk.render
get_action = tk.get_action
request = tk.request
g = tk.g
h = tk.h
_ = tk._
hdx_user_dashboard = Blueprint(u'hdx_user_dashboard', __name__, url_prefix=u'/dashboard')
def datasets():
"""
Dashboard tab for datasets. Modified to add the ability to change
the order and ultimately filter datasets displayed
"""
if not g.user:
h.flash_error(_(u'Not authorized to see this page'))
return h.redirect_to(u'home.index')
dashboard_dataset_logic = DashboardDatasetLogic(g.userobj).read()
if dashboard_dataset_logic.redirect_result:
return dashboard_dataset_logic.redirect_result
else:
return render('user/dashboard_datasets.html', extra_vars={
'user_dict': dashboard_dataset_logic.user_dict,
'search_data': dashboard_dataset_logic.search_data
})
hdx_user_dashboard.add_url_rule(u'/datasets', view_func=datasets)
|
110000
|
import random
import urllib
from flaskwallet import app
from flaskwallet import session
from settingsapp.helpers import get_setting
def real_format(account):
if account == '__DEFAULT_ACCOUNT__':
account = ''
return account
def human_format(account):
if account == '':
account = '__DEFAULT_ACCOUNT__'
return account
def get_accounts(conn, getbalance=False, getchoice=False):
"""
Returns the list of accounts for a wallet
"""
    ret = []
    if getbalance:
        ret = conn.listaccounts(as_dict=True)
    elif getchoice:
        accounts = conn.listaccounts()
        for account in accounts:
            account = human_format(account)
            ret.append((urllib.quote_plus(str(account)), account))
    return ret
def strtofloat(value):
"""
This is used to convert form inputs into floats.
Heh.. why exactly is this a function?
"""
return float(value)
def get_cachetime():
"""
TODO: configurable
"""
cachetime_min = int(get_setting('cachetime_min', 5))
cachetime_max = int(get_setting('cachetime_max', 15))
return random.randint(cachetime_min, cachetime_max)
def get_coin_choices():
ret = []
for key, data in app.config['COINS'].iteritems():
ret.append((key, '%s (%s)' % (key, data['name'])))
ret = sorted(ret, key=lambda tup: tup[0])
return ret
|
110052
|
from __future__ import print_function, unicode_literals
import sys
from workflow import Workflow, web, ICON_ERROR, ICON_SETTINGS
from utils import parse_args, is_match
from settings import UPDATE_SETTINGS, HELP_URL, LEETCODE_URL, LC_TOPICS
class SearchResult(object):
def __init__(self, title, subtitle, url):
self.title_ = title
self.subtitle_ = subtitle
self.url_ = url
def search_topic(args):
results = []
query = args["query"]
difficulty_api = "difficulty=" + args["difficulty"] if args["difficulty"] else ""
difficulty = "[%s]" % args["difficulty"] if args["difficulty"] else ""
for k, v in LC_TOPICS.items():
if is_match(query, k):
title = "%s %s" % (v[0], difficulty)
subtitle = "Search LeetCode Topic '%s' %s" % (v[0], difficulty)
url = "{0}?topicSlugs={1}&{2}".format(LEETCODE_URL, v[1], difficulty_api)
results.append( SearchResult(title, subtitle, url) )
if not results:
title = query
subtitle = "Search LeetCode for '%s'" % query
url = "{0}?search={1}".format(LEETCODE_URL, query)
results.append( SearchResult(title, subtitle, url) )
return results
def search_prob(args):
query = args["query"] if args["query"] else "..."
difficulty_api = "difficulty=" + args["difficulty"] if args["difficulty"] else ""
difficulty = "[%s]" % args["difficulty"] if args["difficulty"] else ""
    title = "%s %s" % (query, difficulty)
subtitle = "Search LeetCode for '%s' %s" % (query, difficulty)
url = "{0}?search={1}&{2}".format(LEETCODE_URL, query, difficulty_api)
result = SearchResult(title, subtitle, url)
return [result]
def main(wf):
args = parse_args(wf.args)
if args["mode"] == "topic":
results = search_topic(args)
elif args["mode"] == "problem":
results = search_prob(args)
for res in results:
wf.add_item(
title=res.title_,
subtitle=res.subtitle_,
valid=True,
uid=res.url_,
arg=res.url_
)
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow(update_settings=UPDATE_SETTINGS, help_url=HELP_URL)
sys.exit( wf.run(main) )
|
110055
|
import math
import random
from typing import Tuple
from .. import base
class Friedman(base.SyntheticDataset):
"""Friedman synthetic dataset.
Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1].
The target is defined by the following function:
$$y = 10 sin(\\pi x_0 x_1) + 20 (x_2 - 0.5)^2 + 10 x_3 + 5 x_4 + \\epsilon$$
In the last expression, $\\epsilon \\sim \\mathcal{N}(0, 1)$, is the noise. Therefore,
only the first 5 features are relevant.
Parameters
----------
seed
Random seed number used for reproducibility.
Examples
--------
>>> from river import synth
>>> dataset = synth.Friedman(seed=42)
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90
References
----------
    [^1]: [Friedman, J. H., 1991. Multivariate adaptive regression splines. The annals of statistics, pp.1-67.](https://projecteuclid.org/euclid.aos/1176347963)
"""
def __init__(self, seed: int = None):
super().__init__(task=base.REG, n_features=10)
self.seed = seed
def __iter__(self):
rng = random.Random(self.seed)
while True:
x = {i: rng.uniform(a=0, b=1) for i in range(10)}
y = (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
+ rng.gauss(mu=0, sigma=1)
)
yield x, y
class FriedmanDrift(Friedman):
"""Friedman synthetic dataset with concept drifts.
Each observation is composed of 10 features. Each feature value is sampled uniformly in [0, 1].
Only the first 5 features are relevant. The target is defined by different functions depending
on the type of the drift.
The three available modes of operation of the data generator are described in [^1].
Parameters
----------
drift_type
The variant of concept drift.</br>
- `'lea'`: Local Expanding Abrupt drift. The concept drift appears in two distinct
regions of the instance space, while the remaining regions are left unaltered.
There are three points of abrupt change in the training dataset.
At every consecutive change the regions of drift are expanded.</br>
- `'gra'`: Global Recurring Abrupt drift. The concept drift appears over the whole
instance space. There are two points of concept drift. At the second point of drift
the old concept reoccurs.</br>
- `'gsg'`: Global and Slow Gradual drift. The concept drift affects all the instance
space. However, the change is gradual and not abrupt. After each one of the two change
points covered by this variant, and during a window of length `transition_window`,
examples from both old and the new concepts are generated with equal probability.
After the transition period, only the examples from the new concept are generated.
position
The amount of monitored instances after which each concept drift occurs. A tuple with
        at least two elements must be passed, where each number is greater than the preceding one.
If `drift_type='lea'`, then the tuple must have three elements.
transition_window
The length of the transition window between two concepts. Only applicable when
`drift_type='gsg'`. If set to zero, the drifts will be abrupt. Anytime
`transition_window > 0`, it defines a window in which instances of the new
concept are gradually introduced among the examples from the old concept.
During this transition phase, both old and new concepts appear with equal probability.
seed
Random seed number used for reproducibility.
Examples
--------
>>> from river import synth
>>> dataset = synth.FriedmanDrift(
... drift_type='lea',
... position=(1, 2, 3),
... seed=42
... )
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 7.04
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] -2.65
>>> dataset = synth.FriedmanDrift(
... drift_type='gra',
... position=(2, 3),
... seed=42
... )
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.96
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 18.16
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 8.90
>>> dataset = synth.FriedmanDrift(
... drift_type='gsg',
... position=(1, 4),
... transition_window=2,
... seed=42
... )
>>> for x, y in dataset.take(5):
... print(list(x.values()), y)
[0.63, 0.02, 0.27, 0.22, 0.73, 0.67, 0.89, 0.08, 0.42, 0.02] 7.66
[0.02, 0.19, 0.64, 0.54, 0.22, 0.58, 0.80, 0.00, 0.80, 0.69] 8.33
[0.34, 0.15, 0.95, 0.33, 0.09, 0.09, 0.84, 0.60, 0.80, 0.72] 8.92
[0.37, 0.55, 0.82, 0.61, 0.86, 0.57, 0.70, 0.04, 0.22, 0.28] 17.32
[0.07, 0.23, 0.10, 0.27, 0.63, 0.36, 0.37, 0.20, 0.26, 0.93] 6.05
References
----------
    [^1]: Ikonomovska, E., Gama, J. and Džeroski, S., 2011. Learning model trees from evolving
        data streams. Data mining and knowledge discovery, 23(1), pp.128-168.
"""
_LOCAL_EXPANDING_ABRUPT = "lea"
_GLOBAL_RECURRING_ABRUPT = "gra"
_GLOBAL_AND_SLOW_GRADUAL = "gsg"
_VALID_DRIFT_TYPES = [
_LOCAL_EXPANDING_ABRUPT,
_GLOBAL_RECURRING_ABRUPT,
_GLOBAL_AND_SLOW_GRADUAL,
]
def __init__(
self,
drift_type: str = "lea",
position: Tuple[int, ...] = (50_000, 100_000, 150_000),
transition_window: int = 10_000,
seed: int = None,
):
super().__init__(seed=seed)
if drift_type not in self._VALID_DRIFT_TYPES:
raise ValueError(
f'Invalid "drift_type: {drift_type}"\n'
f"Valid options are: {self._VALID_DRIFT_TYPES}"
)
self.drift_type = drift_type
if self.drift_type == self._LOCAL_EXPANDING_ABRUPT and len(position) < 3:
raise ValueError(
"Insufficient number of concept drift locations passed.\n"
'Three concept drift points should be passed when drift_type=="lea"'
)
elif self.drift_type != self._LOCAL_EXPANDING_ABRUPT and len(position) < 2:
raise ValueError(
"Insufficient number of concept drift locations passed.\n"
"Two locations must be defined."
)
elif len(position) > 3:
raise ValueError(
"Too many concept drift locations passed. Check the documentation"
"for details on the usage of this class."
)
self.position = position
if self.drift_type == self._LOCAL_EXPANDING_ABRUPT:
(
self._change_point1,
self._change_point2,
self._change_point3,
) = self.position
else:
self._change_point1, self._change_point2 = self.position
self._change_point3 = math.inf
if not self._change_point1 < self._change_point2 < self._change_point3:
raise ValueError(
"The concept drift locations must be defined in an increasing order."
)
if (
transition_window > self._change_point2 - self._change_point1
or transition_window > self._change_point3 - self._change_point2
) and self.drift_type == self._GLOBAL_AND_SLOW_GRADUAL:
raise ValueError(
f'The chosen "transition_window" value is too big: {transition_window}'
)
self.transition_window = transition_window
if self.drift_type == self._LOCAL_EXPANDING_ABRUPT:
self._y_maker = self._local_expanding_abrupt_gen
elif self.drift_type == self._GLOBAL_RECURRING_ABRUPT:
self._y_maker = self._global_recurring_abrupt_gen
else: # Global and slow gradual drifts
self._y_maker = self._global_and_slow_gradual_gen
def __lea_in_r1(self, x, index):
if index < self._change_point1:
return False
elif self._change_point1 <= index < self._change_point2:
return x[1] < 0.3 and x[2] < 0.3 and x[3] > 0.7 and x[4] < 0.3
elif self._change_point2 <= index < self._change_point3:
return x[1] < 0.3 and x[2] < 0.3 and x[3] > 0.7
else:
return x[1] < 0.3 and x[2] < 0.3
def __lea_in_r2(self, x, index):
if index < self._change_point1:
return False
elif self._change_point1 <= index < self._change_point2:
return x[1] > 0.7 and x[2] > 0.7 and x[3] < 0.3 and x[4] > 0.7
elif self._change_point2 <= index < self._change_point3:
return x[1] > 0.7 and x[2] > 0.7 and x[3] < 0.3
else:
return x[1] > 0.7 and x[2] > 0.7
def _local_expanding_abrupt_gen(
self, x, index: int, rc: random.Random = None
): # noqa
if self.__lea_in_r1(x, index):
return 10 * x[0] * x[1] + 20 * (x[2] - 0.5) + 10 * x[3] + 5 * x[4]
if self.__lea_in_r2(x, index):
return (
10 * math.cos(x[0] * x[1])
+ 20 * (x[2] - 0.5)
+ math.exp(x[3])
+ 5 * x[4] ** 2
)
# default case
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
def _global_recurring_abrupt_gen(
self, x, index: int, rc: random.Random = None
): # noqa
if index < self._change_point1 or index >= self._change_point2:
# The initial concept is recurring
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
else:
# Drift: the positions of the features are swapped
return (
10 * math.sin(math.pi * x[3] * x[5])
+ 20 * (x[1] - 0.5) ** 2
+ 10 * x[0]
+ 5 * x[2]
)
def _global_and_slow_gradual_gen(self, x, index: int, rc: random.Random):
if index < self._change_point1:
# default function
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
elif self._change_point1 <= index < self._change_point2:
if index < self._change_point1 + self.transition_window and bool(
rc.getrandbits(1)
):
# default function
return (
10 * math.sin(math.pi * x[0] * x[1])
+ 20 * (x[2] - 0.5) ** 2
+ 10 * x[3]
+ 5 * x[4]
)
else: # First new function
return (
10 * math.sin(math.pi * x[3] * x[4])
+ 20 * (x[1] - 0.5) ** 2
+ 10 * x[0]
+ 5 * x[2]
)
elif index >= self._change_point2:
if index < self._change_point2 + self.transition_window and bool(
rc.getrandbits(1)
):
# First new function
return (
10 * math.sin(math.pi * x[3] * x[4])
+ 20 * (x[1] - 0.5) ** 2
+ 10 * x[0]
+ 5 * x[2]
)
else: # Second new function
return (
10 * math.sin(math.pi * x[1] * x[4])
+ 20 * (x[3] - 0.5) ** 2
+ 10 * x[2]
+ 5 * x[0]
)
def __iter__(self):
rng = random.Random(self.seed)
# To produce True or False with equal probability. Only used in gradual drifts
if self.drift_type == self._GLOBAL_AND_SLOW_GRADUAL:
rc = random.Random(self.seed)
else:
rc = None
i = 0
while True:
x = {i: rng.uniform(a=0, b=1) for i in range(10)}
y = self._y_maker(x, i, rc) + rng.gauss(mu=0, sigma=1)
yield x, y
i += 1
|
110069
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class backWarp(nn.Module):
"""
A class for creating a backwarping object.
This is used for backwarping to an image:
Given optical flow from frame I0 to I1 --> F_0_1 and frame I1,
it generates I0 <-- backwarp(F_0_1, I1).
...
Methods
-------
forward(x)
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
"""
def __init__(self, H, W):
"""
Parameters
----------
W : int
width of the image.
H : int
height of the image.
        """
super(backWarp, self).__init__()
# create a grid
gridX, gridY = np.meshgrid(np.arange(W), np.arange(H))
self.W = W
self.H = H
self.gridX = torch.nn.Parameter(torch.tensor(gridX), requires_grad=False)
self.gridY = torch.nn.Parameter(torch.tensor(gridY), requires_grad=False)
def forward(self, img, flow):
"""
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
I0 = backwarp(I1, F_0_1)
Parameters
----------
img : tensor
frame I1.
flow : tensor
optical flow from I0 and I1: F_0_1.
Returns
-------
tensor
frame I0.
"""
# Extract horizontal and vertical flows.
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
x = self.gridX.unsqueeze(0).expand_as(u).float() + u
y = self.gridY.unsqueeze(0).expand_as(v).float() + v
# range -1 to 1
x = 2*(x/self.W - 0.5)
y = 2*(y/self.H - 0.5)
# stacking X and Y
grid = torch.stack((x,y), dim=3)
        # Sample pixels using bilinear interpolation. align_corners=True preserves
        # the pre-1.3 PyTorch default behavior (assumed to be the intended one).
        imgOut = torch.nn.functional.grid_sample(img, grid, padding_mode='border',
                                                 align_corners=True)
        return imgOut
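# A minimal usage sketch for backWarp (shapes are assumptions):
#   warp = backWarp(H=256, W=256)
#   I0_hat = warp(I1, F_0_1)  # I1: (N, 3, 256, 256), F_0_1: (N, 2, 256, 256)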
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
class Coeff(nn.Module):
def __init__(self):
super(Coeff, self).__init__()
self.t = torch.nn.Parameter(torch.FloatTensor(np.linspace(0.125, 0.875, 7)), requires_grad=False)
def getFlowCoeff (self, indices):
"""
Gets flow coefficients used for calculating intermediate optical
flows from optical flows between I0 and I1: F_0_1 and F_1_0.
F_t_0 = C00 x F_0_1 + C01 x F_1_0
F_t_1 = C10 x F_0_1 + C11 x F_1_0
where,
C00 = -(1 - t) x t
C01 = t x t
C10 = (1 - t) x (1 - t)
C11 = -t x (1 - t)
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
Returns
-------
tensor
coefficients C00, C01, C10, C11.
"""
        # Detach so the coefficient lookup is excluded from the autograd graph
ind = indices.detach()
C11 = C00 = - (1 - (self.t[ind])) * (self.t[ind])
C01 = (self.t[ind]) * (self.t[ind])
C10 = (1 - (self.t[ind])) * (1 - (self.t[ind]))
return C00[None, None, None, :].permute(3, 0, 1, 2), C01[None, None, None, :].permute(3, 0, 1, 2), C10[None, None, None, :].permute(3, 0, 1, 2), C11[None, None, None, :].permute(3, 0, 1, 2)
def getWarpCoeff (self, indices):
"""
Gets coefficients used for calculating final intermediate
frame `It_gen` from backwarped images using flows F_t_0 and F_t_1.
It_gen = (C0 x V_t_0 x g_I_0_F_t_0 + C1 x V_t_1 x g_I_1_F_t_1) / (C0 x V_t_0 + C1 x V_t_1)
where,
C0 = 1 - t
C1 = t
V_t_0, V_t_1 --> visibility maps
g_I_0_F_t_0, g_I_1_F_t_1 --> backwarped intermediate frames
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
Returns
-------
tensor
coefficients C0 and C1.
"""
        # Detach so the coefficient lookup is excluded from the autograd graph
ind = indices.detach()
C0 = 1 - self.t[ind]
C1 = self.t[ind]
return C0[None, None, None, :].permute(3, 0, 1, 2), C1[None, None, None, :].permute(3, 0, 1, 2)
def set_t(self, factor):
ti = 1 / factor
self.t = torch.nn.Parameter(torch.FloatTensor(np.linspace(ti, 1 - ti, factor - 1)), requires_grad=False)
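# Illustrative: Coeff().set_t(4) gives t = [0.25, 0.5, 0.75], i.e. the three
# intermediate frames needed for 4x interpolation.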
|
110084
|
import numpy as np
np.random.seed(1337)
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
model = Sequential()
model.add(Dense(units=50, input_dim=1, activation='relu'))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
model.add(Dense(units=1, activation='linear'))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
import csv
with open('data/france_history.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
rows = [row for row in reader]
fr_corn_y = []
for each_y in rows:
fr_corn_y.append(int(each_y[0]))
dates = len(fr_corn_y)
fr_corn_x = list(range(1, dates + 1))
fr_corn_x = np.array(fr_corn_x)
fr_corn_y = np.array(fr_corn_y)
fr_dates_length = len(fr_corn_x)
fr_absorb = fr_corn_y[fr_dates_length-1]
corn_y_norm = fr_corn_y / fr_absorb
model.fit(fr_corn_x, corn_y_norm, epochs=10000, shuffle=False)
corn_y_predict = model.predict(fr_corn_x)
corn_y_predict = corn_y_predict * fr_absorb
fig_france = plt.figure(figsize=(7, 5))
plt.scatter(fr_corn_x, fr_corn_y, label='Real Confirmed')
plt.plot(fr_corn_x, corn_y_predict, label='Predict Result')
plt.title('France Confirmed VS Dates')
plt.xlabel('Dates')
plt.ylabel('Amount')
plt.legend()
plt.show()
|
110085
|
from typing import Dict, Tuple
import numpy as np
from . import functional as F
class Padding:
"""Applies padding to image and target boxes"""
def __init__(self, target_size: Tuple[int, int] = (640, 640), pad_value: int = 0):
super(Padding, self).__init__()
self.pad_value = pad_value
self.target_size = target_size # w,h
def __call__(self, img: np.ndarray, targets: Dict = None) -> Tuple[np.ndarray, Dict]:
# TODO check image shape
targets = dict() if targets is None else targets
return F.pad(
img, target_size=self.target_size, pad_value=self.pad_value, targets=targets
)
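# Illustrative usage (assumes F.pad pads the image to target_size and shifts any
# boxes in `targets` accordingly):
# pad = Padding(target_size=(640, 640), pad_value=114)
# padded_img, padded_targets = pad(img, {"boxes": boxes})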
|
110121
|
from time import time
from app import db
from util import elapsed
from util import safe_commit
import argparse
from models.person import make_person
from models.orcid import clean_orcid
from models.orcid import NoOrcidException
# needs to be imported so the definitions get loaded into the registry
import jobs_defs
"""
Call from command line to add ORCID profiles based on IDs in a local CSV.
"""
def create_person(dirty_orcid, campaign=None, store_in_db=False):
try:
orcid_id = clean_orcid(dirty_orcid)
except NoOrcidException:
print u"\n\nWARNING: no valid orcid_id in {}; skipping\n\n".format(dirty_orcid)
raise
if store_in_db:
print u"storing in db"
my_person = make_person(orcid_id, store_in_db=True)
if campaign:
my_person.campaign = campaign
db.session.add(my_person)
success = safe_commit(db)
if not success:
print u"ERROR! committing {}".format(my_person.orcid_id)
else:
print u"NOT storing in db"
my_person = make_person(orcid_id, store_in_db=False)
print my_person
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
# just for updating lots
parser.add_argument('orcid_id', type=str, help="ORCID ID to build")
parser.add_argument('--campaign', type=str, help="name of campaign")
    parser.add_argument('--store', action='store_true', help="store in the database?")
parsed = parser.parse_args()
start = time()
create_person(parsed.orcid_id, parsed.campaign, parsed.store)
db.session.remove()
print "finished update in {}sec".format(elapsed(start))
|
110142
|
import random
import math
import torch
from PIL import Image, ImageOps, ImageEnhance, ImageDraw
from torchvision.transforms import functional as F
import transforms
from transforms import check_prob, PIL_INTER_MAP, RandomTransform
def rescale_float(level, max_val, param_max=10):
return float(level) * max_val / param_max
def rescale_int(level, max_val, param_max=10):
return int(level * max_val / param_max)
def random_mirror(mirror, val):
if mirror and check_prob(0.5):
val *= -1
return val
def apply_affine(img, translate, shear, resample, fillcolor):
trans_x, trans_y = translate
shear_x, shear_y = shear
return img.transform(
img.size,
Image.AFFINE,
(1, shear_x, trans_x, shear_y, 1, trans_y),
resample,
fillcolor=fillcolor,
)
class AutoAugmentAffine(RandomTransform):
def __init__(self, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0):
super().__init__(p)
self.mirror = mirror
self.resample = resample
self.fillcolor = fillcolor
def _mirror(self, val):
if self.mirror and check_prob(0.5):
val *= -1
return val
def _repr_params(self):
params = dict(self.__dict__)
params["resample"] = PIL_INTER_MAP[self.resample]
return params
def _apply_img_fn(self, img, translate, shear):
trans_x, trans_y = translate
shear_x, shear_y = shear
return img.transform(
img.size,
Image.AFFINE,
(1, shear_x, trans_x, shear_y, 1, trans_y),
self.resample,
fillcolor=self.fillcolor,
)
def shear_x(img, shear_x, mirror=True, resample=Image.NEAREST, fillcolor=None):
shear_x = random_mirror(mirror, shear_x)
return apply_affine(img, (0, 0), (shear_x, 0), resample, fillcolor)
def shear_y(img, shear_y, mirror=True, resample=Image.NEAREST, fillcolor=None):
shear_y = random_mirror(mirror, shear_y)
return apply_affine(img, (0, 0), (0, shear_y), resample, fillcolor)
def translate_x(img, translate_x, mirror=True, resample=Image.NEAREST, fillcolor=None):
translate_x = random_mirror(mirror, translate_x)
return apply_affine(img, (translate_x, 0), (0, 0), resample, fillcolor)
def translate_y(img, translate_y, mirror=True, resample=Image.NEAREST, fillcolor=None):
translate_y = random_mirror(mirror, translate_y)
return apply_affine(img, (0, translate_y), (0, 0), resample, fillcolor)
def rotate(img, rotate, mirror=True, resample=Image.NEAREST, fillcolor=None):
rotate = random_mirror(mirror, rotate)
return img.rotate(rotate, resample=resample, fillcolor=fillcolor)
def posterize(img, bits):
return ImageOps.posterize(img, bits)
def cutout(img, size, fillcolor=None):
if isinstance(img, torch.Tensor):
        # Tensor input isn't implemented; raise instead of silently returning None.
        raise NotImplementedError("cutout currently supports only PIL images")
else:
x = random.random()
y = random.random()
w, h = img.size
c_x = int(x * w)
c_y = int(y * h)
x0 = max(0, c_x - size)
x1 = w - max(0, w - c_x - size) - 1
y0 = max(0, c_y - size)
y1 = h - max(0, h - c_y - size) - 1
xy = (x0, y0, x1, y1)
img = img.copy()
ImageDraw.Draw(img).rectangle(xy, fillcolor)
return img
def solarize(img, threshold):
return ImageOps.solarize(img, threshold)
def solarize_add(img, add, threshold=128):
if isinstance(img, torch.Tensor):
mask = img < threshold
solarized = img.clamp(max=255 - add) + add
result = mask * solarized + ~mask * img
return result
else:
lut = []
for i in range(256):
if i < threshold:
lut.append(min(255, i + add))
else:
lut.append(i)
if img.mode in ("L", "RGB"):
if img.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return img.point(lut)
else:
return img
def saturation(img, saturate):
return ImageEnhance.Color(img).enhance(saturate)
def contrast(img, contrast):
return ImageEnhance.Contrast(img).enhance(contrast)
def brightness(img, brightness):
return ImageEnhance.Brightness(img).enhance(brightness)
def sharpness(img, sharpness):
return ImageEnhance.Sharpness(img).enhance(sharpness)
def invert(img):
return ImageOps.invert(img)
def auto_contrast(img):
return ImageOps.autocontrast(img)
def equalize(img):
return ImageOps.equalize(img)
class ShearX(AutoAugmentAffine):
def __init__(
self, shear_x, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.shear_x = shear_x
def sample(self):
shear_x = self._mirror(self.shear_x)
return {"shear_x": shear_x}
def _apply_img(self, img, shear_x):
return self._apply_img_fn(img, (0, 0), (shear_x, 0))
class ShearY(AutoAugmentAffine):
def __init__(
self, shear_y, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.shear_y = shear_y
def sample(self):
shear_y = self._mirror(self.shear_y)
return {"shear_y": shear_y}
def _apply_img(self, img, shear_y):
return self._apply_img_fn(img, (0, 0), (0, shear_y))
class TranslateX(AutoAugmentAffine):
def __init__(
self, translate_x, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.translate_x = translate_x
def sample(self):
trans_x = self._mirror(self.translate_x)
return {"translate_x": trans_x}
def _apply_img(self, img, translate_x):
return self._apply_img_fn(img, (translate_x, 0), (0, 0))
class TranslateY(AutoAugmentAffine):
def __init__(
self, translate_y, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.translate_y = translate_y
def sample(self):
trans_y = self._mirror(self.translate_y)
return {"translate_y": trans_y}
def _apply_img(self, img, translate_y):
return self._apply_img_fn(img, (0, translate_y), (0, 0))
class Rotate(AutoAugmentAffine):
def __init__(
self, rotate, mirror=True, resample=Image.NEAREST, fillcolor=None, p=1.0
):
super().__init__(mirror=mirror, resample=resample, fillcolor=fillcolor, p=p)
self.rotate = rotate
def sample(self):
rotate = self._mirror(self.rotate)
return {"rotate": rotate}
def _apply_img(self, img, rotate):
return img.rotate(rotate, resample=self.resample, fillcolor=self.fillcolor)
class Posterize(RandomTransform):
def __init__(self, bits, p=1.0):
super().__init__(p)
self.bits = bits
def sample(self):
return {"bits": self.bits}
def _apply_img(self, img, bits):
return ImageOps.posterize(img, bits)
class Cutout(RandomTransform):
def __init__(self, size, fillcolor=(0, 0, 0), p=1.0):
super().__init__(p)
self.size = size
self.fillcolor = fillcolor
def sample(self):
x = random.random()
y = random.random()
return {"center": (x, y)}
def _apply_img(self, img, center):
w, h = img.size
c_x = int(center[0] * w)
c_y = int(center[1] * h)
x0 = max(0, c_x - self.size)
x1 = w - max(0, w - c_x - self.size) - 1
y0 = max(0, c_y - self.size)
y1 = h - max(0, h - c_y - self.size) - 1
xy = (x0, y0, x1, y1)
img = img.copy()
ImageDraw.Draw(img).rectangle(xy, self.fillcolor)
return img
class Solarize(RandomTransform):
def __init__(self, threshold, p=1.0):
super().__init__(p)
self.threshold = threshold
def sample(self):
return {"threshold": self.threshold}
def _apply_img(self, img, threshold):
return ImageOps.solarize(img, threshold)
class SolarizeAdd(RandomTransform):
def __init__(self, add, threshold=128, p=1.0):
super().__init__(p)
self.add = add
self.threshold = threshold
def sample(self):
return {"add": self.add, "threshold": self.threshold}
def _apply_img(self, img, add, threshold):
return solarize_add(img, add, threshold)
class Saturation(RandomTransform):
def __init__(self, saturation, p=1.0):
super().__init__(p)
self.saturation = saturation
def sample(self):
return {"saturation": self.saturation}
def _apply_img(self, img, saturation):
return ImageEnhance.Color(img).enhance(saturation)
class Contrast(RandomTransform):
def __init__(self, contrast, p=1.0):
super().__init__(p)
self.contrast = contrast
def sample(self):
return {"contrast": self.contrast}
def _apply_img(self, img, contrast):
return ImageEnhance.Contrast(img).enhance(contrast)
class Brightness(RandomTransform):
def __init__(self, brightness, p=1.0):
super().__init__(p)
self.brightness = brightness
def sample(self):
return {"brightness": self.brightness}
def _apply_img(self, img, brightness):
return ImageEnhance.Brightness(img).enhance(brightness)
class Sharpness(RandomTransform):
def __init__(self, sharpness, p=1.0):
super().__init__(p)
self.sharpness = sharpness
def sample(self):
return {"sharpness": self.sharpness}
def _apply_img(self, img, sharpness):
return ImageEnhance.Sharpness(img).enhance(sharpness)
def reparam_shear(level):
return rescale_float(level, 0.3)
def reparam_translate(level, max_translate):
return rescale_int(level, max_translate)
def reparam_rotate(level):
return rescale_int(level, 30)
def reparam_solarize(level):
return rescale_int(level, 256)
def reparam_solarize_increasing(level):
return 256 - rescale_int(level, 256)
def reparam_posterize(level):
return rescale_int(level, 4)
def reparam_posterize_increasing(level):
return 4 - rescale_int(level, 4)
def reparam_color(level):
return rescale_float(level, 1.8) + 0.1
def reparam_cutout(level, cutout):
return rescale_int(level, cutout)
def reparam_solarize_add(level):
return rescale_int(level, 110)
AUTOAUGMENT_MAP = {
"ShearX": (ShearX, shear_x, reparam_shear),
"ShearY": (ShearY, shear_y, reparam_shear),
"TranslateX": (TranslateX, translate_x, reparam_translate),
"TranslateY": (TranslateY, translate_y, reparam_translate),
"Rotate": (Rotate, rotate, reparam_rotate),
"Solarize": (Solarize, solarize, reparam_solarize),
"SolarizeIncreasing": (Solarize, solarize, reparam_solarize_increasing),
"Posterize": (Posterize, posterize, reparam_posterize),
"PosterizeIncreasing": (Posterize, posterize, reparam_posterize_increasing),
"Contrast": (Contrast, contrast, reparam_color),
"Color": (Saturation, saturation, reparam_color),
"Brightness": (Brightness, brightness, reparam_color),
"Sharpness": (Sharpness, sharpness, reparam_color),
"Invert": (transforms.Invert, invert, None),
"AutoContrast": (transforms.AutoContrast, auto_contrast, None),
"Equalize": (transforms.Equalize, equalize, None),
"Cutout": (Cutout, cutout, reparam_cutout),
"SolarizeAdd": (SolarizeAdd, solarize_add, reparam_solarize_add),
}
def autoaugment_policy():
policy_list = [
[("PosterizeIncreasing", 0.4, 8), ("Rotate", 0.6, 9)],
[("SolarizeIncreasing", 0.6, 5), ("AutoContrast", 0.6, 5)],
[("Equalize", 0.8, 8), ("Equalize", 0.6, 3)],
[("PosterizeIncreasing", 0.6, 7), ("PosterizeIncreasing", 0.6, 6)],
[("Equalize", 0.4, 7), ("SolarizeIncreasing", 0.2, 4)],
[("Equalize", 0.4, 4), ("Rotate", 0.8, 8)],
[("SolarizeIncreasing", 0.6, 3), ("Equalize", 0.6, 7)],
[("PosterizeIncreasing", 0.8, 5), ("Equalize", 1.0, 2)],
[("Rotate", 0.2, 3), ("SolarizeIncreasing", 0.6, 8)],
[("Equalize", 0.6, 8), ("PosterizeIncreasing", 0.4, 6)],
[("Rotate", 0.8, 8), ("Color", 0.4, 0)],
[("Rotate", 0.4, 9), ("Equalize", 0.6, 2)],
[("Equalize", 0.0, 7), ("Equalize", 0.8, 8)],
[("Invert", 0.6, 4), ("Equalize", 1.0, 8)],
[("Color", 0.6, 4), ("Contrast", 1.0, 8)],
[("Rotate", 0.8, 8), ("Color", 1.0, 0)],
[("Color", 0.8, 8), ("SolarizeIncreasing", 0.8, 7)],
[("Sharpness", 0.4, 7), ("Invert", 0.6, 8)],
[("ShearX", 0.6, 5), ("Equalize", 1.0, 9)],
[("Color", 0.4, 0), ("Equalize", 0.6, 3)],
[("Equalize", 0.4, 7), ("SolarizeIncreasing", 0.2, 4)],
[("SolarizeIncreasing", 0.6, 5), ("AutoContrast", 0.6, 5)],
[("Invert", 0.6, 4), ("Equalize", 1.0, 8)],
[("Color", 0.6, 4), ("Contrast", 1.0, 8)],
[("Equalize", 0.8, 8), ("Equalize", 0.6, 3)],
]
reparam_policy = []
for policy in policy_list:
sub_pol = []
for pol in policy:
augment, prob, magnitude = pol
augment_fn, _, reparam_fn = AUTOAUGMENT_MAP[augment]
if reparam_fn is not None:
magnitude = reparam_fn(magnitude)
sub_pol.append(augment_fn(magnitude, p=prob))
else:
sub_pol.append(augment_fn(p=prob))
reparam_policy.append(sub_pol)
return reparam_policy
class AutoAugment:
def __init__(self, policy):
self.policy = policy
def __call__(self, img):
selected_policy = random.choice(self.policy)
for pol in selected_policy:
sample = pol.sample()
img = pol.apply_img(img, **sample)
return img
def __repr__(self):
return f"{self.__class__.__name__}(\n{self.policy}\n)"
def check(self, img):
log = []
selected_policy = random.choice(self.policy)
for pol in selected_policy:
sample = pol.sample()
img, check = pol.apply_img_check(img, **sample)
log.append((pol, sample, check))
return img, log
class RandAugment:
def __init__(
self,
n_augment,
magnitude,
translate=100,
cutout=40,
fillcolor=(128, 128, 128),
increasing=False,
magnitude_std=0,
):
self.n_augment = n_augment
self.magnitude = magnitude
self.translate = translate
self.fillcolor = fillcolor
self.magnitude_std = magnitude_std
# fmt: off
if increasing:
augment_list = [
"AutoContrast", "Equalize", "Invert", "Rotate",
"PosterizeIncreasing", "SolarizeIncreasing",
"Color", "Contrast", "Brightness", "Sharpness", "ShearX",
"ShearY", "TranslateX", "TranslateY", "Cutout", "SolarizeAdd",
]
else:
augment_list = [
"AutoContrast", "Equalize", "Invert", "Rotate", "Posterize", "Solarize",
"Color", "Contrast", "Brightness", "Sharpness", "ShearX",
"ShearY", "TranslateX", "TranslateY", "Cutout", "SolarizeAdd",
]
# fmt: on
if cutout == 0:
augment_list.remove("Cutout")
        self.cutout = cutout
self.augment = []
for augment in augment_list:
_, augment_fn, reparam_fn = AUTOAUGMENT_MAP[augment]
reparam_fn_param = {}
augment_fn_param = {}
if reparam_fn is not None:
if augment in ("TranslateX", "TranslateY"):
reparam_fn_param = {"max_translate": translate}
elif augment == "Cutout":
reparam_fn_param = {"cutout": cutout}
if augment in (
"TranslateX",
"TranslateY",
"ShearX",
"ShearY",
"Rotate",
"Cutout",
):
augment_fn_param = {"fillcolor": fillcolor}
self.augment.append(
(augment_fn, reparam_fn, augment_fn_param, reparam_fn_param)
)
def __repr__(self):
return (
f"{self.__class__.__name__}(n_augment={self.n_augment}, magnitude={self.magnitude}, cutout={self.cutout},"
f" translate={self.translate}, fillcolor={self.fillcolor})"
)
def __call__(self, img):
augments = random.choices(self.augment, k=self.n_augment)
for augment, mag_fn, aug_param, reparam_param in augments:
if mag_fn is not None:
if self.magnitude_std > 0:
mag = random.normalvariate(self.magnitude, self.magnitude_std)
else:
mag = self.magnitude
mag = mag_fn(mag, **reparam_param)
img = augment(img, mag, **aug_param)
else:
img = augment(img, **aug_param)
return img
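# Illustrative usage (a sketch; PIL image input and the file name are assumptions):
# from PIL import Image
# img = Image.open('example.jpg')
# aa = AutoAugment(autoaugment_policy())  # 25 sub-policies defined above
# ra = RandAugment(n_augment=2, magnitude=9, increasing=True)
# img_aug = ra(aa(img))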
|
110172
|
from builtins import object
from rest_framework import serializers
from bluebottle.funding.base_serializers import PaymentSerializer, BaseBankAccountSerializer
from bluebottle.funding_vitepay.models import VitepayPayment, VitepayBankAccount
from bluebottle.funding_vitepay.utils import get_payment_url
class VitepayPaymentSerializer(PaymentSerializer):
payment_url = serializers.CharField(read_only=True)
class Meta(PaymentSerializer.Meta):
model = VitepayPayment
fields = PaymentSerializer.Meta.fields + ('payment_url', 'mobile_number')
class JSONAPIMeta(PaymentSerializer.JSONAPIMeta):
resource_name = 'payments/vitepay-payments'
def create(self, validated_data):
payment = super(VitepayPaymentSerializer, self).create(validated_data)
payment.payment_url = get_payment_url(payment)
return payment
class VitepayBankAccountSerializer(BaseBankAccountSerializer):
class Meta(BaseBankAccountSerializer.Meta):
model = VitepayBankAccount
fields = BaseBankAccountSerializer.Meta.fields + (
'account_name',
'mobile_number',
)
included_serializers = {
'connect_account': 'bluebottle.funding.serializers.PlainPayoutAccountSerializer',
}
class JSONAPIMeta(BaseBankAccountSerializer.JSONAPIMeta):
resource_name = 'payout-accounts/vitepay-external-accounts'
class PayoutVitepayBankAccountSerializer(serializers.ModelSerializer):
class Meta(object):
fields = (
'id',
'account_name',
'mobile_number',
)
model = VitepayBankAccount
|
110213
|
import json
import logging
import boto3
import cfnresponse
import time
ec2_client = boto3.client('ec2')
logs_client = boto3.client('logs')
def boto_throttle_backoff(boto_method, max_retries=10, backoff_multiplier=2, **kwargs):
retry = 0
results = None
while not results:
try:
results = boto_method(**kwargs)
except Exception as e:
if 'ThrottlingException' in str(e) or 'VolumeInUse' in str(e):
retry += 1
if retry > max_retries:
print("Maximum retries of %s reached" % str(max_retries))
raise
print("hit an api throttle, or eventual consistency error, waiting for %s seconds before retrying" % str(retry * backoff_multiplier))
time.sleep(retry * backoff_multiplier)
else:
raise
return results
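# Note: the backoff above is linear, not exponential -- each sleep is retry * backoff_multiplier,
# so with the default multiplier of 2 the waits run 2s, 4s, 6s, ... until max_retries is hit.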
def handler(event, context):
print('Received event: %s' % json.dumps(event))
status = cfnresponse.SUCCESS
physical_resource_id = 'PVCleanup'
data = {}
reason = None
try:
if event['RequestType'] == 'Delete':
print('Removing any orphaned EBS volumes...')
tag_name = 'tag:kubernetes.io/cluster/%s' % event['ResourceProperties']['ClusterId']
response = boto_throttle_backoff(
ec2_client.describe_volumes,
Filters=[{'Name': tag_name, 'Values': ['owned']}]
)['Volumes']
for volume in response:
print('deleting volume %s' % volume['VolumeId'])
boto_throttle_backoff(ec2_client.delete_volume, VolumeId=volume['VolumeId'])
except Exception as e:
logging.error('Exception: %s' % e, exc_info=True)
reason = str(e)
status = cfnresponse.FAILED
finally:
if event['RequestType'] == 'Delete':
try:
wait_message = 'waiting for events for request_id %s to propagate to cloudwatch...' % context.aws_request_id
while not logs_client.filter_log_events(
logGroupName=context.log_group_name,
logStreamNames=[context.log_stream_name],
filterPattern='"%s"' % wait_message
)['events']:
print(wait_message)
time.sleep(5)
except Exception as e:
logging.error('Exception: %s' % e, exc_info=True)
time.sleep(120)
cfnresponse.send(event, context, status, data, physical_resource_id, reason)
|
110242
|
import requests
def whois_more(IP):
result = requests.get('http://api.hackertarget.com/whois/?q=' + IP).text
print('\n'+ result + '\n')
|
110272
|
from django.contrib.auth.models import User
import factory
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Faker('name')
    email = factory.Sequence(lambda n: 'user%d@example.com' % n)  # hypothetical address pattern; the original was redacted
class Meta:
model = User
|
110293
|
import numpy as np
#import simpleaudio as sa
import scipy.io.wavfile as sw
'''
def audioplay(fs, y):
yout = np.iinfo(np.int16).max / np.max(np.abs(y)) * y
yout = yout.astype(np.int16)
play_obj = sa.play_buffer(yout, y.ndim, 2, fs)
'''
def wavread(wavefile):
fs, y = sw.read(wavefile)
if y.dtype == 'float32' or y.dtype == 'float64':
max_y = 1
elif y.dtype == 'uint8':
y = y - 128
max_y = 128
elif y.dtype == 'int16':
max_y = np.abs(np.iinfo(np.int16).min)
else:
max_y = np.abs(np.iinfo(np.int16).min)
y = y / max_y
y = y.astype(np.float32)
return fs, y
def wavwrite(wavefile, fs, data):
if data.dtype == 'float32' or data.dtype == 'float64':
max_y = np.max(np.abs(data))
elif data.dtype == 'uint8':
data = data - 128
max_y = 128
elif data.dtype == 'int16':
max_y = np.abs(np.iinfo(np.int16).min)
else:
max_y = np.abs(np.iinfo(np.int16).min)
data = np.int16(data / max_y * np.abs(np.iinfo(np.int16).min))
sw.write(wavefile, fs, data)
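# Round-trip sketch ('tone.wav' is a hypothetical output path): float input is
# rescaled to full-range int16 on write and normalized back to [-1, 1] on read.
if __name__ == '__main__':
    fs = 16000
    t = np.arange(fs) / fs
    tone = 0.5 * np.sin(2 * np.pi * 440 * t).astype(np.float32)  # 1 s of 440 Hz
    wavwrite('tone.wav', fs, tone)
    fs2, y = wavread('tone.wav')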
|
110299
|
import pytest
pytest.importorskip("speedtest")
def test_load_module():
__import__("modules.core.speedtest")
|
110309
|
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from osmaxx.excerptexport.models import Export
def tracker(request, export_id):
export = get_object_or_404(Export, pk=export_id)
export.set_and_handle_new_status(request.GET['status'], incoming_request=request)
    # HttpResponse defaults to status 200; the empty body simply acknowledges the ping
    return HttpResponse('')
|
110351
|
from nighres.registration.apply_coordinate_mappings import apply_coordinate_mappings
from nighres.registration.apply_coordinate_mappings import apply_coordinate_mappings_2d
from nighres.registration.embedded_antsreg import embedded_antsreg
from nighres.registration.embedded_antsreg import embedded_antsreg_2d
from nighres.registration.embedded_antsreg import embedded_antsreg_multi
from nighres.registration.embedded_antsreg import embedded_antsreg_2d_multi
from nighres.registration.generate_coordinate_mapping import generate_coordinate_mapping
from nighres.registration.simple_align import simple_align
|
110364
|
import logging
import os
import tempfile
from pathlib import Path
from cached_path.common import PathOrStr
logger = logging.getLogger(__name__)
class CacheFile:
"""
This is a context manager that makes robust caching easier.
    On `__enter__`, an IO handle to a temporary file is returned, which can
    be treated as if it's the actual cache file.
    On `__exit__`, the temporary file is renamed to the cache file. If anything
    goes wrong while writing to the temporary file, it will be removed.
"""
def __init__(self, cache_filename: PathOrStr, mode: str = "w+b", suffix: str = ".tmp") -> None:
self.cache_filename = Path(cache_filename)
self.cache_directory = os.path.dirname(self.cache_filename)
self.mode = mode
self.temp_file = tempfile.NamedTemporaryFile(
self.mode, dir=self.cache_directory, delete=False, suffix=suffix
)
def __enter__(self):
return self.temp_file
def __exit__(self, exc_type, exc_value, traceback):
self.temp_file.close()
if exc_value is None:
# Success.
logger.debug(
"Renaming temp file %s to cache at %s", self.temp_file.name, self.cache_filename
)
# Rename the temp file to the actual cache filename.
os.replace(self.temp_file.name, self.cache_filename)
return True
# Something went wrong, remove the temp file.
logger.debug("removing temp file %s", self.temp_file.name)
os.remove(self.temp_file.name)
return False
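# Usage sketch (hypothetical path): on a clean exit the temp file is atomically
# renamed over the cache file; on an exception it is removed instead, so the
# cache is never left half-written:
#
#     with CacheFile("weights.bin") as tmp:
#         tmp.write(b"...")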
|
110374
|
import hashlib
from django.shortcuts import render, reverse, redirect
from .models import UseInfo, HostInfo
from remoteCMD.remote import Remote
# Create your views here.
def hash_password(password):
"""
    MD5-hash the given password
:param password:
:return:
"""
md5 = hashlib.md5()
md5.update(password.encode('utf-8'))
return md5.hexdigest()
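# e.g. hash_password('secret') -> '5ebe2294ecd0e0f08eab7690d2a6ee69'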
def is_user_exist(username):
"""
    Check whether the username already exists
:param username:
:return:
"""
    return UseInfo.objects.filter(name=username).exists()
def login_check(func):
"""
    Verify that the user is logged in
:param func:
:return:
"""
def wrapper(request, *args, **kwargs):
name = request.COOKIES.get('name')
if not name:
return redirect(reverse('login'))
return func(request, *args, **kwargs)
return wrapper
def login(request):
if request.method == 'POST' and request.POST:
username = request.POST['name']
password = request.POST['password']
        password = hash_password(password)
print(password)
# email = request.POST['email']
if is_user_exist(username):
db_passwd = UseInfo.objects.get(name=username).password
print(db_passwd)
if password == db_passwd:
response = redirect(reverse('index'))
response.set_cookie('name', username, max_age=3600)
request.session['name'] = username
return response
else:
                error = 'Wrong username or password, please try again'
return render(request, 'login.html', locals())
else:
            error = 'Wrong username or password, please try again'
return render(request, 'login.html', locals())
else:
return render(request, 'login.html', locals())
def logout(request):
"""
    Log out
:param request:
:return:
"""
request.session.clear()
response = redirect(reverse('login'))
response.delete_cookie('name')
return response
def register(request):
if request.method == 'POST' and request.POST:
username = request.POST['name']
if is_user_exist(username):
            warn = 'Username already exists, please choose another'
return render(request, 'register.html', locals())
else:
user = UseInfo()
user.name = username
password = request.POST['password']
email = request.POST['email']
            user.password = hash_password(password)
user.email = email
user.save()
return redirect(reverse('login'))
else:
return render(request, 'register.html')
@login_check
def index(request):
"""首页"""
username = request.COOKIES.get('name', '')
return render(request, 'index.html', locals())
@login_check
def hostlist(request):
username = request.COOKIES.get('name', '')
host_list = HostInfo.objects.filter(is_delete=False)
return render(request, 'hostlist.html', locals())
def get_host_info(ip, admin, password, nickname):
"""获取主机信息"""
try:
r = Remote(host=ip, username=admin, password=password)
db_host = HostInfo()
db_host.ip = ip
db_host.host_name = nickname
        db_host.cpu = str(r.ssh('cat /proc/cpuinfo | grep name |cut -f2 -d:')[0].replace('\n', ''))  # CPU info
        db_host.os = str(r.ssh('cat /etc/issue')[0].replace('\\n', '').replace('\\l\n', ''))  # OS version
        db_host.last_login_time = str(r.ssh("who -b | cut -d ' ' -f 13,14")[0].replace('\n', ''))  # last login time
db_host.is_delete = False
db_host.save()
return True
except Exception as e:
print(e)
return False
@login_check
def add_host(request):
"""添加主机"""
username = request.COOKIES.get('name', '')
if request.method == 'POST' and request.POST:
host_ip = request.POST['ip']
nickname = request.POST['nickname']
password = request.POST['password']
admin = request.POST['admin']
if get_host_info(host_ip, admin, password, nickname):
is_add = 0
return render(request, 'add_host.html', locals())
else:
is_add = 1
return render(request, 'add_host.html', locals())
return render(request, 'add_host.html', locals())
@login_check
def del_host(request):
"""删除主机"""
del_id = request.GET.get('id', '')
host = HostInfo.objects.get(id=del_id)
host.is_delete = True
host.save()
return redirect(reverse('hostList'))
|
110376
|
from typing import Optional
class AcousticParam(object):
def __init__(
self,
sampling_rate: int = 24000,
pad_second: float = 0,
        threshold_db: Optional[float] = None,
frame_period: int = 5,
order: int = 8,
alpha: float = 0.466,
f0_floor: float = 71,
f0_ceil: float = 800,
fft_length: int = 1024,
dtype: str = 'float32',
) -> None:
self.sampling_rate = sampling_rate
self.pad_second = pad_second
self.threshold_db = threshold_db
self.frame_period = frame_period
self.order = order
self.alpha = alpha
self.f0_floor = f0_floor
self.f0_ceil = f0_ceil
self.fft_length = fft_length
self.dtype = dtype
def _asdict(self):
return self.__dict__
|
110445
|
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torch.distributions import Categorical, Independent, MixtureSameFamily, Normal
from torch.nn.modules.conv import Conv2d
import einops
from .helpers import Delta, batch_flatten, batch_unflatten, prod, weights_init
__all__ = ["CondDist", "get_marginalDist"]
### CONDITIONAL DISTRIBUTIONS ###
class CondDist(nn.Module):
"""Return the (uninstantiated) correct CondDist.
Parameters
----------
in_shape : tuple of int
out_dim : int
Architecture : nn.Module
Module to be instantiated by `Architecture(in_shape, out_dim)`.
family : {"gaussian","uniform"}
Family of the distribution (after conditioning), this can be easily extandable to any
distribution in `torch.distribution`.
kwargs :
Additional arguments to the `Family`.
"""
def __init__(self, in_shape, out_dim, Architecture, family, **kwargs):
super().__init__()
if family == "diaggaussian":
self.Family = DiagGaussian
elif family == "deterministic":
self.Family = Deterministic
else:
raise ValueError(f"Unkown family={family}.")
self.in_shape = in_shape
self.out_dim = out_dim
self.kwargs = kwargs
self.mapper = Architecture(in_shape, out_dim * self.Family.n_param)
self.reset_parameters()
def forward(self, x):
"""Compute the distribution conditioned on `X`.
Parameters
----------
        x : torch.Tensor, shape: [batch_size, *in_shape]
Input on which to condition the output distribution.
Return
------
p(.|x) : torch.Distribution, batch shape: [batch_size] event shape: [out_dim]
"""
# shape: [batch_size, out_dim * n_param]
suff_param = self.mapper(x)
# batch shape: [batch_size] ; event shape: [out_dim]
p__lx = self.Family.from_suff_param(suff_param, **self.kwargs)
return p__lx
def reset_parameters(self):
weights_init(self)
class Distributions:
"""Base class for distributions that can be instantiated with joint suff stat."""
n_param = None # needs to be defined in each class
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def from_suff_param(cls, concat_suff_params, **kwargs):
"""Initialize the distribution using the concatenation of sufficient parameters (output of NN)."""
# shape: [batch_size, -1] * n_param
suff_params = einops.rearrange(
concat_suff_params, "b (z p) -> b z p", p=cls.n_param
).unbind(-1)
suff_params = cls.preprocess_suff_params(*suff_params)
return cls(*suff_params, **kwargs)
@classmethod
def preprocess_suff_params(cls, *suff_params):
"""Preprocesses parameters outputed from network (usually to satisfy some constraints)."""
return suff_params
def detach(self, is_grad_flow=False):
"""
        Detaches all the parameters. The optional `is_grad_flow` ensures PyTorch does
        not complain about missing gradients (by keeping a zero-valued gradient path).
"""
raise NotImplementedError()
class DiagGaussian(Distributions, Independent):
"""Gaussian with diagonal covariance."""
n_param = 2
min_std = 1e-5
def __init__(self, diag_loc, diag_scale):
super().__init__(Normal(diag_loc, diag_scale), 1)
@classmethod
def preprocess_suff_params(cls, diag_loc, diag_log_var):
        # the usual choice is exp(log_var) ** 0.5, but softplus avoids exploding scales
diag_scale = F.softplus(diag_log_var) + cls.min_std
return diag_loc, diag_scale
def detach(self, is_grad_flow=False):
loc = self.base_dist.loc.detach()
scale = self.base_dist.scale.detach()
if is_grad_flow:
loc = loc + 0 * self.base_dist.loc
scale = scale + 0 * self.base_dist.scale
return DiagGaussian(loc, scale)
class Deterministic(Distributions, Independent):
"""Delta function distribution (i.e. no stochasticity)."""
n_param = 1
def __init__(self, param):
super().__init__(Delta(param), 1)
def detach(self, is_grad_flow=False):
loc = self.base_dist.loc.detach()
if is_grad_flow:
loc = loc + 0 * self.base_dist.loc
return Deterministic(loc)
### MARGINAL DISTRIBUTIONS ###
def get_marginalDist(family, cond_dist, **kwargs):
"""Return an approximate marginal distribution.
Notes
-----
    - Marginal distributions are Modules that TAKE NO ARGUMENTS and return the correct distribution;
      because they are modules, they ensure that parameters are on the correct device.
"""
if family == "unitgaussian":
marginal = MarginalUnitGaussian(cond_dist.out_dim, **kwargs)
else:
raise ValueError(f"Unkown family={family}.")
return marginal
class MarginalUnitGaussian(nn.Module):
"""Mean 0 covariance 1 Gaussian."""
def __init__(self, out_dim):
super().__init__()
self.out_dim = out_dim
self.register_buffer("loc", torch.as_tensor([0.0] * self.out_dim))
self.register_buffer("scale", torch.as_tensor([1.0] * self.out_dim))
def forward(self):
return Independent(Normal(self.loc, self.scale), 1)
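# Minimal usage sketch (the lambda below is a hypothetical stand-in Architecture;
# anything instantiated as Architecture(in_shape, out_dim) works):
#
#     arch = lambda in_shape, out_dim: nn.Sequential(
#         nn.Flatten(), nn.Linear(prod(in_shape), out_dim)
#     )
#     q_ZlX = CondDist((3, 32, 32), 16, Architecture=arch, family="diaggaussian")
#     z = q_ZlX(torch.randn(8, 3, 32, 32)).rsample()   # batch shape [8], event shape [16]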
|
110446
|
import grpc
import pi_pb2
import pi_pb2_grpc
from concurrent import futures
def pi(client, k):
return client.Calc(pi_pb2.PiRequest(n=k)).value
def main():
channel = grpc.insecure_channel('localhost:8080')
client = pi_pb2_grpc.PiCalculatorStub(channel)
pool = futures.ThreadPoolExecutor(max_workers=10)
    results = []
    for i in range(1, 100):
        results.append((i, pool.submit(pi, client, i)))
    pool.shutdown()  # blocks until all submitted calls have finished
    for n, future in results:
        print(n, future.result())
if __name__ == '__main__':
main()
|
110463
|
import json
import logging
from channels.generic.websocket import AsyncWebsocketConsumer
from django_redis import get_redis_connection
from competitions.models import Submission
from utils.data import make_url_sassy
logger = logging.getLogger(__name__)
class SubmissionIOConsumer(AsyncWebsocketConsumer):
async def connect(self):
submission_id = self.scope['url_route']['kwargs']['submission_id']
secret = self.scope['url_route']['kwargs']['secret']
try:
Submission.objects.get(pk=submission_id, secret=secret)
except Submission.DoesNotExist:
return await self.close()
await self.accept()
async def receive(self, text_data=None, bytes_data=None):
user_pk = self.scope['url_route']['kwargs']['user_pk']
submission_id = self.scope['url_route']['kwargs']['submission_id']
logger.debug(f"Received websocket input for user = {user_pk}, submission = {submission_id}, text_data = {text_data}")
try:
sub = Submission.objects.get(pk=submission_id)
except Submission.DoesNotExist:
return await self.close()
if sub.phase.hide_output and not sub.phase.competition.user_has_admin_permission(user_pk):
return
data = json.loads(text_data)
if data['kind'] == 'detailed_result_update':
data['result_url'] = make_url_sassy(Submission.objects.get(id=submission_id).detailed_result.name)
# update text data to include the newly added sas url for retrieval on page refresh
text_data = json.dumps(data)
con = get_redis_connection("default")
con.append(f':1:submission-{submission_id}-log', f'{text_data}\n')
await self.channel_layer.group_send(f"submission_listening_{user_pk}", {
'type': 'submission.message',
'text': data,
'submission_id': submission_id,
})
class SubmissionOutputConsumer(AsyncWebsocketConsumer):
async def connect(self):
if not self.scope["user"].is_authenticated:
return await self.close()
await self.accept()
await self.channel_layer.group_add(f"submission_listening_{self.scope['user'].pk}", self.channel_name)
async def disconnect(self, close_code):
await self.channel_layer.group_discard(f"submission_listening_{self.scope['user'].pk}", self.channel_name)
await self.close()
def group_send(self, text, submission_id, full_text=False):
return self.channel_layer.group_send(f"submission_listening_{self.scope['user'].pk}", {
'type': 'submission.message',
'text': text,
'submission_id': submission_id,
'full_text': full_text,
})
async def receive(self, text_data=None, bytes_data=None):
"""We expect to receive a message at this endpoint containing the ID(s) of submissions to get
details about; typically on page load, looking up the previous submission details"""
data = json.loads(text_data)
submission_ids = data.get("submission_ids", [])
if submission_ids:
# Filter out submissions not by this user
submissions = Submission.objects.filter(id__in=submission_ids, owner=self.scope["user"])
con = get_redis_connection("default")
for sub in submissions:
text = (con.get(f':1:submission-{sub.id}-log'))
if text:
await self.group_send(text.decode('utf-8'), sub.id, full_text=True)
async def submission_message(self, event):
data = {
"type": "catchup" if event.get('full_text') else "message",
"submission_id": event['submission_id'],
"data": event['text']
}
await self.send(json.dumps(data))
|
110472
|
from selenium.webdriver import Firefox
url = 'http://selenium.dunossauro.live/aula_05_a.html'
firefox = Firefox()
firefox.get(url)
div_py = firefox.find_element_by_id('python')
div_hk = firefox.find_element_by_id('haskell')
print(div_hk.text)
firefox.quit()
|
110527
|
from unittest import TestCase
from feito import Messages
class MessagesTestCase(TestCase):
def test_format(self):
stub_messages = {
'messages': [{
'source': 'pylint',
'code': 'syntax-error',
'location': {
'path': 'tests/feito/fixtures/analyze_file.py',
'module': 'tests.feito.fixtures.analyze_file',
'function': None,
'line': 1,
'character': 0
},
'message': 'unexpected indent (<string>, line 1)',
},{
'source': 'pylint',
'code': 'too-many-arguments',
'location': {
'path': 'feito/github/api.py',
'module': 'feito.github.api',
'function': 'API.create_comment_commit',
'line': 25,
'character': 4
},
'message': 'Too many arguments (6/5)'
}]
}
formatted_messages = Messages(stub_messages).commit_format()
assert formatted_messages == [{
'message': 'pylint: unexpected indent (<string>, line 1). Code: syntax-error',
'file': 'tests/feito/fixtures/analyze_file.py',
'line': 1
},{
'message': 'pylint: Too many arguments (6/5). Code: too-many-arguments',
'file': 'feito/github/api.py',
'line': 25
}]
|
110536
|
from click.testing import CliRunner
from taskit.infrastructure.cli.taskit import cli, State
def test_cli_state(state):
assert isinstance(state, State)
def test_cli():
runner = CliRunner()
result = runner.invoke(cli, [])
assert result.exit_code == 0
|
110637
|
import pickle
import json
import argparse
import cv2
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches
import matplotlib.lines as lines
from tqdm import tqdm
import _init_paths
from core.config import cfg
from datasets_rel.pytorch_misc import intersect_2d, argsort_desc
from functools import reduce
#from utils.boxes import bbox_overlaps
#from utils_rel.boxes_rel import boxes_union
from graphviz import Digraph, Graph
import seaborn as sns
#sns.set_theme()
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument(
'--output_dir',
help='output directory to save the testing results. If not provided, '
'defaults to [args.load_ckpt|args.load_detectron]/../test.')
parser.add_argument(
'--filename',
help='Visualization file',
default='rel_detection_range_12_15', type=str)
args = parser.parse_args()
strfname = args.filename.split('_')
st,ed = strfname[-2], strfname[-1]
st,ed = int(st), int(ed)
def visualize_feature_map(feat_map, save_path, fname='feat_map'):
fig = plt.figure()
plt.imshow(feat_map)
plt.axis('off')
plt.savefig(os.path.join(save_path, fname+'.png'))
plt.close(fig)
def vis_box(img, box, save_path, fname='obj_box'):
fig = plt.figure()
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.imshow(img)
plt.axis('off')
for i in range(len(box)):
        x, y, x1, y1 = box[i, 1:].astype(int)
srect = plt.Rectangle((x,y),x1-x,y1-y, fill=False, edgecolor='b', linewidth=1)
ax.add_patch(srect)
#ax.text(x, y,
# color='white',
# bbox=dict(facecolor='orange', alpha=0.5, pad=0, edgecolor='none'))
plt.savefig(os.path.join(save_path, fname+'.png'))
plt.close(fig)
with open(os.path.join(args.output_dir, args.filename+'.pkl'), 'rb') as f:
ret = pickle.load(f)
for ii, return_dict2 in enumerate(ret):
if return_dict2 is None: continue
blob_conv = return_dict2['blob_conv']
feat_map = return_dict2['feat_map']
temporal_blob_conv_prd = return_dict2['temporal_blob_conv_prd']
batch_A = return_dict2['batch_A']
batch_non_leaf_bbox = return_dict2['batch_non_leaf_bbox']
spatio_feat1 = return_dict2['spatio_feat1']
spatio_feat2 = return_dict2['spatio_feat2']
if batch_non_leaf_bbox is not None:
batch_non_leaf_bbox = batch_non_leaf_bbox.data.cpu().numpy()
if return_dict2['roi'] is not None:
roi = return_dict2['roi'].data.cpu().numpy()
else:
roi = None
im = return_dict2['im']
save_dir = os.path.join(args.output_dir, str(ii+st))
if not os.path.exists(os.path.join(args.output_dir, str(ii+st))):
os.mkdir(save_dir)
else:
continue
all_frames = return_dict2['all_frames']
if isinstance(blob_conv, list):
for i,v in enumerate(blob_conv):
visualize_feature_map(v, save_dir, fname='obj_feat_map'+str(i))
else:
visualize_feature_map(blob_conv, save_dir, fname='obj_feat_map')
im = im[:,:,::-1]
visualize_feature_map(im, save_dir, fname='origin')
if all_frames is not None:
all_frames = all_frames.squeeze(0)
channel_swap = (0, 2, 3, 1)
all_frames = all_frames.transpose(channel_swap)
frame_sampled = all_frames[0] + cfg.PIXEL_MEANS
        frame_sampled = frame_sampled.astype(int)
frame_sampled = frame_sampled[:,:,::-1]
visualize_feature_map(frame_sampled, save_dir, fname='frame_sampled')
visualize_feature_map(feat_map, save_dir)
if temporal_blob_conv_prd is not None:
for i in range(len(temporal_blob_conv_prd)):
visualize_feature_map(temporal_blob_conv_prd[i],
save_dir, fname='temporal_feat_map'+str(i))
if roi is not None:
vis_box(im, roi, save_dir)
if batch_A is not None:
rid_f2s = set()
dot = Graph(filename=('tree'))
dot.body.append('size="16,16"')
#dot.body.append('rankdir="LR"')
son, fa = np.where(batch_A > 0)
for i in range(len(batch_A)):
dot.node(str(i), str(i), color='black')
for i in range(len(fa)):
            if (son[i], fa[i]) in rid_f2s or (fa[i], son[i]) in rid_f2s: continue
dot.edge(str(son[i]), str(fa[i]), color='black')
rid_f2s.add((son[i], fa[i]))
dot.render(os.path.join(save_dir, 'tree'), cleanup=True)
for i in range(len(batch_A)):
if i < len(roi):
            x, y, x1, y1 = roi[i, 1:].astype(int)
else:
            x, y, x1, y1 = batch_non_leaf_bbox[i-len(roi), 1:].astype(int)
subim = im[y:y1, x:x1, :]
if not os.path.exists(os.path.join(save_dir, 'subim')):
os.mkdir(os.path.join(save_dir, 'subim'))
visualize_feature_map(subim,
os.path.join(save_dir, 'subim'), str(i))
if not os.path.exists(os.path.join(save_dir, 'subject_vis_branch')):
os.mkdir(os.path.join(save_dir, 'subject_vis_branch'))
for i in range(len(spatio_feat1)):
print(spatio_feat1[i])
print(spatio_feat2[i])
print()
#visualize_feature_map(spatio_feat1[i], os.path.join(save_dir, 'subject_vis_branch'), str(i))
#if not os.path.exists(os.path.join(save_dir, 'object_vis_branch')):
# os.mkdir(os.path.join(save_dir, 'object_vis_branch'))
#for i in range(len(spatio_feat2)):
# visualize_feature_map(spatio_feat2[i], os.path.join(save_dir, 'object_vis_branch'), str(i))
|
110660
|
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
points = IN[0]
almostzero = IN[1]
struts = list()
# this function recursively finds all the pairs of points of the buckyball struts
def BuckyballStruts(points,struts):
firstpoint = points[0]
restofpoints = points[1:]
# measure distance between first point and rest of points
distances = [firstpoint.DistanceTo(x) for x in restofpoints]
# filter out all points that do not have a distance of 2 to the first point
strutpoints = list()
strutpointpairs = list()
i = 0
for dist in distances:
# use a little tolerance so we catch all struts
if dist > 2 - almostzero and dist < 2 + almostzero:
strutpoints.append(restofpoints[i])
strutpointpairs.append((firstpoint,restofpoints[i]))
i += 1
# add strutpointpairs to struts
if len(strutpointpairs) > 0: struts.extend(strutpointpairs)
# Continue processing the list recursively until there's only one point left. By always removing the first point from the list, we ensure that no duplicate struts are computed.
if len(restofpoints) > 1:
return BuckyballStruts(restofpoints,struts)
else: return (restofpoints,struts)
OUT = BuckyballStruts(points,struts)[1]
##### NEXT PYTHON NODE #####
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
struts = IN[0]
points = IN[1]
almostzero = IN[2]
def BuckyballFaces(struts,points,planes,almostzero,vertices):
firststrut = struts[0]
struts.pop(0)
# find the two adjacent struts
adjacent = list()
for strut in struts:
for point in strut:
if point.IsAlmostEqualTo(firststrut[0]):
adjacent.append(strut)
break
if len(adjacent) == 2:
break
# identify planes and find all vertices on planes
vlist = list()
for item in adjacent:
triangle = (firststrut[1],item[0],item[1])
pl = Plane.ByBestFitThroughPoints(triangle)
vlist = list()
for point in points:
dist = pl.DistanceTo(point)
if dist < almostzero and dist > -almostzero:
vlist.append(point)
newplane = (Plane.ByBestFitThroughPoints(vlist))
append_vertices = True
for pl in planes:
if newplane.IsAlmostEqualTo(pl):
append_vertices = False
if append_vertices:
vertices.append(vlist)
planes.append(newplane)
# let this function recursively call itself until it finds all planes
if len(planes) < 32:
return BuckyballFaces(struts,points,planes,almostzero,vertices)
else:
return (struts,points,planes,almostzero,vertices)
def OrderFaceIndices(p_ordered,p_unordered,almostzero):
    i = 0
for p in p_unordered:
dist = p_ordered[(len(p_ordered)-1)].DistanceTo(p)
if dist > 2-almostzero and dist < 2+almostzero:
p_ordered.append(p)
p_unordered.pop(i)
break
i += 1
if len(p_unordered) > 0:
return OrderFaceIndices(p_ordered,p_unordered,almostzero)
else:
return (p_ordered,p_unordered,almostzero)
vlist_unordered = BuckyballFaces(struts,points,list(),almostzero,list())[4]
vset_ordered = list()
for vset in vlist_unordered:
p_ordered = [vset[0]]
vset.pop(0)
vset_ordered.append(OrderFaceIndices(p_ordered,vset,almostzero))
vset_out = list()
for vset in vset_ordered:
vset_out.append(vset[0])
OUT = vset_out
|
110663
|
import math
import numpy as np
from ... import IntegerProgram, LinearProgram
from unittest import TestCase, main
class TestRelax(TestCase):
def test_relax(self) -> None:
A = np.array([
[1, 2, 3, 4],
[3, 5, 7, 9]
])
b = np.array([-9, 7])
c = np.array([1, 7, 1, 5])
z = 25
ip = IntegerProgram(A, b, c, z, "min", ["<=", ">="], [2], [1])
lp = ip.relax()
self.assertIsInstance(lp, LinearProgram, "Should be a linear program.")
self.assertTrue(np.allclose(lp.A, A), "Should have the same constraint matrix.")
self.assertTrue(np.allclose(lp.b, b), "Should have the same constraint vector.")
self.assertTrue(np.allclose(lp.c, c), "Should have the same coefficient vector.")
self.assertTrue(math.isclose(lp.z, z), "Should have the same constant.")
self.assertEqual(lp.inequalities, ip.inequalities, "Should have the same inequalities.")
self.assertEqual(lp.objective, ip.objective, "Should have the same objective.")
self.assertEqual(lp.negative_variables, ip.negative_variables, "Should have the same negative variables.")
self.assertEqual(lp.free_variables, ip.free_variables, "Should have the same free variables.")
if __name__ == "__main__":
main()
|
110747
|
import sys
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[0] == 2 and sys.version_info[1] == 6
if PY3:
from io import BytesIO as StringIO
from urllib.parse import urlparse
else:
from urlparse import urlparse # noqa
from cStringIO import StringIO # noqa
if PY26:
from unittest2 import TestCase
else:
from unittest import TestCase
if PY3:
def to_bytes(string):
return bytes(string, 'utf-8')
else:
to_bytes = lambda s: s
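# e.g. to_bytes('abc') returns b'abc' on Python 3 and the str 'abc' unchanged on Python 2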
|
110790
|
import gzip
from typing import List
from typing import Set
import grequests
import progressbar as pb
import requests
import spacy
from bs4 import BeautifulSoup
MAIN_WIKI = 'https://en.wikipedia.org/wiki/Lists_of_people_by_nationality'
BASE_WIKI = 'https://en.wikipedia.org'
NLP_MODEL = spacy.load('en_core_web_lg')
def get_pages(demonym_file: str) -> List[str]:
"""Gets individual famous people pages by nationality
Arguments:
demonym_file {txt} -- path to file containing country demonyms
Returns:
str[] -- list of urls to individual country pages
"""
result = requests.get(MAIN_WIKI)
soup = BeautifulSoup(result.content, 'html.parser')
with open(demonym_file, encoding='utf-8') as fp:
demonyms = {entity.strip() for entity in fp}
pages: Set[str] = set()
first_ul = soup.find_all('ul')[1]
for li_tag in first_ul.find_all('li'):
if li_tag.text in demonyms:
pages.add(BASE_WIKI + li_tag.a.attrs['href'])
return list(pages)
def get_people(pages: List[str], output_file: str):
"""Given a list of pages, gets all people's names
Arguments:
pages {str[]} -- list of urls to famous people from country pages
output_file {str} -- path to store output
"""
widget = ['Fetching list of people: ', pb.Percentage(), ' ',
pb.Bar(marker=pb.RotatingMarker()), ' ', pb.ETA()]
timer = pb.ProgressBar(widgets=widget,
maxval=len(pages)).start()
calls = (grequests.get(page) for page in pages)
responses = grequests.map(calls)
with gzip.open(output_file, 'wb') as fp:
for count, result in enumerate(responses):
soup = BeautifulSoup(result.content, 'html.parser')
if soup.find('div', {'class': 'navbox'}):
soup.find('div', {'class': 'navbox'}).decompose()
div = soup.find('div', {'id': 'mw-content-text'})
for li_tag in div.find_all('li'):
if not li_tag.a:
continue
if li_tag.a.has_attr('title') and li_tag.a.has_attr('href'):
doc = NLP_MODEL(li_tag.a.text)
for ent in doc.ents:
if ent.label_ == "PERSON":
line = str(ent.text) + "\n"
fp.write(line.encode(encoding='utf-8'))
timer.update(count)
timer.finish()
def _main():
pages = get_pages('../../common/text_files/country_demonyms.txt')
get_people(pages, '../../common/text_files/famous_people.txt.gz')
if __name__ == "__main__":
_main()
|
110800
|
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Summer_Patia
class Summer_Patia(Adv):
comment = 'cannot build combo for Cat Sith; uses up 15 stacks by 46.94s'
a3 = [('antiaffself_poison', 0.15, 10, 5), ('edge_poison', 60, 'hp50')]
conf = {}
conf['slots.poison.a'] = Kung_Fu_Masters()+The_Plaguebringer()
conf['slots.d'] = Shinobi()
conf['acl'] = """
# use dragon if using Cat Sith
# `dragon.act('c3 s end'), fsc
`s3, not self.s3_buff
`s1, fsc
`s2, fsc
`s4, fsc
`dodge, fsc
`fs3
"""
coab = ['Summer_Patia', 'Blade', 'Wand', 'Curran']
share = ['Curran']
def d_slots(self):
if self.duration <= 120:
self.conf['slots.d'] = Gala_Cat_Sith()
def init(self):
self.conf.fs.hit = -1
conf_alt_fs = {
'fs1': {
'dmg': 207 / 100.0,
'dmg2': 2.17,
'sp': 600,
'charge': 24 / 60.0,
'startup': 24 / 60.0,
'recovery': 46 / 60.0,
},
'fs2': {
'dmg': 297 / 100.0,
'dmg2': 3.12,
'sp': 900,
'charge': 48 / 60.0,
'startup': 24 / 60.0,
'recovery': 46 / 60.0,
},
'fs3': {
'dmg': 384 / 100.0,
'dmg2': 4.03,
'sp': 1400,
'charge': 72 / 60.0,
'startup': 24 / 60.0,
'recovery': 46 / 60.0,
}
}
for n, c in conf_alt_fs.items():
self.conf[n] = Conf(c)
act = FS_MH(n, self.conf[n])
self.__dict__['a_'+n] = act
self.l_fs1 = Listener('fs1',self.l_fs1)
self.l_fs2 = Listener('fs2',self.l_fs2)
self.l_fs3 = Listener('fs3',self.l_fs3)
self.fs = None
self.fs_alt_uses = 0
def do_fs(self, e, name):
log('cast', name)
e.name = name
self.__dict__['a_'+name].getdoing().cancel_by.append(name)
self.__dict__['a_'+name].getdoing().interrupt_by.append(name)
self.fs_before(e)
self.update_hits('fs')
if self.fs_alt_uses:
self.dmg_make(e.name, self.conf[name+'.dmg2'], 'fs')
self.afflics.poison(e.name,110,0.436)
self.fs_alt_uses = 0
else:
self.dmg_make(e.name, self.conf[name+'.dmg'], 'fs')
self.fs_proc(e)
self.think_pin('fs')
self.charge(name,self.conf[name+'.sp'])
def l_fs1(self, e):
self.do_fs(e, 'fs1')
def fs1(self):
return self.a_fs1()
def l_fs2(self, e):
self.do_fs(e, 'fs2')
def fs2(self):
return self.a_fs2()
def l_fs3(self, e):
self.do_fs(e, 'fs3')
def fs3(self):
return self.a_fs3()
def s1_before(self, e):
self.dmg_make(e.name, 7.47)
def s1_proc(self, e):
self.dmg_make(e.name, 7.47)
self.fs_alt_uses = 1
def s2_proc(self, e):
self.afflics.poison(e.name,120,0.582)
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
110803
|
import wx
import matplotlib.cm
import numpy as np
from . import properties
slider_width = 30
s_off = slider_width // 2
class ColorBarPanel(wx.Panel):
'''
A HORIZONTAL color bar and value axis drawn on a panel.
'''
    def __init__(self, parent, map, local_extents=(0., 1.), global_extents=None,
                 ticks=5, **kwargs):
'''
map -- a colormap name from matplotlib.cm
local_extents -- local min and max values of the measurement
global_extents -- min and max values of the measurement
ticks -- # of ticks to display values for on the bar
1 or 0 will draw no ticks
        labelformat -- (attribute, not a constructor argument) format string for
                       the values displayed on the value axis
'''
wx.Panel.__init__(self, parent, **kwargs)
self.ticks = ticks
self.labelformat = '%.3f'
self.low_slider = wx.Button(self, -1, '[', pos=(0,-1), size=(slider_width,-1))
self.high_slider = wx.Button(self, -1, ']', pos=(self.Size[0],-1), size=(slider_width,-1))
self.ClearNotifyWindows()
self.SetMap(map)
self.interval = list(local_extents)
        self.local_extents = list(local_extents)
        self.global_extents = list(global_extents) if global_extents else list(local_extents)
self.clipmode = 'rescale'
self.low_slider.SetToolTip('')
self.low_slider.GetToolTip().Enable(True)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.low_slider.Bind(wx.EVT_LEFT_DOWN, self.OnClipSliderLeftDown)
self.low_slider.Bind(wx.EVT_MOTION, self.OnClipSliderMotion)
self.high_slider.Bind(wx.EVT_LEFT_DOWN, self.OnClipSliderLeftDown)
self.high_slider.Bind(wx.EVT_MOTION, self.OnClipSliderMotion)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
self.Bind(wx.EVT_SIZE, self.OnResize)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnLeftDown(self, evt):
# Get the slider closest to the click point.
        if abs(self.low_slider.GetPosition()[0] - evt.GetX()) < abs(self.high_slider.GetPosition()[0] - evt.GetX()):
self.cur_slider = self.low_slider
else:
self.cur_slider = self.high_slider
self.cur_slider.SetPosition((evt.GetX() - s_off, -1))
self.xo = 0
self.UpdateInterval()
def OnMotion(self, evt):
if not evt.Dragging() or not evt.LeftIsDown():
return
self.cur_slider.SetPosition((evt.GetX() - s_off, -1))
self.UpdateInterval()
def OnClipSliderLeftDown(self, evt):
self.cur_slider = evt.EventObject
self.xo = evt.GetX()
def OnClipSliderMotion(self, evt):
slider = evt.EventObject
if not evt.Dragging() or not evt.LeftIsDown():
return
slider.SetPosition((slider.GetPosition()[0] + evt.GetX() - self.xo - s_off, -1))
self.xo = 0
self.UpdateInterval()
def ClearNotifyWindows(self):
self.notify_windows = []
def AddNotifyWindow(self, win):
self.notify_windows += [win]
def ResetInterval(self):
''' Sets clip interval to the extents of the colorbar. '''
self.interval = list(self.global_extents)
self.low_slider.SetPosition((0-s_off,-1))
self.high_slider.SetPosition((self.Size[0]-s_off,-1))
for win in self.notify_windows:
win.SetClipInterval(self.GetLocalInterval(), self.local_extents, self.clipmode)
self.Refresh()
def UpdateInterval(self):
''' Calculates the interval values w.r.t. the current extents
and clipping slider positions. '''
range = self.global_extents[1]-self.global_extents[0]
w = float(self.Size[0])
if range > 0 and w > 0:
self.interval[0] = self.global_extents[0] + ((self.low_slider.GetPosition()[0] + s_off) / w * range)
self.interval[1] = self.global_extents[0] + ((self.high_slider.GetPosition()[0] + s_off) / w * range)
self.low_slider.SetToolTip(str(self.global_extents[0] + ((self.low_slider.GetPosition()[0] + s_off) / w * range)))
self.high_slider.SetToolTip(str(self.global_extents[0] + ((self.high_slider.GetPosition()[0] + s_off) / w * range)))
else:
self.interval = list(self.local_extents)
self.UpdateLabelFormat()
for win in self.notify_windows:
win.SetClipInterval(self.GetLocalInterval(), self.local_extents, self.clipmode)
self.Refresh()
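        # Worked example: with global extents [0, 10], a 200 px wide bar, and the
        # low slider at x = 40, the low value is 0 + (40 + s_off) / 200 * 10 = 2.75
        # (s_off = 15): slider pixel positions map linearly onto the value range.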
# TODO: To be added. Not sure how to treat intervals that are outside
# the current extents, do we resize the extents? This could get
# ugly and confusing.
## def SetInterval(self, interval):
## ''' '''
## self.interval = interval
## self.low_slider.SetPosition((0-s_off,-1))
## self.high_slider.SetPosition((self.Size[0]-s_off,-1))
## for win in self.notify_windows:
## win.SetClipInterval(self.GetInterval(), self.clipmode)
## self.Refresh()
def GetGlobalInterval(self):
''' Returns the interval clipped on the value axis. '''
return self.interval
def GetLocalInterval(self):
''' Returns the interval clipped on the local color bar.
If either part is outside the local_extents, the extent is returned.
'''
return (max(self.interval[0], self.local_extents[0]),
min(self.interval[1], self.local_extents[1]))
def GetGlobalExtents(self):
return self.global_extents
def GetLocalExtents(self):
return self.local_extents
def GetClipMode(self):
return self.clipmode
def SetMap(self, map):
''' Sets the colormap that is displayed.
map should be the string name of a colormap from matplotlib.cm'''
self.cm = matplotlib.cm.get_cmap(map)
self.Refresh()
def SetLocalExtents(self, local_extents):
#''' Sets the value axis min and max. Accepts a 2-tuple.'''
self.local_extents = local_extents
if self.local_extents[0] < self.global_extents[0]:
self.global_extents[0] = self.local_extents[0]
if self.local_extents[1] > self.global_extents[1]:
self.global_extents[1] = self.local_extents[1]
self.UpdateInterval()
def SetGlobalExtents(self, global_extents):
self.global_extents = list(global_extents)
self.UpdateInterval()
def SetTicks(self, ticks):
''' Sets the number of tick marks displayed by the ColorBarPanel.
1 or 0 will draw no ticks'''
self.ticks = ticks
self.Refresh()
def UpdateLabelFormat(self):
''' Selects a number format based on the step value between ticks '''
range = self.global_extents[1] - self.global_extents[0]
step = range / self.ticks
if 0 < step < 0.001:
self.labelformat = '%.3e'
else:
self.labelformat = '%.3f'
def OnToggleClipMode(self, evt):
if self.clipmode == 'clip':
self.clipmode = 'rescale'
else:
self.clipmode = 'clip'
for win in self.notify_windows:
win.SetClipInterval(self.GetLocalInterval(), self.local_extents, self.clipmode)
self.Refresh()
def OnRightDown(self, evt):
popupMenu = wx.Menu()
popupMenu.SetTitle('Colorbar')
reset = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Reset sliders'))
self.Bind(wx.EVT_MENU, lambda evt:self.ResetInterval(), reset)
if self.clipmode == 'clip':
bracket_mode = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Value bracketing: RESCALE'))
else:
bracket_mode = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Value bracketing: CLIP'))
self.Bind(wx.EVT_MENU, self.OnToggleClipMode, bracket_mode)
aggmethod = self.Parent.aggregationMethodsChoice.GetStringSelection().lower()
src_table = self.Parent.sourceChoice.GetStringSelection()
if (aggmethod in ['mean', 'median', 'min', 'max']
and self.interval != self.global_extents):
popupMenu.AppendSeparator()
saveitem = popupMenu.AppendItem(wx.MenuItem(popupMenu, -1, 'Create gate from interval'))
self.Bind(wx.EVT_MENU, self.on_create_gate_from_interval, saveitem)
self.PopupMenu(popupMenu, (evt.GetX(), evt.GetY()))
def on_create_gate_from_interval(self, evt):
self.create_gate_from_interval()
def create_gate_from_interval(self):
table = self.Parent.sourceChoice.GetStringSelection()
colname = self.Parent.measurementsChoice.GetStringSelection()
from .guiutils import GateDialog
dlg = GateDialog(self)
if dlg.ShowModal() == wx.ID_OK:
from .sqltools import Gate, Gate1D
p = properties.Properties()
p.gates[dlg.Value] = Gate([Gate1D((table, colname), self.interval)])
dlg.Destroy()
def OnResize(self, evt):
range = self.global_extents[1] - self.global_extents[0]
if range == 0:
self.low_slider.SetPosition((0,-1))
            self.high_slider.SetPosition((self.Size[0],-1))
else:
self.low_slider.SetPosition((self.Size[0] * (self.interval[0] - self.global_extents[0]) / range - s_off, -1))
self.high_slider.SetPosition((self.Size[0] * (self.interval[1] - self.global_extents[0]) / range - s_off, -1))
self.UpdateLabelFormat()
def OnPaint(self, evt):
w_global, h = self.Size
if 0 in self.Size:
return
low_slider_pos = self.low_slider.GetPosition()[0] + s_off
high_slider_pos = self.high_slider.GetPosition()[0] + s_off
global_scale = self.global_extents[1] - self.global_extents[0] # value scale of the global data
if global_scale == 0:
local_x0 = 0
local_x1 = w_global
w_local = w_global
else:
local_x0 = (self.local_extents[0] - self.global_extents[0]) / global_scale * w_global # x pos (pixels) to start drawing the local color bar
local_x1 = (self.local_extents[1] - self.global_extents[0]) / global_scale * w_global # x pos (pixels) to stop drawing the local color bar
w_local = local_x1 - local_x0 # pixel width of the local color bar
w0 = int(max(low_slider_pos, local_x0) - local_x0)
w1 = int(local_x1 - min(high_slider_pos, local_x1))
# create array of values to be used for the color bar
if self.clipmode=='rescale':
a1 = np.array([])
if w0 > 0:
a1 = np.zeros(w0)
a2 = np.arange(abs(min(high_slider_pos, local_x1) - max(low_slider_pos, local_x0)), dtype=float) / (min(high_slider_pos, local_x1) - max(low_slider_pos, local_x0)) * 255
a3 = np.array([])
if w1 > 0:
                a3 = np.ones(w1) * 255
if len(a1) > 0 and len(a3) > 0:
a = np.hstack([a1,a2,a3])
else:
a = a2
        elif self.clipmode=='clip':
            # use the same 0-255 scale as the rescale branch so the int-indexed
            # colormap lookup below works in clip mode too
            a = np.arange(w_local, dtype=float) / w_local * 255
            a[:w0] = 0.
            if w1>=1:
                a[-w1:] = 255.
# draw the color bar
dc = wx.PaintDC(self)
dc.Clear()
dc.SetPen(wx.Pen((0,0,0)))
dc.DrawLine(0, (h-14)/2, local_x0, (h-14)/2)
for x, v in enumerate(a):
v = int(v)
color = np.array(self.cm(v)) * 255
dc.SetPen(wx.Pen(color))
dc.DrawLine(x+local_x0, 0, x+local_x0, h-14)
dc.SetPen(wx.Pen((0,0,0)))
dc.DrawLine(local_x1, (h-14)/2, w_global, (h-14)/2)
# draw value axis
if self.ticks <= 1:
return
font = dc.GetFont()
font.SetPixelSize((6,12))
dc.SetFont(font)
for t in range(self.ticks):
xpos = t * w_global/(self.ticks-1.)
val = t * (self.global_extents[1]-self.global_extents[0]) / (self.ticks-1) + self.global_extents[0]
dc.DrawLine(xpos,6,xpos,h-14)
textpos = xpos - xpos/w_global * dc.GetFullTextExtent(self.labelformat%(val), font)[0]
dc.DrawText(self.labelformat%(val), textpos, h-13)
|
110831
|
import os
import sys
def process(input_file, output_file):
output_text = ""
if input_file.endswith("abstract.summary.txt") or input_file.endswith("community.summary.txt") or input_file.endswith("combined.summary.txt") or input_file.endswith("human.summary.txt"):
input_text = []
with open(input_file, "r") as f:
input_text = f.readlines()
for i in range(len(input_text)):
inp = input_text[i].strip()
if len(inp) == 0:
continue
if inp.startswith("<S sid ="):
out = inp.split(">", 1)[1]
out = out.split("</S>", 1)[0]
else:
out = inp
output_text += out + " "
else:
with open(input_file, "r") as f:
output_text = f.read()
with open(output_file, "w") as f:
f.write(output_text)
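# e.g. the line '<S sid ="3" ssid = "3">Results are shown.</S>' contributes
# 'Results are shown. ' to the concatenated output text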
def main(input_dir, output_dir):
    if not os.path.exists(input_dir):
        print("%s not a valid directory" % input_dir)
        return
    if not os.path.exists(output_dir):
        print("%s not a valid directory" % output_dir)
        return
for file in os.listdir(input_dir):
process(os.path.join(input_dir, file), os.path.join(output_dir, file))
if __name__ == "__main__":
input_dir = sys.argv[1]
output_dir = sys.argv[2]
main(input_dir, output_dir)
|
110845
|
import Spheral
import distributeNodesGeneric
#-------------------------------------------------------------------------------
# Domain decompose using PeanoHilbert ordering (1d method).
#-------------------------------------------------------------------------------
def distributeNodes1d(*listOfNodeTuples):
distributeNodesGeneric.distributeNodesGeneric(listOfNodeTuples,
Spheral.DataBase1d,
Spheral.globalNodeIDsAll1d,
Spheral.PeanoHilbertOrderRedistributeNodes1d)
#-------------------------------------------------------------------------------
# Domain decompose using PeanoHilbert ordering (2d method).
#-------------------------------------------------------------------------------
def distributeNodes2d(*listOfNodeTuples):
distributeNodesGeneric.distributeNodesGeneric(listOfNodeTuples,
Spheral.DataBase2d,
Spheral.globalNodeIDsAll2d,
Spheral.PeanoHilbertOrderRedistributeNodes2d)
#-------------------------------------------------------------------------------
# Domain decompose using PeanoHilbert ordering (3d method).
#-------------------------------------------------------------------------------
def distributeNodes3d(*listOfNodeTuples):
distributeNodesGeneric.distributeNodesGeneric(listOfNodeTuples,
Spheral.DataBase3d,
Spheral.globalNodeIDsAll3d,
Spheral.PeanoHilbertOrderRedistributeNodes3d)
|
110865
|
from django.contrib import admin
from devilry.devilry_group import models
class FeedbackSetAdmin(admin.ModelAdmin):
list_display = [
'id',
'group_id',
'get_students',
'deadline_datetime',
'feedbackset_type',
'grading_published_datetime',
'grading_points'
]
raw_id_fields = [
'group',
'created_by',
'last_updated_by',
'grading_published_by'
]
readonly_fields = [
'feedbackset_type',
'gradeform_data_json'
]
search_fields = [
'=group__id',
'group__parentnode__short_name',
'group__parentnode__long_name',
'group__parentnode__parentnode__short_name',
'group__parentnode__parentnode__long_name',
'group__parentnode__parentnode__parentnode__short_name',
'group__parentnode__parentnode__parentnode__long_name',
'group__candidates__relatedstudent__user__shortname'
]
list_filter = [
'created_datetime',
'deadline_datetime',
'grading_published_datetime'
]
def get_students(self, obj):
return obj.group.get_unanonymized_short_displayname()
get_students.short_description = 'Students'
admin.site.register(models.FeedbackSet, FeedbackSetAdmin)
class GroupCommentAdmin(admin.ModelAdmin):
list_display = [
'id',
'user',
'part_of_grading'
]
raw_id_fields = [
'feedback_set'
]
admin.site.register(models.GroupComment, GroupCommentAdmin)
class FeedbackSetPassedPreviousPeriodAdmin(admin.ModelAdmin):
readonly_fields = [
'feedbackset',
'passed_previous_period_type',
'assignment_short_name',
'assignment_long_name',
'assignment_max_points',
'assignment_passing_grade_min_points',
'period_short_name',
'period_long_name',
'period_start_time',
'period_end_time',
'grading_points',
'grading_published_by',
'grading_published_datetime',
'created_datetime',
'created_by',
]
search_fields = [
'feedbackset_id',
'created_by_id',
'passed_previous_period_type'
]
list_display = [
'feedbackset'
]
admin.site.register(models.FeedbacksetPassedPreviousPeriod, FeedbackSetPassedPreviousPeriodAdmin)
class FeedbackSetGradingUpdateHistoryAdmin(admin.ModelAdmin):
readonly_fields = [
'feedback_set',
'updated_by',
'updated_datetime',
'old_grading_points',
'old_grading_published_by',
'old_grading_published_datetime'
]
search_fields = [
'feedbackset_id',
'updated_by_id',
'old_grading_published_by_id'
]
list_display = [
'feedback_set',
'updated_by'
]
admin.site.register(models.FeedbackSetGradingUpdateHistory, FeedbackSetGradingUpdateHistoryAdmin)
class FeedbackSetDeadlineHistoryAdmin(admin.ModelAdmin):
readonly_fields = [
'feedback_set',
'changed_by',
'changed_datetime',
'deadline_old',
'deadline_new'
]
search_fields = [
'feedback_set_id',
'changed_by_id'
]
list_display = [
'feedback_set',
'changed_datetime'
]
admin.site.register(models.FeedbackSetDeadlineHistory, FeedbackSetDeadlineHistoryAdmin)
class GroupCommentEditHistoryAdmin(admin.ModelAdmin):
list_display = [
'visibility',
'edited_datetime',
'edited_by'
]
admin.site.register(models.GroupCommentEditHistory, GroupCommentEditHistoryAdmin)
|
110879
|
from typing import Any, Union
from unittest.mock import Mock
import pystac
class MockStacIO(pystac.StacIO):
"""Creates a mock that records StacIO calls for testing and allows
clients to replace StacIO functionality, all within a context scope.
"""
def __init__(self) -> None:
self.mock = Mock()
def read_text(
self, source: Union[str, pystac.Link], *args: Any, **kwargs: Any
) -> str:
self.mock.read_text(source)
return pystac.StacIO.default().read_text(source)
def write_text(
self, dest: Union[str, pystac.Link], txt: str, *args: Any, **kwargs: Any
) -> None:
self.mock.write_text(dest, txt)
pystac.StacIO.default().write_text(dest, txt)
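# Usage sketch (assumes a pystac version whose from_file accepts a stac_io argument;
# 'catalog.json' is a hypothetical path):
#
#     stac_io = MockStacIO()
#     pystac.Catalog.from_file("catalog.json", stac_io=stac_io)
#     stac_io.mock.read_text.assert_called_once_with("catalog.json")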
|
110910
|
import FWCore.ParameterSet.Config as cms
#
# tracker
#
from RecoLocalTracker.Configuration.RecoLocalTracker_Cosmics_cff import *
from RecoTracker.Configuration.RecoTrackerP5_cff import *
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
from RecoTracker.Configuration.RecoTrackerBHM_cff import *
from RecoTracker.DeDx.dedxEstimators_Cosmics_cff import *
#
# calorimeters
#
from RecoLocalCalo.Configuration.RecoLocalCalo_Cosmics_cff import *
from RecoEcal.Configuration.RecoEcalCosmics_cff import *
#
# muons
#
from RecoLocalMuon.Configuration.RecoLocalMuonCosmics_cff import *
from RecoMuon.Configuration.RecoMuonCosmics_cff import *
# primary vertex
#from RecoVertex.Configuration.RecoVertexCosmicTracks_cff import *
# local reco
trackerCosmics = cms.Sequence(offlineBeamSpot*trackerlocalreco)
caloCosmics = cms.Sequence(calolocalreco)
muonsLocalRecoCosmics = cms.Sequence(muonlocalreco+muonlocalrecoNoDrift)
localReconstructionCosmics = cms.Sequence(trackerCosmics*caloCosmics*muonsLocalRecoCosmics)
reconstructionCosmics = cms.Sequence(localReconstructionCosmics)
|
110920
|
from fusion.points2heatmap import *
from fusion.calcAffine import *
from fusion.warper import warping as warp
import matplotlib.pyplot as plt
from fusion.parts2lms import parts2lms
import time
from tqdm import *
import random
import multiprocessing
import sys
# explicit imports for names that are otherwise pulled in via the star imports above
import os
import math
import cv2
import numpy as np
def gammaTrans(img, gamma):
gamma_table = [np.power(x/255.0, gamma)*255.0 for x in range(256)]
gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
return cv2.LUT(img, gamma_table)
def erodeAndBlur(img,kernelSize=21,blurSize=21):
#img : ndarray float32
kernel = np.ones((int(kernelSize), int(kernelSize)), np.uint8)
res = cv2.erode(img,kernel)
res = cv2.GaussianBlur(res, (blurSize, blurSize), math.sqrt(blurSize))
return res
def affineface(img,src_pt,dst_pt,heatmapSize=256,needImg=True):
#src/dst_pt[ndarray] : [...,[x,y],...] in [0.0,1.0],with gaze
#naive mode: align 5 parts
curves_src,_ = points2curves(src_pt.copy())
pts_fivesense_src = np.vstack(curves_src[1:])
curves_dst,_ = points2curves(dst_pt.copy())
pts_fivesense_dst = np.vstack(curves_dst[1:])
affine_mat = calAffine(pts_fivesense_src,pts_fivesense_dst)
pt_aligned = affinePts(affine_mat,src_pt*255.0)/255.0
if needImg:
img_aligned = affineImg(img,affine_mat)
return pt_aligned,img_aligned
else:
return pt_aligned
def affineface_parts(img,src_pt,dst_pt):
curves_src,_ = points2curves(src_pt.copy())
curves_dst,_ = points2curves(dst_pt.copy())#[0,255]
parts_src = curves2parts(curves_src)
parts_dst = curves2parts(curves_dst) #[0,255]
partsList = []
for i in range(len(parts_src)-2):
affine_mat = calAffine(parts_src[i],parts_dst[i])
parts_aligned = affinePts(affine_mat,parts_src[i]) #[0,255]
partsList.append(parts_aligned)
partsList.append(parts_src[-2])
partsList.append(parts_src[-1])
'''
A = []
B = []
for i in range(len(parts_src)):
A.append(parts_src[i])
B.append(partsList[i])
A = np.vstack(A)
B = np.vstack(B)
res = warp(img,A,B)
'''
lms = parts2lms(partsList)
#bound
lms[:33] = dst_pt[:33]*256
res = warp(img,src_pt[:106]*256,lms[:106])
return lms/255.0,res
def lightEye(img_ref,lms_ref,img_gen,lms_gen,ratio=0.1):
#get curves
curves_ref,_ = points2curves(lms_ref.copy())
curves_gen,_ = points2curves(lms_gen.copy())
parts_ref = curves2parts(curves_ref)
parts_gen = curves2parts(curves_gen) #[0,255]
#get rois
gaze_ref = curves2gaze(curves_ref)
gaze_gen = curves2gaze(curves_gen)
#img_gazeL = np.dot(gaze_ref[0], img_ref)
img_gazeL = multi(img_ref,gaze_ref[0])
#img_gazeR = np.dot(gaze_ref[1] , img_ref)
img_gazeR = multi(img_ref,gaze_ref[1])
affine_mat = calAffine(parts_ref[-2],parts_gen[-2])
img_gazeL_affined = affineImg(img_gazeL,affine_mat)
affine_mat = calAffine(parts_ref[-1],parts_gen[-1])
img_gazeR_affined = affineImg(img_gazeR,affine_mat)
img_ref = img_gazeL_affined + img_gazeR_affined
mask = gaze_gen[0] + gaze_gen[1]
mask = erodeAndBlur(mask,5,5)
R = img_gen[:,:,0] * (1-mask) + mask* (img_gen[:,:,0]*ratio + img_ref[:,:,0]*(1-ratio))
G = img_gen[:,:,1] * (1-mask) + mask* (img_gen[:,:,1]*ratio + img_ref[:,:,1]*(1-ratio))
B = img_gen[:,:,2] * (1-mask) + mask* (img_gen[:,:,2]*ratio + img_ref[:,:,2]*(1-ratio))
res = np.stack([R,G,B]).transpose((1,2,0))
seg = mask
seg = seg * 127
return res,seg,img_ref
def multi(img,mask):
R = img[:,:,0] * mask
G = img[:,:,1] * mask
B = img[:,:,2] * mask
res = np.stack([R,G,B]).transpose((1,2,0))
return res
def fusion(img_ref,lms_ref,img_gen,lms_gen,ratio=0.2):
#img*: ndarray(np.uint8) [0,255]
#lms*: ndarray , [...,[x,y],...] in [0,1]
#ratio: weight of gen
#--------------------------------------------
#get curves
curves_ref,_ = points2curves(lms_ref.copy())
curves_gen,_ = points2curves(lms_gen.copy())
#get rois
roi_ref = curves2segments(curves_ref)
roi_gen = curves2segments(curves_gen)
#get seg
seg_ref = roi_ref.sum(0)
seg_gen = roi_gen.sum(0)
seg_ref = seg_ref / seg_ref.max() * 255
seg_gen = seg_gen / seg_gen.max() * 255
#get skin mask
skin_src = roi_ref[0] - roi_ref[2:].max(0)
skin_gen = roi_gen[0] - roi_gen[2:].max(0)
#blur edge
skin_src = erodeAndBlur(skin_src,7,7)
skin_gen = erodeAndBlur(skin_gen,7,7)
#fusion
skin = skin_src * skin_gen
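    # per-channel alpha blend: keep img_gen outside the skin mask; inside it,
    # mix ratio * img_gen with (1 - ratio) * img_ref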
R = img_gen[:,:,0] * (1-skin) + skin * (img_gen[:,:,0]*ratio + img_ref[:,:,0]*(1-ratio))
G = img_gen[:,:,1] * (1-skin) + skin * (img_gen[:,:,1]*ratio + img_ref[:,:,1]*(1-ratio))
B = img_gen[:,:,2] * (1-skin) + skin * (img_gen[:,:,2]*ratio + img_ref[:,:,2]*(1-ratio))
res = np.stack([R,G,B]).transpose((1,2,0))
return res,seg_ref,seg_gen
def loaddata(head,path_lms,flag=256,num = 50000):
#head: head of img
#return res:[[path,lms[0,1]]]
fin = open(path_lms,'r')
data = fin.read().splitlines()
res = []
for i in tqdm(range(min(len(data)//2,num))):
name = data[2*i]
path = os.path.join(head,name)
lms = list(map(float,data[2*i+1].split()))
if flag==256:
lms = np.array(lms).reshape(-1,2) / 255.0
else:
lms = (np.array(lms).reshape(-1,2)-64) / 255.0
res.append((path,lms))
return res
def gray2rgb(img):
res = np.stack([img,img,img]).transpose((1,2,0))
return res.astype(np.uint8)
def process(index, album_ref, album_gen, album_pose):
# 30ms
img_gen = cv2.imread(album_gen[index][0])
lms_gen = album_gen[index][1]
img_ref = cv2.imread(album_ref[index // 100][0])[64:64 + 256, 64:64 + 256, :]
lms_ref = album_ref[index // 100][1]
img_pose = cv2.imread(album_pose[index % 100][0])[64:64 + 256, 64:64 + 256, :]
lms_pose = album_pose[index % 100][1]
# affine
# 4ms
lms_ref_, img_ref_ = affineface(img_ref, lms_ref, lms_gen)
# 200ms
lms_ref_parts, img_ref_parts = affineface_parts(img_ref, lms_ref, lms_gen)
# fusion
# fuse_all,seg_ref_,seg_gen = fusion(img_ref_,lms_ref_,img_gen,lms_gen,0.1)
fuse_parts, seg_ref_parts, seg_gen = fusion(img_ref_parts, lms_ref_parts, img_gen, lms_gen, 0.1)
fuse_eye, mask_eye, img_eye = lightEye(img_ref, lms_ref, fuse_parts, lms_gen, 0.1)
res = np.hstack([img_ref, img_pose, img_gen, fuse_eye])
cv2.imwrite('proposed_wild/fuse/%d.jpg' % (index), fuse_eye)
|
110921
|
import re
from patent_client.util.schema import *
from dateutil.parser import parse as parse_dt
from .model import Publication, Inventor, Applicant, Assignee, RelatedPatentDocument, PriorPublication, USReference, ForeignReference, NPLReference, CpcClass, USClass, ForeignPriority
# Related People
class InventorSchema(Schema):
__model__ = Inventor
city = StringField()
first_name = StringField()
last_name = StringField()
region = StringField()
class ApplicantSchema(Schema):
__model__ = Applicant
city = StringField()
country = StringField()
name = StringField()
state = StringField()
type = StringField()
class AssigneeSchema(Schema):
__model__ = Assignee
name = StringField()
city = StringField(required=False)
region = StringField(required=False)
# Classification Types
class CpcClassSchema(Schema):
__model__ = CpcClass
classification = StringField("class")
version = StringField()
class IntlClassSchema(CpcClassSchema):
pass
class USClassSchema(Schema):
__model__ = USClass
classification = StringField("class")
subclassification = StringField("subclass")
# Foreign Priority
class ForeignPrioritySchema(Schema):
__model__ = ForeignPriority
date = DateField()
country_code = StringField()
number = StringField()
# Cited Reference Types
class USReferenceSchema(Schema):
__model__ = USReference
date = StringField()
name = StringField()
publication_number = StringField()
class ForeignReferenceSchema(Schema):
__model__ = ForeignReference
publication_number = StringField()
name = StringField()
country_code = StringField()
class NPLReferenceSchema(Schema):
__model__ = NPLReference
citation = StringField()
# Related Document Types
class PriorPublicationSchema(Schema):
__model__ = PriorPublication
publication_number = StringField()
publication_date = DateField()
class RelatedPatentDocumentSchema(Schema):
__model__ = RelatedPatentDocument
appl_id = StringField()
filing_date = DateField(required=False)
patent_number = StringField(required=False)
class PublicationSchema(Schema):
    __model__ = Publication
publication_number = StringField()
kind_code = StringField()
publication_date = DateField()
title = StringField()
description = StringField()
abstract = StringField()
claims = StringField()
    # design applications are prefixed "D" (series 29); map the prefix, then strip non-digits
    appl_id = StringField("appl_no", formatter=lambda x: re.sub(r"[^\d]", "", x.replace("D", "29")), required=False)
filing_date = DateField("filed")
family_id = StringField(required=False)
pct_filing_date = DateField("pct_filed", required=False)
pct_number = StringField("pct_no", required=False)
national_stage_entry_date = DateField("371_date", required=False)
foreign_priority = Nested(ForeignPrioritySchema(), many=True, required=False)
inventors = Nested(InventorSchema(), many=True)
applicants = Nested(ApplicantSchema(), many=True, required=False)
assignees = Nested(AssigneeSchema(), data_key="assignee", many=True, required=False)
examiner = StringField(required=False)
agent = StringField(required=False)
prior_publications = Nested(PriorPublicationSchema(), many=True)
related_us_applications = Nested(RelatedPatentDocumentSchema(), many=True)
cpc_classes = Nested(CpcClassSchema(), data_key="current_cpc_class", many=True, required=False)
intl_classes = Nested(IntlClassSchema(), data_key="current_international_class", many=True)
us_classes = Nested(USClassSchema(), many=True, data_key="current_us_class")
field_of_search = Nested(USClassSchema(), many=True, data_key="field_of_search", required=False)
us_references = Nested(USReferenceSchema(), many=True, required=False)
foreign_references = Nested(ForeignReferenceSchema(), many=True, required=False)
npl_references = Nested(NPLReferenceSchema(), many=True, required=False)
class ImageSchema(Schema):
publication_number = StringField()
pdf_url = StringField()
sections = Field()
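# Hedged usage sketch (assumes a marshmallow-style load() on Schema, which
# this file does not confirm; the field values are made up):
# pub = PublicationSchema().load({
#     "publication_number": "US20010000001A1",
#     "publication_date": "2001-01-04",
#     "title": "Example Title",
# })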
|
110960
|
from WMCore.Configuration import Configuration
config = Configuration()
config.section_('Agent')
# Agent:
config.Agent.hostName = None
config.Agent.contact = None
config.Agent.teamName = "team_usa"
config.Agent.agentName = None
config.section_('General')
# General: General Settings Section
config.General.workDir = '/home/test/application/WMAgentEmulator'
config.section_('CoreDatabase')
# CoreDatabase:
# dialect: Choose between oracle or mysql
# socket: Set the socket file location for mysql (optional)
#
config.CoreDatabase.connectUrl = 'mysql://username:password@hostname:3306/TestDB'
config.component_('WMAgentEmulator')
# WMAgentEmulator:
config.WMAgentEmulator.componentDir = config.General.workDir + '/WMAgentEmulator'
config.WMAgentEmulator.namespace = "WMQuality.Emulators.WMAgents.WMAgentEmulator"
config.WMAgentEmulator.pollInterval = 10
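# Hedged example of the oracle dialect mentioned above (placeholder TNS name;
# the exact URL format is an assumption, following SQLAlchemy conventions):
# config.CoreDatabase.connectUrl = 'oracle://username:password@tnsName'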
|
110966
|
from torch import nn
import torch
import torch.nn.functional as F
from .base_model import BaseModel
from ..layers import EmbeddingLayer
class BiasMF(BaseModel):
def __init__(self,
feature_map,
model_id="BiasMF",
gpu=-1,
learning_rate=1e-3,
embedding_initializer="lambda w: nn.init.normal_(w, std=1e-4)",
embedding_dim=10,
user_id_field="user_id",
item_id_field="item_id",
enable_bias=False,
num_negs=1,
regularizer=None,
embedding_dropout=0,
similarity_score="dot",
**kwargs):
super(BiasMF, self).__init__(feature_map,
model_id=model_id,
gpu=gpu,
embedding_regularizer=regularizer,
num_negs=num_negs,
embedding_initializer=embedding_initializer,
**kwargs)
self.similarity_score = similarity_score
self.embedding_dim = embedding_dim
self.embedding_layer = EmbeddingLayer(feature_map, embedding_dim)
assert similarity_score in ["dot", "cosine", "sigmoid"]
self.enable_bias = enable_bias
if self.enable_bias:
self.user_bias = EmbeddingLayer(feature_map, 1,
disable_sharing_pretrain=True,
required_feature_columns=[user_id_field])
self.item_bias = EmbeddingLayer(feature_map, 1,
disable_sharing_pretrain=True,
required_feature_columns=[item_id_field])
self.global_bias = nn.Parameter(torch.zeros(1))
self.dropout = nn.Dropout(embedding_dropout)
self.compile(lr=learning_rate, **kwargs)
def forward(self, inputs):
"""
Inputs: [user_dict, item_dict, label]
"""
user_dict, item_dict, labels = inputs[0:3]
user_vecs = self.user_tower(user_dict)
user_vecs = self.dropout(user_vecs)
item_vecs = self.item_tower(item_dict)
y_pred = torch.bmm(item_vecs.view(user_vecs.size(0), self.num_negs + 1, -1),
user_vecs.unsqueeze(-1)).squeeze(-1)
if self.enable_bias:
            # user_bias and global_bias only influence training, not the inference phase
y_pred += self.user_bias(self.to_device(user_dict)) + self.global_bias
if self.similarity_score == "sigmoid":
y_pred = y_pred.sigmoid()
loss = self.get_total_loss(y_pred, labels)
return_dict = {"loss": loss, "y_pred": y_pred}
return return_dict
def user_tower(self, inputs):
user_inputs = self.to_device(inputs)
user_vec = self.embedding_layer(user_inputs, feature_source="user")
if self.similarity_score == "cosine":
user_vec = F.normalize(user_vec)
if self.enable_bias:
user_vec = torch.cat([user_vec, torch.ones(user_vec.size(0), 1).to(self.device)], dim=-1)
return user_vec
def item_tower(self, inputs):
item_inputs = self.to_device(inputs)
item_vec = self.embedding_layer(item_inputs, feature_source="item")
if self.similarity_score == "cosine":
item_vec = F.normalize(item_vec)
if self.enable_bias:
item_vec = torch.cat([item_vec, self.item_bias(item_inputs)], dim=-1)
return item_vec
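# Hedged, self-contained sketch (not part of the original model): the core
# scoring in forward() is a batched dot product between one user vector and
# (1 + num_negs) candidate item vectors per user. Shapes are illustrative
# (batch=4, num_negs=2, embedding_dim=10); the helper name is hypothetical.
def _bmm_scoring_sketch():
    u = torch.randn(4, 10)                              # user embeddings
    v = torch.randn(4, 3, 10)                           # 1 positive + 2 negatives per user
    return torch.bmm(v, u.unsqueeze(-1)).squeeze(-1)    # scores, shape (4, 3)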
|