index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,600 | 5d8a43f74f469b3960819c91e4706406b7dfc94d | from google.auth import credentials
from google.auth import environment_vars
from google.cloud import storage
import logging
import os
import yum
import yum.config
import yum.Errors
import yum.plugins
from yum.yumRepo import YumRepository
# URL scheme handled by this plugin.
URL_SCHEME = 'gs://'
__all__ = ['requires_api_version', 'plugin_type', 'CONDUIT',
           'config_hook', 'init_hook', 'prereposetup_hook']
requires_api_version = '2.5'
plugin_type = yum.plugins.TYPE_CORE
CONDUIT = None
# Repo attributes copied onto the GCS repository when present on the source
# repo.  ('priority' was listed twice in the original; the duplicate is
# removed — setattr is idempotent, so behavior is unchanged.)
OPTIONAL_ATTRIBUTES = ['priority', 'base_persistdir', 'metadata_expire',
                       'skip_if_unavailable', 'keepcache']
# Repo attributes that cause a hard failure when set on a gs:// repo.
UNSUPPORTED_ATTRIBUTES = ['mirrorlist']
def config_hook(conduit):
    """Yum config hook: register GCS-specific repository options.

    Adds a per-repo ``google_application_credentials`` option and extends
    ``baseurl`` parsing so gs:// URLs are accepted alongside the usual schemes.
    """
    yum.config.RepoConf.google_application_credentials = yum.config.Option()
    yum.config.RepoConf.baseurl = yum.config.UrlListOption(
        # URL_SCHEME.strip(':/') yields 'gs' for the schemes whitelist.
        schemes=('http', 'https', 's3', 'ftp', 'file', URL_SCHEME.strip(':/'))
    )
def check_base_url(baseurl):
    """Raise PluginYumExit unless exactly one base URL is configured."""
    if len(baseurl) == 1:
        return
    raise yum.plugins.PluginYumExit("Only one base URL supported; got %s" % baseurl)
def parse_url(url):
    """Return [bucket, path] parsed from a gs://<bucket>/<path> URL.

    Returns the pair (None, None) when url is not a gs:// URL with a bucket.
    """
    if not url.startswith(URL_SCHEME) or len(url) <= len(URL_SCHEME):
        return (None, None)
    remainder = url.rstrip('/')[len(URL_SCHEME):]
    parts = remainder.split('/', 1)
    if len(parts) < 2:
        # Bucket-only URL: the object path is empty.
        parts.append('')
    return parts
def replace_repo(repos, repo):
    """Swap a plain repo for a GCSRepository with the same id.

    The replacement carries the original repo's configuration so downloads go
    through the GCS-aware grabber.
    """
    repos.delete(repo.id)
    repos.add(GCSRepository(repo.id, repo))
def init_hook(conduit):
    """Plugin initialization hook. Setup the GCS repositories.

    Every enabled repository whose first base URL parses as gs:// is replaced
    by a GCSRepository.
    """
    repositories = conduit.getRepos()
    for repository in repositories.listEnabled():
        if len(repository.baseurl) == 0:
            continue
        bucket, _path = parse_url(repository.baseurl[0])
        if bucket and isinstance(repository, YumRepository):
            check_base_url(repository.baseurl)
            replace_repo(repositories, repository)
def prereposetup_hook(conduit):
    """Maintain compatibility with Yum on older CentOS (< 7.7.1908) releases.

    Simply delegates to init_hook so the gs:// repo substitution also runs at
    the prereposetup stage.
    """
    return init_hook(conduit)
class GCSRepository(YumRepository):
    """YumRepository backed by a Google Cloud Storage bucket (gs:// baseurl)."""

    def __init__(self, repoid, repo):
        """Build a GCS repository from an existing repo's configuration.

        Args:
            repoid: string repository id.
            repo: the original YumRepository whose settings are copied.

        Raises:
            yum.plugins.PluginYumExit: on an unparsable URL or unsupported
                configuration (mirrorlist, proxy).
        """
        super(GCSRepository, self).__init__(repoid)
        check_base_url(repo.baseurl)
        bucket, path = parse_url(repo.baseurl[0])
        if not bucket:
            # Fix: the original message had an unbalanced quote ("url %s'").
            msg = "gcsiam: unable to parse url '%s'" % repo.baseurl
            raise yum.plugins.PluginYumExit(msg)
        self.baseurl = repo.baseurl
        if repo.google_application_credentials:
            # Point google-auth at the configured service-account key file.
            os.environ[environment_vars.CREDENTIALS] = repo.google_application_credentials
        self.bucket = bucket
        self.base_path = path
        self.name = repo.name
        self.basecachedir = repo.basecachedir
        self.gpgcheck = repo.gpgcheck
        self.gpgkey = repo.gpgkey
        self.enablegroups = repo.enablegroups
        for attr in OPTIONAL_ATTRIBUTES:
            if hasattr(repo, attr):
                setattr(self, attr, getattr(repo, attr))
        for attr in UNSUPPORTED_ATTRIBUTES:
            if getattr(repo, attr):
                msg = "%s: Unsupported attribute: %s." % (__file__, attr)
                raise yum.plugins.PluginYumExit(msg)
        # Fix: use a default so repos lacking a proxy attribute don't raise
        # AttributeError; absent proxy is treated the same as unset.
        proxy = getattr(repo, 'proxy', None)
        if proxy not in ['_none_', None, False]:
            msg = "%s: Unsupported attribute: proxy. Set proxy=_none_ for an override or unset proxy." % (__file__)
            raise yum.plugins.PluginYumExit(msg)
        self.grabber = None
        self.enable()

    @property
    def urls(self):
        """Base URLs of this repository (mirrors YumRepository.urls)."""
        return self.baseurl

    @urls.setter
    def urls(self, value):
        # Yum assigns urls during setup; the gs:// baseurl is authoritative,
        # so the assignment is deliberately ignored.
        pass

    @property
    def grabfunc(self):
        raise NotImplementedError("grabfunc called, when it shouldn't be!")

    @property
    def grab(self):
        """Lazily create and return the GCS download grabber."""
        if not self.grabber:
            self.grabber = GCSGrabber(self.bucket, self.base_path)
        return self.grabber
class GCSGrabber(object):
    """Downloads repository files from a GCS bucket via the storage API."""

    def __init__(self, bucket, path):
        """Create a grabber rooted at gs://<bucket>/<path>."""
        self.client = storage.Client()
        self.bucket = self.client.bucket(bucket)
        self.base_path = path
        self.verbose_logger = logging.getLogger("yum.verbose.plugin.GCSGrabber")

    def urlgrab(self, url, filename=None, **kwargs):
        """urlgrab(url) copy the file to the local filesystem.

        Returns the local filename the blob was written to (defaults to the
        relative url when no filename is given).
        """
        # Fix: normalize BEFORE building the blob path. The original computed
        # blob_location from the unstripped url, producing "base//file" blob
        # names (distinct objects in GCS) for urls with a leading slash, and
        # logged "to None" before the filename default was resolved.
        url = url.lstrip('/')
        if not filename:
            filename = url
        blob_location = "%s/%s" % (self.base_path, url)
        self.verbose_logger.info(
            "downloading gs://%s/%s to %s" % (self.bucket.name, blob_location, filename))
        blob = storage.blob.Blob(name=blob_location, bucket=self.bucket)
        blob.download_to_filename(filename)
        return filename
|
995,601 | 2a2a958a917453f5f6827febeebf6106303a8577 | #!/usr/bin/python
import json
from subprocess import call

# Chef/ERB template: the <%= ... %> placeholders are substituted with node
# attribute values when the template is rendered, before this script runs.
dirs = json.loads('<%= node["mirror"]["directories"].to_json %>')
for _from in dirs.keys():
    # Mirror each configured directory from the source root to the target,
    # deleting files at the destination that no longer exist at the source.
    full_from = "<%= node['mirror']['from'] %>/" + _from
    call(["rsync", "-avz", "--delete", full_from, "<%= node['mirror']['to'] %>"])
|
995,602 | 7a48b1427da5469940ee7ad8148d984c13edf690 | # -*- coding: utf-8 -*-
# Read two grades from stdin and print their weighted average
# (weights 3.5 and 7.5, total 11).
first_grade = float(input())
second_grade = float(input())
weighted_mean = (first_grade * 3.5 + second_grade * 7.5) / 11
print("MEDIA = %.5f" % weighted_mean)
|
995,603 | 30edc9fa27f58f7962d3c35726ae80d2e5af0343 | #!/usr/bin/env python
# 5000 IRC Bot - Developed by acidvegas in Python (https://acid.vegas/trollbots)
'''
Requirements
* Python (https://www.python.org/downloads/)
Note: This script was developed to be used with the latest version of Python.
Information:
This bot requires network operator privledges in order to use the SAJOIN command.
The bot will idle in the #5000 channel and a channel defined in the config.
Anyone who joins the #5000 channel will be force joined into 5000 random channels.
It will announce in the channel defined in the config who joins the #5000 channel.
The command .kills can be used to see how many people have been 5000'd.
'''
import os,random,socket,ssl,time,threading
nickserv_password='CHANGEME'
operator_password='CHANGEME'
def randstr():
    """Return a random 4-10 character sample of mixed-case ASCII letters."""
    alphabet = 'aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ'
    return ''.join(random.sample(alphabet, random.randint(4, 10)))
def unicode():
    """Return 150-200 random characters drawn from U+1000..U+3000."""
    length = random.randint(150, 200)
    return ''.join(chr(random.randint(0x1000, 0x3000)) for _ in range(length))
def attack(nick):
try:
nicklist.append(nick)
raw(f'PRIVMSG #superbowl :I am fucking the shit out of {nick} right now...')
current=str(int(open(kill_file).read())+1)
with open(kill_file,'w') as kills_file:kills_file.write(current)
for i in range(200):
channels=','.join(('#'+randstr() for x in range(25)))
raw(f'SAJOIN {nick} {channels}')
raw(f'PRIVMSG #5000 :{unicode()} oh got {nick} what is happening {unicode()}')
raw(f'PRIVMSG {nick} :{unicode()} oh got {nick} what is happening {unicode()}')
time.sleep(0.4)
except:pass
finally:
if nick in nicklist:
nicklist.remove(nick)
def raw(msg):sock.send(bytes(msg+'\r\n','utf-8'))
kill_file=os.path.join(os.path.dirname(os.path.realpath(__file__)),'kills.log')
last=0
nicklist=list()
if not os.path.isfile(kill_file):open(kill_file,'w').write('0')
while True:
try:
sock=ssl.wrap_socket(socket.socket(socket.AF_INET,socket.SOCK_STREAM))
sock.connect(('localhost',6697))
raw(f'USER 5000 0 * :I CAN SHOW YOU THE WORLD')
raw('NICK FUCKYOU')
while True:
try:
data=sock.recv(1024).decode('utf-8')
for line in (line for line in data.split('\r\n') if len(line.split())>=2):
print('{0} | [~] - {1}'.format(time.strftime('%I:%M:%S'),line))
args=line.split()
if line.startswith('ERROR :Closing Link:'):raise Exception('Connection has closed.')
elif args[0]=='PING':raw('PONG '+args[1][1:])
elif args[1]=='001':
raw('MODE FUCKYOU +BDd')
raw('PRIVMSG NickServ IDENTIFY FUCKYOU '+nickserv_password)
raw('OPER 5000 '+operator_password)
raw('JOIN #superbowl')
raw('JOIN #5000')
elif args[1]=='401':
if args[3] in nicklist:
nicklist.remove(args[3])
elif args[1]=='JOIN' and len(args)==3:
nick=args[0].split('!')[0][1:]
if args[2][1:]=='#5000' and nick not in ('ak','ChanServ','FUCKYOU') and len(nicklist)<3 and nick not in nicklist:
threading.Thread(target=attack,args=(nick,)).start()
elif args[1]=='PRIVMSG' and len(args)==4:
if ' '.join(args[3:])[1:]=='.kills' and time.time()-last>3:
raw('PRIVMSG #superbowl :'+open(kill_file).read())
last=time.time()
except (UnicodeDecodeError,UnicodeEncodeError):pass
except:sock.close()
finally:time.sleep(15)
|
995,604 | c0db339bd7bfa1b873c603d9f6d9395e88be645a |
def vect_gender(data: str):
    """Encode a gender label as an int; returns None for unseen values."""
    return {"Male": 0, "Female": 1, "Other": 2}.get(data)
def vect_relevent_experience(data: str):
    """Encode the relevant-experience label as 0/1 (KeyError if unknown)."""
    encoding = {"No relevent experience": 0, "Has relevent experience": 1}
    return encoding[data]
def vect_enrolled_university(data: str):
    """Encode university enrollment status; returns None for unseen values."""
    encoding = {"no_enrollment": 0, "Part time course": 1, "Full time course": 2}
    return encoding.get(data)
def vect_education_level(data: str):
    """Encode education level ordinally; returns None for unseen values."""
    encoding = {
        "Primary School": 0,
        "High School": 1,
        "Graduate": 2,
        "Masters": 3,
        "Phd": 4,
    }
    return encoding.get(data)
def vect_major_discipline(data: str):
    """Encode major discipline as an int; returns None for unseen values."""
    encoding = {
        "STEM": 0,
        "Business Degree": 1,
        "Humanities": 2,
        "No Major": 3,
        "Other": 4,
        "Arts": 5,
    }
    return encoding.get(data)
def vect_experience(data):
    """Encode years of experience ("<1", "1".."20", ">20") as an int.

    Returns None for unseen values (e.g. NaN).
    """
    encoding = {"<1": 0, ">20": 21}
    encoding.update({str(years): years for years in range(1, 21)})
    return encoding.get(data)
def vect_company_size(data: str):
    """Encode company-size bucket ordinally; returns None for unseen values."""
    encoding = {
        "<10": 0,
        "10/49+": 1,
        "50-99": 2,
        "100-500": 3,
        "500-999": 4,
        "1000-4999": 5,
        "5000-9999": 6,
        "10000+": 7,
    }
    return encoding.get(data)
def vect_company_type(data: str):
    """Encode company type as an int; returns None for unseen values."""
    encoding = {
        "Pvt Ltd": 0,
        "Funded Startup": 1,
        "Public Sector": 2,
        "Early Stage Startup": 3,
        "Other": 4,
        "NGO": 5,
    }
    return encoding.get(data)
def vect_last_new_job(data: str):
    """Encode years since the last job change; returns None for unseen values."""
    encoding = {"never": 0, "1": 1, "2": 2, "3": 3, "4": 4, ">4": 5}
    return encoding.get(data)
def rem(x):
    """Return the number of missing entries in a row of 14 columns.

    `x.count()` counts non-null values (pandas Series semantics).
    """
    return 14 - x.count()
def vectorise_data(df_train, df_test, df_answer, isNull):
    """Numerically encode the categorical columns of the train/test frames.

    Args:
        df_train: training DataFrame; columns are re-encoded in place and the
            frame is returned.
        df_test: test DataFrame; encoded the same way as df_train.
        df_answer: passed through untouched.
        isNull: when False, remaining NaNs are imputed with the rounded column
            mean (enrolled_university defaults to 0 instead).

    Returns:
        (df_train, df_test, df_answer)
    """
    # df_train = df_train.drop(df_train[(df_train.isnull().sum(axis=1) >2)].index)
    # Report how many values are missing overall (rem() assumes 14 columns).
    df_train['empty_count'] = df_train.apply(lambda x: rem(x) ,axis=1)
    print(f"count of null {df_train['empty_count'].sum(axis=0)}")
    df_train = df_train.drop(['empty_count'],axis=1)
    # Strip the leading 5 characters of the city label to get a numeric id —
    # presumably a "city_" prefix; TODO confirm against the dataset.
    df_train['city'] = df_train['city'].str[5:].values.astype('int')
    df_train['gender'] = df_train.apply(lambda row: vect_gender(row['gender']), axis=1)
    df_train['relevent_experience'] = df_train.apply(lambda row: vect_relevent_experience(row['relevent_experience']), axis=1)
    df_train['enrolled_university'] = df_train.apply(lambda row: vect_enrolled_university(row['enrolled_university']), axis=1)
    df_train['education_level'] = df_train.apply(lambda row: vect_education_level(row['education_level']), axis=1)
    df_train['major_discipline'] = df_train.apply(lambda row: vect_major_discipline(row['major_discipline']), axis=1)
    df_train['experience'] = df_train.apply(lambda row: vect_experience(row['experience']), axis=1)
    df_train['company_size'] = df_train.apply(lambda row: vect_company_size(row['company_size']), axis=1)
    df_train['company_type'] = df_train.apply(lambda row: vect_company_type(row['company_type']), axis=1)
    df_train['last_new_job'] = df_train.apply(lambda row: vect_last_new_job(row['last_new_job']), axis=1)
    # Same encoding applied to the test frame.
    df_test['city'] = df_test['city'].str[5:].values.astype('int')
    df_test['gender'] = df_test.apply(lambda row: vect_gender(row['gender']), axis=1)
    df_test['relevent_experience'] = df_test.apply(lambda row: vect_relevent_experience(row['relevent_experience']), axis=1)
    df_test['enrolled_university'] = df_test.apply(lambda row: vect_enrolled_university(row['enrolled_university']), axis=1)
    df_test['education_level'] = df_test.apply(lambda row: vect_education_level(row['education_level']), axis=1)
    df_test['major_discipline'] = df_test.apply(lambda row: vect_major_discipline(row['major_discipline']), axis=1)
    df_test['experience'] = df_test.apply(lambda row: vect_experience(row['experience']), axis=1)
    df_test['company_size'] = df_test.apply(lambda row: vect_company_size(row['company_size']), axis=1)
    df_test['company_type'] = df_test.apply(lambda row: vect_company_type(row['company_type']), axis=1)
    df_test['last_new_job'] = df_test.apply(lambda row: vect_last_new_job(row['last_new_job']), axis=1)
    if not isNull:
        # Impute remaining NaNs with the rounded column mean (test frame uses
        # its own means — NOTE(review): confirm this train/test asymmetry is
        # intentional, as it can leak distribution differences).
        df_train['gender'] = df_train['gender'].fillna(round((df_train['gender'].mean())))
        df_train['enrolled_university'] = df_train['enrolled_university'].fillna(0)
        df_train['major_discipline'] = df_train['major_discipline'].fillna(round((df_train['major_discipline'].mean())))
        df_train['company_size'] = df_train['company_size'].fillna(round((df_train['company_size'].mean())))
        df_train['company_type'] = df_train['company_type'].fillna(round((df_train['company_type'].mean())))
        df_train['experience'] = df_train['experience'].fillna(round((df_train['experience'].mean())))
        df_train['last_new_job'] = df_train['last_new_job'].fillna(round((df_train['last_new_job'].mean())))
        df_train['education_level'] = df_train['education_level'].fillna(round((df_train['education_level'].mean())))
        df_test['gender'] = df_test['gender'].fillna(round((df_test['gender'].mean())))
        df_test['enrolled_university'] = df_test['enrolled_university'].fillna(0)
        df_test['major_discipline'] = df_test['major_discipline'].fillna(round((df_test['major_discipline'].mean())))
        df_test['company_size'] = df_test['company_size'].fillna(round((df_test['company_size'].mean())))
        df_test['company_type'] = df_test['company_type'].fillna(round((df_test['company_type'].mean())))
        df_test['experience'] = df_test['experience'].fillna(round((df_test['experience'].mean())))
        df_test['last_new_job'] = df_test['last_new_job'].fillna(round((df_test['last_new_job'].mean())))
        df_test['education_level'] = df_test['education_level'].fillna(round((df_test['education_level'].mean())))
    return df_train, df_test, df_answer
995,605 | c2627a1e9960b4ad3e4928406614c6c74ae6a150 | from bpy.types import Panel
from . import props
class MeshConstraintsPanelBase(Panel):
    """Common placement for all Mesh Constraints panels: the N-sidebar of the
    3D viewport, under the "Constraints" category tab."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "Constraints"
class MeshConstraintsPanelMain(MeshConstraintsPanelBase):
    """Top-level panel: the Solve button and the definition-drawing toggle."""
    bl_label = "Mesh Constraints"
    bl_idname = "MESH_CONSTRAINTS_PT_MAIN"

    def draw(self, context):
        container = self.layout.box()
        row = container.row()
        row.operator("mesh_constraints.solve", text="Solve", icon="SNAP_ON")
        drawing = context.window_manager.mesh_constraints_draw_constraints_definition
        toggle_icon = "PAUSE" if drawing else "PLAY"
        row.operator(
            "mesh_constraints.draw_constraints_definition", text="Definition", icon=toggle_icon
        )
        # TODO display Solver error here ?
class MeshConstraintsPanelAdd(MeshConstraintsPanelBase):
    """Sub-panel with one button per available add-constraint operator."""
    bl_label = "Add constraints"
    bl_idname = "MESH_CONSTRAINTS_PT_ADD"
    bl_parent_id = "MESH_CONSTRAINTS_PT_MAIN"

    def draw(self, context):
        kinds = props.ConstraintsKind
        labels = props.constraints_kind_abbreviation
        # One tuple per panel row: (operator idname, constraint kind),
        # drawn top to bottom, left to right.
        # TODO: fix_xy/fix_xz/fix_yz operators are not exposed yet.
        button_rows = (
            (
                ("mesh_constraints.constraint_distance_2_vertices",
                 kinds.DISTANCE_BETWEEN_2_VERTICES),
                ("mesh_constraints.constraint_same_distance_2_edges",
                 kinds.SAME_DISTANCE),
            ),
            (
                ("mesh_constraints.constraint_fix_xyz_coord", kinds.FIX_XYZ_COORD),
                ("mesh_constraints.constraint_fix_x_coord", kinds.FIX_X_COORD),
                ("mesh_constraints.constraint_fix_y_coord", kinds.FIX_Y_COORD),
                ("mesh_constraints.constraint_fix_z_coord", kinds.FIX_Z_COORD),
            ),
            (
                ("mesh_constraints.constraint_on_x", kinds.ON_X),
                ("mesh_constraints.constraint_on_y", kinds.ON_Y),
                ("mesh_constraints.constraint_on_z", kinds.ON_Z),
            ),
            (
                ("mesh_constraints.constraint_angle", kinds.ANGLE),
                ("mesh_constraints.constraint_parallel_2_edges", kinds.PARALLEL),
                ("mesh_constraints.constraint_perpendicular_2_edges",
                 kinds.PERPENDICULAR),
            ),
        )
        box = self.layout.box()
        for entries in button_rows:
            row = box.row()
            for operator_id, constraint_kind in entries:
                row.operator(operator_id, text=labels[constraint_kind])
class MeshConstraintsPanelItems(MeshConstraintsPanelBase):
    """List panel: shows every constraint stored on the active object, with
    visibility toggles, expandable per-constraint details and delete buttons."""
    bl_label = "Items"
    bl_idname = "MESH_CONSTRAINTS_PT_ITEMS"
    bl_parent_id = "MESH_CONSTRAINTS_PT_MAIN"

    def draw(self, context):
        # Constraints live in a custom property on the active object.
        o = context.object
        if o is None or "MeshConstraintGenerator" not in o:
            # I need an object with constraints in it !
            self.layout.box().row(align=True).label(text="No constraints yet")
            return
        mc = props.MeshConstraints(o.MeshConstraintGenerator)
        box = self.layout.box()
        len_mc = len(mc)
        if len_mc == 0:
            box.row(align=True).label(text="No constraints yet")
            return
        box = self.layout.box()
        # Bulk actions over the whole constraint list.
        row = box.row(align=True)
        row.operator("mesh_constraints.hide_all_constraints", text="Hide all", icon="HIDE_ON")
        row.operator("mesh_constraints.show_all_constraints", text="Show all", icon="HIDE_OFF")
        row = box.row(align=True)
        row.operator("mesh_constraints.delete_all_constraints", text="Delete all", icon="X")
        # NOTE(review): assumes mc.reverse() returns an iterable of constraints
        # (unlike list.reverse, which reverses in place and returns None) —
        # confirm against props.MeshConstraints.
        for index, c in enumerate(mc.reverse()):
            # TODO do something with ValueError ?
            c_kind = props.ConstraintsKind(c.kind)
            c_abbreviation = props.constraints_kind_abbreviation[c_kind]
            c_display = props.constraints_kind_display[c_kind]
            row = box.row(align=True)
            icon = "HIDE_OFF" if c.view else "HIDE_ON"
            row.prop(c.raw, "view", text="", toggle=True, icon=icon)
            row.prop(c.raw, "show_details", text="", toggle=True, icon="PREFERENCES")
            icon = "ERROR" if c.in_error else "NONE"
            row.label(text=c_abbreviation, icon=icon)
            if c.nb_values == 1:
                # Single-value constraints get an inline editable value field.
                row.prop(c.raw, "value0", text="")
            delete_op = "mesh_constraints.delete_constraint"
            # The list is drawn reversed, so map back to the stored index.
            row.operator(delete_op, text="", icon="X").index = len_mc - index - 1
            if c.show_details:
                row = box.row(align=True)
                row.label(text=c_display)
                # Expanded editors for the points/values of each constraint kind.
                if c_kind == props.ConstraintsKind.DISTANCE_BETWEEN_2_VERTICES:
                    box.row(align=True).prop(c.raw, "value0", text="Distance")
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                elif c_kind == props.ConstraintsKind.FIX_X_COORD:
                    box.row(align=True).prop(c.raw, "value0", text="X")
                    box.row(align=True).prop(c.raw, "point0", text="Point")
                elif c_kind == props.ConstraintsKind.FIX_Y_COORD:
                    box.row(align=True).prop(c.raw, "value0", text="Y")
                    box.row(align=True).prop(c.raw, "point0", text="Point")
                elif c_kind == props.ConstraintsKind.FIX_Z_COORD:
                    box.row(align=True).prop(c.raw, "value0", text="Z")
                    box.row(align=True).prop(c.raw, "point0", text="Point")
                elif c_kind == props.ConstraintsKind.FIX_XY_COORD:
                    box.row(align=True).prop(c.raw, "value0", text="X")
                    box.row(align=True).prop(c.raw, "value1", text="Y")
                    box.row(align=True).prop(c.raw, "point0", text="Point")
                elif c_kind == props.ConstraintsKind.FIX_XZ_COORD:
                    box.row(align=True).prop(c.raw, "value0", text="X")
                    box.row(align=True).prop(c.raw, "value1", text="Z")
                    box.row(align=True).prop(c.raw, "point0", text="Point")
                elif c_kind == props.ConstraintsKind.FIX_YZ_COORD:
                    box.row(align=True).prop(c.raw, "value0", text="Y")
                    box.row(align=True).prop(c.raw, "value1", text="Z")
                    box.row(align=True).prop(c.raw, "point0", text="Point")
                elif c_kind == props.ConstraintsKind.FIX_XYZ_COORD:
                    box.row(align=True).prop(c.raw, "value0", text="X")
                    box.row(align=True).prop(c.raw, "value1", text="Y")
                    box.row(align=True).prop(c.raw, "value2", text="Z")
                    box.row(align=True).prop(c.raw, "point0", text="Point")
                elif c_kind == props.ConstraintsKind.PARALLEL:
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                    box.row(align=True).prop(c.raw, "point2", text="Point2")
                    box.row(align=True).prop(c.raw, "point3", text="Point3")
                elif c_kind == props.ConstraintsKind.PERPENDICULAR:
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                    box.row(align=True).prop(c.raw, "point2", text="Point2")
                    box.row(align=True).prop(c.raw, "point3", text="Point3")
                elif c_kind == props.ConstraintsKind.ON_X:
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                elif c_kind == props.ConstraintsKind.ON_Y:
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                elif c_kind == props.ConstraintsKind.ON_Z:
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                elif c_kind == props.ConstraintsKind.SAME_DISTANCE:
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                    box.row(align=True).prop(c.raw, "point2", text="Point2")
                    box.row(align=True).prop(c.raw, "point3", text="Point3")
                elif c_kind == props.ConstraintsKind.ANGLE:
                    box.row(align=True).prop(c.raw, "value0", text="Angle")
                    box.row(align=True).prop(c.raw, "point0", text="Point0")
                    box.row(align=True).prop(c.raw, "point1", text="Point1")
                    box.row(align=True).prop(c.raw, "point2", text="Point2")
                    box.row(align=True).prop(c.raw, "point3", text="Point3")
                else:
                    raise Exception(f"Not supported: {c_display}")
                # Blank spacer row after the expanded details section.
                row = box.row(align=True)
                row.label(text="")
|
995,606 | cb8631eeb1728b90ccb406700f344800a433ed4d | # Generated by Django 3.1.4 on 2021-01-23 18:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema changes for web.Contact: snake_case name fields, a file upload
    replacing the docs field, and stricter email/message field types."""

    dependencies = [
        ('web', '0002_auto_20201230_2138'),
    ]

    operations = [
        # firstname/lastname -> snake_case first_name/last_name.
        migrations.RenameField(
            model_name='contact',
            old_name='firstname',
            new_name='first_name',
        ),
        migrations.RenameField(
            model_name='contact',
            old_name='lastname',
            new_name='last_name',
        ),
        migrations.RemoveField(
            model_name='contact',
            name='docs',
        ),
        # NOTE(review): default=None on a non-nullable FileField will fail for
        # existing rows unless the table is empty — confirm before applying.
        migrations.AddField(
            model_name='contact',
            name='file',
            field=models.FileField(default=None, upload_to='Pictures/'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='email',
            field=models.EmailField(max_length=122),
        ),
        migrations.AlterField(
            model_name='contact',
            name='message',
            field=models.TextField(),
        ),
    ]
|
995,607 | 9fc65052e5506be5a6da8dcb26f10a20ed46c07c | import copy
import json
import os
import time
import urllib.request
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from scipy.optimize import linprog
# Fix: the original `global penguin_url, headers` at module scope was a no-op
# (the `global` statement only has meaning inside a function) and is removed.
# Penguin Statistics API endpoint and default request headers.
penguin_url = "https://penguin-stats.io/PenguinStats/api/"
headers = {"User-Agent": "ArkPlanner"}
# Languages shipped in the ArknightsGameData repository.
gamedata_langs = ["en_US", "ja_JP", "ko_KR", "zh_CN"]
DEFAULT_LANG = "en_US"
NON_CN_WORLD_NUM = 4
# Default minimum number of recorded runs for a drop entry to be trusted.
FILTER_FREQ_DEFAULT = 100
class MaterialPlanning(object):
def __init__(
self,
filter_freq=FILTER_FREQ_DEFAULT,
filter_stages=[],
url_stats="result/matrix?show_stage_details=true&show_item_details=true",
url_rules="formula",
path_stats="data/matrix.json",
dont_save_data=False,
path_rules="data/formula.json",
gamedata_path="https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/"
+ "master/{}/gamedata/excel/item_table.json",
):
"""
Object initialization.
Args:
filter_freq: int or None. The lowest frequency that we consider.
No filter will be applied if None.
url_stats: string. url to the dropping rate stats data.
url_rules: string. url to the composing rules data.
path_stats: string. local path to the dropping rate stats data.
path_rules: string. local path to the composing rules data.
"""
if not dont_save_data:
try:
material_probs, convertion_rules = load_data(path_stats, path_rules)
except FileNotFoundError:
material_probs, convertion_rules = request_data(
penguin_url + url_stats,
penguin_url + url_rules,
path_stats,
path_rules,
gamedata_path,
)
print("done.")
else:
material_probs, convertion_rules = request_data(
penguin_url + url_stats,
penguin_url + url_rules,
path_stats,
path_rules,
gamedata_path,
dont_save_data,
)
self.itemdata = request_itemdata(gamedata_path)
self.itemdata_rv = {
lang: {v: k for k, v in dct.items()} for lang, dct in self.itemdata.items()
}
filtered_probs = []
for dct in material_probs["matrix"]:
if (
dct["stage"]["apCost"] > 0.1
and dct["stage"]["code"] not in filter_stages
):
if not filter_freq or dct["times"] >= filter_freq:
filtered_probs.append(dct)
material_probs["matrix"] = filtered_probs
self._set_lp_parameters(*self._pre_processing(material_probs, convertion_rules))
    def _pre_processing(self, material_probs, convertion_rules):
        """
        Compute costs, convertion rules and items probabilities from requested dictionaries.

        Args:
            material_probs: List of dictionaries recording the dropping info per stage per item.
                Keys of instances: ["itemID", "times", "itemName", "quantity", "apCost", "stageCode", "stageID"].
            convertion_rules: List of dictionaries recording the rules of composing.
                Keys of instances: ["id", "name", "level", "source", "madeof"].

        Returns:
            (convertions_group, farms_group) where convertions_group is
            (convertion_matrix, convertion_outc_matrix, convertion_cost_lst)
            and farms_group is (probs_matrix, cost_lst, cost_exp_offset,
            cost_gold_offset).
        """
        # To count items and stages.
        # Craftable endgame materials that are registered manually (CN names);
        # presumably absent from the drop matrix — TODO confirm.
        additional_items = {"30135": u"D32钢", "30125": u"双极纳米片", "30115": u"聚合剂"}
        # AP-equivalent worth of one unit of EXP and one unit of gold.
        exp_unit = 200 * (30.0 - 0.048 * 30) / 7400
        gold_unit = 0.004
        # EXP value of each EXP-card item id.
        exp_worths = {
            "2001": exp_unit,
            "2002": exp_unit * 2,
            "2003": exp_unit * 5,
            "2004": exp_unit * 10,
            "3003": exp_unit * 2,
        }
        gold_worths = {}
        item_dct = {}
        stage_dct = {}
        for dct in material_probs["matrix"]:
            item_dct[dct["item"]["itemId"]] = dct["item"]["name"]
            stage_dct[dct["stage"]["code"]] = dct["stage"]["code"]
        item_dct.update(additional_items)
        # To construct mapping from id to item names.
        # Only items with purely numeric ids are kept (float() as the filter).
        item_array = []
        item_id_array = []
        for k, v in item_dct.items():
            try:
                float(k)
                item_array.append(v)
                item_id_array.append(k)
            except ValueError:
                pass
        self.item_array = np.array(item_array)
        self.item_id_array = np.array(item_id_array)
        # Reverse lookups: item id -> column index, item name -> column index.
        self.item_id_rv = {int(v): k for k, v in enumerate(item_id_array)}
        self.item_dct_rv = {v: k for k, v in enumerate(item_array)}
        # To construct mapping from stage id to stage names and vice versa.
        stage_array = []
        for k, v in stage_dct.items():
            stage_array.append(v)
        self.stage_array = np.array(stage_array)
        self.stage_dct_rv = {v: k for k, v in enumerate(self.stage_array)}
        # To format dropping records into sparse probability matrix
        probs_matrix = np.zeros([len(stage_array), len(item_array)])
        cost_lst = np.zeros(len(stage_array))
        cost_exp_offset = np.zeros(len(stage_array))
        cost_gold_offset = np.zeros(len(stage_array))
        for dct in material_probs["matrix"]:
            try:
                cost_lst[self.stage_dct_rv[dct["stage"]["code"]]] = dct["stage"][
                    "apCost"
                ]
                # float() filters out non-numeric (furniture etc.) item ids.
                float(dct["item"]["itemId"])
                probs_matrix[
                    self.stage_dct_rv[dct["stage"]["code"]],
                    self.item_dct_rv[dct["item"]["name"]],
                ] = dct["quantity"] / float(dct["times"])
                if cost_lst[self.stage_dct_rv[dct["stage"]["code"]]] != 0:
                    # Each AP spent also yields gold worth 12 * gold_unit.
                    cost_gold_offset[self.stage_dct_rv[dct["stage"]["code"]]] = -dct[
                        "stage"
                    ]["apCost"] * (12 * gold_unit)
            except ValueError:
                pass
            # Credit the stage for EXP cards it drops.
            try:
                cost_exp_offset[self.stage_dct_rv[dct["stage"]["code"]]] -= (
                    exp_worths[dct["item"]["itemId"]]
                    * dct["quantity"]
                    / float(dct["times"])
                )
            except (KeyError, ValueError):
                pass
            # Credit the stage for gold-valued items it drops (gold_worths is
            # currently empty, so this loop is effectively a no-op).
            try:
                cost_gold_offset[self.stage_dct_rv[dct["stage"]["code"]]] -= (
                    gold_worths[dct["item"]["itemId"]]
                    * dct["quantity"]
                    / float(dct["times"])
                )
            except (KeyError, ValueError):
                pass
        # Hardcoding: extra gold farmed.
        cost_gold_offset[self.stage_dct_rv["S4-6"]] -= 3228 * gold_unit
        cost_gold_offset[self.stage_dct_rv["S5-2"]] -= 2484 * gold_unit
        # To build equivalence relationship from convert_rule_dct.
        self.convertions_dct = {}
        convertion_matrix = []
        convertion_outc_matrix = []
        convertion_cost_lst = []
        for rule in convertion_rules:
            # Row: +1 of the crafted item, minus its ingredient counts.
            convertion = np.zeros(len(self.item_array))
            convertion[self.item_dct_rv[rule["name"]]] = 1
            comp_dct = {comp["id"]: comp["count"] for comp in rule["costs"]}
            self.convertions_dct[rule["id"]] = comp_dct
            for item_id in comp_dct:
                convertion[self.item_id_rv[int(item_id)]] -= comp_dct[item_id]
            # Snapshot the pure rule before adding expected by-products.
            convertion_matrix.append(copy.deepcopy(convertion))
            outc_dct = {outc["name"]: outc["count"] for outc in rule["extraOutcome"]}
            outc_wgh = {outc["name"]: outc["weight"] for outc in rule["extraOutcome"]}
            weight_sum = float(sum(outc_wgh.values()))
            # 0.175 appears to be the by-product trigger probability — confirm.
            for item_id in outc_dct:
                convertion[self.item_dct_rv[item_id]] += (
                    outc_dct[item_id] * 0.175 * outc_wgh[item_id] / weight_sum
                )
            convertion_outc_matrix.append(convertion)
            convertion_cost_lst.append(rule["goldCost"] * 0.004)
        convertions_group = (
            np.array(convertion_matrix),
            np.array(convertion_outc_matrix),
            np.array(convertion_cost_lst),
        )
        farms_group = (probs_matrix, cost_lst, cost_exp_offset, cost_gold_offset)
        return convertions_group, farms_group
def _set_lp_parameters(self, convertions_group, farms_group):
"""
Object initialization.
Args:
convertion_matrix: matrix of shape [n_rules, n_items].
Each row represent a rule.
convertion_cost_lst: list. Cost in equal value to the currency spent in convertion.
probs_matrix: sparse matrix of shape [n_stages, n_items].
Items per clear (probabilities) at each stage.
cost_lst: list. Costs per clear at each stage.
"""
(
self.convertion_matrix,
self.convertion_outc_matrix,
self.convertion_cost_lst,
) = convertions_group
(
self.probs_matrix,
self.cost_lst,
self.cost_exp_offset,
self.cost_gold_offset,
) = farms_group
assert len(self.probs_matrix) == len(self.cost_lst)
assert len(self.convertion_matrix) == len(self.convertion_cost_lst)
assert self.probs_matrix.shape[1] == self.convertion_matrix.shape[1]
    def update(
        self,
        filter_freq=FILTER_FREQ_DEFAULT,
        filter_stages=None,
        url_stats="result/matrix?show_stage_details=true&show_item_details=true",
        url_rules="formula",
        path_stats="data/matrix.json",
        path_rules="data/formula.json",
        gamedata_path="https://raw.githubusercontent.com/Kengxxiao/ArknightsGameData/master/{}/gamedata/excel/item_table.json",
        dont_save_data=False,
    ):
        """
        To update parameters when probabilities change or new items added.

        Args:
            filter_freq: int or None. The lowest frequency that we consider.
                No filter will be applied if None (falsy).
            filter_stages: list of stage codes to exclude, or None.
            url_stats: string. url to the dropping rate stats data.
            url_rules: string. url to the composing rules data.
            path_stats: string. local path to the dropping rate stats data.
            path_rules: string. local path to the composing rules data.
            gamedata_path: format string for per-language item tables.
            dont_save_data: bool. When True, do not write fetched data to disk.
        """
        # Unlike __init__, always fetch fresh data from the network.
        material_probs, convertion_rules = request_data(
            penguin_url + url_stats,
            penguin_url + url_rules,
            path_stats,
            path_rules,
            gamedata_path,
            dont_save_data,
        )
        self.itemdata = request_itemdata(gamedata_path)
        # Per-language reverse maps {item name -> item id}.
        self.itemdata_rv = {
            lang: {v: k for k, v in dct.items()} for lang, dct in self.itemdata.items()
        }
        # NOTE(review): unlike __init__, the apCost > 0.1 filter is not
        # reapplied here, and no filtering happens at all when filter_freq is
        # falsy — confirm this asymmetry is intentional.
        if filter_freq:
            if filter_stages is None:
                filter_stages = []
            filtered_probs = []
            for dct in material_probs["matrix"]:
                if (
                    dct["times"] >= filter_freq
                    and dct["stage"]["code"] not in filter_stages
                ):
                    filtered_probs.append(dct)
            material_probs["matrix"] = filtered_probs
        self._set_lp_parameters(*self._pre_processing(material_probs, convertion_rules))
    def _get_plan_no_prioties(
        self, demand_lst, outcome=False, gold_demand=True, exp_demand=True
    ):
        """
        To solve linear programming problem without prioties.

        Args:
            demand_lst: list of materials demand. Should include all items (zero if not required).
            outcome: bool. Include expected crafting by-products when True.
            gold_demand: bool. Account for gold cost/value offsets when True.
            exp_demand: bool. Account for EXP value offsets when True.

        Returns:
            (solution, dual_solution, excp_factor): the primal and dual
            scipy OptimizeResult objects and the final demand scale factor.
        """
        # Constraint matrix: one column per action (farming stage or crafting
        # rule), one row per item.
        A_ub = (
            np.vstack([self.probs_matrix, self.convertion_outc_matrix])
            if outcome
            else np.vstack([self.probs_matrix, self.convertion_matrix])
        ).T
        # Stage cost minus the value of EXP/gold that farming it yields.
        farm_cost = (
            self.cost_lst
            + (self.cost_exp_offset if exp_demand else 0)
            + (self.cost_gold_offset if gold_demand else 0)
        )
        convertion_cost_lst = (
            self.convertion_cost_lst
            if gold_demand
            else np.zeros(self.convertion_cost_lst.shape)
        )
        cost = np.hstack([farm_cost, convertion_cost_lst])
        assert np.any(farm_cost >= 0)
        # Retry with demand scaled down by 10x when the solver reports
        # numerical difficulties (status == 4), up to 5 attempts each.
        # NOTE(review): method="interior-point" was removed in SciPy 1.11;
        # this code requires scipy < 1.11 or a switch to method="highs".
        excp_factor = 1.0
        dual_factor = 1.0
        solution = None
        for _ in range(5):
            solution = linprog(
                c=cost,
                A_ub=-A_ub,
                b_ub=-np.array(demand_lst) * excp_factor,
                method="interior-point",
            )
            if solution.status != 4:
                break
            excp_factor /= 10.0
        # Dual problem: shadow prices (per-item values) of the demands.
        dual_solution = None
        for _ in range(5):
            dual_solution = linprog(
                c=-np.array(demand_lst) * excp_factor * dual_factor,
                A_ub=A_ub.T,
                b_ub=cost,
                method="interior-point",
            )
            if dual_solution.status != 4:
                break
            dual_factor /= 10.0
        return solution, dual_solution, excp_factor
def convert_requirements(
    self, requirement_dct: Union[None, Dict[str, int]]
) -> Tuple[Dict[int, int], str]:
    """
    Convert a requirement dict with variable key types into a dict mapping an
    item's ID to its quantity.

    Args:
        requirement_dct: a Dict[str, int] where the keys are all of one of
            the following types: English name, Chinese name, Japanese name,
            Korean name, or item ID. May be None.

    Returns:
        requirements: a Dict[int, int] mapping item ID to quantity.
        lang: the successfully parsed language, "id" for raw IDs, or "" when
            the input is None.

    Raises:
        ValueError: when the input cannot be parsed as IDs or as any known
            language; the collected per-attempt errors are attached as the
            exception argument. (Previously a bare BaseException was raised,
            which escapes ``except Exception`` handlers; ValueError is still
            caught by any caller that caught BaseException.)
    """
    if requirement_dct is None:
        return {}, ""

    err_lst: List[BaseException] = []

    # First try parsing every key as a numeric item ID.
    try:
        return {int(k): int(v) for k, v in requirement_dct.items()}, "id"
    except (ValueError, KeyError) as err:
        err_lst.append(err)

    # Otherwise try each language's reverse name map in turn; the first
    # language that resolves every key wins.
    for lang, name_map in self.itemdata_rv.items():
        try:
            return {name_map[k]: int(v) for k, v in requirement_dct.items()}, lang
        except (ValueError, KeyError) as err:
            err_lst.append(err)

    # TODO: create custom exception class
    raise ValueError(err_lst)
def get_plan(
    self,
    requirement_dct,
    deposited_dct=None,
    print_output=True,
    outcome=False,
    gold_demand=True,
    exp_demand=True,
    language=None,
    exclude=None,
    non_cn_compat=False,
):
    """
    User API. Compute the material farming plan given requirements and owned items.

    Args:
        requirement_dct: dict of required items mapped to their amounts.
        deposited_dct: dict of owned items mapped to their amounts; these are
            subtracted from the demand.
        print_output: when True, pretty-print the resulting plan.
        outcome: use the convertion matrix that includes craft by-products.
        gold_demand: include the gold cost offset / craft costs.
        exp_demand: include the exp cost offset.
        language: language code for item names in the output; defaults to the
            language the requirements were parsed as.
        exclude: optional iterable of stage codes to ignore.
        non_cn_compat: drop stages from worlds above NON_CN_WORLD_NUM.

    Returns:
        res: dict with keys "lang", "cost", "gcost", "gold", "exp",
            "stages", "craft" and "values".

    Raises:
        ValueError: when the LP solver does not terminate successfully.
    """
    status_dct = {
        0: "Optimization terminated successfully. ",
        1: "Iteration limit reached. ",
        2: "Problem appears to be infeasible. ",
        3: "Problem appears to be unbounded. ",
        4: "Numerical difficulties encountered.",
    }
    stt = time.time()
    requirement_dct, requirement_lang = self.convert_requirements(requirement_dct)
    if language is None:
        language = requirement_lang
    # BUG FIX: the original passed None here, silently discarding the
    # caller's deposited items so owned materials never reduced the demand.
    deposited_dct, _ = self.convert_requirements(deposited_dct)

    demand_lst = [0 for x in range(len(self.item_array))]
    for k, v in requirement_dct.items():
        demand_lst[self.item_id_rv[k]] = v
    for k, v in deposited_dct.items():
        # NOTE: was `self.item_dct_rv` — presumably a typo (this line was
        # unreachable while deposited_dct was always empty); item_id_rv
        # matches the requirement loop above. Confirm no item_dct_rv exists.
        demand_lst[self.item_id_rv[k]] -= v

    exclude = set() if exclude is None else set(exclude)

    # Mark which stages survive the exclude / non-CN filters.
    is_stage_alive = []
    for stage in self.stage_array:
        if stage in exclude:
            is_stage_alive.append(False)
            continue
        if non_cn_compat:
            try:
                # Stage codes like "4-7" / "S5-2": first digit is the world.
                if int(stage.lstrip("S")[0]) > NON_CN_WORLD_NUM:
                    is_stage_alive.append(False)
                    continue
            except ValueError:
                pass  # non-numeric stage codes (events etc.) are kept
        is_stage_alive.append(True)

    back_trace = None
    if exclude or non_cn_compat:
        # Save the full stage tables so they can be restored afterwards.
        back_trace = [
            copy.copy(self.stage_array),
            copy.copy(self.cost_lst),
            copy.copy(self.probs_matrix),
            copy.copy(self.cost_exp_offset),
            copy.copy(self.cost_gold_offset),
        ]
        self.stage_array = self.stage_array[is_stage_alive]
        self.cost_lst = self.cost_lst[is_stage_alive]
        self.probs_matrix = self.probs_matrix[is_stage_alive]
        self.cost_exp_offset = self.cost_exp_offset[is_stage_alive]
        self.cost_gold_offset = self.cost_gold_offset[is_stage_alive]

    try:
        def localized_name(item_id):
            # Resolve an item id to a display name, falling back to CN when
            # the item is missing in the requested language.
            try:
                return self.itemdata[language][int(item_id)]
            except KeyError:
                return self.itemdata["zh_CN"][int(item_id)]

        solution, dual_solution, excp_factor = self._get_plan_no_prioties(
            demand_lst, outcome, gold_demand, exp_demand
        )
        x, status = solution.x / excp_factor, solution.status
        y = dual_solution.x
        n_looting, n_convertion = x[: len(self.cost_lst)], x[len(self.cost_lst) :]

        cost = np.dot(x[: len(self.cost_lst)], self.cost_lst)
        gcost = np.dot(x[len(self.cost_lst) :], self.convertion_cost_lst) / 0.004
        gold = -np.dot(n_looting, self.cost_gold_offset) / 0.004
        exp = -np.dot(n_looting, self.cost_exp_offset) * 7400 / 30.0

        if status != 0:
            raise ValueError(status_dct[status])

        # Stages worth farming (>= 0.1 runs), with their notable drops.
        stages = []
        for i, t in enumerate(n_looting):
            if t >= 0.1:
                target_items = np.where(self.probs_matrix[i] >= 0.02)[0]
                items = {}
                for idx in target_items:
                    # Only 5-character ids are regular materials.
                    if len(self.item_id_array[idx]) != 5:
                        continue
                    items[localized_name(self.item_id_array[idx])] = float2str(
                        self.probs_matrix[i, idx] * t
                    )
                stages.append(
                    {
                        "stage": self.stage_array[i],
                        "count": float2str(t),
                        "items": items,
                    }
                )

        # Crafts: firm suggestions (>= 0.1, rounded up-ish via int(t + 0.9))
        # and borderline ones (>= 0.05, kept fractional).
        crafts = []
        for i, t in enumerate(n_convertion):
            if t < 0.05:
                continue
            idx = np.argmax(self.convertion_matrix[i])
            item_id = self.item_id_array[idx]
            target_name = localized_name(item_id)
            if t >= 0.1:
                count = int(t + 0.9)
                materials = {
                    localized_name(k): str(v * count)
                    for k, v in self.convertions_dct[item_id].items()
                }
                crafts.append(
                    {
                        "target": target_name,
                        "count": str(count),
                        "materials": materials,
                    }
                )
            else:
                materials = {
                    localized_name(k): "%.1f" % (v * t)
                    for k, v in self.convertions_dct[item_id].items()
                }
                crafts.append(
                    {
                        "target": target_name,
                        "count": "%.1f" % t,
                        "materials": materials,
                    }
                )

        # Per-rarity item values from the dual solution; last digit of a
        # 5-character item id encodes its level (1-5).
        values = [{"level": str(level), "items": []} for level in range(1, 6)]
        for i, item_id in enumerate(self.item_id_array):
            if len(item_id) == 5 and y[i] > 0.1:
                item_value = {
                    "name": localized_name(item_id),
                    "value": "%.2f" % y[i],
                }
                values[int(item_id[-1]) - 1]["items"].append(item_value)
        for group in values:
            group["items"] = sorted(
                group["items"], key=lambda k: float(k["value"]), reverse=True
            )

        res = {
            "lang": language,
            "cost": int(cost),
            "gcost": int(gcost),
            "gold": int(gold),
            "exp": int(exp),
            "stages": stages,
            "craft": crafts,
            "values": list(reversed(values)),
        }

        if print_output:
            print(
                status_dct[status]
                + (" Computed in %.4f seconds," % (time.time() - stt))
            )
            print(
                "Estimated total cost: %d, gold: %d, exp: %d."
                % (res["cost"], res["gold"], res["exp"])
            )
            print("Loot at following stages:")
            for stage in stages:
                display_lst = [k + "(%s) " % stage["items"][k] for k in stage["items"]]
                print(
                    "Stage "
                    + stage["stage"]
                    + "(%s times) ===> " % stage["count"]
                    + ", ".join(display_lst)
                )
            print("\nSynthesize following items:")
            for synthesis in crafts:
                display_lst = [
                    k + "(%s) " % synthesis["materials"][k]
                    for k in synthesis["materials"]
                ]
                print(
                    synthesis["target"]
                    + "(%s) <=== " % synthesis["count"]
                    + ", ".join(display_lst)
                )
            print("\nItems Values:")
            for i, group in reversed(list(enumerate(values))):
                display_lst = [
                    "%s:%s" % (item["name"], item["value"]) for item in group["items"]
                ]
                print("Level %d items: " % (i + 1))
                print(", ".join(display_lst))
        return res
    finally:
        # BUG FIX: the original restored only `if exclude:` — with only
        # non_cn_compat set (or when the solver raised) the filtered tables
        # leaked into subsequent calls. Restore whenever anything was saved.
        if back_trace is not None:
            self.stage_array = back_trace[0]
            self.cost_lst = back_trace[1]
            self.probs_matrix = back_trace[2]
            self.cost_exp_offset = back_trace[3]
            self.cost_gold_offset = back_trace[4]
def float2str(x: float, offset=0.5):
    """Format *x* for display: one decimal below 1.0, otherwise a rounded int."""
    if x >= 1.0:
        return "%d" % int(x + offset)
    return "%.1f" % x
def request_data(
    url_stats,
    url_rules,
    save_path_stats,
    save_path_rules,
    gamedata_path,
    dont_save_data=False,
) -> Tuple[Any, Any]:
    """
    Request probability and convertion rules from web resources and store them
    locally.

    Args:
        url_stats: string. url to the dropping rate stats data.
        url_rules: string. url to the composing rules data.
        save_path_stats: string. local path for storing the stats data.
        save_path_rules: string. local path for storing the composing rules data.
        gamedata_path: unused here; kept for signature compatibility with
            existing callers (item data is fetched by request_itemdata).
        dont_save_data: when True, skip writing the downloaded JSON to disk.

    Returns:
        material_probs: dictionary. Content of the stats json file.
        convertion_rules: dictionary. Content of the rules json file.
    """
    def fetch_json(url, save_path):
        # `headers` is a module-level dict defined elsewhere in this file.
        req = urllib.request.Request(url, None, headers)
        # BUG FIX: the rules URL used to be fetched twice (a second, leaked
        # urlopen call inside the `with` block); fetch exactly once.
        with urllib.request.urlopen(req) as response:
            data = json.loads(response.read().decode())
        if not dont_save_data:
            with open(save_path, "w") as outfile:
                json.dump(data, outfile)
        return data

    if not dont_save_data:
        for save_path in (save_path_stats, save_path_rules):
            directory = os.path.dirname(save_path)
            # makedirs(exist_ok=True) also covers nested directories, which
            # plain os.mkdir could not create; skip paths with no directory.
            if directory:
                os.makedirs(directory, exist_ok=True)

    # TODO: async requests
    material_probs = fetch_json(url_stats, save_path_stats)
    convertion_rules = fetch_json(url_rules, save_path_rules)
    return material_probs, convertion_rules
def request_itemdata(gamedata_path: str) -> Dict[str, Dict[int, str]]:
    """
    Pull item data from github sources.

    Args:
        gamedata_path: a format string that takes in 1 argument to format in
            the region name.

    Returns:
        itemdata: a dict mapping a region's name to a dict mapping an item ID
            to its name.
    """
    itemdata = {}
    for lang in gamedata_langs:
        req = urllib.request.Request(gamedata_path.format(lang), None, headers)
        # BUG FIX: the URL used to be fetched a second time inside the `with`
        # block, leaking the extra connection; fetch exactly once.
        with urllib.request.urlopen(req) as response:
            payload = json.loads(response.read().decode())
        # filter out unneeded data, we only care about ones with purely
        # numerical IDs
        data = {}
        for key, item in payload["items"].items():
            try:
                item_id = int(key)
            except ValueError:
                continue
            data[item_id] = item["name"]
        itemdata[lang] = data
    return itemdata
def load_data(path_stats, path_rules):
    """
    Load stats and composing-rules data from local JSON files.

    Args:
        path_stats: string. local path to the stats data.
        path_rules: string. local path to the composing rules data.

    Returns:
        material_probs: dictionary. Content of the stats json file.
        convertion_rules: dictionary. Content of the rules json file.
    """
    def read_json(path):
        with open(path) as json_file:
            return json.load(json_file)

    return read_json(path_stats), read_json(path_rules)
|
995,608 | 6cacec9e724cbaebe90e8664ea36c16d595b20f0 | #second python
# Minimal demo script: emit a fixed greeting.
message = "this is my second python"
print(message)
|
995,609 | 769910e9b4f83fcd096faecd6c534b324907bcbd | # Given a string, your task is to count how many palindromic substrings in this
# string.
#
# The substrings with different start indexes or end indexes are counted as
# different substrings even if they consist of the same characters.
#
# Example 1:
#
# Input: "abc"
# Output: 3
# Explanation: Three palindromic strings: "a", "b", "c".
#
# Example 2:
#
# Input: "aaa"
# Output: 6
# Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
class PalindromicSubstrings:
    """Practice harness for the 'count palindromic substrings' problem."""

    def __init__(self, input, output_expected):
        # Parameter name `input` is kept for caller compatibility even though
        # it shadows the builtin.
        self.input = input
        self.output_expected = output_expected

    # ========== Practice ========================================================
    def blank(self):
        """Placeholder solution slot; prints whether `result` matches."""
        s, expected = self.input, self.output_expected
        result = 0
        # blank, result = int
        print(result == expected, expected, result)
# ========== Tests ============================================================
def test(test_file, fn):
    """Run practice function `fn` on input/expected pairs read from `test_file`.

    The file alternates lines: an input string, then its expected count.
    """
    with open(test_file) as tf:
        lines = tf.readlines()
    # Consume the file two lines at a time; a trailing unpaired line is ignored.
    for pos in range(0, len(lines) - 1, 2):
        case_input = lines[pos].strip('\n')
        expected = int(lines[pos + 1].strip('\n'))
        ps = PalindromicSubstrings(case_input, expected)
        if fn == 'blank':
            ps.blank()
# ========== Command Line Arguments ===========================================
if __name__ == '__main__':
    import sys
    # Usage: python <script> <test_file> <function_name>
    test(sys.argv[1], sys.argv[2])
995,610 | 556899cd3e358f6d93caed2b02fb7fc57be521f8 | from pyston.utils import LOOKUP_SEP
from pyston.filters.filters import Filter
from pyston.filters.utils import OperatorSlug
from pyston.filters.exceptions import OperatorFilterError
from pyston.filters.managers import BaseParserModelFilterManager
class BaseDynamoFilter(Filter):
    """Base filter for DynamoDB-backed resources.

    Subclasses declare the operators they support and translate a cleaned
    value + operator into a concrete filter term.
    """

    # Collection of operator slugs the subclass accepts; None means "not set".
    allowed_operators = None

    def get_allowed_operators(self):
        """Return the operator slugs this filter accepts."""
        return self.allowed_operators

    def clean_value(self, value, operator_slug, request):
        """Normalisation hook; the default is a pass-through."""
        return value

    def get_q(self, value, operator_slug, request):
        """Build the filter term, rejecting unsupported operators."""
        if operator_slug not in self.get_allowed_operators():
            raise OperatorFilterError
        cleaned = self.clean_value(value, operator_slug, request)
        return self.get_filter_term(cleaned, operator_slug, request)

    def get_filter_term(self, value, operator_slug, request):
        """Subclasses must translate (value, operator) into a filter term."""
        raise NotImplementedError
class DynamoFilterManager(BaseParserModelFilterManager):
    """Filter manager that compiles parsed conditions into DynamoDB filter kwargs."""

    def _logical_conditions_and(self, condition_a, condition_b):
        """AND two conditions, collapsing `gte` + `lt` on one field into `between`."""
        merged = {**condition_a, **condition_b}
        sorted_keys = sorted(merged)
        # The optimisation applies only to two single-term conditions on
        # distinct lookup keys ("gte" sorts before "lt", so order is fixed).
        if len(condition_a) == 1 and len(condition_b) == 1 and len(sorted_keys) == 2:
            full_a, full_b = sorted_keys
            ident_a, op_a = full_a.rsplit(LOOKUP_SEP, 1)
            ident_b, op_b = full_b.rsplit(LOOKUP_SEP, 1)
            if ident_a == ident_b and op_a == OperatorSlug.GTE and op_b == OperatorSlug.LT:
                return {
                    f'{ident_a}__between': (merged[full_a], merged[full_b])
                }
        return super()._logical_conditions_and(condition_a, condition_b)

    def _filter_queryset(self, qs, q):
        """Apply the compiled condition dict as filter kwargs."""
        return qs.filter(**q)
|
995,611 | 75373f24ca4bc660608670622408f97d1c5a8d7c | num = int(input('enter num: '))
# Classify `num` (read above) into one of three ranges.
if 0 <= num < 10:
    print('mardod')
elif 10 <= num <= 20:
    print('gabol')
elif num > 20:
    print('mare than 20!')
else:
    print('error!')
995,612 | 38218037ffa1d68a9b7cd97a697cd1193ff466d5 | import sys
from queue import PriorityQueue
from peers import PeersHandler
from torrent_client import TorrentClient
def download_torrent(torrent_file_path):
    """Start the torrent client for the given .torrent file path."""
    shared_memory = PriorityQueue()
    handler = PeersHandler(torrent_file_path)
    client = TorrentClient(
        1, "Main_Torrent_Process", handler, shared_memory, debug_mode=True
    )
    client.run()
if __name__ == '__main__':
    # Require a torrent file path as the first CLI argument.
    if len(sys.argv) < 2:
        print('ERROR: no torrent file')
        # Use sys.exit: the `exit()` builtin is provided by the site module
        # and is not guaranteed to exist (e.g. under `python -S`).
        sys.exit(1)
    download_torrent(sys.argv[1])
|
995,613 | 5c318d679bac6044a80c9ad7766b35354080299a | # -*- coding: utf-8 -*-
from django.conf import settings
from djmail import template_mail
import premailer
import logging
# Hide premailer's cssutils CSS warning messages unless debug mode is enabled.
if not getattr(settings, "DEBUG", False):
    premailer.premailer.cssutils.log.setLevel(logging.CRITICAL)
class InlineCSSTemplateMail(template_mail.TemplateMail):
    """Template mail whose CSS is inlined into style attributes for email clients."""

    def _render_message_body_as_html(self, context):
        """Render the HTML body, then inline its CSS with premailer."""
        rendered = super()._render_message_body_as_html(context)
        # Transform CSS into line style attributes
        return premailer.transform(rendered)
class MagicMailBuilder(template_mail.MagicMailBuilder):
    """Project-level alias of djmail's MagicMailBuilder; kept as an extension point."""
    pass
# Module-level builder used across the project; mails render with inline CSS.
mail_builder = MagicMailBuilder(template_mail_cls=InlineCSSTemplateMail)
|
def is_leap_year(inYear):
    '''
    Report whether the given year is a leap year.

    Leap years are every year that is evenly divisible by 4,
    except every year that is evenly divisible by 100,
    unless the year is also evenly divisible by 400.
    '''
    return inYear % 4 == 0 and (inYear % 100 != 0 or inYear % 400 == 0)
|
995,615 | 1a29a9ce42eade39909ba0611eb5dac2d49d2ae7 | #!/usr/bin/python
'''
Priority queue with random access updates
-----------------------------------------
.. autoclass:: PrioQueue
:members:
:special-members:
'''
#-----------------------------------------------------------------------------
class PrioQueue:
    '''
    Priority queue that supports updating priority of arbitrary elements and
    removing arbitrary elements.

    Entry with lowest priority value is returned first.

    Mutation operations (:meth:`set()`, :meth:`pop()`, :meth:`remove()`, and
    :meth:`update()`) have complexity of ``O(log(n))``. Read operations
    (:meth:`length()` and :meth:`peek()`) have complexity of ``O(1)``.
    '''

    #-------------------------------------------------------
    # element container {{{

    class _Element:
        '''Internal heap node: priority, payload, heap position and lookup key.'''

        def __init__(self, prio, entry, pos, key):
            self.prio = prio
            self.entry = entry
            self.pos = pos
            self.key = key

        # BUG FIX: the original defined ``__cmp__`` via the ``cmp()`` builtin;
        # both were removed in Python 3, so every heap comparison raised
        # NameError. The heap only ever compares nodes with ``<``, so a rich
        # ``__lt__`` (priority first, entry identity as tie-breaker — the same
        # total order as before) is sufficient.
        def __lt__(self, other):
            return (self.prio, id(self.entry)) < (other.prio, id(other.entry))

        def __eq__(self, other):
            return self.prio == other.prio and self.entry is other.entry

    # }}}
    #-------------------------------------------------------

    def __init__(self, make_key = None):
        '''
        :param make_key: element-to-hashable converter function

        If :obj:`make_key` is left unspecified, an identity function is used
        (which means that the queue can only hold hashable objects).
        '''
        # children: (2 * i + 1), (2 * i + 2)
        # parent: (i - 1) // 2
        self._heap = []
        self._keys = {}
        if make_key is not None:
            self._make_key = make_key
        else:
            self._make_key = lambda x: x

    #-------------------------------------------------------
    # dict-like operations {{{

    def __len__(self):
        '''
        :return: queue length

        Return length of the queue.
        '''
        return len(self._heap)

    def __contains__(self, entry):
        '''
        :param entry: entry to check
        :return: ``True`` if :obj:`entry` is in queue, ``False`` otherwise

        Check whether the queue contains an entry.
        '''
        return (self._make_key(entry) in self._keys)

    def __setitem__(self, entry, priority):
        '''
        :param entry: entry to add/update
        :param priority: entry's priority

        Set priority for an entry, either by adding a new or updating an
        existing one.
        '''
        self.set(entry, priority)

    def __getitem__(self, entry):
        '''
        :param entry: entry to get priority of
        :return: priority
        :throws: :exc:`KeyError` if entry is not in the queue

        Get priority of an entry.
        '''
        key = self._make_key(entry)
        return self._keys[key].prio # NOTE: allow the KeyError to propagate

    def __delitem__(self, entry):
        '''
        :param entry: entry to remove

        Remove an entry from the queue.
        '''
        self.remove(entry)

    # }}}
    #-------------------------------------------------------
    # main operations {{{

    def __iter__(self):
        '''
        :return: iterator

        Iterate over the entries in the queue.

        Order of the entries is unspecified.
        '''
        for element in self._heap:
            yield element.entry

    def iterentries(self):
        '''
        :return: iterator

        Iterate over the entries in the queue.

        Order of the entries is unspecified.
        '''
        for element in self._heap:
            yield element.entry

    def entries(self):
        '''
        :return: list of entries

        Retrieve list of entries stored in the queue.

        Order of the entries is unspecified.
        '''
        return [e.entry for e in self._heap]

    def length(self):
        '''
        :return: queue length

        Return length of the queue.
        '''
        return len(self._heap)

    def set(self, entry, priority):
        '''
        :param entry: entry to add/update
        :param priority: entry's priority

        Set priority for an entry, either by adding a new or updating an
        existing one.
        '''
        key = self._make_key(entry)
        if key not in self._keys:
            element = PrioQueue._Element(priority, entry, len(self._heap), key)
            self._keys[key] = element
            self._heap.append(element)
        else:
            element = self._keys[key]
            element.prio = priority
        self._heapify(element.pos)

    def pop(self):
        '''
        :return: tuple ``(priority, entry)``
        :throws: :exc:`IndexError` when the queue is empty

        Return the entry with lowest priority value. The entry is immediately
        removed from the queue.
        '''
        if len(self._heap) == 0:
            raise IndexError("queue is empty")
        element = self._heap[0]
        del self._keys[element.key]
        if len(self._heap) > 1:
            self._heap[0] = self._heap.pop()
            self._heap[0].pos = 0
            self._heapify_downwards(0)
        else:
            # this was the last element in the queue
            self._heap.pop()
        return (element.prio, element.entry)

    def peek(self):
        '''
        :return: tuple ``(priority, entry)``
        :throws: :exc:`IndexError` when the queue is empty

        Return the entry with lowest priority value. The entry is not removed
        from the queue.
        '''
        if len(self._heap) == 0:
            raise IndexError("queue is empty")
        return (self._heap[0].prio, self._heap[0].entry)

    def remove(self, entry):
        '''
        :return: priority of :obj:`entry` or ``None`` when :obj:`entry` was
            not found

        Remove an arbitrary entry from the queue.
        '''
        key = self._make_key(entry)
        if key not in self._keys:
            return None
        element = self._keys.pop(key)
        if element.pos < len(self._heap) - 1:
            # somewhere in the middle of the queue
            self._heap[element.pos] = self._heap.pop()
            self._heap[element.pos].pos = element.pos
            self._heapify(element.pos)
        else:
            # this was the last element in the queue
            self._heap.pop()
        return element.prio

    def update(self, entry, priority):
        '''
        :param entry: entry to update
        :param priority: entry's new priority
        :return: old priority of the entry
        :throws: :exc:`KeyError` if entry is not in the queue

        Update priority of an arbitrary entry.
        '''
        key = self._make_key(entry)
        element = self._keys[key] # NOTE: allow the KeyError to propagate
        old_priority = element.prio
        element.prio = priority
        self._heapify(element.pos)
        return old_priority

    # }}}
    #-------------------------------------------------------
    # maintain heap property {{{

    # BUG FIX: all parent-index computations below now use integer division
    # (``//``); under Python 3 the original ``/`` produced a float, which
    # raised TypeError as a list index.

    def _heapify(self, i):
        # Restore the heap property around index `i` in whichever direction
        # the element is out of place.
        if i > 0 and self._heap[i] < self._heap[(i - 1) // 2]:
            self._heapify_upwards(i)
        else:
            self._heapify_downwards(i)

    def _heapify_upwards(self, i):
        p = (i - 1) // 2 # parent index
        while p >= 0 and self._heap[i] < self._heap[p]:
            # swap element and its parent
            (self._heap[i], self._heap[p]) = (self._heap[p], self._heap[i])
            # update positions of the elements
            self._heap[i].pos = i
            self._heap[p].pos = p
            # now check if the parent node satisfies heap property
            i = p
            p = (i - 1) // 2

    def _heapify_downwards(self, i):
        c = 2 * i + 1 # children: (2 * i + 1), (2 * i + 2)
        while c < len(self._heap):
            # select the smaller child (if the other child exists)
            if c + 1 < len(self._heap) and self._heap[c + 1] < self._heap[c]:
                c += 1
            if self._heap[i] < self._heap[c]:
                # heap property satisfied, nothing left to do
                return
            # swap element and its smaller child
            (self._heap[i], self._heap[c]) = (self._heap[c], self._heap[i])
            # update positions of the elements
            self._heap[i].pos = i
            self._heap[c].pos = c
            # now check if the smaller child satisfies heap property
            i = c
            c = 2 * i + 1

    # }}}
    #-------------------------------------------------------
#-----------------------------------------------------------------------------
# vim:ft=python:foldmethod=marker
|
995,616 | 3821a62403b56de5abac6e3dcf94a35de8d78672 | """Unit tests for database.py."""
# standard library
from pathlib import Path
import unittest
from unittest.mock import MagicMock
from unittest.mock import sentinel
# first party
from delphi.epidata.acquisition.covid_hosp.test_utils import TestUtils
# py3tester coverage target
__test_target__ = 'delphi.epidata.acquisition.covid_hosp.database'
class DatabaseTests(unittest.TestCase):
    # NOTE(review): `Database` is not imported in this file; presumably
    # py3tester injects it from the module named in `__test_target__` above —
    # confirm before running under plain pytest.

    def setUp(self):
        """Perform per-test setup."""
        # configure test data
        path_to_repo_root = Path(__file__).parent.parent.parent.parent
        self.test_utils = TestUtils(path_to_repo_root)

    def test_commit_and_close_on_success(self):
        """Commit and close the connection after success."""
        mock_connector = MagicMock()
        with Database.connect(mysql_connector_impl=mock_connector) as database:
            connection = database.connection
        mock_connector.connect.assert_called_once()
        # success path: commit exactly once, then close
        connection.commit.assert_called_once()
        connection.close.assert_called_once()

    def test_rollback_and_close_on_failure(self):
        """Rollback and close the connection after failure."""
        mock_connector = MagicMock()
        try:
            with Database.connect(mysql_connector_impl=mock_connector) as database:
                connection = database.connection
                raise Exception('intentional test of exception handling')
        except Exception:
            pass
        mock_connector.connect.assert_called_once()
        # failure path: no commit, but the connection is still closed
        connection.commit.assert_not_called()
        connection.close.assert_called_once()

    def test_new_cursor_cleanup(self):
        """Cursors are unconditionally closed."""
        mock_connection = MagicMock()
        mock_cursor = mock_connection.cursor()
        database = Database(mock_connection)
        try:
            with database.new_cursor() as cursor:
                raise Exception('intentional test of exception handling')
        except Exception:
            pass
        mock_cursor.close.assert_called_once()

    def test_contains_revision(self):
        """Check whether a revision is already in the database."""
        # Note that query logic is tested separately by integration tests. This
        # test just checks that the function maps inputs to outputs as expected.
        mock_connection = MagicMock()
        mock_cursor = mock_connection.cursor()
        database = Database(mock_connection)
        with self.subTest(name='new revision'):
            # cursor yields a count of 0 -> revision not present
            mock_cursor.__iter__.return_value = [(0,)]
            result = database.contains_revision(sentinel.revision)
            # compare with boolean literal to test the type cast
            self.assertIs(result, False)
            query_values = mock_cursor.execute.call_args[0][-1]
            self.assertEqual(query_values, (sentinel.revision,))
        with self.subTest(name='old revision'):
            # cursor yields a count of 1 -> revision already present
            mock_cursor.__iter__.return_value = [(1,)]
            result = database.contains_revision(sentinel.revision)
            # compare with boolean literal to test the type cast
            self.assertIs(result, True)
            query_values = mock_cursor.execute.call_args[0][-1]
            self.assertEqual(query_values, (sentinel.revision,))

    def test_insert_metadata(self):
        """Add new metadata to the database."""
        # Note that query logic is tested separately by integration tests. This
        # test just checks that the function maps inputs to outputs as expected.
        mock_connection = MagicMock()
        mock_cursor = mock_connection.cursor()
        database = Database(mock_connection)
        result = database.insert_metadata(
            sentinel.issue, sentinel.revision, sentinel.meta_json)
        self.assertIsNone(result)
        query_values = mock_cursor.execute.call_args[0][-1]
        self.assertEqual(
            query_values, (sentinel.issue, sentinel.revision, sentinel.meta_json))

    def test_insert_dataset(self):
        """Add a new dataset to the database."""
        # Note that query logic is tested separately by integration tests. This
        # test just checks that the function maps inputs to outputs as expected.
        mock_connection = MagicMock()
        mock_cursor = mock_connection.cursor()
        database = Database(mock_connection)
        dataset = self.test_utils.load_sample_dataset()
        result = database.insert_dataset(sentinel.issue, dataset)
        self.assertIsNone(result)
        # one execute per row in the sample dataset
        self.assertEqual(mock_cursor.execute.call_count, 20)
        # spot-check the values bound by the final execute call
        last_query_values = mock_cursor.execute.call_args[0][-1]
        expected_query_values = (
            0, sentinel.issue, 'MA', '2020-05-10', 53, 84, 15691, 73, 12427, 83,
            3625, 84, None, 0, None, 0, None, 0, None, 0, None, 0, None, 0, None,
            0, None, 0, None, 0, None, 0, None, 0, None, 0, 0.697850497273019, 72,
            10876, 15585, 0.2902550897239881, 83, 3607, 12427, 0.21056656682174496,
            73, 3304, 15691, None, None, None, None, None, None, None, None)
        self.assertEqual(len(last_query_values), len(expected_query_values))
        # floats need approximate comparison; everything else must match exactly
        for actual, expected in zip(last_query_values, expected_query_values):
            if isinstance(expected, float):
                self.assertAlmostEqual(actual, expected)
            else:
                self.assertEqual(actual, expected)
|
995,617 | acb042bfe9a1bc9617611b32794c00c71efafeff | import datetime
import logging
from calendar import timegm
from importlib import import_module
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.utils import timezone
from jose import jwk, jwt
from jose.jwt import JWTClaimsError, JWTError
from social_core.exceptions import AuthException, AuthTokenError
logger = logging.getLogger(__name__)
class OidcBackchannelLogoutMixin:
    # Implements OIDC back-channel logout for social-auth backends:
    # validates the POSTed logout_token and tears down matching sessions.

    def validate_logout_claims(self, logout_token):
        """Check decoded logout_token claims; raise AuthTokenError on failure."""
        utc_timestamp = timegm(datetime.datetime.utcnow().utctimetuple())

        # Issuer must match this backend's configured issuer.
        if self.id_token_issuer() != logout_token.get('iss'):
            raise AuthTokenError(self, 'Incorrect logout_token: iss')

        # Verify the token was issued in the last ID_TOKEN_MAX_AGE seconds
        iat_leeway = self.setting('ID_TOKEN_MAX_AGE', self.ID_TOKEN_MAX_AGE)
        if utc_timestamp > logout_token.get('iat') + iat_leeway:
            raise AuthTokenError(self, 'Incorrect logout_token: iat')

        if not logout_token.get('sub'):
            raise AuthTokenError(self, 'Incorrect logout_token: sub')

        # `events` must be a mapping containing the back-channel logout URI.
        events = logout_token.get('events')
        try:
            if not events or 'http://schemas.openid.net/event/backchannel-logout' not in events.keys():
                raise AuthTokenError(self, 'Incorrect logout_token: events')
        except AttributeError:
            # events was not a mapping (no .keys())
            raise AuthTokenError(self, 'Incorrect logout_token: events')

        # A logout token must NOT contain a nonce.
        if logout_token.get('nonce'):
            raise AuthTokenError(self, 'Incorrect logout_token: nonce')

    def validate_and_return_logout_token(self, logout_token):
        """
        Validates the logout_token according to the steps at
        https://openid.net/specs/openid-connect-backchannel-1_0.html#Validation.
        """
        client_id, client_secret = self.get_key_and_secret()

        try:
            key = self.find_valid_key(logout_token)
        except ValueError:
            raise AuthTokenError(self, 'Incorrect logout_token: signature missing')

        if not key:
            raise AuthTokenError(self, 'Signature verification failed')

        alg = key['alg']
        rsa_key = jwk.construct(key)

        # Decode + verify signature, audience and registered claims in one step.
        try:
            claims = jwt.decode(
                logout_token,
                rsa_key.to_pem().decode('utf-8'),
                algorithms=[alg],
                audience=client_id,
                options=self.JWT_DECODE_OPTIONS,
            )
        except JWTClaimsError as error:
            raise AuthTokenError(self, str(error))
        except JWTError:
            raise AuthTokenError(self, 'Invalid signature')

        # Logout-specific claim checks on top of the generic JWT validation.
        self.validate_logout_claims(claims)

        return claims

    def backchannel_logout(self, *args, **kwargs):
        """Handle a back-channel logout POST: validate the token and end sessions."""
        post_data = self.strategy.request_post()
        logout_token = post_data.get('logout_token')
        if not logout_token:
            raise AuthException(self, 'Log out token missing')

        claims = self.validate_and_return_logout_token(logout_token)

        social_auth = self.strategy.storage.user.get_social_auth(
            self.name,
            claims.get('sub'),
        )
        if not social_auth:
            raise AuthException(self, 'User not authenticated with this backend')

        # Notice: The following is a Django specific session deletion
        User = get_user_model()  # noqa
        SessionStore = import_module(settings.SESSION_ENGINE).SessionStore  # noqa
        Session = SessionStore.get_model_class()  # noqa

        # NOTE(review): this decodes every unexpired session row to find the
        # user's sessions — presumably acceptable at current scale; confirm
        # for large deployments.
        sessions = Session.objects.filter(expire_date__gte=timezone.now())
        for session in sessions:
            session_data = session.get_decoded()
            session_user_id = User._meta.pk.to_python(session_data.get(SESSION_KEY))
            if session_user_id != social_auth.user.id:
                continue

            # End the linked Tunnistamo session first so downstream APIs are
            # notified before the Django session disappears.
            if 'tunnistamo_session_id' in session_data:
                from users.models import TunnistamoSession
                try:
                    tunnistamo_session = TunnistamoSession.objects.get(pk=session_data['tunnistamo_session_id'])
                    tunnistamo_session.end(send_logout_to_apis=True, request=self.strategy.request)
                except TunnistamoSession.DoesNotExist:
                    pass

            session.delete()
            logger.info(f'Deleted a session for user {session_user_id}')
|
995,618 | 31ecee0ce439fa7e49910c48a47227b9939be79d | import unittest
import os, signal, subprocess
from detection_engine_modules.Sniffer import Sniffer
#
# Sniffer module testing file.
#
# Black box testing
class bb_SnifferTest(unittest.TestCase):
    # Black-box tests: exercise Sniffer only through its public constructor
    # and start() method. Explicit `del` is kept after each case — Sniffer
    # presumably holds external resources (see wb tests referencing
    # tcpdump/argus subprocesses); confirm teardown relies on __del__.

    def test_init__no_arg(self):
        '''
        Testing the module's constructor.
        Input - no argument.
        Expected output - successful sniffer object instantiation.
        '''
        sniffer = False
        sniffer = Sniffer()
        # truthiness check: construction succeeded and replaced the sentinel
        self.assertTrue(sniffer)
        del sniffer

    def test_init__with_valid_arg(self):
        '''
        Testing the module's constructor.
        Input - a valid file argument.
        Expected output - successful sniffer object instantiation.
        '''
        sniffer = False
        valid_file = 'testing/testing_alerts.binetflow'
        sniffer = Sniffer(valid_file)
        self.assertTrue(sniffer)
        del sniffer

    def test_init__with_invalid_file(self):
        '''
        Testing the module's constructor.
        Input - invalid file argument.
        Expected output - Exception(FileNotFoundError)
        '''
        invalid_file = 'testing/not_a_real_file.binetflow'
        self.assertRaises(FileNotFoundError, Sniffer, invalid_file)

    # start
    def test_start__without_file(self):
        '''
        Testing the return value of the start function, without a file string being present in the object.
        Input - no file string on object instantiation.
        Expected output - sniffer.start() == true
        '''
        sniffer = Sniffer()
        started = sniffer.start()
        self.assertTrue(started)
        del sniffer

    def test_start__with_file(self):
        '''
        Testing the return value of the start function, with a file string being present in the object.
        Input - valid file string on object instantiation.
        Expected output - sniffer.start() == true
        '''
        sniffer = Sniffer('testing/testing_alerts.binetflow')
        started = sniffer.start()
        self.assertTrue(started)
        del sniffer
# White box testing
class wb_SnifferTest(unittest.TestCase):
    '''
    White-box tests for the Sniffer module: verifies internal attribute
    state and subprocess wiring for live capture, packet-capture-file and
    network-flow-file input modes.
    '''
    # __init__
    def test_init__no_arg(self):
        '''
        Testing the module's constructor - default init variables and function calls.
        Input - No argument.
        Expected output - Default object attribute values.
        '''
        sniffer = Sniffer()
        self.assertEqual(sniffer.tcpdump, None)
        self.assertEqual(sniffer.argus, None)
        self.assertEqual(sniffer.ra, None)
        self.assertEqual(sniffer.read_from_file, False)
        self.assertEqual(sniffer.file, None)
        self.assertEqual(sniffer.tcpdump_command, 'tcpdump -w -')
        self.assertEqual(sniffer.argus_command, 'argus -f -r - -w -')
        self.assertTrue(sniffer.ra_command)
        del sniffer

    def test_init__with_valid_pcap_file_arg(self):
        '''
        Testing the module's constructor - file handling-related init variables and function calls.
        Requires - Valid packet capture file string in object.
        Expected output - Object attribute values required for a packet capture input file.
        '''
        sniffer = Sniffer('testing/menti.pcap')
        self.assertEqual(sniffer.tcpdump, None)
        self.assertEqual(sniffer.argus, None)
        self.assertEqual(sniffer.ra, None)
        self.assertEqual(sniffer.read_from_file, 'testing/menti.pcap')
        self.assertFalse(sniffer.file)
        self.assertEqual(sniffer.argus_command, 'argus -f -r ' + sniffer.read_from_file + ' -w -')
        self.assertTrue(sniffer.ra_command)
        del sniffer

    def test_init__with_valid_network_flow_file_arg(self):
        '''
        Testing the module's constructor - file handling-related init variables and function calls.
        Requires - Valid network flow file string in object.
        Expected output - Object attribute values required for a network flow input file.
        '''
        sniffer = Sniffer('testing/testing_alerts.binetflow')
        self.assertEqual(sniffer.tcpdump, None)
        self.assertEqual(sniffer.argus, None)
        self.assertEqual(sniffer.ra, None)
        self.assertEqual(sniffer.read_from_file, 'testing/testing_alerts.binetflow')
        self.assertTrue(sniffer.file)
        self.assertTrue(sniffer.ra_command)
        del sniffer

    def test_init__with_invalid_file_extension(self):
        '''
        Testing the module's constructor - checking that the file read code only accept valid file extensions.
        Input - invalid file extension argument.
        Expected output - Exception
        '''
        invalid_file = 'testing/not_a_valid_file.txt'
        self.assertRaises(Exception, Sniffer, invalid_file)

    # start
    def test_start__with_no_file(self):
        '''
        Testing the return value of the start function, without a file string being present in the object.
        Requires - nothing.
        Expected output - sniffer.start() == true
        '''
        sniffer = Sniffer()
        started = sniffer.start()
        self.assertTrue(started)
        del sniffer

    def test_start__with_pcap_file(self):
        '''
        Testing the return value of the start function, with a pcap file.
        Requires - object to have a valid pcap file.
        Expected output - sniffer.start() == true
        '''
        sniffer = Sniffer('testing/menti.pcap')
        started = sniffer.start()
        self.assertTrue(started)
        del sniffer

    def test_start__with_network_flow_file(self):
        '''
        Testing the return value of the start function, with a network flow file.
        Requires - object to have a valid network flow file.
        Expected output - sniffer.start() == true
        '''
        # Bug fix: this test previously reused the pcap fixture
        # ('testing/menti.pcap'), so the network-flow start path was
        # never actually exercised.
        sniffer = Sniffer('testing/testing_alerts.binetflow')
        started = sniffer.start()
        self.assertTrue(started)
        del sniffer

    # get_flow
    def test_get_flow__from_pcap_flow_file(self):
        '''
        Testing if get_flow returns a flow from the pcap file.
        Requires - the sniffer's subprocesses to be running.
        Expected output - A processed network flow from the packet capture file.
        '''
        sniffer = Sniffer('testing/menti.pcap')
        started = sniffer.start()
        flow = sniffer.get_flow()
        self.assertTrue(flow)
        del sniffer

    def test_get_flow__from_network_flow_file(self):
        '''
        Testing if get_flow returns a flow from the network flow file.
        Requires - the sniffer's subprocesses to be running and have a valid file handle.
        Expected output - A network flow read from the file.
        '''
        sniffer = Sniffer('testing/testing_alerts.binetflow')
        started = sniffer.start()
        flow = sniffer.get_flow()
        self.assertTrue(flow)
        del sniffer

    def test_get_flow__reach_EOF(self):
        '''
        Testing if get_flow returns False when reaching End-of-File (EOF).
        Requires - the sniffer's subprocesses to be running and have a valid file handle.
        Expected output - get_flow returns False when EOF is reached.
        '''
        sniffer = Sniffer('testing/testing_alerts.binetflow')
        started = sniffer.start()
        flow = True
        while(flow):
            flow = sniffer.get_flow()
        self.assertFalse(flow)
        del sniffer
|
995,619 | 4e9773e133a58362a941b90222b74c3961157d28 | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from fxaccount import views
# URL routes for the fxaccount app. Class-based views are registered via
# .as_view(); the remaining names are passed bare.
# NOTE(review): FxAccountView, AlterFxAccount, Deposit, AlterDeposit,
# Withdraw and AlterWithdraw are registered without .as_view() — confirm
# they are function-based views, otherwise these routes will 500.
urlpatterns = [
    path('<int:user>', views.FxAccountView),
    path('<int:user>/<int:pk>', views.AlterFxAccount),
    path('transfer', views.FxAccountTransferViews.as_view()),
    path('deposit/<int:user>', views.Deposit),
    path('deposit/<int:user>/<int:pk>', views.AlterDeposit),
    path('withdraw/<int:user>', views.Withdraw),
    path('withdraw/<int:user>/<int:pk>', views.AlterWithdraw),
    path('tradinghistory/<int:user>', views.TradingHistoryViews.as_view()),
    path('clientaccountlist/<int:user>', views.ClientAccountListViews.as_view()),
    path('commissionhistory/<int:user>', views.CommissionHistoryViews.as_view()),
    path('commissionhistory/<int:user>/<int:mt4_login>', views.CommissionHistoryViewsDetail.as_view()),
    path('choices', views.ChoicesView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns) |
995,620 | 3dc98b0fc0f0cc9d410630439ccb4976ea7e4b86 | """JSON implementations of logging records."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from .. import utilities
from ..osid import records as osid_records
from dlkit.abstract_osid.logging_ import records as abc_logging_records
class LogEntryRecord(abc_logging_records.LogEntryRecord, osid_records.OsidRecord):
    """A record for a ``LogEntry``.

    The methods specified by the record type are available through the
    underlying object.

    """
class LogEntryQueryRecord(abc_logging_records.LogEntryQueryRecord, osid_records.OsidRecord):
    """A record for a ``LogEntryQuery``.

    The methods specified by the record type are available through the
    underlying object.

    """
class LogEntryFormRecord(abc_logging_records.LogEntryFormRecord, osid_records.OsidRecord):
    """A record for a ``LogEntryForm``.

    The methods specified by the record type are available through the
    underlying object.

    """
class LogEntrySearchRecord(abc_logging_records.LogEntrySearchRecord, osid_records.OsidRecord):
    """A record for a ``LogEntrySearch``.

    The methods specified by the record type are available through the
    underlying object.

    """
class LogRecord(abc_logging_records.LogRecord, osid_records.OsidRecord):
    """A record for a ``Log``.

    The methods specified by the record type are available through the
    underlying object.

    """
class LogQueryRecord(abc_logging_records.LogQueryRecord, osid_records.OsidRecord):
    """A record for a ``LogQuery``.

    The methods specified by the record type are available through the
    underlying object.

    """
class LogFormRecord(abc_logging_records.LogFormRecord, osid_records.OsidRecord):
    """A record for a ``LogForm``.

    The methods specified by the record type are available through the
    underlying object.

    """
class LogSearchRecord(abc_logging_records.LogSearchRecord, osid_records.OsidRecord):
    """A record for a ``LogSearch``.

    The methods specified by the record type are available through the
    underlying object.

    """
|
995,621 | 7aff203e7c847072bba376fc5271dc9e88d3b253 | # -*- coding: utf-8 -*-
S1 = 72  # old score
S2 = 85  # new score
# Percentage improvement relative to the old score.
r = 100*(S2-S1)/S1
# (commented-out Chinese draft: "Xiao Ming's score increased by r%")
# %0.2f keeps two digits after the decimal point; %% is the escape for a
# literal percent sign inside a %-format string.
print('xiao ming score increased by %0.2f %%' % r)
995,622 | bffb5b2ec43fe0dda7a7121250061023acb31905 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda.amp as amp
import soft_dice_cpp # should import torch before import this
## Soft Dice Loss for binary segmentation
##
# v1: pytorch autograd
class SoftDiceLossV1(nn.Module):
    '''
    Soft Dice loss for binary segmentation, implemented with plain autograd.

    loss = 1 - (2*|P.G| + smooth) / (|P|^p + |G|^p + smooth)
    where P are the sigmoid probabilities and G the binary labels.
    '''
    def __init__(self,
                 p=1,
                 smooth=1):
        super(SoftDiceLossV1, self).__init__()
        self.p = p
        self.smooth = smooth

    def forward(self, logits, labels):
        '''
        inputs:
            logits: raw scores, tensor of shape (N, H, W, ...)
            labels: binary ground truth, same shape as logits
        output:
            loss: scalar tensor of shape (1, )
        '''
        probs = torch.sigmoid(logits)
        intersection = (probs * labels).sum()
        cardinality = (probs.pow(self.p) + labels.pow(self.p)).sum()
        dice = (2 * intersection + self.smooth) / (cardinality + self.smooth)
        return 1. - dice
##
# v2: self-derived grad formula
class SoftDiceLossV2(nn.Module):
    '''
    Soft Dice loss for binary segmentation with a hand-derived backward
    pass (delegates to SoftDiceLossV2Func for better numeric stability).
    '''
    def __init__(self,
                 p=1,
                 smooth=1):
        super(SoftDiceLossV2, self).__init__()
        self.p = p
        self.smooth = smooth

    def forward(self, logits, labels):
        '''
        inputs:
            logits: tensor of shape (N, H, W, ...)
            labels: tensor of shape (N, H, W, ...)
        output:
            loss: tensor of shape (1, )
        '''
        # Flatten everything to a single (1, L) row before handing off to
        # the custom autograd Function.
        flat_logits = logits.view(1, -1)
        flat_labels = labels.view(1, -1)
        return SoftDiceLossV2Func.apply(flat_logits, flat_labels,
                                        self.p, self.smooth)
class SoftDiceLossV2Func(torch.autograd.Function):
    '''
    Soft Dice loss with a hand-written backward pass: computing the
    gradient directly gives better numeric stability than autograd.
    '''
    @staticmethod
    @amp.custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, labels, p, smooth):
        '''
        inputs:
            logits: (N, L) raw scores
            labels: (N, L) binary ground truth
        outputs:
            loss: (N,) per-row soft-dice loss
        '''
        # logits = logits.float()
        probs = torch.sigmoid(logits)
        numer = 2 * (probs * labels).sum(dim=1) + smooth
        denor = (probs.pow(p) + labels.pow(p)).sum(dim=1) + smooth
        loss = 1. - numer / denor
        # Save intermediates for the analytic backward pass.
        ctx.vars = probs, labels, numer, denor, p, smooth
        return loss

    @staticmethod
    @amp.custom_bwd
    def backward(ctx, grad_output):
        '''
        Compute the gradient of the soft-dice loss w.r.t. the logits.

        NOTE: the in-place ops below are order-dependent — in particular
        denor.pow_(2) mutates the saved tensor, so term1 (which divides by
        the un-squared denor) must be computed first.
        '''
        probs, labels, numer, denor, p, smooth = ctx.vars
        # Broadcast the per-row scalars against the (N, L) tensors.
        numer, denor = numer.view(-1, 1), denor.view(-1, 1)
        # term1 = d(numer)/d(logit) / denor = 2*label*probs*(1-probs)/denor
        term1 = (1. - probs).mul_(2).mul_(labels).mul_(probs).div_(denor)
        # term2 = numer * d(denor)/d(logit) / denor^2
        term2 = probs.pow(p).mul_(1. - probs).mul_(numer).mul_(p).div_(denor.pow_(2))
        # d(loss)/d(logit) = term2 - term1, scaled by the incoming gradient.
        grads = term2.sub_(term1).mul_(grad_output)
        # Only the logits receive a gradient; labels/p/smooth do not.
        return grads, None, None, None
##
# v3: implement with cuda to save memory
class SoftDiceLossV3(nn.Module):
    '''
    Soft Dice loss for binary segmentation, backed by a compiled CUDA
    kernel (SoftDiceLossV3Func) to save memory.
    '''
    def __init__(self,
                 p=1,
                 smooth=1.):
        super(SoftDiceLossV3, self).__init__()
        self.p = p
        self.smooth = smooth

    def forward(self, logits, labels):
        '''
        inputs:
            logits: tensor of shape (N, H, W, ...)
            labels: tensor of shape (N, H, W, ...)
        output:
            loss: tensor of shape (1, )
        '''
        # The extension expects 2-D input, so collapse to one (1, L) row.
        flat_logits = logits.view(1, -1)
        flat_labels = labels.view(1, -1)
        return SoftDiceLossV3Func.apply(flat_logits, flat_labels,
                                        self.p, self.smooth)
class SoftDiceLossV3Func(torch.autograd.Function):
    '''
    Thin autograd wrapper around the soft_dice_cpp extension: both the
    forward value and the analytic gradient are computed in native code
    for numeric stability and lower memory use.
    '''
    @staticmethod
    @amp.custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, labels, p, smooth):
        '''
        inputs:
            logits: (N, L) raw scores
            labels: (N, L) binary ground truth
        outputs:
            loss: (N,) per-row loss
        '''
        assert logits.size() == labels.size() and logits.dim() == 2
        # Stash the raw inputs; the extension recomputes what it needs.
        ctx.vars = logits, labels, p, smooth
        return soft_dice_cpp.soft_dice_forward(logits, labels, p, smooth)

    @staticmethod
    @amp.custom_bwd
    def backward(ctx, grad_output):
        '''
        Delegate the gradient computation to the compiled extension.
        '''
        logits, labels, p, smooth = ctx.vars
        grad_logits = soft_dice_cpp.soft_dice_backward(
            grad_output, logits, labels, p, smooth)
        # Only the logits receive a gradient.
        return grad_logits, None, None, None
if __name__ == '__main__':
    # Numerical parity check: train two identical ResNet-18 segmentation
    # heads in double precision — one with SoftDiceLossV3 (CUDA extension),
    # one with SoftDiceLossV1 (plain autograd) — and periodically print how
    # far their weights and losses drift apart. Requires a CUDA device.
    import torchvision
    import torch
    import numpy as np
    import random
    # torch.manual_seed(15)
    # random.seed(15)
    # np.random.seed(15)
    # torch.backends.cudnn.deterministic = True
    # torch.cuda.set_device('cuda:1')

    class Model(nn.Module):
        # ResNet-18 backbone with a single-channel conv head; output is
        # upsampled back to the input resolution (binary segmentation logits).
        def __init__(self):
            super(Model, self).__init__()
            net = torchvision.models.resnet18(pretrained=False)
            self.conv1 = net.conv1
            self.bn1 = net.bn1
            self.maxpool = net.maxpool
            self.relu = net.relu
            self.layer1 = net.layer1
            self.layer2 = net.layer2
            self.layer3 = net.layer3
            self.layer4 = net.layer4
            self.out = nn.Conv2d(512, 1, 3, 1, 1)

        def forward(self, x):
            feat = self.conv1(x)
            feat = self.bn1(feat)
            feat = self.relu(feat)
            feat = self.maxpool(feat)
            feat = self.layer1(feat)
            feat = self.layer2(feat)
            feat = self.layer3(feat)
            feat = self.layer4(feat)
            feat = self.out(feat)
            # Bilinear upsampling back to the spatial size of the input.
            out = F.interpolate(feat, x.size()[2:], mode='bilinear', align_corners=True)
            return out

    # Two networks starting from identical weights.
    net1 = Model()
    net2 = Model()
    net2.load_state_dict(net1.state_dict())
    criteria1 = SoftDiceLossV3()
    criteria2 = SoftDiceLossV1()
    net1.cuda()
    net2.cuda()
    net1.train()
    net2.train()
    # float64 so numeric drift between implementations is not masked.
    net1.double()
    net2.double()
    criteria1.cuda()
    criteria2.cuda()

    optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
    optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)

    bs = 12
    size = 320, 320
    # size = 229, 229
    for it in range(300000):
    # for it in range(500):
        # Random input and random binary labels each step.
        inten = torch.randn(bs, 3, *size).cuda()
        lbs = torch.randint(0, 2, (bs, *size)).cuda().float()
        inten = inten.double()
        lbs = lbs.double()
        logits = net1(inten).squeeze(1)
        loss1 = criteria1(logits, lbs)
        optim1.zero_grad()
        loss1.backward()
        optim1.step()
        logits = net2(inten).squeeze(1)
        loss2 = criteria2(logits, lbs)
        optim2.zero_grad()
        loss2.backward()
        optim2.step()
        with torch.no_grad():
            if (it+1) % 50 == 0:
                # Mean absolute weight difference should stay near zero if
                # both loss implementations agree.
                print('iter: {}, ================='.format(it+1))
                print('out.weight: ', torch.mean(torch.abs(net1.out.weight - net2.out.weight)).item())
                print('conv1.weight: ', torch.mean(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
                print('loss: ', loss1.item() - loss2.item())
|
995,623 | 775eae291094d30ecba65c31633c957a700dab01 | #!/usr/bin/python
import boto3
import time
client = boto3.client('cloudformation', region_name='us-east-1')
#functions
def status(stack):
    """Poll CloudFormation every 10 s until *stack* reaches a terminal
    create/rollback state.

    Args:
        stack: name of the CloudFormation stack to watch.

    Raises:
        SystemExit: when the stack ends in any state other than
        CREATE_COMPLETE, so dependent stacks are not created.
    """
    # States after which further polling is pointless. (ROLLBACK_IN_PROGRESS
    # was already treated as terminal in the original logic — preserved.)
    terminal = ('CREATE_FAILED', 'CREATE_COMPLETE', 'ROLLBACK_IN_PROGRESS',
                'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE')
    while True:
        description = client.describe_stacks(StackName=stack)
        # Local renamed from `status` to avoid shadowing this function.
        state = description['Stacks'][0]['StackStatus']
        # Parenthesized single-argument print works on Python 2 and 3 alike.
        print("{}'s current status is {}.".format(stack, state))
        if state in terminal:
            break
        time.sleep(10)
    if state != 'CREATE_COMPLETE':
        # Abort with a non-zero exit code (the old bare exit() returned 0
        # even on failure).
        raise SystemExit(1)
# Create the RDS stack first and block until it reaches a terminal state.
# NOTE(review): boto3's create_stack expects TemplateURL to point at an
# object in S3; 'file://...' is an AWS-CLI-only convenience. These calls
# likely need TemplateBody=open(path).read() instead — confirm against the
# boto3 CloudFormation documentation.
response = client.create_stack(
    StackName='RDS',
    TemplateURL='file://CloudFormation/RDS.json',
    Parameters=[
        {
            'ParameterKey': "Availability",
            'ParameterValue': "False"
        },
        {
            'ParameterKey': "AZ",
            'ParameterValue': "us-east-1a"
        }
    ]
)
status('RDS')

# Application stack, created only after the RDS stack succeeds (status()
# exits the script otherwise). Subnet/SG/VPC IDs are hard-coded for the
# sandbox account.
response = client.create_stack(
    StackName='HollowEC2App',
    TemplateURL='file://CloudFormation/ec2-HollowApp.json',
    Parameters=[
        {
            'ParameterKey': "Environment",
            'ParameterValue': "Sandbox"
        },
        {
            'ParameterKey': "AccountShortName",
            'ParameterValue': "sbx"
        },
        {
            'ParameterKey': "VPCFunction",
            'ParameterValue': "sbx"
        },
        {
            'ParameterKey': "Web1Subnet",
            'ParameterValue': "subnet-a693d1c2"
        },
        {
            'ParameterKey': "Web2Subnet",
            'ParameterValue': "subnet-5d68e872"
        },
        {
            'ParameterKey': "InstanceSecurityGroups",
            'ParameterValue': "sg-131d9964"
        },
        {
            'ParameterKey': "VPC",
            'ParameterValue': "vpc-8c9ac2f4"
        },
        {
            'ParameterKey': "EC2KeyPair",
            'ParameterValue': "SBX-ed2-keypair"
        }
    ]
)
status('HollowEC2App')
|
995,624 | 11c9596845cdb43fca7d9d0cc874521a2d7e3c89 | import pandas as pd
import matplotlib.pyplot as plt
#
# TODO: Load up the Seeds Dataset into a Dataframe
# It's located at 'Datasets/wheat.data'
#
# .. your code here ..
# Load the Seeds dataset.
data = pd.read_csv('Datasets/wheat.data')

# Drop the synthetic 'id' feature. The keyword form replaces the old
# positional-axis call data.drop('id', 1, inplace=True), which was
# deprecated in pandas 1.0 and removed in pandas 2.0.
data.drop(columns='id', inplace=True)

# Pairwise Pearson correlation of the remaining numeric features.
correlationMatrix = data.corr(method='pearson')

# Render the correlation matrix as a heat map.
plt.figure()
plt.matshow(correlationMatrix, cmap=plt.cm.Blues)
# Colour-to-value legend.
plt.colorbar()
# One tick per feature, labelled with the column names.
tick_marks = list(range(len(correlationMatrix.columns)))
plt.xticks(tick_marks, correlationMatrix.columns, rotation='vertical')
plt.yticks(tick_marks, correlationMatrix.columns)
plt.show()
|
995,625 | 48699e3ded5ce9474b9f3b3a1c8b723065ab3390 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from BeautifulSoup import BeautifulSoup, NavigableString, Tag
from urllib2 import urlopen
from datetime import date
import re
URL = "http://laco.se/lunch/"
def get_daily_specials(day=None):
    """Scrape the lunch specials for one weekday from the restaurant page.

    Args:
        day: weekday index 0-6 (Monday == 0); defaults to today's weekday.

    Returns:
        dict with restaurant metadata and a 'specials' list of strings
        (empty on Saturday/Sunday).
    """
    page = urlopen(URL)
    soup = BeautifulSoup(page)
    page.close()

    daily_specials = {
        "name": "Laco di Como",
        "specials": [],
        "streetaddress": "Timmervägen 6, Sundsvall",
        "dataurl": URL,
        "mapurl": "http://www.hitta.se/ViewDetailsPink.aspx?Vkiid=VgwibzXcvb%252fAf1XfiCvetg%253d%253d"
    }

    # Idiom fix: identity comparison with None (was `day == None`).
    if day is None:
        day = date.today().weekday()

    # Only Monday - Friday have specials.
    if day > 4:
        return daily_specials

    # Heading text used to locate the day's section on the page.
    # NOTE(review): the second tuple element is unused in this function —
    # possibly a leftover; confirm before removing.
    day = [(u"Måndag", 2), (u"Tisdag", 2), (u"Onsdag", 2), (u"Torsdag", 2), (u"Fredag", 3)][day]

    # Find the heading for the day, then collect the list items that follow.
    ref = soup.find("h2", text=day[0]).parent
    daily_specials["specials"] = [li.text.strip() for li in ref.findNextSibling("ul") if isinstance(li, Tag)]

    return daily_specials
def main():
    # Demo entry point: print Monday-Friday specials to stdout.
    # NOTE: Python 2 print statements — this module (urllib2, BeautifulSoup 3)
    # is Python 2 only.
    def print_specials(day, d):
        # Pretty-print one day's list of specials.
        print " Day", day
        for c in d["specials"]:
            print " ", c
        print ""
    d = get_daily_specials(0)
    print d["name"]
    print_specials(0, d)
    # Remaining weekdays (Tuesday .. Friday).
    for day in range(1, 5):
        print_specials(day, get_daily_specials(day))

if __name__ == "__main__":
    main()
|
995,626 | 70b9109ef259d0df97598c1be75eb5285d0a0c33 | import markovify
from flask import Flask, render_template, request, redirect
from flask import jsonify, abort
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm, CSRFProtect
from wtforms import TextAreaField, validators
# Location of the SQLite database file (relative to the working directory).
PATH_TO_DB = "./sqlite.db"

app = Flask(__name__)
# NOTE(review): hard-coded secret key is unsafe for anything beyond local
# development — load it from the environment or a config file instead.
app.secret_key = 'qwerty'
app.config["TEMPLATES_AUTO_RELOAD"] = True
app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{PATH_TO_DB}"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

# Imported here (after `db` exists) — presumably models imports `db` from
# this module, so a top-of-file import would be circular; verify.
from models import Texts, Users

# Create tables for all declared models if they do not exist yet.
db.create_all()
@app.route('/', methods=["GET", "POST"])
def index():
    """Render the input form (GET) or build a Markov sentence from the
    submitted text (POST) and redirect back to the form.

    NOTE(review): the generated sentence is currently discarded (the
    `pass` branch below) and nothing is stored in Texts/Users — this
    looks like work in progress; confirm intended behaviour.
    """
    if request.method == "GET":
        return render_template("form.html")
    elif request.method == "POST":
        text = request.form["text"]
        # Only accept short snippets. make_sentence may return None for
        # input this small even with tries=100.
        if 2 < len(text) < 50:
            text_model = markovify.Text(text)
            markov_sentence = text_model.make_sentence(tries=100)
            if markov_sentence:
                pass  # TODO: persist or display the generated sentence
        return redirect("/", code=302)

if __name__ == '__main__':
    app.run()
|
995,627 | 96aee793701aee8f224905db86f2f977ad1af5c0 | import cv2
import numpy as np
import json
red_img1 = []
blue_img1 = []
white_img1 = []
gray_img1 = []
yellow_img1 = []
red_fig1 = []
blue_fig1 = []
white_fig1 = []
gray_fig1 = []
yellow_fig1 = []
red_waga = []
blue_waga = []
white_waga = []
gray_waga = []
yellow_waga = []
roi = []
path_json = r'C:\Users\Marcin\Desktop\projekt\public.json'
path_img = 'imgs\img_002.jpg'
with open(path_json) as json_file:
data = json.load(json_file)
for img1 in data["img_002"]:
# print('red: ' + img1['red'])
red_img1.append(img1['red'])
blue_img1.append(img1['blue'])
white_img1.append(img1['white'])
gray_img1.append(img1['grey'])
yellow_img1.append(img1['yellow'])
# print(img1)
kolory = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 128, 255), (255, 255, 0), (255, 0, 255),
(0, 255, 255), (128, 128, 128), (165, 255, 165), (19, 69, 139), (128, 0, 128), (238, 130, 238), (208, 224, 64),
(114, 128, 250), (128, 0, 0), (130, 0, 75), (196, 228, 255), (250, 230, 230), (128, 0, 0), (0, 128, 128),
(210, 105, 30), (0, 191, 255), (192, 192, 192)]
#kolory = [ WHITE, BLUE, GREEN, RED, ORANGE, CYAN, MAGENTA,
# YELLOW, GRAY, LIME, BROWN, PURPLE, VIOLET, TURQUOISE,
# SALMON, NAVY, INDIGO, BISQUE, LAVENDER, MAROON, TEAL
# CHOCOLATE SKYBLUE SILVER]
# Write some Text
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.5
lineType = 2
red_cx = []
red_cy = []
blue_cx = []
blue_cy = []
white_cx = []
white_cy = []
gray_cx = []
gray_cy = []
yellow_cx = []
yellow_cy = []
n = 0
m = 0
img = cv2.imread(path_img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, None, fx=0.35, fy=0.35, interpolation=cv2.INTER_CUBIC)
gray = cv2.resize(gray, None, fx=0.35, fy=0.35, interpolation=cv2.INTER_CUBIC)
# img_bit_not = cv2.bitwise_not(img)
# img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# img_hsv_bit_not = cv2.cvtColor(img_bit_not, cv2.COLOR_BGR2HSV)
figure_number = np.zeros_like(img)
contours_img = np.zeros_like(img)
contours_color_img = np.zeros_like(img)
shape = img.shape
"""BACKGROUND"""
img_filtr = cv2.medianBlur(img, 9)
img_hsv_filtr = cv2.cvtColor(img_filtr, cv2.COLOR_BGR2HSV)
#img_darken = cv2.add(img, np.array([-100.0]))
#img_hsv_darken = cv2.cvtColor(img_darken, cv2.COLOR_BGR2HSV)
#img_bg = cv2.add(img, np.array([-50.0]))
#img_bg = cv2.medianBlur(img_bg, 9)
#img_hsv_bg = cv2.cvtColor(img_bg, cv2.COLOR_BGR2HSV)
#clahe = cv2.createCLAHE(clipLimit=15.0, tileGridSize=(8, 8))
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(150, 150))
cl1 = clahe.apply(gray)
img_clahe = cv2.cvtColor(cl1, cv2.COLOR_GRAY2BGR)
img_hsv_clahe = cv2.cvtColor(img_clahe, cv2.COLOR_BGR2HSV)
bg_lower_hsv_filtr = np.array([0, 0, 145], np.uint8) # filtr medianowy
bg_higher_hsv_filtr = np.array([180, 45, 220], np.uint8) # filtr medianowy
bg_mask_filtr = cv2.inRange(img_hsv_filtr, bg_lower_hsv_filtr, bg_higher_hsv_filtr)
bg_mask_inv_filtr = cv2.bitwise_not(bg_mask_filtr)
bg_result_filtr = cv2.bitwise_and(img, img, mask=bg_mask_filtr)
#bg_lower_hsv_darken = np.array([14, 0, 0], np.uint8) # przyciemnienie
#bg_higher_hsv_darken = np.array([33, 180, 125], np.uint8) # przyciemnienie
#bg_mask_darken = cv2.inRange(img_hsv_darken, bg_lower_hsv_darken, bg_higher_hsv_darken)
#bg_mask_inv_darken = cv2.bitwise_not(bg_mask_darken)
#bg_result_darken = cv2.bitwise_and(img, img, mask=bg_mask_darken)
#bg_lower_hsv = np.array([0, 0, 0], np.uint8) # darken+filtr
#bg_higher_hsv = np.array([30, 92, 150], np.uint8) # darken+filtr
#bg_mask = cv2.inRange(img_hsv_bg, bg_lower_hsv, bg_higher_hsv)
#bg_mask = cv2.dilate(bg_mask, np.ones((3, 3), np.uint8), iterations=1)
#bg_result = cv2.bitwise_and(img, img, mask=bg_mask)
#clahe_lower_hsv = np.array([0, 0, 83], np.uint8) # CLAHE
#clahe_higher_hsv = np.array([180, 255, 228], np.uint8) # CLAHE
clahe_lower_hsv = np.array([0, 0, 140], np.uint8) # CLAHE
clahe_higher_hsv = np.array([180, 255, 255], np.uint8) # CLAHE
bg_mask_clahe = cv2.inRange(img_hsv_clahe, clahe_lower_hsv, clahe_higher_hsv)
kernel_clahe = np.ones((5, 5), np.uint8)
bg_mask_clahe = cv2.erode(bg_mask_clahe, kernel_clahe, iterations=3) # czarny
bg_result_clahe = cv2.bitwise_and(img, img, mask=bg_mask_clahe)
#bg_result[400:shape[0], 500:shape[1]] = bg_result_clahe[400:shape[0], 500:shape[1]]
fg_result = cv2.bitwise_xor(bg_result_filtr, img)
#fg_result1 = cv2.bitwise_xor(bg_result_darken, img)
fg_result2 = cv2.bitwise_xor(bg_result_clahe, img)
#fg_result1[400:shape[0], 400:shape[1]] = fg_result2[400:shape[0], 400:shape[1]]
#full_result1 = cv2.bitwise_xor(bg_result, img)
full_result = cv2.bitwise_or(fg_result, fg_result2)
# foreground_result = img - bg_result
# full_result = cv2.bitwise_or(red_result, blue_result)
# full_result = cv2.bitwise_or(full_result, white_result)
# full_result = cv2.bitwise_or(full_result, gray_result)
# full_result = cv2.bitwise_or(full_result, yellow_result)
"""DRAW CONTOURS/FOREGROUND"""
full_result_gray = cv2.cvtColor(full_result, cv2.COLOR_BGR2GRAY)
contours_fg, hierarchy_fg = cv2.findContours(full_result_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for i in range(len(contours_fg)):
cnt = contours_fg[i]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 2000:
n += 1
cx = (int(M['m10'] / M['m00']))
cy = (int(M['m01'] / M['m00']))
cv2.drawContours(contours_img, [cnt], 0, kolory[0], -1)
# cv2.drawContours(full_result, [cnt], 0, kolory[1], 2)
# cv2.circle(full_result, (cx, cy), 2, kolory[1], 5)
foreground_result = cv2.bitwise_and(contours_img, img)
#foreground_result_white = cv2.bitwise_and(contours_img, img)
#img_hsv_white = cv2.cvtColor(foreground_result_white, cv2.COLOR_BGR2HSV)
#kernel_bg = np.ones((9, 9), np.uint8)
#contours_img = cv2.dilate(contours_img, kernel_bg, iterations=1)
#foreground_result = cv2.bitwise_and(contours_img, img)
# foreground_result = cv2.medianBlur(foreground_result, 9)
img_hsv = cv2.cvtColor(foreground_result, cv2.COLOR_BGR2HSV)
img_hsv_bit_not = cv2.cvtColor(cv2.bitwise_not(foreground_result), cv2.COLOR_BGR2HSV)
'''RED'''
red_lower_hsv = np.array([0, 25, 0], np.uint8)
red_higher_hsv = np.array([10, 255, 255], np.uint8)
red_mask1 = cv2.inRange(img_hsv, red_lower_hsv, red_higher_hsv)
red_lower_hsv2 = np.array([166, 25, 0], np.uint8)
red_higher_hsv2 = np.array([180, 255, 255], np.uint8)
red_mask2 = cv2.inRange(img_hsv, red_lower_hsv2, red_higher_hsv2)
red_mask = cv2.bitwise_or(red_mask1, red_mask2)
red_result = cv2.bitwise_and(img, img, mask=red_mask)
red_result_gray = cv2.cvtColor(red_result, cv2.COLOR_BGR2GRAY)
contours_red, hierarchy_red = cv2.findContours(red_result_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for i in range(len(contours_red)):
cnt = contours_red[i]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 1500:
print('area', area)
cx = (int(M['m10'] / M['m00']))
cy = (int(M['m01'] / M['m00']))
red_cx.append(cx)
red_cy.append(cy)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
width = int(rect[1][0])
height = int(rect[1][1])
rozmiar = min(height, width)
print('red rozmiar', rozmiar)
if rozmiar < 60:
red_waga.append(1)
elif 60 <= rozmiar < 90:
red_waga.append(2)
elif rozmiar >= 90:
red_waga.append(3)
cv2.drawContours(full_result, [cnt], 0, kolory[3], 3)
# cv2.circle(full_result, (cx, cy), 2, kolory[3], 5)
cv2.drawContours(red_result, [box], 0, kolory[3], 3)
# cv2.circle(red_result, (cx, cy), 2, kolory[3], 5)
cv2.circle(figure_number, (cx, cy), 8, kolory[3], 1)
cv2.circle(red_result, (int(box[0][0]), int(box[0][1])), 8, kolory[5], 5)
cv2.circle(red_result, (int(box[1][0]), int(box[1][1])), 8, kolory[6], 5)
bottomLeftCornerOfText = (cx + 5, cy + 5)
cv2.putText(figure_number, "({},{},{})".format(cx, cy, area),
bottomLeftCornerOfText,
font,
fontScale,
kolory[3],
lineType)
'''BLUE'''
blue_lower_hsv = np.array([100, 60, 125], np.uint8)
blue_higher_hsv = np.array([125, 255, 255], np.uint8)
blue_mask = cv2.inRange(img_hsv, blue_lower_hsv, blue_higher_hsv)
blue_result = cv2.bitwise_and(img, img, mask=blue_mask)
blue_result_gray = cv2.cvtColor(blue_result, cv2.COLOR_BGR2GRAY)
contours_blue, hierarchy_blue = cv2.findContours(blue_result_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for i in range(len(contours_blue)):
cnt = contours_blue[i]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 1500:
cx = (int(M['m10'] / M['m00']))
cy = (int(M['m01'] / M['m00']))
blue_cx.append(cx)
blue_cy.append(cy)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
width = int(rect[1][0])
height = int(rect[1][1])
rozmiar = min(height, width)
print('blue rozmiar', rozmiar)
if rozmiar < 57:
blue_waga.append(1)
elif 57 <= rozmiar < 90:
blue_waga.append(2)
elif rozmiar >= 90:
blue_waga.append(3)
cv2.drawContours(full_result, [cnt], 0, kolory[1], 3)
# cv2.circle(full_result, (cx, cy), 2, kolory[1], 5)
cv2.drawContours(blue_result, [box], 0, kolory[1], 3)
# cv2.circle(blue_result, (cx, cy), 2, kolory[1], 5)
cv2.circle(figure_number, (cx, cy), 8, kolory[1], 1)
cv2.circle(blue_result, (int(box[0][0]), int(box[0][1])), 8, kolory[5], 5)
cv2.circle(blue_result, (int(box[1][0]), int(box[1][1])), 8, kolory[6], 5)
bottomLeftCornerOfText = (cx + 5, cy + 5)
cv2.putText(figure_number, "({},{},{})".format(cx, cy, area),
bottomLeftCornerOfText,
font,
fontScale,
kolory[1],
lineType)
'''WHITE'''
# white_lower_hsv = np.array([14, 0, 145], np.uint8)
# white_higher_hsv = np.array([180, 105, 255], np.uint8)
# white_lower_hsv = np.array([37, 8, 176], np.uint8)
# white_higher_hsv = np.array([180, 48, 255], np.uint8)
white_lower_hsv = np.array([25, 0, 180], np.uint8)
white_higher_hsv = np.array([120, 48, 255], np.uint8)
white_mask = cv2.inRange(img_hsv, white_lower_hsv, white_higher_hsv)
white_mask_inv = cv2.bitwise_not(white_mask)
white_result = cv2.bitwise_and(img, img, mask=white_mask)
white_result_gray = cv2.cvtColor(white_result, cv2.COLOR_BGR2GRAY)
# kernel = np.ones((3, 3), np.uint8)
# kernel1 = np.ones((7, 7), np.uint8)
# ret, thresh = cv2.threshold(white_result_gray, 127, 255, cv2.THRESH_BINARY)
# white_result_final = cv2.erode(thresh, kernel, iterations=1)
# white_result_final = cv2.dilate(thresh, kernel, iterations=1)
contours_white, hierarchy_white = cv2.findContours(white_result_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for i in range(len(contours_white)):
cnt = contours_white[i]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 1500:
cx = (int(M['m10'] / M['m00']))
cy = (int(M['m01'] / M['m00']))
white_cx.append(cx)
white_cy.append(cy)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
width = int(rect[1][0])
height = int(rect[1][1])
rozmiar = min(height, width)
print('white rozmiar', rozmiar)
if rozmiar < 60:
white_waga.append(1)
elif 60 <= rozmiar < 90:
white_waga.append(2)
elif rozmiar >= 90:
white_waga.append(3)
cv2.drawContours(full_result, [cnt], 0, kolory[0], 3)
# cv2.circle(full_result, (cx, cy), 2, kolory[0], 5)
cv2.drawContours(white_result, [box], 0, kolory[0], 3)
# cv2.circle(white_result_final, (cx, cy), 2, kolory[0], 5)
cv2.circle(figure_number, (cx, cy), 8, kolory[0], 1)
bottomLeftCornerOfText = (cx + 5, cy + 5)
cv2.putText(figure_number, "({},{},{})".format(cx, cy, area),
bottomLeftCornerOfText,
font,
fontScale,
kolory[0],
lineType)
'''GRAY'''
gray_lower_hsv = np.array([40, 0, 0], np.uint8)
gray_higher_hsv = np.array([105, 255, 165], np.uint8)
gray_mask = cv2.inRange(img_hsv, gray_lower_hsv, gray_higher_hsv)
gray_result = cv2.bitwise_and(img, img, mask=gray_mask)
gray_result_gray = cv2.cvtColor(gray_result, cv2.COLOR_BGR2GRAY)
contours_gray, hierarchy_gray = cv2.findContours(gray_result_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for i in range(len(contours_gray)):
cnt = contours_gray[i]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 700:
cx = (int(M['m10'] / M['m00']))
cy = (int(M['m01'] / M['m00']))
gray_cx.append(cx)
gray_cy.append(cy)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
width = int(rect[1][0])
height = int(rect[1][1])
rozmiar = min(height, width)
print('gray rozmiar', rozmiar)
if rozmiar < 57:
gray_waga.append(1)
elif 57 <= rozmiar < 90:
gray_waga.append(2)
elif rozmiar >= 90:
gray_waga.append(3)
cv2.drawContours(full_result, [cnt], 0, kolory[8], 3)
# cv2.circle(full_result, (cx, cy), 2, kolory[8], 5)
cv2.drawContours(gray_result, [box], 0, kolory[8], 3)
# cv2.circle(gray_result, (cx, cy), 2, kolory[8], 5)
cv2.circle(figure_number, (cx, cy), 8, kolory[8], 1)
bottomLeftCornerOfText = (cx + 5, cy + 5)
cv2.putText(figure_number, "({},{},{})".format(cx, cy, area),
bottomLeftCornerOfText,
font,
fontScale,
kolory[8],
lineType)
'''YELLOW'''
yellow_lower_hsv = np.array([104, 105, 0], np.uint8)
yellow_higher_hsv = np.array([180, 255, 255], np.uint8)
yellow_mask = cv2.inRange(img_hsv_bit_not, yellow_lower_hsv, yellow_higher_hsv)
yellow_result = cv2.bitwise_and(img, img, mask=yellow_mask)
yellow_result_gray = cv2.cvtColor(yellow_result, cv2.COLOR_BGR2GRAY)
contours_yellow, hierarchy_yellow = cv2.findContours(yellow_result_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for i in range(len(contours_yellow)):
cnt = contours_yellow[i]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 1500:
cx = (int(M['m10'] / M['m00']))
cy = (int(M['m01'] / M['m00']))
yellow_cx.append(cx)
yellow_cy.append(cy)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
width = int(rect[1][0])
height = int(rect[1][1])
rozmiar = min(height, width)
print('yellow rozmiar', rozmiar)
if rozmiar < 57:
yellow_waga.append(1)
elif 57 <= rozmiar < 90:
yellow_waga.append(2)
elif rozmiar >= 90:
yellow_waga.append(3)
cv2.drawContours(full_result, [cnt], 0, kolory[7], 3)
# cv2.circle(full_result, (cx, cy), 2, kolory[7], 5)
cv2.drawContours(yellow_result, [box], 0, kolory[7], 3)
# cv2.circle(yellow_result, (cx, cy), 2, kolory[7], 5)
cv2.circle(figure_number, (cx, cy), 8, kolory[7], 1)
bottomLeftCornerOfText = (cx + 5, cy + 5)
cv2.putText(figure_number, "({},{},{})".format(cx, cy, area),
bottomLeftCornerOfText,
font,
fontScale,
kolory[7],
lineType)
"""GRUPOWANIE FIGUR + WYKRYWANIE DZIUREK"""
dziurki_img = img.copy()
contours_color = cv2.findContours(full_result_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[1]
liczba_dziurek_figura = []
for i in range(len(contours_color)):
cnt = contours_color[i]
M = cv2.moments(cnt)
area = cv2.contourArea(cnt)
if area > 8000:
print('areaaa', area)
# x, y, w, h = cv2.boundingRect(cnt)
# cv2.rectangle(contours_color_img, (x, y), (x + w, y + h), kolory[m], 2)
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
cx = (int(M['m10'] / M['m00']))
cy = (int(M['m01'] / M['m00']))
width = int(rect[1][0])
height = int(rect[1][1])
# print(rect)
# print(box)
src_pts = box.astype("float32")
dst_pts = np.array([[0, height - 1],
[0, 0],
[width - 1, 0],
[width - 1, height - 1]], dtype="float32")
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
warped = cv2.warpPerspective(img, M, (width, height))
'''
warped = cv2.resize(warped, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
warped_gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(warped_gray, 20, 20)
circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1, 50,
param1=1, param2=20, minRadius=7, maxRadius=25)
'''
warped = cv2.resize(warped, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
warped_gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(warped_gray, 20, 20)
#cv2.imwrite("edges {}.jpg".format(m), edges)
circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1, 50,
param1=1, param2=13, minRadius=15, maxRadius=20)
if circles is not None:
m += 1
roi.append(warped)
cv2.drawContours(contours_color_img, [box], 0, kolory[m], 2)
cv2.drawContours(contours_color_img, [cnt], 0, kolory[m], -1)
red_fig1.append(0)
blue_fig1.append(0)
white_fig1.append(0)
gray_fig1.append(0)
yellow_fig1.append(0)
for j in range(len(red_cx)):
if np.all(contours_color_img[red_cy[j], red_cx[j]] == kolory[m]):
red_fig1[m - 1] += 1 * red_waga[j]
cv2.circle(figure_number, (red_cx[j], red_cy[j]), 5, kolory[m], -1)
for j in range(len(blue_cx)):
if np.all(contours_color_img[blue_cy[j], blue_cx[j]] == kolory[m]):
blue_fig1[m - 1] += 1 * blue_waga[j]
cv2.circle(figure_number, (blue_cx[j], blue_cy[j]), 5, kolory[m], -1)
for j in range(len(white_cx)):
if np.all(contours_color_img[white_cy[j], white_cx[j]] == kolory[m]):
white_fig1[m - 1] += 1 * white_waga[j]
cv2.circle(figure_number, (white_cx[j], white_cy[j]), 5, kolory[m], -1)
for j in range(len(gray_cx)):
if np.all(contours_color_img[gray_cy[j], gray_cx[j]] == kolory[m]):
gray_fig1[m - 1] += 1 * gray_waga[j]
cv2.circle(figure_number, (gray_cx[j], gray_cy[j]), 5, kolory[m], -1)
for j in range(len(yellow_cx)):
if np.all(contours_color_img[yellow_cy[j], yellow_cx[j]] == kolory[m]):
yellow_fig1[m - 1] += 1 * yellow_waga[j]
cv2.circle(figure_number, (yellow_cx[j], yellow_cy[j]), 5, kolory[m], -1)
circles = np.uint16(np.around(circles))
for j in circles[0, :]:
# draw the outer circle
cv2.circle(warped, (j[0], j[1]), j[2], (0, 255, 0), 2)
# draw the center of the circle
cv2.circle(warped, (j[0], j[1]), 2, (0, 0, 255), 3)
liczba_dziurek_figura.append(len(circles[0, :]))
bottomLeftCornerOfText = (cx, cy + 50)
cv2.putText(dziurki_img, "(Ilosc dziurek: {})".format(len(circles[0, :])),
bottomLeftCornerOfText,
font,
fontScale,
kolory[1],
lineType)
cv2.imwrite("roi {}.jpg".format(m), warped)
# Diagnostic dump: per-colour weighted dot tallies for the reference figures
# ("wejsciowe") versus the figures detected in the image ("wyjsciowe"),
# before any matching/sorting has happened.
print('FIGURY WEJSCIOWE')
print('r', red_img1)
print('b', blue_img1)
print('w', white_img1)
print('g', gray_img1)
print('y', yellow_img1)
print('FIGURY WYJSCIOWE PRZED SORTOWANIEM')
# Column header: abbreviated colour labels for the detected-figure columns.
print(' be', 'gn', 'rd', 'oe', 'cn', 'ma', 'yw', 'gy', 'le', 'bn', 'pe', 'vt', 'tq', 'sn', 'ny', 'io')
print('r', red_fig1)
print('b', blue_fig1)
print('w', white_fig1)
print('g', gray_fig1)
print('y', yellow_fig1)
print(liczba_dziurek_figura)
""" PRZYPISANIE ODPOWIEDNIEGO KONTURU DO FIGURY Z PLIKU """
suma_1_sortowanie = []
minimum1 = 0
wejsciowa_figura = []
dopasowane_klocki_red = []
dopasowane_klocki_blue = []
dopasowane_klocki_white = []
dopasowane_klocki_gray = []
dopasowane_klocki_yellow = []
dopasowana_liczba_dziurek = []
dopasowana_figura = []
niedopasowana_figura_wejsciowa = []
for i in range(0, len(red_img1)):
suma_1_sortowanie.clear()
wejsciowa_figura.append(i)
for j in range(0, len(red_fig1)):
suma_1_sortowanie.append(abs(int(red_img1[i]) - red_fig1[j]) + abs(int(blue_img1[i]) - blue_fig1[j]) +
abs(int(white_img1[i]) - white_fig1[j]) + abs(int(gray_img1[i]) - gray_fig1[j]) +
abs(int(yellow_img1[i]) - yellow_fig1[j]))
minimum1 = np.argmin(suma_1_sortowanie)
if min(suma_1_sortowanie) is 0:
dopasowane_klocki_red.append(red_fig1[minimum1])
dopasowane_klocki_blue.append(blue_fig1[minimum1])
dopasowane_klocki_white.append(white_fig1[minimum1])
dopasowane_klocki_gray.append(gray_fig1[minimum1])
dopasowane_klocki_yellow.append(yellow_fig1[minimum1])
dopasowana_liczba_dziurek.append(liczba_dziurek_figura[minimum1])
dopasowana_figura.append(minimum1)
else:
dopasowane_klocki_red.append('x')
dopasowane_klocki_blue.append('x')
dopasowane_klocki_white.append('x')
dopasowane_klocki_gray.append('x')
dopasowane_klocki_yellow.append('x')
dopasowana_liczba_dziurek.append('x')
niedopasowana_figura_wejsciowa.append(i)
niedopasowana_figura_wyjsciowa = list(set(wejsciowa_figura) - set(dopasowana_figura))
print('FIGURY WEJSCIOWE')
print('r', red_img1)
print('b', blue_img1)
print('w', white_img1)
print('g', gray_img1)
print('y', yellow_img1)
print('1 SORTOWANIE')
print('r', dopasowane_klocki_red)
print('b', dopasowane_klocki_blue)
print('w', dopasowane_klocki_white)
print('g', dopasowane_klocki_gray)
print('y', dopasowane_klocki_yellow)
print('dziurki', dopasowana_liczba_dziurek)
print('numer figury dopasowanej', dopasowana_figura)
print('niedopasowano figur wejsciowych o nr', niedopasowana_figura_wejsciowa)
print('niedopasowano figur wyjsciowych o nr', niedopasowana_figura_wyjsciowa)
print('------------2 SORTOWANIE-----------------')
# print('sumy', suma[i])
# Second pass: force-match the leftovers — each still-unmatched reference
# figure takes the closest (minimal L1 distance) still-unmatched detected one.
suma_2_sortowanie = []
minimum2 = 0
for i in range(0, len(niedopasowana_figura_wejsciowa)):
    suma_2_sortowanie.clear()
    for j in range(0, len(niedopasowana_figura_wyjsciowa)):
        suma_2_sortowanie.append(abs(int(red_img1[niedopasowana_figura_wejsciowa[i]]) - red_fig1[niedopasowana_figura_wyjsciowa[j]]) +
                                 abs(int(blue_img1[niedopasowana_figura_wejsciowa[i]]) - blue_fig1[niedopasowana_figura_wyjsciowa[j]]) +
                                 abs(int(white_img1[niedopasowana_figura_wejsciowa[i]]) - white_fig1[niedopasowana_figura_wyjsciowa[j]]) +
                                 abs(int(gray_img1[niedopasowana_figura_wejsciowa[i]]) - gray_fig1[niedopasowana_figura_wyjsciowa[j]]) +
                                 abs(int(yellow_img1[niedopasowana_figura_wejsciowa[i]]) - yellow_fig1[niedopasowana_figura_wyjsciowa[j]]))
    minimum2 = np.argmin(suma_2_sortowanie)
    # Overwrite the 'x' placeholders from pass one with the forced match.
    dopasowane_klocki_red[niedopasowana_figura_wejsciowa[i]] = red_fig1[niedopasowana_figura_wyjsciowa[minimum2]]
    dopasowane_klocki_blue[niedopasowana_figura_wejsciowa[i]] = blue_fig1[niedopasowana_figura_wyjsciowa[minimum2]]
    dopasowane_klocki_white[niedopasowana_figura_wejsciowa[i]] = white_fig1[niedopasowana_figura_wyjsciowa[minimum2]]
    dopasowane_klocki_gray[niedopasowana_figura_wejsciowa[i]] = gray_fig1[niedopasowana_figura_wyjsciowa[minimum2]]
    dopasowane_klocki_yellow[niedopasowana_figura_wejsciowa[i]] = yellow_fig1[niedopasowana_figura_wyjsciowa[minimum2]]
    dopasowana_liczba_dziurek[niedopasowana_figura_wejsciowa[i]] = liczba_dziurek_figura[niedopasowana_figura_wyjsciowa[minimum2]]
    print(suma_2_sortowanie)
    print(minimum2)
print('----------FIGURY WEJSCIOWE-------------')
print('czerwony ', red_img1)
print('niebieski', blue_img1)
print('bialy ', white_img1)
print('szary ', gray_img1)
print('zolty ', yellow_img1)
print('------------2 SORTOWANIE---------------')
print('czerwony ', dopasowane_klocki_red)
print('niebieski', dopasowane_klocki_blue)
print('bialy ', dopasowane_klocki_white)
print('szary ', dopasowane_klocki_gray)
print('zolty ', dopasowane_klocki_yellow)
print('dziurki ', dopasowana_liczba_dziurek)
# Display loop: refresh windows until ESC (key code 27) is pressed.
while True:
    key_code = cv2.waitKey(10)
    if key_code == 27:
        # escape key pressed
        break
    # cv2.imshow('image', img)
    # cv2.imwrite('full_result.jpg', full_result)
    # cv2.imwrite('bg_result1.jpg', bg_result_filtr)
    # cv2.imshow('bg_result_darken', bg_result_darken)
    # cv2.imshow('bg_result_filtr', bg_result_filtr)
    # cv2.imshow('bg_result_clahe', bg_result_clahe)
    # cv2.imshow('mask clahe', bg_result_clahe)
    # cv2.imshow('bg result', bg_result)
    # cv2.imshow('foreground_result', foreground_result)
    # cv2.imshow('full result', full_result)
    # cv2.imshow('foreground_result white ', foreground_result_white)
    # cv2.imwrite('foreground_result.jpg', foreground_result)
    # cv2.imshow('fg result', fg_result)
    # cv2.imshow('fg result1', fg_result1)
    # cv2.imshow('fg result2', fg_result2)
    # cv2.imshow('full result gray', full_result_gray)
    # cv2.imwrite('red_canny.jpg', red_result)
    # cv2.imwrite('blue_canny.jpg', blue_result)
    # cv2.imwrite('white_canny.jpg', white_result)
    # cv2.imwrite('gray_canny.jpg', gray_result)
    # cv2.imwrite('yellow_canny.jpg', yellow_result)
    # cv2.imshow('red result', red_result)
    # cv2.imshow('blue result', blue_result)
    # cv2.imshow('white result', white_result)
    # cv2.imshow('gray result', gray_result)
    # cv2.imshow('yellow result', yellow_result)
    # cv2.imshow('contours img', contours_color_img)
    cv2.imshow('dziurki', dziurki_img)
    # cv2.imshow('figure number', figure_number)
cv2.destroyAllWindows()
|
995,628 | 6ed8e9edff29cb2177d7721361ee6ea92c630c40 | from datetime import datetime, timedelta
from flask import Flask, request, redirect
from flask.helpers import make_response, url_for
from flask_sqlalchemy import SQLAlchemy
import jwt
app = Flask(__name__)
# NOTE(review): the database URI is empty — must be configured before running.
app.config['SQLALCHEMY_DATABASE_URI'] = ''
app.config['SECRET_KEY'] = 'flasksecretkey'
db = SQLAlchemy(app)
# Module-level placeholder; the handlers below build tokens themselves.
token = ''
class UserTable(db.Model):
    """User record holding login credentials and the last issued JWT."""
    # Fix: was `tablename`, which SQLAlchemy ignores; the dunder spelling is
    # required for the table name to take effect.
    __tablename__ = 'userr'
    id = db.Column('id', db.Integer, primary_key=True)
    login = db.Column('login', db.Unicode)
    # NOTE(review): password is declared as an Integer column — presumably it
    # should be a (hashed) Unicode value; confirm intent.
    password = db.Column('password', db.Integer)
    # Fix: the column name was 'login', duplicating the login column above.
    token = db.Column('token', db.Unicode)

    # Fix: was `init`, which Python never calls; the constructor must be
    # `__init__` for UserTable(...) keyword construction to work.
    def __init__(self, id, login, password, token):
        self.id = id
        self.login = login
        self.password = password
        self.token = token
@app.route('/login')
def login():
    """Authenticate with HTTP Basic auth and redirect to token generation.

    Returns a 401 challenge when credentials are missing, unknown, or wrong.
    """
    auth = request.authorization
    if auth:
        # Fix: was filter_by(login=auth) — `auth` is an Authorization object;
        # the lookup key is its username.
        select_this = UserTable.query.filter_by(login=auth.username).first()
        # Fix: guard against an unknown user before touching .password
        # (the original dereferenced None and crashed).
        if select_this is not None and auth.password == select_this.password:
            return redirect(url_for('getToken', auth=auth))
    # Fix: the original concatenated a string with the Authorization object,
    # which raises TypeError instead of returning the 401 response.
    return make_response('Could not verify login', 401,
                         {'WWW-Authenticate': 'Basic realm="Login required'})
def token_required(func):
    """Decorator rejecting requests that lack a valid JWT.

    Fix: the original was written as a decorator *factory* taking a value,
    yet is applied directly as @token_required, and it evaluated its check
    once at decoration time instead of per request.  This version checks the
    `var` query parameter (as produced by getToken's redirect) on each call.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        supplied = request.args.get('var')
        if not supplied:
            return '<h1>Hello, could not verify the token</h1>'
        try:
            jwt.decode(supplied, app.config['SECRET_KEY'], algorithms=['HS256'])
        except Exception:
            # Expired, malformed, or wrongly signed token.
            return '<h1>Hello, could not verify the token</h1>'
        return func(*args, **kwargs)
    return wrapper
@app.route('/getToken')
def getToken():
    """Issue a short-lived (5 minute) JWT and redirect to the protected page."""
    # Fix: request.args values are plain strings — the original then called
    # auth.username on that string, which raises AttributeError.
    auth = request.args.get('auth')
    token = jwt.encode({'user': auth,
                        'exp': datetime.utcnow() + timedelta(minutes=5)},
                       app.config['SECRET_KEY'])
    return redirect(url_for('somethingElse', var=token))
@app.route('/protected')
@token_required
def somethingElse():
    # Protected endpoint: only reachable when token_required lets the call through.
    return '<h1>Hello world</h1>'
# Fix: the guard compared against '__main' (missing the trailing underscores),
# so the development server never started when running this file directly.
if __name__ == '__main__':
    app.run(debug=True)
|
995,629 | af9e4838d4b29a4f30de6630262e15304f014c0c | #!/usr/bin/env python
import os
import subprocess
import sys
import optparse
import gzip
import shutil
# Prefer hashlib (new in Python 2.5); fall back to the legacy `sha` module on
# older interpreters.  Either way, `sha` ends up as a SHA-1 constructor.
try:
    import hashlib # New for python 2.5
    sha = hashlib.sha1
except:
    import sha
    sha = sha.sha

# Supported subcommands, recognised db-name suffixes, and the field delimiter.
CMDS = ['cat','q','partition','import','print']
TYPES = ['s3','fb','tsv.gz', 'cz']
delim = "\t"
def s3connect(db):
    """Open (or create) SQLite database *db* tuned for bulk, single-writer work.

    Returns a (connection, cursor) pair; text values come back as plain str.
    """
    import sqlite3
    conn = sqlite3.connect(db)
    conn.text_factory = str
    cur = conn.cursor()
    # Trade durability for throughput: no journal file, a large page cache,
    # no fsync waits, in-memory temp storage, and one exclusive lock held
    # for the whole session.
    for pragma in ('journal = OFF',
                   'cache_size = 1048576',
                   'synchronous = OFF',
                   'temp_store = 2',
                   'locking_mode = exclusive'):
        cur.execute('PRAGMA ' + pragma)
    return conn, cur
def s3tablename(cursor, db=None):
    """Return the name of the single table, optionally from attached db *db*.

    Exits the whole process when the database contains no table at all.
    """
    # I assume there is only one table in the database.
    if db:
        db += "."
    else:
        db = ""
    cursor.execute("SELECT name FROM %ssqlite_master WHERE type = 'table' LIMIT 1" % (db))
    name = cursor.fetchone()
    if not name:
        print >>sys.stderr, "No table in input file"
        sys.exit(-1)
    name = name[0]
    return name
def s3_cat(dbs, args):
    """Concatenate the SQLite databases in *dbs* into a new output file "s3".

    Schema and table name come from the first input; on failure the partially
    written output file is removed before the exception propagates.
    """
    # Take a list of databases on stdin and cat them together using the schema and table name found in the first database.
    success = False
    try:
        connection, cursor = s3connect("s3")
        first = True
        for line in dbs:
            cursor.execute("attach database '%s' as input;" % line)
            if first:
                table = s3tablename(cursor, 'input')
                # CREATE TABLE ... AS copies schema and rows in one statement.
                cursor.execute("create table %s as select * from input.%s;" % (table,table))
                first = False
            else:
                cursor.execute("insert into %s select * from input.%s;" % (table, table))
            cursor.execute('detach database input;')
        connection.commit()
        success = True
        # NOTE(review): `line` holds the last loop value; an empty *dbs* list
        # would raise NameError here — presumably inputs are never empty.
        if not line: print >>sys.stderr, "db cat finishing with empty stdin"
    except:
        if not success:
            # Don't leave a half-written output database behind.
            os.unlink("s3")
        raise
    return 0
def iscolumnname(s):
    """Return True when *s* is a valid column name: letter, then letters/digits/_.

    Fixes two defects in the original test:
    - it rejected names containing digits (e.g. "col1"), even though callers
      explicitly describe column names as [a-z][a-z0-9_]*;
    - it raised IndexError on an empty string.
    """
    if not s or not s[0].isalpha():
        return False
    return all(ch.isalnum() or ch == '_' for ch in s)
class Query():
    """Parsed representation of this tool's tiny SQL-ish query language."""
    def __init__(self, qstr, columns):
        """Parse query string *qstr* against the available *columns*.

        Populated attributes:
        inputcols is the set of input columns referenced.
        where_str is the where string (if any).
        projection_str is the projection (select) string (if any).
        projection_list is the list of projection_str elements.
        parsed_projections is a list of tuples describing each element.
        groupby is a list of columns to implicitly partition by (if any).
        """
        qstr = qstr.strip()
        where = qstr.lower().find("where ")
        #Make sure it's a token and not part of column name like, say, "somewhere"
        #Since "select" is implicit, "where" may be at the beginning of the string
        if (where > 0 and qstr[where-1] == " "):
            # Found a good where clause
            self.where_str = qstr[where:].strip()
            self.projection_str = qstr[:where].strip()
        elif where == 0:
            self.projection_str = ''
            self.where_str = qstr.strip()
        elif where == -1:
            self.projection_str = qstr.strip()
            self.where_str = ''
        if not self.projection_str:
            # No explicit projection: select every known column.
            self.projection_str = ','.join(columns)
        # Now decompose the projection list
        if not self.projection_str:
            self.projection_list = []
        else:
            ps = self.projection_str.split(',')
            ps = [p.strip() for p in ps]
            self.projection_list = ps
        self.parsed_projections = []
        #Now look for expressions
        parsedp = []
        self.outputcols = []
        self.inputcols = set()
        self.groupby = set()
        aggregate = False
        for p in self.projection_list:
            # Strip an optional "<expr> as <alias>" suffix.
            aspos = p.lower().find(' as ')
            alias = None
            if aspos > 0:
                alias = p[aspos+4:]
                p = p[:aspos]
                self.outputcols.append(alias)
            if iscolumnname(p):
                # Is a simple field name containing [a-z][a-z0-9_]*
                if columns[0] != '*' and p not in columns:
                    print >>sys.stderr, "Invalid column name", p, " (", ','.join(columns), ")"
                    sys.exit(2)
                self.parsed_projections.append(('column', p))
                self.inputcols.add(p)
                self.groupby.add(p)
                if not alias: self.outputcols.append(p)
            elif p.isdigit():
                # Bare numeric literal.
                self.parsed_projections.append(('number', p))
                if not alias: self.outputcols.append(p)
            else:
                if '(' in p and p[-1] == ')':
                    # Looks like fn(args).
                    s = p.strip(')').split('(', 1)
                    fn = s[0].strip()
                    args = s[1].strip()
                    if fn and iscolumnname(fn) and (args == "" or args == "*" or iscolumnname(args)):
                        if fn in ["sum","min","max","avg","count"]:
                            # Aggregate function
                            self.parsed_projections.append(('aggregate', fn, args))
                            aggregate = True
                        else:
                            # Simple function
                            self.parsed_projections.append(('fn', fn, args))
                            if args != "" and args != "*":
                                self.groupby.add(args)
                        if args != "" and args != "*":
                            self.inputcols.add(args)
                        if not alias: self.outputcols.append(fn)
                else:
                    # Must be some arithmetic expression instead.
                    # XXX. Go no further right now. Assume not aggregate
                    self.parsed_projections.append(('expr', p))
                    self.groupby.add(p)
                    if not alias: self.outputcols.append('expr')
        # Only aggregate queries implicitly group by their plain columns.
        if not aggregate:
            self.groupby = []
        self.inputcols = tuple(self.inputcols)
        self.groupby = tuple(self.groupby)
        #print >>sys.stderr, self.__dict__
def fbselect(db, args):
    """Build a Query over the FastBit table *db* from the CLI words *args*."""
    return Query(' '.join(args), fbcols(db))
def fb_q(db, args):
    """Query FastBit db *db* via the external `ibis` tool, writing binary "fb".

    Returns the ibis process exit status.
    """
    q = fbselect(db, args)
    # Ibis won't respond to a aggregates without a WHERE clause, so synthesize
    # a tautology on the first group-by column.
    if q.groupby and not q.where_str:
        col = q.groupby[0]
        q.where_str = "WHERE %s=%s" % (col, col)
    cmdline = ["ibis", "-q", "SELECT " + q.projection_str + " " + q.where_str,
               "-output-as-binary", "fb", "-d", db]
    return subprocess.call(cmdline, stderr=sys.stderr, stdout=sys.stdout)
def s3select(db, args):
    """Translate CLI query *args* into SQL over SQLite database *db*.

    Returns (connection, cursor, sql_string); the caller executes the SQL.
    """
    q = Query(' '.join(args), '*')
    group = (" GROUP BY " + ",".join(q.groupby)) if q.groupby else ""
    conn, cur = s3connect(db)
    sql = "select %s from %s %s %s" % (q.projection_str, s3tablename(cur),
                                       q.where_str, group)
    return conn, cur, sql
def s3_q(db, args):
    """Run query *args* over *db*, materializing the result as a new db file "s3"."""
    conn,cursor,qstr = s3select(db, args)
    qstr = "create table output.db as " + qstr
    #print >>sys.stderr, qstr
    cursor.execute("attach database 's3' as output;")
    try:
        cursor.execute(qstr)
        for row in cursor:
            # User may have done something with visible results
            print "\t".join([str(i) for i in row])
        conn.commit()
    except:
        # Remove the partial output before propagating the error.
        os.unlink("s3")
        raise
    return 0
def usage():
    """Print command-line help to stderr and return exit status 1."""
    print >>sys.stderr, "Usage: %s (%s) <db>" % (sys.argv[0], '|'.join(CMDS))
    print >>sys.stderr, "Works on databases of the following types:"
    print >>sys.stderr, "\ttsv.gz\tTab-separated variable (compressed)"
    print >>sys.stderr, "\tcz\tDirectorying containing compressed columns"
    print >>sys.stderr, "\ts3\tSQLite3 tables"
    print >>sys.stderr, "\tfb\tFastbit column-oriented tables"
    print >>sys.stderr, "\nCommand usage:"
    print >>sys.stderr, "\timport <columns...> <db>\tImport text data into <db>"
    print >>sys.stderr, "\tprint [<columns...>] <db>\tPrint some or all columns (or SQL aggregates) from <db>"
    print >>sys.stderr, "\tcat <db>\t\t\tConcatenate list of databases given on stdin to output <db>"
    print >>sys.stderr, "\tpartition [-n bins] [-m] <db>\tPartition input <db> into <bins> partitions"
    print >>sys.stderr, "\tq <query> <db>\t\t\tSQL query on <db> into new database (named cz|s3|fb|tsv.gz)"
    return 1
def checkdbtype(name):
    """Infer the database type from the file-name suffix; exit(4) if unknown."""
    for t in TYPES:
        if name.endswith(t):
            return t
    # Legacy spelling: treat "*.sqlite" as an s3 (SQLite3) database.
    if name.endswith("sqlite"):
        return 's3'
    print >>sys.stderr, "Unsupported db name suffix:", name
    print >>sys.stderr, "Must be one of:", ' '.join(TYPES)
    sys.exit(4)
def fb_partition(db, args, options):
    """Dump all rows of FastBit table *db* for the columns listed in *args*.

    NOTE(review): despite the name this does not partition anything yet (see
    the XXX below); *options* is unused.
    """
    import fastbit
    # XXX need way to enumerate all columns or select *
    # Tautological WHERE clause: fastbit.Query requires one.
    qh = fastbit.Query(",".join(args), db, args[0] + '=' + args[0])
    ncols = qh.get_result_columns()
    rh = fastbit.ResultSet(qh)
    #print >>sys.stderr, "Rows:", qh.get_result_rows()
    while (rh.has_next() == 0): #More
        vals = []
        # Probe the typed accessors in turn; the first non-falsy hit wins.
        for i in xrange(0, ncols):
            if rh.getString(i): vals.append(rh.getString(i).strip('"'))
            elif rh.getInt(i): vals.append(rh.getInt(i))
            elif rh.getLong(i): vals.append(rh.getLong(i))
            elif rh.getDouble(i): vals.append(rh.getDouble(i))
            elif rh.getBytes(i): vals.append(rh.getBytes(i))
            else: print >>sys.stderr, "Unknown type for column", i
        if vals:
            print '\t'.join(vals)
def s3_partition(db, args, options):
    """Partition a single sqlite database into multiple databases
    using the value of a user-supplied expression OR by time binning.

    Output partitions are named "<hash>.s3"; on failure any partially
    written partitions are removed.
    """
    import hashlib
    import struct
    qstr = ' '.join(args)
    connection, cursor = s3connect(db)
    name = s3tablename(cursor)
    # Grab the original CREATE TABLE statement so every partition gets the
    # identical schema.
    cursor.execute("SELECT sql FROM sqlite_master WHERE type = 'table'" + \
                   ' AND name = ? LIMIT 1', (name,))
    createsql = cursor.fetchone()[0]
    if options.mod:
        # Fixed-size binning: round the integer key down to a bin boundary.
        qstr = 'CAST((%s) AS int) - (CAST((%s) AS int) %% %d)' % \
               (qstr, qstr, options.bins)
    #print >>sys.stderr, 'SELECT *, (%s) AS _part_ FROM %s' % (qstr, name)
    cursor.execute('SELECT *, (%s) AS _part_ FROM %s' %
                   (qstr, name))
    outputs = {}
    success = False
    try:
        for row in cursor:
            # The computed partition key rides along as the last column.
            part = row[-1]
            hash = sha(str(part))
            hash = struct.unpack_from("!Q",hash.digest())[0] % options.bins
            if not outputs.has_key(hash):
                # Lazily create the output db the first time a bucket is hit.
                oconn,ocur = s3connect(str(hash) + ".s3")
                outputs[hash] = oconn,ocur
                ocur.execute(createsql)
            else:
                oconn,ocur = outputs[hash]
            # Insert the row minus the synthetic _part_ column.
            query = 'INSERT INTO %s VALUES (%s)' % \
                    (name, ','.join(['?'] * (len(row)-1)))
            ocur.execute(query, row[:-1])
        success = True
    finally:
        for p in outputs:
            oconn,ocur = outputs[p]
            oconn.commit()
            ocur.close()
            oconn.close()
        cursor.close()
        connection.close()
        # On error, clean up any partially written partition files.
        if not success:
            for p in outputs:
                try:
                    os.unlink("%s.s3" % p)
                except:
                    pass
def fb_import(db, cols, types):
    """Import tab-separated stdin into FastBit table *db*.

    *cols*/*types* are parallel lists; types use first-letter codes
    (t=text, i=int, f/d=float/double, b=bytes).  Any existing table
    directory is replaced wholesale.
    """
    import fastbit
    fast = fastbit.FastBit()
    while True:
        # Column-major value buffers for this batch.
        colvals = []
        for i in xrange(0, len(cols)):
            colvals.append([])
        # Read roughly 1MB of input lines per batch.
        batch = sys.stdin.readlines(1000000)
        if not batch: break
        for line in batch:
            line = line.strip()
            rowvals = line.split("\t")
            # Pad short rows with None so every column gets a value.
            rowvals += [None] * (len(cols) - len(rowvals))
            #print line, rowvals, cols
            for i in xrange(0, len(cols)):
                if types[i].startswith('t'):
                    colvals[i].append(rowvals[i])
                elif types[i].startswith('i'):
                    colvals[i].append(int(rowvals[i]))
                elif types[i].startswith('f'):
                    colvals[i].append(float(rowvals[i]))
                elif types[i].startswith('d'):
                    colvals[i].append(float(rowvals[i]))
                elif types[i].startswith('b'):
                    colvals[i].append(rowvals[i])
                else:
                    print >>sys.stderr, "Unsupported/unknown type:", types[i]
        for i in xrange(0, len(cols)):
            fast.add_values(cols[i], types[i], colvals[i])
    # Replace any existing output directory before flushing.
    try:
        shutil.rmtree(db)
    except:
        pass
    fast.flush_buffer(db)
    fast.cleanup()
def s3_import(db, cols, types):
    """Import tab-separated stdin into a fresh SQLite table named "db".

    *types* is accepted for interface parity with the other importers;
    SQLite's dynamic typing makes it unnecessary here.
    """
    conn, cur = s3connect(db)
    # Re-assert the speed pragmas (s3connect sets them already; harmless).
    cur.execute('PRAGMA cache_size = 1048576')
    cur.execute('PRAGMA synchronous = OFF')
    cur.execute('PRAGMA temp_store = 2')
    cur.execute('CREATE TABLE %s(%s)' % ("db", ','.join(cols)))
    ncols = len(cols)
    insert = 'INSERT INTO %s VALUES (%s)' % ("db", ','.join(['?'] * ncols))
    for raw in sys.stdin:
        fields = raw.strip().split(delim)
        # Pad short rows with NULLs and drop any excess columns.
        fields = (fields + [None] * ncols)[:ncols]
        cur.execute(insert, fields)
    conn.commit()
    cur.close()
    conn.close()
def fbcols(db):
    """Return the column names of FastBit table *db* by parsing its -part.txt.

    The FastBit C API (and the python bindings) provide no way to enumerate
    columns, so the metadata file is read directly.
    """
    cols = []
    inside = False
    for raw in file(db + "/-part.txt"):
        raw = raw.strip()
        if not inside:
            if raw == "Begin Column":
                inside = True
        elif raw == "End Column":
            inside = False
        elif raw.startswith("name = "):
            cols.append(raw.split(" = ")[1])
    return cols
def fbresultgen(ncols, rh):
    """Yield rows (lists of values) from FastBit result set *rh*."""
    #print >>sys.stderr, "Rows:", qh.get_result_rows()
    while (rh.has_next() == 0): #More
        vals = []
        # Probe the typed accessors in turn; the first non-falsy hit wins.
        for i in xrange(0, ncols):
            if rh.getString(i): vals.append(rh.getString(i).strip('"'))
            elif rh.getInt(i): vals.append(rh.getInt(i))
            elif rh.getLong(i): vals.append(rh.getLong(i))
            elif rh.getDouble(i): vals.append(rh.getDouble(i))
            elif rh.getBytes(i): vals.append(rh.getBytes(i))
            else: print >>sys.stderr, "Unknown type for column", i
        yield vals
def s3_print(db, args):
    """Run query *args* over SQLite db *db* and print rows tab-separated."""
    connection, cursor, qstr = s3select(db, args)
    cursor.execute(qstr)
    for row in cursor:
        print '\t'.join([str(i) for i in row])
def fb_print(db, args):
    """Query FastBit db *db* and print matching rows tab-separated."""
    import fastbit
    q = fbselect(db, args)
    #print >>sys.stderr, p, db, w
    # Fastbit capi (Query method) doesn't implement aggregations (groupby), so just get the inputcols
    qh = fastbit.Query(','.join(q.inputcols), db, q.where_str)
    ncols = qh.get_result_columns();
    rh = fastbit.ResultSet(qh)
    # And then apply our internal aggregator to the results
    for row in vquery(q.inputcols, q, fbresultgen(ncols, rh)):
        print '\t'.join([str(v) for v in row])
def tsvcols(db):
    """Return the column names of tsv database *db*, or None when absent.

    The schema lives in "<db>.schema" as one delimiter-separated line.
    Fix: the original bare `except:` swallowed *every* exception (including
    KeyboardInterrupt/SystemExit); only a missing or unreadable schema file
    should map to None.
    """
    try:
        schema = file(db + ".schema")
    except IOError:
        return None
    return schema.readline().strip().split(delim)
def tsv_print(db, args):
    """Print rows of tsv db *db*, optionally filtered/projected by *args*."""
    if not args:
        # Just display the whole file
        subprocess.call(["zcat", "-f", db])
    else:
        cols = tsvcols(db)
        q = Query(' '.join(args), cols)
        for row in vquery(cols, q, tsvgenerator(db)):
            print '\t'.join([str(i) for i in row])
def czcols(db):
    """List the column names of cz directory *db* (files are named <col>.gz)."""
    return [fname[:-3] for fname in os.listdir(db)]
def cz_print(db, args):
    """Print rows of cz db *db* for the query words in *args*."""
    q = Query(' '.join(args), czcols(db))
    for row in vquery(q.inputcols, q, czgenerator(q.inputcols, db)):
        print '\t'.join([str(i) for i in row])
def cz_q(db, args):
    """Query cz database *db* and write the result as a new "cz" database."""
    query = Query(' '.join(args), czcols(db))
    rows = vquery(query.inputcols, query, czgenerator(query.inputcols, db))
    czwrite("cz", query.outputcols, rows)
def tsv_q(db, args):
    """Query tsv database *db* and write the result as a new "tsv.gz" database."""
    columns = tsvcols(db)
    query = Query(' '.join(args), columns)
    tsvwrite("tsv.gz", query.outputcols, vquery(columns, query, tsvgenerator(db)))
def czgenerator(cols, db):
    """Generate a series of tuples with the specified columns.

    Each column lives in its own gzip file <db>/<col>.gz; the files are read
    in lockstep, with the first column's file driving iteration.
    """
    if not cols:
        cols = czcols(db)
    handles = [gzip.GzipFile(db + "/" + c + ".gz") for c in cols]
    lead, rest = handles[0], handles[1:]
    for line in lead:
        yield [line.strip()] + [h.readline().strip() for h in rest]
def tsvgenerator(db):
    """Yield rows from the gzipped tsv file *db*, split on the delimiter."""
    for raw in gzip.GzipFile(db):
        yield raw.strip().split(delim)
def to_number(s):
    """Parse *s* as a number, preferring int when the value is integral."""
    value = float(s)
    return int(value) if value.is_integer() else value
def vquery(cols, q, gen):
    """Evaluate parsed query *q* over the rows produced by generator *gen*.

    *cols* is the ordered list of columns the generator yields.  Without a
    group-by, rows are projected straight through; with aggregates, rows are
    buffered into per-group partitions and reduced at the end.

    Fixes:
    - the "avg" aggregate called avg(), which is defined nowhere and raised
      NameError at runtime; the mean is now computed explicitly;
    - dict.has_key() replaced with setdefault (has_key is long deprecated).
    """
    # Project certain columns
    # cols parameter is the ordered list of columns that will be produced by the generator
    groupbycolnums = [cols.index(a) for a in q.groupby]
    partition = {}
    for row in gen:
        if q.groupby:
            part = tuple([row[i] for i in groupbycolnums])
            partition.setdefault(part, []).append(row)
        else:
            vals = []
            for a in q.parsed_projections:
                # XXX add support for simple functions and expressions
                assert(a[0] == 'column')
                vals.append(row[cols.index(a[1])])
            yield vals
    for p in partition.values():
        val = []
        for a in q.parsed_projections:
            if a[0] == 'column':
                # Representative value taken from the group's first row.
                val.append(p[0][cols.index(a[1])])
            elif a[0] == 'aggregate':
                op,arg = a[1:]
                if op == "count":
                    val.append(str(len(p)))
                else:
                    i = cols.index(arg)
                    vec = [to_number(v[i]) for v in p]
                    if op == "sum":
                        f = sum(vec)
                    elif op == "max":
                        f = max(vec)
                    elif op == "min":
                        f = min(vec)
                    elif op == "avg":
                        # Fix: avg() did not exist; mean = sum / count.
                        f = sum(vec) / float(len(vec))
                    else:
                        print >>sys.stderr, "Unsupported function", op
                        sys.exit(2)
                    val.append(f)
        yield val
def czwrite(db, cols, gen):
    """Write rows from *gen* into new cz directory *db*, one gzip per column.

    Fails (os.mkdir raises) if the output directory already exists.
    """
    os.mkdir(db)
    cfh = []
    for c in cols:
        cfh.append(gzip.GzipFile(db + "/" + c + ".gz", "w"))
    for vals in gen:
        for i in xrange(0,len(cols)):
            print >>cfh[i], vals[i]
def tsv_fo_generator(cols, fo):
    """Yield delimiter-split rows from file object *fo*, padded to len(cols)."""
    width = len(cols)
    for raw in fo:
        fields = raw.strip().split(delim)
        yield fields + [None] * (width - len(fields))
def filelist_generator(fl, cols, gen):
    """Chain the rows produced by gen(cols, f) over every input *f* in *fl*."""
    for source in fl:
        for record in gen(cols, source):
            yield record
def cz_import(db, cols, types):
    """Import tab-separated stdin into a new cz directory *db* (*types* unused)."""
    rows = tsv_fo_generator(cols, sys.stdin)
    czwrite(db, cols, rows)
def tsvwrite(db, cols, gen):
    """Write rows from *gen* into gzip file *db*, plus a "<db>.schema" header file."""
    schema = file(db + ".schema", "w")
    print >>schema, "\t".join(cols)
    schema.close()
    fh = gzip.GzipFile(db, "w")
    for row in gen:
        print >>fh, '\t'.join([str(i) for i in row])
    fh.close()
def tsv_import(db, cols, types):
    """Import tab-separated stdin into tsv.gz database *db* (*types* unused)."""
    rows = tsv_fo_generator(cols, sys.stdin)
    tsvwrite(db, cols, rows)
def tsv_cat(dbs, args):
    """Concatenate the tsv.gz databases listed in *dbs* into one "tsv.gz" output.

    Fix: the original handed raw file *names* to tsv_fo_generator, which
    expects an open file object — iterating the name string yields its
    characters, and the gzipped content was never decompressed.  Open each
    input through tsvgenerator instead (its (db)-only signature is adapted
    to filelist_generator's gen(cols, f) convention).
    """
    cols = tsvcols(dbs[0])
    assert(cols)
    tsvwrite("tsv.gz", cols,
             filelist_generator(dbs, cols, lambda c, f: tsvgenerator(f)))
def cz_cat(dbs, args):
    """Concatenate the cz databases listed in *dbs* into one "cz" output.

    Fix: the original referenced cz_fo_generator, which is defined nowhere in
    the file and raised NameError on first use; czgenerator(cols, db) has the
    exact gen(cols, f) signature filelist_generator expects.
    """
    cols = czcols(dbs[0])
    assert(cols)
    czwrite("cz", cols, filelist_generator(dbs, cols, czgenerator))
def tsv_partition(db, args, options):
    """Partition an input file.
    Split an inputfile into n pieces. By default, the first
    whitespace-delimited field is used as the key. All lines with the
    same key will be placed in the same output file. The -r option can be
    used to specify a Perl-compatible regular expression that matches the
    key. If the regex contains a group, then what matches the group is
    the key; otherwise, the whole match is the key.
    Output files are numbered 1 to n and created in the current directory.

    Fixes: the regex path compiled options.regex (that namespace has no
    .regex attribute, and `re` was never imported), and the -m path called
    int() on the list produced by field slicing.
    """
    import re  # only this function needs it
    p = optparse.OptionParser()
    p.add_option("-r", "--regex", help="Regex that matches the key portion of input lines")
    p.add_option("-f", "--field", default="1", help="Use field # or range (numbered starting with 1).")
    (localoptions, args) = p.parse_args(args)
    #print >>sys.stderr, "Writing to", ofile
    if localoptions.regex:
        # Fix: was re.compile(options.regex) — wrong options namespace.
        localoptions.regex = re.compile(localoptions.regex)
    if localoptions.field:
        # Convert 1-based "N" or "N-M" into a 0-based half-open slice.
        if "-" in localoptions.field:
            localoptions.field = localoptions.field.split("-")
            localoptions.field = [int(i)-1 for i in localoptions.field]
            localoptions.field[1] += 1
        else:
            localoptions.field = [int(localoptions.field) - 1, int(localoptions.field)]
    files = []
    for i in range(0, options.bins):
        fname = str(i+1) + ".tsv.gz"
        files.append(gzip.GzipFile(fname, "w"))
    if db.endswith("gz"):
        f = gzip.GzipFile(db)
    else:
        f = file(db)
    for line in f:
        if localoptions.regex:
            key = localoptions.regex.search(line)
            if key:
                g = key.groups()
                if len(g):
                    key = g[0]
                else:
                    key = key.group(0)
            else:
                print >>sys.stderr, "Key not found in line:", line.rstrip()
        else:
            words = line.split("\t")
            #print words[localoptions.field[0]:localoptions.field[1]]
            key = words[localoptions.field[0]:localoptions.field[1]]
        if options.mod:
            # Fix: the field path produces a list; bin on its first element.
            binkey = key[0] if isinstance(key, list) else key
            i = int(binkey) % options.bins
        else:
            # Hash the stringified key into one of the bins.
            i = int(sha(str(key)).hexdigest(), 16) % options.bins
        files[i].write(line)
    for f in files: f.close()
def main():
    """Command-line entry point; returns a process exit status."""
    if len(sys.argv) < 2:
        return usage()
    args = sys.argv[1:]
    cmd = args.pop(0)
    if cmd not in CMDS:
        return usage()
    if cmd == 'cat':
        # cat takes list of files on STDIN
        dbs = [i.strip() for i in sys.stdin]
        # All inputs must share one database type.
        types = set()
        for d in dbs:
            types.add(checkdbtype(d))
        if not types:
            print >>sys.stderr, "No inputs specified on STDIN"
            return 3
        if len(types) > 1:
            print >>sys.stderr, "Cannot mix db types:", str(types)
            return 4
        dbtype = list(types)[0]
        return dispatch[dbtype]['cat'](dbs, args)
    else:
        # last argument is a db file (for ease of use with fm or xargs)
        if len(args) < 1:
            print >>sys.stderr, "Usage:", sys.argv[0], cmd, "<query> <db>"
            return 1
        db = args.pop()
        dbtype = checkdbtype(db)
        if cmd == 'partition':
            parser = optparse.OptionParser(usage="db partition [options] <query>")
            parser.add_option('-n', '--bins', action='store', type='int', default=256,
                              help='number of partitions to create')
            parser.add_option('-m', '--mod', action='store_true',
                              help='bin values into partitions of fixed size')
            options, args = parser.parse_args(args=args)
            if len(args) < 1:
                parser.error('no query specified')
            return dispatch[dbtype][cmd](db, args, options)
        elif cmd == 'q':
            if len(args) < 1:
                print >>sys.stderr, "Usage:", sys.argv[0], cmd, "<query> <db>"
                return 1
            return dispatch[dbtype][cmd](db, args)
        elif cmd == 'print':
            return dispatch[dbtype][cmd](db, args)
        elif cmd == 'import':
            if len(args) < 1:
                print >>sys.stderr, "Usage:", sys.argv[0], cmd, "<columns> <db>"
                return 1
            # Column specs are "name" or "name:type".
            cols = []
            types = []
            for a in args:
                x = a.split(':')
                cols.append(x[0])
                if len(x) > 1:
                    types.append(x[1])
                else:
                    types.append(None)
            return dispatch[dbtype][cmd](db, cols, types)
        else:
            # cmd passed the CMDS check but has no handler — unreachable.
            assert(0)
    assert(0)
# Per-db-type command dispatch table; None marks unimplemented combinations.
dispatch = {
    's3': {'q': s3_q, 'partition': s3_partition, 'cat': s3_cat, 'import': s3_import, 'print': s3_print},
    'fb': {'q': fb_q, 'partition': None, 'cat': None, 'import': fb_import, 'print': fb_print},
    'tsv.gz': {'q': tsv_q, 'partition': tsv_partition, 'cat': tsv_cat, 'import': tsv_import, 'print': tsv_print},
    'cz': {'q': cz_q, 'partition': None, 'cat': cz_cat, 'import': cz_import, 'print': cz_print},
}
sys.exit(main())
|
995,630 | bdf7a6aca1a12d98f8050f477e3a3565fafa1831 |
def foo2():
    """Demonstrate that locals() reports the function's local namespace."""
    x=100
    print "Local:" ,locals()
    print x
if __name__ == '__main__':
    print "this is the main program"
    foo2()
# To import a function, run Python from the directory containing this file,
# import the module, and call the specific function.
995,631 | e7e23f45eb6d53050273d4c0f66c5f3820b2745a | import pytest
from flask import url_for
from app import create_app, mail
from app.models import db, User, Store, Product
@pytest.fixture
def app():
    """Flask application instance wired with the 'test' configuration."""
    application = create_app('test')
    return application
@pytest.fixture
def init_database():
    """Create the database schema for a test; drop it again on teardown."""
    db.create_all()
    yield
    db.drop_all()
@pytest.fixture
def authenticated_request(client):
    """Create a user with a store, log the test client in, and yield the user.

    Fix: the login response was bound to an unused variable; the POST is
    issued purely for its session side effect.
    """
    new_user = User.create("test@example.com", "examplepass")
    store = Store(name="Test Store", user=new_user)
    db.session.add(store)
    db.session.commit()
    client.post(url_for('user.login'), data={
        'email': "test@example.com",
        'password': "examplepass"
    }, follow_redirects=True)
    yield new_user
@pytest.fixture
def mail_outbox():
    """Capture outgoing mail during a test, exposing the recorded messages."""
    with mail.record_messages() as recorded:
        yield recorded
@pytest.fixture
def user_with_product():
    """Yield a user who owns a store containing a single product."""
    owner = User.create("test@example.com", "pass")
    shop = Store(name="Test Store", user=owner)
    item = Product(name='Test Product', description='a product', store=shop)
    # Adding the product cascades the related store through the relationship
    # — presumably; confirm against the model definitions.
    db.session.add(item)
    db.session.commit()
    yield owner
# pytest --cov-report term-missing --cov=app
|
995,632 | ca50b26b3fd05b82f8ac27876129bf846784a0d0 | # -*- coding: utf-8 -*-
# Author: WangZi
# Mail: wangzitju@163.com
# Created Time:
###########################
# template method
# processing framework is defined in father class
# while concrete prcessing method is implemented in child class
from abc import ABCMeta, abstractmethod
class AbstractDisplay(metaclass=ABCMeta):
    """Template-method base class.

    display() fixes the rendering skeleton; concrete subclasses supply the
    open/print/close steps.
    """

    @abstractmethod
    def open(self):
        pass

    @abstractmethod
    def print(self):
        pass

    @abstractmethod
    def close(self):
        pass

    def display(self):
        """Template method: open once, print five times, close once."""
        self.open()
        for _ in range(5):
            self.print()
        self.close()
class CarDisplay(AbstractDisplay):
    """Render a single character five times, wrapped in << ... >>."""

    def __init__(self, ch):
        # The character repeated by each print() step.
        self.ch = ch

    def close(self):
        print(">>", end='\n')

    def open(self):
        print("<<", end='')

    def print(self):
        print(self.ch, end='')
class StringDisplay(AbstractDisplay):
    """Render a string framed by +---+ rules above and below."""

    def __init__(self, string):
        self.string = string

    def printLine(self):
        # Horizontal rule sized to the string, e.g. +-----+
        print('+' + '-' * len(self.string) + '+')

    def open(self):
        self.printLine()

    def print(self):
        print('|{}|'.format(self.string))

    def close(self):
        self.printLine()
if __name__ == '__main__':
    # Same template method, two different renderings.
    d1 = CarDisplay('H')
    d2 = StringDisplay('Hello, World.')
    d1.display()
    d2.display()
|
995,633 | 00f2df37799d68cddc29ec7f086f2bf13c71bc05 | from collections import defaultdict
# Best physics score seen per (name, hat colour) pair.
dwarfs = {}
# How many input lines mentioned each hat colour (duplicates included).
dwarf_name_hat_color_count = defaultdict(int)

while True:
    line = input()
    if line == "Once upon a time":
        break
    parts = line.split(" <:> ")
    dwarf_name, dwarf_hat_color, dwarf_physics = parts[0], parts[1], int(parts[2])
    key = dwarf_name, dwarf_hat_color
    # Keep only the strongest record for this dwarf/colour combination,
    # but tally the hat colour on every input line.
    if key not in dwarfs or dwarfs[key] < dwarf_physics:
        dwarfs[key] = dwarf_physics
    dwarf_name_hat_color_count[dwarf_hat_color] += 1

# Strongest first; ties broken by the more frequent hat colour.
order = lambda item: (-item[1], -dwarf_name_hat_color_count[item[0][1]])
for (name, hat_color), physics in sorted(dwarfs.items(), key=order):
    print(f"({hat_color}) {name} <-> {physics}")
|
995,634 | f9988acc80e8c3f89b44e88509f20f97b807e0f6 | import math
import euler
from sets import Set
# Python 2 script. For each perimeter p <= 1000, count the distinct
# right triangles with integer sides a + b + c == p, then report every
# p that sets a new record for the number of solutions.
squares = {}
matches = {}
# Lookup table: perfect square -> its root, for O(1) hypotenuse checks.
for i in range(1000):
    squares[i*i] = i
for p in range(3, 1001):
    print p
    matchCount = []
    for a in range(1, 998):
        if a >= p-2:
            break
        for b in range(1, 998):
            if a+b >= p-1:
                break
            c = a*a + b*b
            # c must itself be a perfect square for a right triangle.
            if c not in squares:
                continue
            c = int(math.sqrt(c))
            if (a + b + c) != p:
                continue
            # Canonical "max+mid+min" string de-duplicates (a, b) swaps.
            results = [a, b, c]
            results.sort()
            x = results.pop()
            resultstr = str(x)
            x = results.pop()
            resultstr += "+"+str(x)
            x = results.pop()
            resultstr += "+"+str(x)
            if resultstr not in matchCount:
                matchCount.append(resultstr)
    matches[p] = len(matchCount)
# Print each perimeter that beats the best solution count seen so far.
biggestVal = 0
for key, val in matches.items():
    if val > biggestVal:
        print key, val
        biggestVal = val
|
995,635 | b66ff58928d75dbe792dcdf08223c57ca8d96931 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Event
from textblob import TextBlob
from collections import deque
import plotly.graph_objs as go
# Sentiment (polarity/subjectivity) of each line of negative.txt,
# computed once at startup with TextBlob.
neg_polarity = []
neg_subjectivity = []
with open('negative.txt','r') as n:
    negative_file = n.read().split('\n')
    for line in negative_file:
        neg_analysis = TextBlob(line)
        neg_polarity.append(neg_analysis.sentiment.polarity)
        neg_subjectivity.append(neg_analysis.sentiment.subjectivity)
# Rounded copies used for plotting; the callback below pops from these.
polarity_round = [round(i,2) for i in neg_polarity]
subjectivity_round = [round(i,2) for i in neg_subjectivity]
# X axis seeded with 50 points. Y1/Y2 are seeded here but the callback
# rebinds them locally, so these globals are effectively unused after this.
X = [i for i in range(50)]
Y1 = []
Y2 = []
#X.append(1)
Y1.append(1)
Y2.append(1)
app = dash.Dash(__name__)
# Layout: heading, the live graph, and a 1-second interval trigger.
app.layout = html.Div([
    html.H3('Negative Polarity and Subjectivity'),
    dcc.Graph(id='polarity_subjectivity_live',animate=True),
    dcc.Interval(id='interval_component',interval=1000)
])
# NOTE(review): `events=[Event(...)]` is the legacy (pre-1.0) dash callback
# API; newer dash uses Input('interval_component', 'n_intervals') — confirm
# the installed dash version supports this form.
@app.callback(
    Output('polarity_subjectivity_live','figure'),
    events=[Event('interval_component','interval')]
)
def live_polarity_subjectivity():
    """Rebuild the polarity/subjectivity figure on every interval tick."""
    # X grows by one each tick but X.pop(0) below is commented out, so X
    # keeps growing while the Y series shrink — the lengths diverge.
    # NOTE(review): likely unintended; confirm whether X.pop(0) should run.
    X.append(X[-1]+1)
    # Local rebinding: the module-level Y1/Y2 are not modified here.
    Y1 = polarity_round
    Y2 = subjectivity_round
    traces = []
    traces.append(go.Scatter(
        x=list(X),
        y=Y1,
        name='polarity',
        mode='lines+markers'
        )
    )
    traces.append(go.Scatter(
        x=list(X),
        y=Y2,
        name='subjectivity',
        mode='lines+markers'
        )
    )
    # Slide the window once 10 points have accumulated.
    if len(X) >= 10:
        #X.pop(0)
        polarity_round.pop(0)
        subjectivity_round.pop(0)
    return {'data' : traces}
if __name__ == '__main__':
    app.run_server(debug=True)
|
995,636 | 8961d02da6d859b4b421fa698370f5b63162734f | # Generated by Django 2.0.6 on 2018-12-02 19:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``supplier.phone`` (CharField, max_length=15, default '000000000')."""
    dependencies = [
        ('User', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='supplier',
            name='phone',
            field=models.CharField(default='000000000', max_length=15),
        ),
    ]
|
995,637 | 1ad625ba98fb789ba96022d85d8691491a9d69a2 | #!/bin/python3
import sys
import itertools
# Read n (kept only for the input protocol) and the array, then print the
# ordered pairs of elements whose values differ by at most 1.
n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
# Bug fix: the original removed elements from `pairs` while iterating over
# it, which silently skips the element following every removal. Build a
# filtered list instead of mutating during iteration.
pairs = [pair for pair in itertools.product(a, a) if abs(pair[0] - pair[1]) <= 1]
print(pairs)
class BinaryTree(object):
    """Binary tree node holding a value and optional left/right children."""
    def __init__(self, value):
        self.value = value
        self.leftChild = None
        self.rightChild = None
    def insertLeft(self, newData):
        """Attach newData on the left, pushing any existing subtree down."""
        node = BinaryTree(newData)
        if self.leftChild is not None:
            node.leftChild = self.leftChild
        self.leftChild = node
    def insertRight(self, newData):
        """Attach newData on the right, pushing any existing subtree down."""
        node = BinaryTree(newData)
        if self.rightChild is not None:
            node.rightChild = self.rightChild
        self.rightChild = node
    def inOrder(self, tree_val = None):
        """Append this subtree's values (as ints) to tree_val, left-to-right."""
        collected = [] if tree_val is None else tree_val
        if self.leftChild is not None:
            self.leftChild.inOrder(collected)
        collected.append(int(self.value))
        if self.rightChild is not None:
            self.rightChild.inOrder(collected)
def BST_check(arr):
    """Return True when arr is already in non-decreasing order."""
    return all(a <= b for a, b in zip(arr, arr[1:]))
# Build a small tree that happens to satisfy the BST property.
root_node = BinaryTree("34")
root_node.insertLeft("23")
root_node.insertRight("54")
left_subtree = root_node.leftChild
left_subtree.insertLeft("12")
left_subtree.insertRight("30")
right_subtree = root_node.rightChild
right_subtree.insertLeft("45")
right_subtree.insertRight("60")
# An in-order walk of a BST yields a sorted sequence; verify and report.
arr = []
root_node.inOrder(arr)
print(BST_check(arr))
|
# Apply forward letter shifts (z wraps to a) to the string, steering the
# budget k so the leftmost letters become 'a' where possible.
s=input()
k=int(input())
n=len(s)
# num[i] = forward shifts needed to turn s[i] into 'a' (0 if already 'a');
# 123 == ord('z') + 1, so e.g. 'b' costs 25 shifts (b -> ... -> z -> a).
num=[0]*n
for i in range(n):
    if s[i]!="a":
        num[i]=123-ord(s[i])
if sum(num)>=k:
    # Budget cannot cover every letter: greedily convert the leftmost
    # letters to 'a' while k allows, leaving the others unchanged.
    ans=""
    for i in range(n-1):
        if k>=num[i]:
            k-=num[i]
            ans+="a"
        else:
            ans+=s[i]
    # Last letter: convert to 'a' if affordable, otherwise advance it by
    # the remaining budget modulo 26.
    if k>=num[-1]:
        ans+="a"
    else:
        ans+=chr(97+((ord(s[-1])-97+k)%26))
else:
    # The whole string can be turned into 'a's with budget to spare; the
    # surplus is dumped into the final character (26 shifts is a no-op).
    k-=sum(num)
    ans="a"*(n-1)
    ans+=chr(97+(k%26))
print(ans)
|
995,640 | db5698eb1adacbf0e5b9fae34f1e52b443fc32a9 | #!/usr/bin/env python
import roslib
roslib.load_manifest('cram_ptu')
import rospy
import actionlib
import tf
import cogman_msgs.msg
class TFRelayAction(object):
    """Action server that re-publishes a transform received in an action goal
    onto TF via a TransformBroadcaster."""
    # create messages that are used to publish result
    _result = cogman_msgs.msg.TFRelayResult()
    def __init__(self, name):
        # Server is created with auto_start=False and started explicitly,
        # then the TF broadcaster is set up for use in execute_cb.
        self._action_name = name
        self._as = actionlib.SimpleActionServer(self._action_name, cogman_msgs.msg.TFRelayAction, execute_cb=self.execute_cb, auto_start = False)
        self._as.start()
        self._broadcaster = tf.TransformBroadcaster()
    def execute_cb(self, goal):
        # publish info to the console for the user
        rospy.loginfo('%s: Executing, publishing further transform to TF' % self._action_name)
        # let's broadcast
        trans = goal.transform.transform.translation
        rot = goal.transform.transform.rotation
        # Re-stamped with the current time rather than the goal's original stamp.
        self._broadcaster.sendTransform((trans.x, trans.y, trans.z),
                                        (rot.x, rot.y, rot.z, rot.w),
                                        rospy.Time.now(),
                                        goal.transform.child_frame_id,
                                        goal.transform.header.frame_id)
        self._as.set_succeeded(self._result)
if __name__ == '__main__':
    # The node's name doubles as the action server's name.
    rospy.init_node('tf_relay')
    TFRelayAction(rospy.get_name())
    rospy.spin()
|
995,641 | 728772e9bdf0c5d16c402a8810f61b0bcd8dbefe | #!/bin/python3
import sys #system functions and parameters
from datetime import datetime as dt #import with alias
print(dt.now())
# --- String basics: indexing, slicing, split/join ---
my_name = "Arjun"
print(my_name[0])
print(my_name[-1])
sentence = "This is a sentence."
print(sentence[:4])
print(sentence.split())
sentence_split = sentence.split()
sentence_join = ' '.join(sentence_split)
print(sentence_join)
# Escaped double quotes inside a double-quoted string.
quote = "He said, \"give me all your money\""
print(quote)
too_much_space = " hello "
print(too_much_space.strip()) #remove extra space
# Membership tests; lower() both sides for case-insensitive matching.
print("A" in "Apple")
letter = "A"
word = "Apple"
print(letter.lower() in word.lower()) #case insensitive search
movie = "The Hangover"
print("My favorite movie is {}.".format(movie)) #Placeholders
#Dictionary - key/value pairs {}
drinks = {"White Russian": 7, "Old Fashion": 10, "Lemon Drop": 8} #drink is key, price is value
print(drinks)
employees = {'Finance': ["Bob", "Linda", "Tina"], "IT": ["Gene", "Louise", "Teddy"], "HR": ["Jimmy Jr.", "Mort"]}
print(employees)
employees['Legal'] = ["Mr. Frond"] #add new key:value pair
print(employees)
employees.update({"Sales": ["Andie", "Ollie"]}) #add new key:value pair
print(employees)
# Reassign an existing key, then read it back with [] and .get().
drinks['White Russian'] = 8
print(drinks)
print(drinks.get("White Russian"))
|
995,642 | 139d00ea298d8a1ae0aa000399d0b43724df6a08 | import abc
import typing
import torch
import brats.functional as F
ONE_CLASS = 1  # presumably the channel count of a single-class problem — unused in this chunk, verify
CLASS_DIM = 1  # class/channel axis in (Batch, Channels, Depth, Height, Width) tensors
class Loss(abc.ABC):
    """
    Interface for loss
    """
    @abc.abstractmethod
    def __call__(self, prediction: torch.Tensor,
                 target: torch.Tensor) -> torch.Tensor:
        # Subclasses compute a scalar loss tensor from prediction and target.
        raise NotImplementedError
class DiceLoss(Loss):
    """
    Dice loss: sums ``1 - dice_score`` over the per-class dice scores
    returned by :func:`brats.functional.dice`.
    The loss will be averaged across batch elements.
    """
    def __init__(self, epsilon: float = 1e-6):
        """
        Args:
            epsilon: smooth factor to prevent division by zero
        """
        super().__init__()
        self.epsilon = epsilon
    def __call__(self, prediction: torch.Tensor,
                 target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            prediction: output of a network, expected to be already binarized.
                Dimensions - (Batch, Channels, Depth, Height, Width)
            target: labels. The classes (channels) should be one-hot encoded
                Dimensions - (Batch, Channels, Depth, Height, Width)
        Returns:
            torch.Tensor: Computed Dice Loss
        """
        per_class_dice = F.dice(prediction, target, self.epsilon)
        return torch.sum(1 - per_class_dice)
class NLLLossOneHot(Loss):
    """
    Negative log-likelihood loss for one-hot encoded targets.
    """
    def __init__(self, epsilon: float = 1e-12, *args, **kwargs):
        """
        Args:
            epsilon: Epsilon value to prevent log(0)
            *args: Arguments accepted by the torch.nn.NLLLoss() constructor
            **kwargs: Keyword arguments accepted by the torch.nn.NLLLoss()
                constructor
        """
        super().__init__()
        self.epsilon = epsilon
        self.nll_loss = torch.nn.NLLLoss(*args, **kwargs)
    def __call__(self, prediction: torch.Tensor,
                 target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            prediction: output of a network, expected to be already binarized.
                Dimensions - (Batch, Channels, Depth, Height, Width)
            target: labels. The classes (channels) should be one-hot encoded
                Dimensions - (Batch, Channels, Depth, Height, Width)
        Returns:
            torch.Tensor: Computed NLL Loss
        """
        # Collapse one-hot targets to class indices along the channel axis.
        class_indices = torch.argmax(target, dim=CLASS_DIM)
        # Epsilon keeps log() finite for zero-probability entries.
        log_probs = torch.log(prediction + self.epsilon)
        return self.nll_loss(log_probs, class_indices)
class ComposedLoss(Loss):
    """
    Compute a weighted sum of the provided losses.
    """
    def __init__(self, losses: typing.List[Loss],
                 weights: typing.List[float] = None):
        """
        Args:
            losses: A list of losses to use.
            weights: Weights for each loss. Defaults to 1.0 per loss.
        """
        # Consistent with the sibling losses (DiceLoss, NLLLossOneHot).
        super().__init__()
        if weights is not None:
            assert len(losses) == len(
                weights), "Number of losses should be the same as number of weights"
            self.weights = weights
        else:
            self.weights = [1.] * len(losses)
        self.losses = losses
    def __call__(self, prediction: torch.Tensor,
                 target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            prediction: output of a network, expected to be already binarized.
                Dimensions - (Batch, Channels, Depth, Height, Width)
            target: labels. The classes (channels) should be one-hot encoded
                Dimensions - (Batch, Channels, Depth, Height, Width)
        Returns:
            Weighted sum of provided losses
        """
        # Accumulator lives on the prediction's device so += stays device-local.
        weighted_sum = torch.tensor(0., device=prediction.device)
        for loss, weight in zip(self.losses, self.weights):
            weighted_sum += loss(prediction, target) * weight
        return weighted_sum
|
995,643 | c362caacf0df78ad95783f500f6a85bbc4640bb4 | # Copyright 2021 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: James Diprose, Tuan Chien
from __future__ import annotations
import os
import random
import urllib.parse
import uuid
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Tuple
import math
import pandas as pd
import pendulum
from click.testing import CliRunner
from faker import Faker
from pendulum import DateTime
from academic_observatory_workflows.config import schema_folder, test_fixtures_folder
from observatory.platform.bigquery import bq_find_schema
from observatory.platform.files import load_jsonl
from observatory.platform.observatory_environment import Table, bq_load_tables
# Journal/publisher licenses; None models "no license recorded".
LICENSES = ["cc-by", None]
# Crossref Events sources used when generating random events.
EVENT_TYPES = [
    "f1000",
    "stackexchange",
    "datacite",
    "twitter",
    "reddit-links",
    "wordpressdotcom",
    "plaudit",
    "cambia-lens",
    "hypothesis",
    "wikipedia",
    "reddit",
    "crossref",
    "newsfeed",
    "web",
]
# Research output (work) types assigned to generated papers.
OUTPUT_TYPES = [
    "journal_articles",
    "book_sections",
    "authored_books",
    "edited_volumes",
    "reports",
    "datasets",
    "proceedings_article",
    "other_outputs",
]
# Fundref country codes and the region each maps to.
FUNDREF_COUNTRY_CODES = ["usa", "gbr", "aus", "can"]
FUNDREF_REGIONS = {"usa": "Americas", "gbr": "Europe", "aus": "Oceania", "can": "Americas"}
FUNDING_BODY_TYPES = [
    "For-profit companies (industry)",
    "Trusts, charities, foundations (both public and private)",
    "Associations and societies (private and public)",
    "National government",
    "Universities (academic only)",
    "International organizations",
    "Research institutes and centers",
    "Other non-profit organizations",
    "Local government",
    "Libraries and data archiving organizations",
]
# Maps funding body type -> private ("pri") / government ("gov") subtype code.
FUNDING_BODY_SUBTYPES = {
    "For-profit companies (industry)": "pri",
    "Trusts, charities, foundations (both public and private)": "pri",
    "Associations and societies (private and public)": "pri",
    "National government": "gov",
    "Universities (academic only)": "gov",
    "International organizations": "pri",
    "Research institutes and centers": "pri",
    "Other non-profit organizations": "pri",
    "Local government": "gov",
    "Libraries and data archiving organizations": "gov",
}
@dataclass
class Repository:
    """A repository.

    Only ``name`` is required; the remaining fields are optional identifiers
    used when matching repositories across datasets. Equality and hashing are
    defined over all fields via :meth:`_key`.
    """
    name: str
    endpoint_id: str = None
    pmh_domain: str = None
    url_domain: str = None
    category: str = None
    ror_id: str = None
    def _key(self):
        # Tuple of every identifying field; basis for __eq__ and __hash__.
        return self.name, self.endpoint_id, self.pmh_domain, self.url_domain, self.category, self.ror_id
    def __eq__(self, other):
        if isinstance(other, Repository):
            return self._key() == other._key()
        # Bug fix: return NotImplemented (the sentinel) instead of raising
        # NotImplementedError, so Python can fall back to the other operand's
        # comparison; raising made `repo == <non-Repository>` blow up.
        return NotImplemented
    def __hash__(self):
        return hash(self._key())
    @staticmethod
    def from_dict(dict_: Dict):
        """Build a Repository from a dictionary of field values (missing keys -> None)."""
        name = dict_.get("name")
        endpoint_id = dict_.get("endpoint_id")
        pmh_domain = dict_.get("pmh_domain")
        url_domain = dict_.get("url_domain")
        category = dict_.get("category")
        ror_id = dict_.get("ror_id")
        return Repository(
            name,
            endpoint_id=endpoint_id,
            pmh_domain=pmh_domain,
            url_domain=url_domain,
            category=category,
            ror_id=ror_id,
        )
@dataclass
class Institution:
    """An institution.
    :param id: unique identifier.
    :param name: the institution's name.
    :param grid_id: the institution's GRID id.
    :param ror_id: the institution's ROR id.
    :param country_code: the institution's country code.
    :param country_code_2: the institution's 2-character country code (presumably ISO alpha-2 — verify against the data source).
    :param region: the institution's region.
    :param subregion: the institution's subregion.
    :param papers: the papers published by the institution.
    :param types: the institution type.
    :param country: the institution country name.
    :param coordinates: the institution's coordinates.
    :param repository: the institution's repository, if any.
    """
    id: int
    name: str = None
    grid_id: str = None
    ror_id: str = None
    country_code: str = None
    country_code_2: str = None
    region: str = None
    subregion: str = None
    papers: List[Paper] = None
    types: str = None
    country: str = None
    coordinates: str = None
    repository: Repository = None
def date_between_dates(start_ts: int, end_ts: int) -> DateTime:
    """Pick a uniformly random datetime between two timestamps.
    :param start_ts: the start timestamp (inclusive).
    :param end_ts: the end timestamp (exclusive).
    :return: the DateTime datetime.
    """
    chosen_ts = random.randint(start_ts, end_ts - 1)
    return pendulum.from_timestamp(chosen_ts)
@dataclass
class Paper:
    """A paper.
    :param id: unique identifier.
    :param doi: the DOI of the paper.
    :param title: the title of the paper.
    :param type: the work type, e.g. "journal-article".
    :param published_date: the date the paper was published.
    :param output_type: the output type, see OUTPUT_TYPES.
    :param authors: the authors of the paper.
    :param funders: the funders of the research published in the paper.
    :param journal: the journal this paper is published in.
    :param publisher: the publisher of this paper (the owner of the journal).
    :param events: a list of events related to this paper.
    :param cited_by: a list of papers that this paper is cited by.
    :param fields_of_study: a list of the fields of study of the paper.
    :param publisher_license: the paper's license at the publisher.
    :param publisher_is_free_to_read: whether the paper is free to read at the publisher.
    :param repositories: the list of repositories where the paper can be read.
    :param in_scihub: whether the paper is available in SciHub.
    :param in_unpaywall: whether the paper appears in Unpaywall.
    """
    id: int
    doi: str = None
    title: str = None
    type: str = None
    published_date: pendulum.Date = None
    output_type: str = None
    authors: List[Author] = None
    funders: List[Funder] = None
    journal: Journal = None
    publisher: Publisher = None
    events: List[Event] = None
    cited_by: List[Paper] = None
    fields_of_study: List[FieldOfStudy] = None
    publisher_license: str = None
    publisher_is_free_to_read: bool = False
    repositories: List[Repository] = None
    in_scihub: bool = False
    in_unpaywall: bool = True
    @property
    def access_type(self) -> AccessType:
        """Return the access type for the paper.
        :return: AccessType.
        """
        # Every flag is gated on in_unpaywall: papers absent from Unpaywall
        # carry no OA evidence.
        gold_doaj = self.in_unpaywall and self.journal.license is not None
        # NOTE(review): the parenthesised second operand equals `hybrid`
        # below, so `gold` reduces to gold_doaj or hybrid — confirm intent.
        gold = self.in_unpaywall and (gold_doaj or (self.publisher_is_free_to_read and self.publisher_license is not None and not gold_doaj))
        hybrid = self.in_unpaywall and self.publisher_is_free_to_read and self.publisher_license is not None and not gold_doaj
        bronze = self.in_unpaywall and self.publisher_is_free_to_read and self.publisher_license is None and not gold_doaj
        green = self.in_unpaywall and len(self.repositories) > 0
        green_only = self.in_unpaywall and green and not gold_doaj and not self.publisher_is_free_to_read
        oa = self.in_unpaywall and (gold or hybrid or bronze or green)
        black = self.in_scihub  # Add LibGen etc here
        return AccessType(
            oa=oa,
            green=green,
            gold=gold,
            gold_doaj=gold_doaj,
            hybrid=hybrid,
            bronze=bronze,
            green_only=green_only,
            black=black,
        )
    @property
    def oa_coki(self) -> COKIOpenAccess:
        """Return the COKI open access categories for the paper.
        :return: COKIOpenAccess.
        """
        at = self.access_type
        open = at.oa
        closed = not open
        # Publisher-hosted OA is the union of OA journal, hybrid and bronze.
        publisher = at.gold_doaj or at.hybrid or at.bronze
        other_platform = at.green
        publisher_only = publisher and not other_platform
        both = publisher and other_platform
        other_platform_only = at.green_only
        # Publisher categories
        oa_journal = at.gold_doaj
        hybrid = at.hybrid
        no_guarantees = at.bronze
        publisher_categories = PublisherCategories(oa_journal, hybrid, no_guarantees)
        # Other platform categories: one flag per repository category present.
        preprint = self.in_unpaywall and any([repo.category == "Preprint" for repo in self.repositories])
        domain = self.in_unpaywall and any([repo.category == "Domain" for repo in self.repositories])
        institution = self.in_unpaywall and any([repo.category == "Institution" for repo in self.repositories])
        public = self.in_unpaywall and any([repo.category == "Public" for repo in self.repositories])
        aggregator = self.in_unpaywall and any([repo.category == "Aggregator" for repo in self.repositories])
        other_internet = self.in_unpaywall and any([repo.category == "Other Internet" for repo in self.repositories])
        unknown = self.in_unpaywall and any([repo.category == "Unknown" for repo in self.repositories])
        other_platform_categories = OtherPlatformCategories(
            preprint, domain, institution, public, aggregator, other_internet, unknown
        )
        return COKIOpenAccess(
            open,
            closed,
            publisher,
            other_platform,
            publisher_only,
            both,
            other_platform_only,
            publisher_categories,
            other_platform_categories,
        )
@dataclass
class AccessType:
    """The access type of a paper.
    :param oa: whether the paper is open access or not.
    :param green: when the paper is available in an institutional repository.
    :param gold: when the paper is an open access journal or (it is not in an open access journal and is free to read
    at the publisher and has an open access license).
    :param gold_doaj: when the paper is an open access journal.
    :param hybrid: where the paper is free to read at the publisher, it has an open access license and the journal is
    not open access.
    :param bronze: when the paper is free to read at the publisher website however there is no license.
    :param green_only: where the paper is not free to read from the publisher, however it is available at an
    institutional repository.
    :param black: where the paper is available at SciHub.
    """
    oa: bool = None
    green: bool = None
    gold: bool = None
    gold_doaj: bool = None
    hybrid: bool = None
    bronze: bool = None
    green_only: bool = None
    black: bool = None
@dataclass
class COKIOpenAccess:
    """The COKI Open Access types (see Paper.oa_coki for how each is derived).
    :param open: open access via any route (publisher or other platform).
    :param closed: not open access.
    :param publisher: OA at the publisher (OA journal, hybrid or bronze).
    :param other_platform: OA on another platform, i.e. a repository (green).
    :param publisher_only: publisher OA but not other-platform OA.
    :param both: publisher OA and other-platform OA.
    :param other_platform_only: other-platform OA only (green only).
    :param publisher_categories: breakdown of the publisher OA route.
    :param other_platform_categories: breakdown of the other-platform OA route.
    """
    open: bool = None
    closed: bool = None
    publisher: bool = None
    other_platform: bool = None
    publisher_only: bool = None
    both: bool = None
    other_platform_only: bool = None
    publisher_categories: PublisherCategories = None
    other_platform_categories: OtherPlatformCategories = None
@dataclass
class PublisherCategories:
    """The publisher open subcategories (see Paper.oa_coki).
    :param oa_journal: published in an open access journal (gold DOAJ).
    :param hybrid: free to read at the publisher with a license, in a non-OA journal.
    :param no_guarantees: free to read at the publisher without a license (bronze).
    """
    oa_journal: bool = None
    hybrid: bool = None
    no_guarantees: bool = None
@dataclass
class OtherPlatformCategories:
    """The other platform open subcategories: each flag is set when the paper
    is available in at least one repository of that category (see Paper.oa_coki).
    :param preprint: available in a "Preprint" repository.
    :param domain: available in a "Domain" repository.
    :param institution: available in an "Institution" repository.
    :param public: available in a "Public" repository.
    :param aggregator: available in an "Aggregator" repository.
    :param other_internet: available in an "Other Internet" repository.
    :param unknown: available in a repository of "Unknown" category.
    """
    preprint: bool = None
    domain: bool = None
    institution: bool = None
    public: bool = None
    aggregator: bool = None
    other_internet: bool = None
    unknown: bool = None
@dataclass
class Author:
    """An author.
    :param id: unique identifier.
    :param name: the name of the author.
    :param institution: the author's institution (a single affiliation).
    """
    id: int
    name: str = None
    institution: Institution = None
@dataclass
class Funder:
    """A research funder.
    :param id: unique identifier.
    :param name: the name of the funder.
    :param doi: the DOI of the funder.
    :param country_code: the country code of the funder, see FUNDREF_COUNTRY_CODES.
    :param region: the region the funder is located in, see FUNDREF_REGIONS.
    :param funding_body_type: the funding body type, see FUNDING_BODY_TYPES.
    :param funding_body_subtype: the funding body subtype, see FUNDING_BODY_SUBTYPES.
    """
    id: int
    name: str = None
    doi: str = None
    country_code: str = None
    region: str = None
    funding_body_type: str = None
    funding_body_subtype: str = None
@dataclass
class Publisher:
    """A publisher.
    :param id: unique identifier.
    :param name: the name of the publisher.
    :param doi_prefix: the publisher DOI prefix (numeric, e.g. 1005 for DOIs "10.1005/...").
    :param journals: the journals owned by the publisher.
    """
    id: int
    name: str = None
    doi_prefix: int = None
    journals: List[Journal] = None
@dataclass
class FieldOfStudy:
    """A field of study.
    :param id: unique identifier.
    :param name: the field of study name.
    :param level: the field of study level (0 is the most general).
    """
    id: int
    name: str = None
    level: int = None
@dataclass
class Journal:
    """A journal
    :param id: unique identifier.
    :param name: the journal name.
    :param license: the license that articles are published under by the journal.
    """
    id: int
    name: str = None
    license: str = None
@dataclass
class Event:
    """An event.
    :param source: the source of the event, see EVENT_TYPES.
    :param event_date: the date of the event.
    """
    source: str = None
    event_date: DateTime = None
# Type aliases for collections of the ground-truth entities above.
InstitutionList = List[Institution]
AuthorList = List[Author]
FunderList = List[Funder]
PublisherList = List[Publisher]
PaperList = List[Paper]
FieldOfStudyList = List[FieldOfStudy]
EventsList = List[Event]
RepositoryList = List[Repository]
@dataclass
class ObservatoryDataset:
    """The generated observatory dataset.
    :param institutions: list of institutions.
    :param authors: list of authors.
    :param funders: list of funders.
    :param publishers: list of publishers.
    :param papers: list of papers.
    :param fields_of_study: list of fields of study.
    :param repositories: list of repositories.
    """
    institutions: InstitutionList
    authors: AuthorList
    funders: FunderList
    publishers: PublisherList
    papers: PaperList
    fields_of_study: FieldOfStudyList
    repositories: RepositoryList
def make_doi(doi_prefix: int):
    """Makes a randomised DOI given a DOI prefix.
    :param doi_prefix: the DOI prefix.
    :return: the DOI, e.g. "10.<prefix>/<uuid4>".
    """
    suffix = uuid.uuid4()
    return f"10.{doi_prefix}/{suffix}"
def make_observatory_dataset(
    institutions: List[Institution],
    repositories: List[Repository],
    n_funders: int = 5,
    n_publishers: int = 5,
    n_authors: int = 10,
    n_papers: int = 100,
    n_fields_of_study_per_level: int = 5,
) -> ObservatoryDataset:
    """Generate an observatory dataset.
    :param institutions: a list of institutions.
    :param repositories: a list of known repositories.
    :param n_funders: the number of funders to generate.
    :param n_publishers: the number of publishers to generate.
    :param n_authors: the number of authors to generate.
    :param n_papers: the number of papers to generate.
    :param n_fields_of_study_per_level: the number of fields of study to generate per level.
    :return: the observatory dataset.
    """
    faker = Faker()
    # Funders take DOI prefixes from 1000 upwards; publisher prefixes
    # continue immediately after the funder range.
    funder_doi_prefix = 1000
    funders = make_funders(n_funders=n_funders, doi_prefix=funder_doi_prefix, faker=faker)
    publishers = make_publishers(
        n_publishers=n_publishers, doi_prefix=funder_doi_prefix + len(funders), faker=faker
    )
    fields_of_study = make_fields_of_study(n_fields_of_study_per_level=n_fields_of_study_per_level, faker=faker)
    authors = make_authors(n_authors=n_authors, institutions=institutions, faker=faker)
    papers = make_papers(
        n_papers=n_papers,
        authors=authors,
        funders=funders,
        publishers=publishers,
        fields_of_study=fields_of_study,
        repositories=repositories,
        faker=faker,
    )
    return ObservatoryDataset(institutions, authors, funders, publishers, papers, fields_of_study, repositories)
def make_funders(*, n_funders: int, doi_prefix: int, faker: Faker) -> FunderList:
    """Make the funders ground truth dataset.
    :param n_funders: number of funders to generate.
    :param doi_prefix: the first DOI prefix; each funder gets the next prefix.
    :param faker: the faker instance.
    :return: a list of funders.
    """
    funders = []
    for funder_id in range(n_funders):
        country_code = random.choice(FUNDREF_COUNTRY_CODES)
        body_type = random.choice(FUNDING_BODY_TYPES)
        funders.append(
            Funder(
                funder_id,
                name=faker.company(),
                doi=make_doi(doi_prefix + funder_id),
                country_code=country_code,
                region=FUNDREF_REGIONS[country_code],
                funding_body_type=body_type,
                funding_body_subtype=FUNDING_BODY_SUBTYPES[body_type],
            )
        )
    return funders
def make_publishers(
    *,
    n_publishers: int,
    doi_prefix: int,
    faker: Faker,
    min_journals_per_publisher: int = 1,
    max_journals_per_publisher: int = 3,
) -> PublisherList:
    """Make publishers ground truth dataset.
    :param n_publishers: number of publishers.
    :param doi_prefix: the first publisher DOI prefix; each publisher gets the next one.
    :param faker: the faker instance.
    :param min_journals_per_publisher: the min number of journals to generate per publisher.
    :param max_journals_per_publisher: the max number of journals to generate per publisher.
    :return: a list of publishers.
    """
    publishers = []
    for publisher_id in range(n_publishers):
        journal_count = random.randint(min_journals_per_publisher, max_journals_per_publisher)
        # Journals get random UUID ids, names and licenses (None == no license).
        journals = [
            Journal(str(uuid.uuid4()), name=faker.company(), license=random.choice(LICENSES))
            for _ in range(journal_count)
        ]
        publishers.append(
            Publisher(publisher_id, name=faker.company(), doi_prefix=doi_prefix + publisher_id, journals=journals)
        )
    return publishers
def make_fields_of_study(
    *,
    n_fields_of_study_per_level: int,
    faker: Faker,
    n_levels: int = 6,
    min_title_length: int = 1,
    max_title_length: int = 3,
) -> FieldOfStudyList:
    """Generate the fields of study for the ground truth dataset.
    :param n_fields_of_study_per_level: the number of fields of study per level.
    :param faker: the faker instance.
    :param n_levels: the number of levels.
    :param min_title_length: the minimum field of study title length (words).
    :param max_title_length: the maximum field of study title length (words).
    :return: a list of the fields of study.
    """
    fields_of_study = []
    next_id = 0
    for level in range(n_levels):
        for _ in range(n_fields_of_study_per_level):
            word_count = random.randint(min_title_length, max_title_length)
            # ids are sequential across all levels.
            fields_of_study.append(FieldOfStudy(next_id, name=faker.sentence(nb_words=word_count), level=level))
            next_id += 1
    return fields_of_study
def make_authors(*, n_authors: int, institutions: InstitutionList, faker: Faker) -> AuthorList:
    """Generate the authors ground truth dataset.
    :param n_authors: the number of authors to generate.
    :param institutions: the institutions to assign authors to (one each, at random).
    :param faker: the faker instance.
    :return: a list of authors.
    """
    return [
        Author(author_id, name=faker.name(), institution=random.choice(institutions))
        for author_id in range(n_authors)
    ]
def make_papers(
    *,
    n_papers: int,
    authors: AuthorList,
    funders: FunderList,
    publishers: PublisherList,
    fields_of_study: List,
    repositories: List[Repository],
    faker: Faker,
    min_title_length: int = 2,
    max_title_length: int = 10,
    min_authors: int = 1,
    max_authors: int = 10,
    min_funders: int = 0,
    max_funders: int = 3,
    min_events: int = 0,
    max_events: int = 100,
    min_fields_of_study: int = 1,
    max_fields_of_study: int = 20,
    min_repos: int = 1,
    max_repos: int = 10,
    min_year: int = 2017,
    max_year: int = 2021,
) -> PaperList:
    """Generate the list of ground truth papers.
    :param n_papers: the number of papers to generate.
    :param authors: the authors list.
    :param funders: the funders list.
    :param publishers: the publishers list.
    :param fields_of_study: the fields of study list.
    :param repositories: the repositories.
    :param faker: the faker instance.
    :param min_title_length: the min paper title length.
    :param max_title_length: the max paper title length.
    :param min_authors: the min number of authors for each paper.
    :param max_authors: the max number of authors for each paper.
    :param min_funders: the min number of funders for each paper.
    :param max_funders: the max number of funders for each paper.
    :param min_events: the min number of events per paper.
    :param max_events: the max number of events per paper.
    :param min_fields_of_study: the min fields of study per paper.
    :param max_fields_of_study: the max fields of study per paper.
    :param min_repos: the min repos per paper when green.
    :param max_repos: the max repos per paper when green.
    :param min_year: the min year.
    :param max_year: the max year.
    :return: the list of papers, sorted oldest to newest.
    """
    papers = []
    for i, _ in enumerate(range(n_papers)):
        # Random title
        n_words_ = random.randint(min_title_length, max_title_length)
        title_ = faker.sentence(nb_words=n_words_)
        # Random date, converted from faker's date to a pendulum date.
        published_date_ = pendulum.from_format(
            str(
                faker.date_between_dates(
                    date_start=pendulum.datetime(min_year, 1, 1), date_end=pendulum.datetime(max_year, 12, 31)
                )
            ),
            "YYYY-MM-DD",
        ).date()
        published_date_ = pendulum.date(year=published_date_.year, month=published_date_.month, day=published_date_.day)
        # Output type
        output_type_ = random.choice(OUTPUT_TYPES)
        # Pick a random list of authors
        n_authors_ = random.randint(min_authors, max_authors)
        authors_ = random.sample(authors, n_authors_)
        # Random funder
        n_funders_ = random.randint(min_funders, max_funders)
        if n_funders_ > 0:
            funders_ = random.sample(funders, n_funders_)
        else:
            funders_ = []
        # Random publisher; the journal and DOI prefix follow from it.
        publisher_ = random.choice(publishers)
        # Journal
        journal_ = random.choice(publisher_.journals)
        # Random DOI
        doi_ = make_doi(publisher_.doi_prefix)
        # Random events, dated within the last two years.
        n_events_ = random.randint(min_events, max_events)
        events_ = []
        today = datetime.now()
        today_ts = int(today.timestamp())
        start_date = datetime(today.year - 2, today.month, today.day)
        start_ts = int(start_date.timestamp())
        for _ in range(n_events_):
            event_date_ = date_between_dates(start_ts=start_ts, end_ts=today_ts)
            events_.append(Event(source=random.choice(EVENT_TYPES), event_date=event_date_))
        # Fields of study
        n_fos_ = random.randint(min_fields_of_study, max_fields_of_study)
        # NOTE(review): this slice takes the first 199 entries regardless of
        # level, which only corresponds to "level 0" for large per-level
        # counts; the extend below may also add duplicates — confirm intent.
        level_0_index = 199
        fields_of_study_ = [random.choice(fields_of_study[:level_0_index])]
        fields_of_study_.extend(random.sample(fields_of_study, n_fos_))
        # Open access status
        publisher_is_free_to_read_ = True
        if journal_.license is not None:
            # Gold
            license_ = journal_.license
        else:
            license_ = random.choice(LICENSES)
            if license_ is None:
                # Bronze: free to read on publisher website but no license
                publisher_is_free_to_read_ = bool(random.getrandbits(1))
        # Hybrid: license=True
        # Green: in a 'repository'
        paper_repos = []
        if bool(random.getrandbits(1)):
            # There can be multiple authors from the same institution so the repos have to be sampled from a set
            n_repos_ = random.randint(min_repos, max_repos)
            repos = set()
            for repo in [author.institution.repository for author in authors_] + repositories:
                repos.add(repo)
            # NOTE(review): random.sample() on a set is deprecated since
            # Python 3.9 and a TypeError on 3.11+; n_repos_ can also exceed
            # len(repos), raising ValueError — consider
            # random.sample(list(repos), min(n_repos_, len(repos))).
            paper_repos += random.sample(repos, n_repos_)
        # Make paper
        paper = Paper(
            i,
            type="journal-article",
            doi=doi_,
            title=title_,
            published_date=published_date_,
            output_type=output_type_,
            authors=authors_,
            funders=funders_,
            journal=journal_,
            publisher=publisher_,
            events=events_,
            fields_of_study=fields_of_study_,
            publisher_license=license_,
            publisher_is_free_to_read=publisher_is_free_to_read_,
            repositories=paper_repos,
            in_scihub=bool(random.getrandbits(1)),
            in_unpaywall=True,
        )
        papers.append(paper)
    # Make a subset of papers not in Unpaywall
    not_in_unpaywall = random.sample([paper for paper in papers], 3)
    for paper in not_in_unpaywall:
        paper.in_unpaywall = False
    # Create paper citations
    # Sort from oldest to newest
    papers.sort(key=lambda p: p.published_date)
    for i, paper in enumerate(papers):
        # Create cited_by: each paper is cited only by later (newer) papers.
        n_papers_forwards = len(papers) - i
        n_cited_by = random.randint(0, int(n_papers_forwards / 2))
        paper.cited_by = random.sample(papers[i + 1 :], n_cited_by)
    return papers
def make_open_citations(dataset: ObservatoryDataset) -> List[Dict]:
    """Generate an Open Citations table from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: table rows, one per (citing, cited) pair.
    """

    records = []

    def make_oc_timespan(cited_date: pendulum.Date, citing_date: pendulum.Date):
        # ISO-8601 duration between the two publication dates, e.g. "P1Y2M" or "P0M".
        ts = "P"
        delta = citing_date - cited_date
        years = delta.in_years()
        months = delta.in_months() - years * 12

        if years > 0:
            ts += f"{years}Y"

        if months > 0 or years == 0:
            ts += f"{months}M"

        return ts

    def is_author_sc(cited_: Paper, citing_: Paper):
        # Author self-citation: the two papers share at least one author name.
        for cited_author in cited_.authors:
            for citing_author in citing_.authors:
                if cited_author.name == citing_author.name:
                    return True
        return False

    def is_journal_sc(cited_: Paper, citing_: Paper):
        # Journal self-citation: both papers appeared in the same journal.
        return cited_.journal.name == citing_.journal.name

    for cited in dataset.papers:
        for citing in cited.cited_by:
            records.append(
                {
                    "oci": "",
                    "citing": citing.doi,
                    "cited": cited.doi,
                    "creation": citing.published_date.strftime("%Y-%m"),
                    "timespan": make_oc_timespan(cited.published_date, citing.published_date),
                    # BUG FIX: journal_sc/author_sc were previously swapped
                    # (journal_sc was assigned is_author_sc and vice versa).
                    # Each flag now uses its matching helper.
                    "journal_sc": is_journal_sc(cited, citing),
                    "author_sc": is_author_sc(cited, citing),
                }
            )

    return records
def make_crossref_events(dataset: ObservatoryDataset) -> List[Dict]:
    """Generate the Crossref Events table from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: table rows, one per event, each with a fresh random UUID id.
    """

    rows = []
    for paper in dataset.papers:
        # The DOI URL is shared by all of this paper's events.
        doi_url = f"https://doi.org/{paper.doi}"
        for event in paper.events:
            when = f"{event.event_date.to_datetime_string()} UTC"
            rows.append(
                {
                    "obj_id": doi_url,
                    "timestamp": when,
                    "occurred_at": when,
                    "source_id": event.source,
                    "id": str(uuid.uuid4()),
                }
            )

    return rows
def make_scihub(dataset: ObservatoryDataset) -> List[Dict]:
    """Generate the SciHub table from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: table rows.
    """

    # A paper appears in the SciHub table exactly when its access type is black.
    return [{"doi": paper.doi} for paper in dataset.papers if paper.access_type.black]
def make_unpaywall(dataset: ObservatoryDataset) -> List[Dict]:
    """Generate the Unpaywall table from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: table rows.
    """

    # Maps the model's output type to candidate Unpaywall genres.
    genre_lookup = {
        "journal_articles": ["journal-article"],
        "book_sections": ["book-section", "book-part", "book-chapter"],
        "authored_books": ["book", "monograph"],
        "edited_volumes": ["edited-book"],
        "reports": ["report"],
        "datasets": ["dataset"],
        "proceedings_article": ["proceedings-article"],
        "other_outputs": ["other-outputs"],
    }

    rows = []
    for paper in dataset.papers:
        # In our simulated model, a small number of papers can be closed and not in Unpaywall
        if not paper.in_unpaywall:
            continue

        # A journal counts as in DOAJ when it carries a license.
        journal_is_in_doaj = paper.journal.license is not None

        # Publisher OA location comes first, so it is also the best location.
        oa_locations = []
        if paper.publisher_is_free_to_read:
            oa_locations.append({"host_type": "publisher", "license": paper.publisher_license, "url": ""})

        # One repository OA location per repository.
        for repo in paper.repositories:
            pmh_id = f"oai:{repo.pmh_domain}:{str(uuid.uuid4())}" if repo.pmh_domain is not None else None
            oa_locations.append(
                {
                    "host_type": "repository",
                    "endpoint_id": repo.endpoint_id,
                    "url": f"https://{repo.url_domain}/{urllib.parse.quote(paper.title)}.pdf",
                    "pmh_id": pmh_id,
                    "repository_institution": repo.name,
                }
            )

        is_oa = len(oa_locations) > 0
        best_oa_location = oa_locations[0] if is_oa else None

        rows.append(
            {
                "doi": paper.doi,
                "year": paper.published_date.year,
                "genre": random.choice(genre_lookup[paper.output_type]),
                "publisher": paper.publisher.name,
                "journal_name": paper.journal.name,
                "journal_issn_l": paper.journal.id,
                "is_oa": is_oa,
                "journal_is_in_doaj": journal_is_in_doaj,
                "best_oa_location": best_oa_location,
                "oa_locations": oa_locations,
            }
        )

    return rows
def make_openalex_dataset(dataset: ObservatoryDataset) -> List[dict]:
    """Generate the OpenAlex table data from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: OpenAlex table data.
    """

    def _concept(fos):
        # One OpenAlex concept per field of study.
        return {"id": str(fos.id), "display_name": fos.name, "level": fos.level}

    def _authorship(author):
        # One authorship per author, each with a single institution.
        inst = author.institution
        return {
            "author": {
                "id": str(author.id),
                "display_name": author.name,
            },
            "institutions": [
                {
                    "id": str(inst.id),
                    "ror": inst.ror_id,
                    "display_name": inst.name,
                    "country_code": inst.country_code,
                    "type": inst.types,
                }
            ],
        }

    rows = []
    for paper in dataset.papers:
        rows.append(
            {
                "id": str(paper.id),
                "doi": f"https://doi.org/{paper.doi}",
                "cited_by_count": len(paper.cited_by),
                "concepts": [_concept(fos) for fos in paper.fields_of_study],
                "authorships": [_authorship(author) for author in paper.authors],
            }
        )
    return rows
def make_crossref_fundref(dataset: ObservatoryDataset) -> List[Dict]:
    """Generate the Crossref Fundref table from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: table rows, one per funder.
    """

    return [
        {
            "pre_label": funder.name,
            "funder": f"http://dx.doi.org/{funder.doi}",
            "country_code": funder.country_code,
            "region": funder.region,
            "funding_body_type": funder.funding_body_type,
            "funding_body_sub_type": funder.funding_body_subtype,
        }
        for funder in dataset.funders
    ]
def make_crossref_metadata(dataset: ObservatoryDataset) -> List[Dict]:
    """Generate the Crossref Metadata table from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: table rows, one per paper.
    """

    rows = []
    for paper in dataset.papers:
        pub_date = paper.published_date

        # Funder sub-records; award and assertion source are not simulated.
        funders = [
            {"name": funder.name, "DOI": funder.doi, "award": None, "doi_asserted_by": None}
            for funder in paper.funders
        ]

        rows.append(
            {
                "type": paper.type,
                "title": [paper.title],
                "DOI": paper.doi,
                "is_referenced_by_count": len(paper.cited_by),
                "issued": {"date_parts": [pub_date.year, pub_date.month, pub_date.day]},
                "funder": funders,
                "publisher": paper.publisher.name,
            }
        )
    return rows
def bq_load_observatory_dataset(
    observatory_dataset: ObservatoryDataset,
    repository: List[Dict],
    bucket_name: str,
    dataset_id_all: str,
    dataset_id_settings: str,
    snapshot_date: DateTime,
    project_id: str,
):
    """Load the fake Observatory Dataset in BigQuery.

    :param observatory_dataset: the Observatory Dataset.
    :param repository: the repository table data.
    :param bucket_name: the Google Cloud Storage bucket name.
    :param dataset_id_all: the dataset id for all data tables.
    :param dataset_id_settings: the dataset id for settings tables.
    :param snapshot_date: the release date for the observatory dataset.
    :param project_id: api project id.
    :return: None.
    """

    # Generate source datasets
    open_citations = make_open_citations(observatory_dataset)
    crossref_events = make_crossref_events(observatory_dataset)
    openalex: List[dict] = make_openalex_dataset(observatory_dataset)
    crossref_fundref = make_crossref_fundref(observatory_dataset)
    unpaywall = make_unpaywall(observatory_dataset)
    crossref_metadata = make_crossref_metadata(observatory_dataset)
    scihub = make_scihub(observatory_dataset)

    # Load fake ROR and settings datasets from JSONL fixture files.
    test_doi_path = test_fixtures_folder("doi")
    ror = load_jsonl(os.path.join(test_doi_path, "ror.jsonl"))
    country = load_jsonl(os.path.join(test_doi_path, "country.jsonl"))
    groupings = load_jsonl(os.path.join(test_doi_path, "groupings.jsonl"))
    schema_path = schema_folder()

    # NOTE(review): the isolated filesystem handle `t` is unused; the context
    # manager is presumably only needed so temporary upload files do not leak
    # into the working directory — confirm against bq_load_tables.
    with CliRunner().isolated_filesystem() as t:
        # Table(name, sharded-flag, dataset id, rows, schema) — the second
        # positional argument presumably marks the table as date-sharded;
        # TODO confirm against the Table definition.
        tables = [
            Table(
                "repository",
                False,
                dataset_id_settings,
                repository,
                bq_find_schema(path=os.path.join(schema_path, "doi"), table_name="repository"),
            ),
            Table(
                "crossref_events",
                False,
                dataset_id_all,
                crossref_events,
                bq_find_schema(
                    path=os.path.join(schema_path, "crossref_events"),
                    table_name="crossref_events",
                ),
            ),
            Table(
                "crossref_metadata",
                True,
                dataset_id_all,
                crossref_metadata,
                bq_find_schema(
                    path=os.path.join(schema_path, "crossref_metadata"),
                    table_name="crossref_metadata",
                    release_date=snapshot_date,
                ),
            ),
            Table(
                "crossref_fundref",
                True,
                dataset_id_all,
                crossref_fundref,
                bq_find_schema(
                    path=os.path.join(schema_path, "crossref_fundref"),
                    table_name="crossref_fundref",
                    release_date=snapshot_date,
                ),
            ),
            Table(
                "open_citations",
                True,
                dataset_id_all,
                open_citations,
                bq_find_schema(
                    path=os.path.join(schema_path, "open_citations"),
                    table_name="open_citations",
                    release_date=snapshot_date,
                ),
            ),
            Table(
                "scihub",
                True,
                dataset_id_all,
                scihub,
                bq_find_schema(
                    path=os.path.join(schema_path, "scihub"), release_date=snapshot_date, table_name="scihub"
                ),
            ),
            Table(
                "unpaywall",
                False,
                dataset_id_all,
                unpaywall,
                bq_find_schema(path=os.path.join(schema_path, "unpaywall"), table_name="unpaywall"),
            ),
            Table(
                "ror",
                True,
                dataset_id_all,
                ror,
                bq_find_schema(path=os.path.join(schema_path, "ror"), table_name="ror", release_date=snapshot_date),
            ),
            Table(
                "country",
                False,
                dataset_id_settings,
                country,
                bq_find_schema(path=os.path.join(schema_path, "doi"), table_name="country"),
            ),
            Table(
                "groupings",
                False,
                dataset_id_settings,
                groupings,
                bq_find_schema(path=os.path.join(schema_path, "doi"), table_name="groupings"),
            ),
            # The orcid table is loaded empty: no ORCID data is simulated.
            Table(
                "orcid",
                False,
                dataset_id_all,
                [],
                bq_find_schema(path=os.path.join(schema_path, "orcid"), table_name="orcid", release_date=snapshot_date),
            ),
            Table(
                "works",
                False,
                dataset_id_all,
                openalex,
                bq_find_schema(path=os.path.join(schema_path, "openalex"), table_name="works"),
            ),
        ]

        bq_load_tables(
            project_id=project_id,
            tables=tables,
            bucket_name=bucket_name,
            snapshot_date=snapshot_date,
        )
def aggregate_events(events: List[Event]) -> Tuple[List[Dict], List[Dict], List[Dict]]:
    """Aggregate events by source into total events for all time, monthly and yearly counts.

    :param events: list of events.
    :return: list of events for each source aggregated by all time, months and years.
    """

    lookup_totals = dict()
    lookup_months = dict()
    lookup_years = dict()
    for event in events:
        # IMPROVEMENT: the three repeated if/else counter blocks are replaced
        # with the dict.get counting idiom — behavior is unchanged.

        # Total events per source
        lookup_totals[event.source] = lookup_totals.get(event.source, 0) + 1

        # Events by (source, month), month formatted as "YYYY-MM"
        month_key = (event.source, event.event_date.strftime("%Y-%m"))
        lookup_months[month_key] = lookup_months.get(month_key, 0) + 1

        # Events by (source, year)
        year_key = (event.source, event.event_date.year)
        lookup_years[year_key] = lookup_years.get(year_key, 0) + 1

    total = [{"source": source, "count": count} for source, count in lookup_totals.items()]
    months = [{"source": source, "month": month, "count": count} for (source, month), count in lookup_months.items()]
    years = [{"source": source, "year": year, "count": count} for (source, year), count in lookup_years.items()]

    # Sort all three lists in place so results are deterministic.
    sort_events(total, months, years)
    return total, months, years
def sort_events(events: List[Dict], months: List[Dict], years: List[Dict]):
    """Sort events in-place.

    :param events: events all time.
    :param months: events by month.
    :param years: events by year.
    :return: None.
    """

    # The month/year keys deliberately concatenate the fields into a single
    # string, preserving the original string-based ordering.
    events.sort(key=lambda row: row["source"])
    months.sort(key=lambda row: "{}{}{}".format(row["month"], row["source"], row["count"]))
    years.sort(key=lambda row: "{}{}{}".format(row["year"], row["source"], row["count"]))
def make_doi_table(dataset: ObservatoryDataset) -> List[Dict]:
    """Generate the DOI table from an ObservatoryDataset instance.

    :param dataset: the Observatory Dataset.
    :return: table rows, sorted by DOI.
    """

    rows = []
    for paper in dataset.papers:
        # DOIs are upper-cased in the DOI table.
        doi = paper.doi.upper()
        pub_date = paper.published_date

        crossref = {
            "type": paper.type,
            "title": paper.title,
            "published_year": pub_date.year,
            "published_month": pub_date.month,
            "published_year_month": f"{pub_date.year}-{pub_date.month}",
            "funder": [{"name": funder.name, "DOI": funder.doi} for funder in paper.funders],
        }

        # Affiliations: institutions, countries, regions, subregions, funders,
        # journals and publishers; groupings and authors are not simulated.
        affiliations = {
            "doi": doi,
            "institutions": make_doi_institutions(paper.authors),
            "countries": make_doi_countries(paper.authors),
            "subregions": make_doi_subregions(paper.authors),
            "regions": make_doi_regions(paper.authors),
            "groupings": [],
            "funders": make_doi_funders(paper.funders),
            "authors": [],
            "journals": make_doi_journals(paper.in_unpaywall, paper.journal),
            "publishers": make_doi_publishers(paper.publisher),
        }

        rows.append(
            {
                "doi": doi,
                "crossref": crossref,
                "unpaywall": {},
                "unpaywall_history": {},
                "open_citations": {},
                "events": make_doi_events(doi, paper.events),
                "affiliations": affiliations,
            }
        )

    # Sort to match with sorted query results
    rows.sort(key=lambda row: row["doi"])
    return rows
def make_doi_events(doi: str, event_list: EventsList) -> Dict:
    """Make the events for a DOI table row.

    :param doi: the DOI.
    :param event_list: a list of events for the paper.
    :return: the events for the DOI table, or None when there are no events.
    """

    totals, by_month, by_year = aggregate_events(event_list)

    # When there are no events at all the field is None rather than a record
    # with empty lists.
    if not totals:
        return None

    return {
        "doi": doi,
        "events": totals,
        "months": by_month,
        "years": by_year,
    }
def make_doi_funders(funder_list: FunderList) -> List[Dict]:
    """Make a DOI table row funders affiliation list.

    :param funder_list: the funders list.
    :return: the funders affiliation list, de-duplicated by funder DOI and
        sorted by identifier.
    """

    # Keyed by funder DOI so repeated funders collapse to a single entry.
    by_doi = {}
    for funder in funder_list:
        by_doi[funder.doi] = {
            "identifier": funder.name,
            "name": funder.name,
            "doi": funder.doi,
            "types": ["Funder"],
            "country": None,
            "country_code": funder.country_code,
            "country_code_2": None,
            "region": funder.region,
            "subregion": None,
            "coordinates": None,
            "funding_body_type": funder.funding_body_type,
            "funding_body_subtype": funder.funding_body_subtype,
            "members": [],
        }

    result = list(by_doi.values())
    result.sort(key=lambda row: row["identifier"])
    return result
def make_doi_journals(in_unpaywall: bool, journal: Journal) -> List[Dict]:
    """Make the journal affiliation list for a DOI table row.

    :param in_unpaywall: whether the work is in Unpaywall or not. At the moment the journal IDs come from Unpaywall,
    and if the work is not in Unpaywall then the journal id and name will be null.
    :param journal: the paper's journal.
    :return: the journal affiliation list.
    """

    # Journal identity is only known via Unpaywall; otherwise both are null.
    identifier = journal.id if in_unpaywall else None
    name = journal.name if in_unpaywall else None

    return [
        {
            "identifier": identifier,
            "types": ["Journal"],
            "name": name,
            "country": None,
            "country_code": None,
            "country_code_2": None,
            "region": None,
            "subregion": None,
            "coordinates": None,
            "members": [],
        }
    ]
def to_affiliations_list(dict_: Dict):
    """Convert affiliation dict into a list.

    :param dict_: affiliation dict.
    :return: affiliation list, sorted by identifier.
    """

    rows = []
    for value in dict_.values():
        # Members are collected as a set during aggregation; emit a sorted list.
        value["members"] = sorted(value["members"])
        if "count" in value:
            # Count is the number of distinct ROR ids; drop the helper key.
            value["count"] = len(value["rors"])
            value.pop("rors", None)
        rows.append(value)
    rows.sort(key=lambda row: row["identifier"])
    return rows
def make_doi_publishers(publisher: Publisher) -> List[Dict]:
    """Make the publisher affiliations for a DOI table row.

    :param publisher: the paper's publisher.
    :return: the publisher affiliations list.
    """

    # The publisher name doubles as its identifier; location fields are
    # unknown for publishers.
    affiliation = {
        "identifier": publisher.name,
        "types": ["Publisher"],
        "name": publisher.name,
        "country": None,
        "country_code": None,
        "country_code_2": None,
        "region": None,
        "subregion": None,
        "coordinates": None,
        "members": [],
    }
    return [affiliation]
def make_doi_institutions(author_list: AuthorList) -> List[Dict]:
    """Make the institution affiliations for a DOI table row.

    :param author_list: the paper's author list.
    :return: the institution affiliation list.
    """

    institutions = {}
    for author in author_list:
        inst = author.institution
        if inst.ror_id in institutions:
            # Multiple authors can share an institution; keep the first entry.
            continue
        institutions[inst.ror_id] = {
            "identifier": inst.ror_id,
            "types": [inst.types],
            "name": inst.name,
            "country": inst.country,
            "country_code": inst.country_code,
            "country_code_2": inst.country_code_2,
            "region": inst.region,
            "subregion": inst.subregion,
            "coordinates": inst.coordinates,
            "members": [],
        }

    return to_affiliations_list(institutions)
def make_doi_countries(author_list: AuthorList):
    """Make the countries affiliations for a DOI table row.

    :param author_list: the paper's author list.
    :return: the countries affiliation list.
    """

    countries = {}
    for author in author_list:
        inst = author.institution
        entry = countries.get(inst.country)
        if entry is None:
            countries[inst.country] = {
                "identifier": inst.country_code,
                "name": inst.country,
                "types": ["Country"],
                "country": inst.country,
                "country_code": inst.country_code,
                "country_code_2": inst.country_code_2,
                "region": inst.region,
                "subregion": inst.subregion,
                "coordinates": None,
                # count is recomputed from rors in to_affiliations_list.
                "count": 0,
                "members": {inst.ror_id},
                "rors": {inst.ror_id},
            }
        else:
            # Members of a country are the ROR ids of its institutions.
            entry["members"].add(inst.ror_id)
            entry["rors"].add(inst.ror_id)

    return to_affiliations_list(countries)
def make_doi_regions(author_list: AuthorList):
    """Make the regions affiliations for a DOI table row.

    :param author_list: the paper's author list.
    :return: the regions affiliation list.
    """

    regions = {}
    for author in author_list:
        inst = author.institution
        entry = regions.get(inst.region)
        if entry is None:
            regions[inst.region] = {
                "identifier": inst.region,
                "name": inst.region,
                "types": ["Region"],
                "country": None,
                "country_code": None,
                "country_code_2": None,
                "region": inst.region,
                "subregion": None,
                "coordinates": None,
                # count is recomputed from rors in to_affiliations_list.
                "count": 0,
                # Members of a region are its subregions.
                "members": {inst.subregion},
                "rors": {inst.ror_id},
            }
        else:
            entry["members"].add(inst.subregion)
            entry["rors"].add(inst.ror_id)

    return to_affiliations_list(regions)
def make_doi_subregions(author_list: AuthorList):
    """Make the subregions affiliations for a DOI table row.

    :param author_list: the paper's author list.
    :return: the subregions affiliation list.
    """

    subregions = {}
    for author in author_list:
        inst = author.institution
        entry = subregions.get(inst.subregion)
        if entry is None:
            subregions[inst.subregion] = {
                "identifier": inst.subregion,
                "name": inst.subregion,
                "types": ["Subregion"],
                "country": None,
                "country_code": None,
                "country_code_2": None,
                "region": inst.region,
                "subregion": None,
                "coordinates": None,
                # count is recomputed from rors in to_affiliations_list.
                "count": 0,
                # Members of a subregion are its country codes.
                "members": {inst.country_code},
                "rors": {inst.ror_id},
            }
        else:
            entry["members"].add(inst.country_code)
            entry["rors"].add(inst.ror_id)

    return to_affiliations_list(subregions)
def calc_percent(value: float, total: float) -> "float | None":
    """Calculate a percentage and round to 2dp.

    :param value: the value.
    :param total: the total.
    :return: the percentage rounded to two decimal places, or None when the
        percentage is undefined (NaN inputs or a zero total).
    """

    # FIX: the annotation previously claimed -> float, but None is returned
    # for undefined results; callers must handle None.
    if math.isnan(value) or math.isnan(total) or total == 0:
        return None

    return round(value / total * 100, 2)
def make_aggregate_table(agg: str, dataset: ObservatoryDataset) -> List[Dict]:
    """Generate an aggregate table from an ObservatoryDataset instance.

    Builds one row per (aggregation id, publication year) with output counts,
    access-type and COKI open-access breakdowns, plus a per-row repository list.

    :param agg: the aggregation type, e.g. country, institution.
    :param dataset: the Observatory Dataset.
    :return: table rows.
    """

    data = []
    repos = []
    for paper in dataset.papers:
        for author in paper.authors:
            inst = author.institution
            at = paper.access_type
            oa_coki = paper.oa_coki

            # Choose id and name
            # NOTE: `id` shadows the builtin; kept for fidelity with the schema.
            if agg == "country":
                id = inst.country_code
                name = inst.country
            elif agg == "institution":
                id = inst.ror_id
                name = inst.name
            else:
                raise ValueError(f"make_aggregate_table: agg type unknown: {agg}")

            # Add repository info
            # Only works present in Unpaywall contribute repository rows.
            for repo in paper.repositories:
                if paper.in_unpaywall:
                    repos.append(
                        {
                            "paper_id": paper.id,
                            "agg_id": id,
                            "time_period": paper.published_date.year,
                            "name": repo.name,
                            "name_lower": repo.name.lower(),
                            "endpoint_id": repo.endpoint_id,
                            "pmh_domain": repo.pmh_domain,
                            "url_domain": repo.url_domain,
                            "category": repo.category,
                            "ror_id": repo.ror_id,
                            "total_outputs": 1,
                        }
                    )

            # One raw row per (paper, author); duplicates from co-authors at the
            # same aggregation id are dropped below before summing.
            data.append(
                {
                    "doi": paper.doi,
                    "id": id,
                    "time_period": paper.published_date.year,
                    "name": name,
                    "country": inst.country,
                    "country_code": inst.country_code,
                    "country_code_2": inst.country_code_2,
                    "region": inst.region,
                    "subregion": inst.subregion,
                    "coordinates": None,
                    "total_outputs": 1,
                    # Access Types
                    "oa": at.oa,
                    "green": at.green,
                    "gold": at.gold,
                    "gold_doaj": at.gold_doaj,
                    "hybrid": at.hybrid,
                    "bronze": at.bronze,
                    "green_only": at.green_only,
                    "black": at.black,
                    # COKI Open Access Types
                    "open": oa_coki.open,
                    "closed": oa_coki.closed,
                    "publisher": oa_coki.publisher,
                    "other_platform": oa_coki.other_platform,
                    "publisher_only": oa_coki.publisher_only,
                    "both": oa_coki.both,
                    "other_platform_only": oa_coki.other_platform_only,
                    # Publisher Open categories
                    "publisher_categories_oa_journal": oa_coki.publisher_categories.oa_journal,
                    "publisher_categories_hybrid": oa_coki.publisher_categories.hybrid,
                    "publisher_categories_no_guarantees": oa_coki.publisher_categories.no_guarantees,
                    # Other Platform categories
                    "publisher_categories_preprint": oa_coki.other_platform_categories.preprint,
                    "publisher_categories_domain": oa_coki.other_platform_categories.domain,
                    "publisher_categories_institution": oa_coki.other_platform_categories.institution,
                    "publisher_categories_public": oa_coki.other_platform_categories.public,
                    "publisher_categories_aggregator": oa_coki.other_platform_categories.aggregator,
                    "publisher_categories_other_internet": oa_coki.other_platform_categories.other_internet,
                    "publisher_categories_unknown": oa_coki.other_platform_categories.unknown,
                }
            )

    # Repos
    # Deduplicate (same paper counted once per agg id) then sum outputs per
    # (agg_id, name, time_period).
    df_repos = pd.DataFrame(repos)
    df_repos.drop_duplicates(inplace=True)
    # NOTE(review): this reassigns the `agg` parameter to the pandas
    # aggregation spec; the original aggregation-type string is no longer
    # needed past this point, but the reuse is confusing.
    agg = {
        "agg_id": "first",
        "time_period": "first",
        "name": "first",
        "name_lower": "first",
        "endpoint_id": "first",
        "pmh_domain": "first",
        "url_domain": "first",
        "category": "first",
        "ror_id": "first",
        "total_outputs": "sum",
    }
    df_repos = df_repos.groupby(["agg_id", "name", "time_period"], as_index=False).agg(agg)
    df_repos.sort_values(
        by=["agg_id", "time_period", "total_outputs", "name_lower"], ascending=[True, False, False, True], inplace=True
    )

    # Aggregate info
    df = pd.DataFrame(data)
    df.drop_duplicates(inplace=True)
    agg = {
        "id": "first",
        "time_period": "first",
        "name": "first",
        "country": "first",
        "country_code": "first",
        "country_code_2": "first",
        "region": "first",
        "subregion": "first",
        "coordinates": "first",
        "total_outputs": "sum",
        # Access types
        "oa": "sum",
        "green": "sum",
        "gold": "sum",
        "gold_doaj": "sum",
        "hybrid": "sum",
        "bronze": "sum",
        "green_only": "sum",
        "black": "sum",
        # COKI OA types
        "open": "sum",
        "closed": "sum",
        "publisher": "sum",
        "other_platform": "sum",
        "publisher_only": "sum",
        "both": "sum",
        "other_platform_only": "sum",
        # Publisher Open categories
        "publisher_categories_oa_journal": "sum",
        "publisher_categories_hybrid": "sum",
        "publisher_categories_no_guarantees": "sum",
        # Other Platform categories
        "publisher_categories_preprint": "sum",
        "publisher_categories_domain": "sum",
        "publisher_categories_institution": "sum",
        "publisher_categories_public": "sum",
        "publisher_categories_aggregator": "sum",
        "publisher_categories_other_internet": "sum",
        "publisher_categories_unknown": "sum",
    }
    df = df.groupby(["id", "time_period"], as_index=False).agg(agg).sort_values(by=["id", "time_period"])

    records = []
    for i, row in df.iterrows():
        total_outputs = row["total_outputs"]

        # Access types
        oa = row["oa"]
        green = row["green"]
        gold = row["gold"]
        gold_doaj = row["gold_doaj"]
        hybrid = row["hybrid"]
        bronze = row["bronze"]
        green_only = row["green_only"]
        black = row["black"]

        # COKI access types
        # NOTE: `open` shadows the builtin open() for the rest of this loop body.
        open = row["open"]
        closed = row["closed"]
        publisher = row["publisher"]
        other_platform = row["other_platform"]
        publisher_only = row["publisher_only"]
        both = row["both"]
        other_platform_only = row["other_platform_only"]

        # Publisher Open
        publisher_categories_oa_journal = row["publisher_categories_oa_journal"]
        publisher_categories_hybrid = row["publisher_categories_hybrid"]
        publisher_categories_no_guarantees = row["publisher_categories_no_guarantees"]

        # Other Platform categories
        publisher_categories_preprint = row["publisher_categories_preprint"]
        publisher_categories_domain = row["publisher_categories_domain"]
        publisher_categories_institution = row["publisher_categories_institution"]
        publisher_categories_public = row["publisher_categories_public"]
        publisher_categories_aggregator = row["publisher_categories_aggregator"]
        publisher_categories_other_internet = row["publisher_categories_other_internet"]
        publisher_categories_unknown = row["publisher_categories_unknown"]

        # Get repositories for year and id
        id = row["id"]
        time_period = row["time_period"]
        df_repos_subset = df_repos[(df_repos["agg_id"] == id) & (df_repos["time_period"] == time_period)]
        repositories = []
        for j, repo_row in df_repos_subset.iterrows():
            ror_id = repo_row["ror_id"]
            # home_repo: the repository belongs to the aggregation entity itself
            # (only possible for institution aggregation, where id is a ROR id).
            home_repo = id == ror_id
            repositories.append(
                {
                    "id": repo_row["name"],
                    "total_outputs": repo_row["total_outputs"],
                    "category": repo_row["category"],
                    "home_repo": home_repo,
                }
            )

        # Category percentages are relative to their parent total (publisher /
        # other_platform), not to total_outputs.
        # fmt: off
        records.append(
            {
                "id": id,
                "time_period": row["time_period"],
                "name": row["name"],
                "country": row["country"],
                "country_code": row["country_code"],
                "country_code_2": row["country_code_2"],
                "region": row["region"],
                "subregion": row["subregion"],
                "coordinates": row["coordinates"],
                "total_outputs": total_outputs,
                "coki": {
                    "oa": {
                        "color": {
                            "oa": {"total_outputs": oa, "percent": calc_percent(oa, total_outputs)},
                            "green": {"total_outputs": green, "percent": calc_percent(green, total_outputs)},
                            "gold": {"total_outputs": gold, "percent": calc_percent(gold, total_outputs)},
                            "gold_doaj": {"total_outputs": gold_doaj, "percent": calc_percent(gold_doaj, total_outputs)},
                            "hybrid": {"total_outputs": hybrid, "percent": calc_percent(hybrid, total_outputs)},
                            "bronze": {"total_outputs": bronze, "percent": calc_percent(bronze, total_outputs)},
                            "green_only": {"total_outputs": green_only, "percent": calc_percent(green_only, total_outputs)},
                            "black": {"total_outputs": black, "percent": calc_percent(black, total_outputs)},
                        },
                        "coki": {
                            "open": {"total": open, "percent": calc_percent(open, total_outputs)},
                            "closed": {"total": closed, "percent": calc_percent(closed, total_outputs)},
                            "publisher": {"total": publisher, "percent": calc_percent(publisher, total_outputs)},
                            "other_platform": {"total": other_platform, "percent": calc_percent(other_platform, total_outputs)},
                            "publisher_only": {"total": publisher_only, "percent": calc_percent(publisher_only, total_outputs)},
                            "both": {"total": both, "percent": calc_percent(both, total_outputs)},
                            "other_platform_only": {"total": other_platform_only, "percent": calc_percent(other_platform_only, total_outputs)},
                            "publisher_categories": {
                                "oa_journal": {"total": publisher_categories_oa_journal, "percent": calc_percent(publisher_categories_oa_journal, publisher)},
                                "hybrid": {"total": publisher_categories_hybrid, "percent": calc_percent(publisher_categories_hybrid, publisher)},
                                "no_guarantees": {"total": publisher_categories_no_guarantees, "percent": calc_percent(publisher_categories_no_guarantees, publisher)}
                            },
                            "other_platform_categories": {
                                "preprint": {"total": publisher_categories_preprint, "percent": calc_percent(publisher_categories_preprint, other_platform)},
                                "domain": {"total": publisher_categories_domain, "percent": calc_percent(publisher_categories_domain, other_platform)},
                                "institution": {"total": publisher_categories_institution, "percent": calc_percent(publisher_categories_institution, other_platform)},
                                "public": {"total": publisher_categories_public, "percent": calc_percent(publisher_categories_public, other_platform)},
                                "aggregator": {"total": publisher_categories_aggregator, "percent": calc_percent(publisher_categories_aggregator, other_platform)},
                                "other_internet": {"total": publisher_categories_other_internet, "percent": calc_percent(publisher_categories_other_internet, other_platform)},
                                "unknown": {"total": publisher_categories_unknown, "percent": calc_percent(publisher_categories_unknown, other_platform)},
                            },
                        }
                    },
                    "repositories": repositories
                },
                "citations": {},
                "output_types": [],
                "disciplines": {},
                "funders": [],
                "members": [],
                "publishers": [],
                "journals": [],
                "events": [],
            }
        )
        # fmt: on

    return records
|
995,644 | 5d90ee051de923ad1e68e189ac7be34069371cae | ###########################################
# Project: CMSIS DSP Library
# Title: FileSource.py
# Description: Node for creating file source
#
# $Date: 30 July 2021
# $Revision: V1.10.0
#
# Target Processor: Cortex-M and Cortex-A cores
# -------------------------------------------------------------------- */
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############################################
from ..simu import *
# Read a list of float from a file
# and pad with 0 indefinitely when end of file is reached.
# Read a list of float from a file
# and pad with 0 indefinitely when end of file is reached.
class FileSource(GenericSource):
    """Source node that reads one float per line from a text file.

    When the end of file is reached the output buffer is padded with zeros
    indefinitely.
    """

    def __init__(self, outputSize, fifoout, name):
        GenericSource.__init__(self, outputSize, fifoout)
        # Held open for the node's lifetime; closed in __del__.
        self._file = open(name, "r")

    def run(self):
        a = self.getWriteBuffer()
        for i in range(self._outputSize):
            s = self._file.readline()
            if (len(s) > 0):
                a[i] = float(s)
            else:
                # readline() returns "" only at EOF: pad with zeros.
                a[i] = 0
        return (0)

    def __del__(self):
        # BUG FIX: if open() raised in __init__, self._file never existed and
        # the original unconditional self._file.close() raised AttributeError
        # during garbage collection. Guard the attribute before closing.
        f = getattr(self, "_file", None)
        if f is not None:
            f.close()
995,645 | 473c1869e981500c2ddba49732ffbb825153bba8 | # Generated by Django 2.0.5 on 2018-06-25 02:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.5) schema migration for the Laboratorio app.

    Creates the ResultadoDat model and adds a nullable foreign key from
    detalle_examen to it.
    """

    dependencies = [
        ('Paciente', '0002_paciente_user'),
        ('Laboratorio', '0004_auto_20180624_1449'),
    ]

    operations = [
        migrations.CreateModel(
            name='ResultadoDat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Set automatically on row creation only (auto_now_add).
                ('fecha', models.DateField(auto_now_add=True)),
                # Nullable so results without an associated patient are allowed.
                ('paciente', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Paciente.Paciente')),
            ],
        ),
        migrations.AddField(
            model_name='detalle_examen',
            name='resultadodat',
            # Nullable so existing detalle_examen rows remain valid.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Laboratorio.ResultadoDat'),
        ),
    ]
|
995,646 | 1e9de2936ccf0b88753e155361f0886e7ad65176 | import pytest
@pytest.fixture()
def setup():
    """Per-test setup/teardown fixture; the yield separates the two phases."""
    # FIX: pytest.yield_fixture is deprecated and was removed in pytest 6.2;
    # plain pytest.fixture has supported yield-style fixtures since pytest 3.0.
    print("this url is opened")
    yield
    print("Browser is closed")
def test_Signupbyemail(setup):
    # Placeholder test; the `setup` fixture provides per-test setup/teardown.
    print("This is simple Sign Up email function to test")
def test_signupbyfacebook(setup):
    # Placeholder test; the `setup` fixture provides per-test setup/teardown.
    print("This is simple Sign Up facebook function to test")
|
995,647 | 5d8c6856f061f8469941615ad748801277a36a97 | #!/usr/bin/env python
from analyze_logs import LogAnalyzer
# Smoke tests for LogAnalyzer: each reporting query is run and its first
# result compared against known-good values.
analyzer = LogAnalyzer()
# 1. What are the most popular three articles of all time?
r = analyzer.most_popular_articles()
# Expected values below are specific to the sample news database this script
# is run against — TODO confirm they match the loaded fixture.
assert(r[0][1] == 338647)
# 2. Who are the most popular article authors of all time?
r = analyzer.most_popular_authors()
assert(r[0][1] == 507594)
# 3. On which days did more than 1% of requests lead to errors?
r = analyzer.error_days()
assert(r[0][0] == "2016-07-17")
print("Tests passed.")
|
995,648 | 9a41d71bf77b249077829fa9d2d8f64ba6a062fa | from django.contrib import admin
from .models import GalleryModel
# Expose GalleryModel in the Django admin with the default ModelAdmin options.
admin.site.register(GalleryModel)
|
995,649 | 6e463cf73c3d8a4b7ff54b2ed71326267c5377b7 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 25 14:52:15 2020
@author: cxue2
"""
from datetime import datetime
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn.metrics import precision_recall_curve, average_precision_score
from sklearn.metrics import f1_score
from scipy import interp
def get_date():
    """
    get yyyy mm dd

    Returns the current local date formatted as 'YYYY-MM-DD'.
    """
    # IMPROVEMENT: format directly instead of stringifying the full datetime
    # and splitting on the space; output is identical.
    return datetime.now().strftime('%Y-%m-%d')
def get_time():
    """
    get full timestamp

    Filename-safe variant: spaces become underscores, colons are removed.
    """
    raw = str(datetime.now())
    # Single-pass translation: ' ' -> '_', ':' deleted.
    return raw.translate(str.maketrans({" ": "_", ":": None}))
def calc_performance_metrics(scr, lbl):
    """
    Compute binary-classification metrics from scores and labels.

    :param scr: array of predicted scores/probabilities
    :param lbl: array of ground-truth binary labels (0/1)
    :return: dict with confusion matrix and derived metrics
    """
    met = dict()
    # binarize scores at the fixed 0.5 decision threshold
    prd = (scr > .5) * 1
    # sklearn's confusion matrix is laid out as [[TN, FP], [FN, TP]]
    met['mat'] = confusion_matrix(y_true=lbl, y_pred=prd)
    TN, FP, FN, TP = met['mat'].ravel()
    N = TN + TP + FN + FP
    S = (TP + FN) / N   # prevalence of the positive class
    P = (TP + FP) / N   # fraction predicted positive
    sen = TP / (TP + FN)
    spc = TN / (TN + FP)
    met['acc'] = (TN + TP) / N
    met['balanced_acc'] = (sen + spc) / 2
    met['sen'] = sen
    met['spc'] = spc
    met['prc'] = TP / (TP + FP)
    met['f1s'] = 2 * (met['prc'] * met['sen']) / (met['prc'] + met['sen'])
    met['wt_f1s'] = f1_score(lbl, prd, average='weighted')
    # Matthews correlation coefficient written in terms of N, S, P
    met['mcc'] = (TP / N - S * P) / np.sqrt(P * S * (1 - S) * (1 - P))
    try:
        met['auc'] = roc_auc_score(y_true=lbl, y_score=scr)
    except ValueError:
        # roc_auc_score raises ValueError when only one class is present;
        # previously a bare `except:` swallowed every other error too.
        met['auc'] = np.nan
    return met
def show_performance_metrics(met):
    """Pretty-print the metrics dict produced by calc_performance_metrics."""
    mat_repr = np.array_repr(met['mat']).replace('\n', '')
    print('\tmat: {}'.format(mat_repr))
    # Print the scalar metrics in the same fixed order as before.
    for key in ('acc', 'sen', 'spc', 'prc', 'f1s', 'mcc', 'auc'):
        print('\t{}: {}'.format(key, met[key]))
def get_roc_info(lst_lbl, lst_scr):
    """
    Aggregate ROC curves over several runs (e.g. CV folds).

    Each (lbl, scr) pair yields one ROC curve; curves are interpolated onto a
    common FPR grid so that mean/std bands can be computed pointwise.
    Returns a dict with the grid ('xs'), mean/upper/lower TPR bands and the
    mean/std AUC.
    """
    # common false-positive-rate grid for interpolation
    fpr_pt = np.linspace(0, 1, 1001)
    tprs, aucs = [], []
    for lbl, scr in zip(lst_lbl, lst_scr):
        fpr, tpr, _ = roc_curve(y_true=lbl, y_score=scr, drop_intermediate=True)
        # NOTE(review): scipy.interp is deprecated in modern SciPy; numpy.interp
        # is the drop-in replacement — confirm the pinned scipy version.
        tprs.append(interp(fpr_pt, fpr, tpr))
        tprs[-1][0] = 0.0  # force the curve to start at (0, 0)
        aucs.append(auc(fpr, tpr))
    tprs_mean = np.mean(tprs, axis=0)
    tprs_std = np.std(tprs, axis=0)
    # clip the +/- 1 std band into the valid [0, 1] TPR range
    tprs_upper = np.minimum(tprs_mean + tprs_std, 1)
    tprs_lower = np.maximum(tprs_mean - tprs_std, 0)
    auc_mean = auc(fpr_pt, tprs_mean)
    auc_std = np.std(aucs)
    # keep mean + std within [0, 1] for plotting/reporting
    auc_std = 1 - auc_mean if auc_mean + auc_std > 1 else auc_std
    rslt = {'xs': fpr_pt,
            'ys_mean': tprs_mean,
            'ys_upper': tprs_upper,
            'ys_lower': tprs_lower,
            'auc_mean': auc_mean,
            'auc_std': auc_std}
    return rslt
def pr_interp(rc_, rc, pr):
    """
    Interpolate a precision-recall curve onto the recall grid `rc_`.

    Unlike ROC curves, PR curves cannot be linearly interpolated; this uses
    the hyperbolic interpolation between adjacent operating points
    (precision expressed as r / (a*r + b) between neighbors).

    :param rc_: target recall grid (ascending)
    :param rc:  recall values of the measured curve (ascending)
    :param pr:  precision values matching `rc`
    :return: interpolated precision at each point of `rc_`
    """
    pr_ = np.zeros_like(rc_)
    # index of the first measured recall >= each target recall
    locs = np.searchsorted(rc, rc_)
    for idx, loc in enumerate(locs):
        l = loc - 1
        r = loc
        # Neighboring operating points; the curve is padded with the
        # conventional endpoints (recall 0, precision 1) and (recall 1,
        # precision 0) when the target falls outside the measured range.
        r1 = rc[l] if l > -1 else 0
        r2 = rc[r] if r < len(rc) else 1
        p1 = pr[l] if l > -1 else 1
        p2 = pr[r] if r < len(rc) else 0
        # Guard each division with a 1e-16 floor to avoid divide-by-zero.
        t1 = (1 - p2) * r2 / p2 / (r2 - r1) if p2 * (r2 - r1) > 1e-16 else (1 - p2) * r2 / 1e-16
        t2 = (1 - p1) * r1 / p1 / (r2 - r1) if p1 * (r2 - r1) > 1e-16 else (1 - p1) * r1 / 1e-16
        t3 = (1 - p1) * r1 / p1 if p1 > 1e-16 else (1 - p1) * r1 / 1e-16
        a = 1 + t1 - t2
        b = t3 - t1 * r1 + t2 * r1
        pr_[idx] = rc_[idx] / (a * rc_[idx] + b)
    return pr_
def get_pr_info(lst_lbl, lst_scr):
    """
    Aggregate precision-recall curves over several runs (e.g. CV folds).

    Curves are interpolated onto a common recall grid via pr_interp so that
    pointwise mean/std bands can be computed. Returns a dict with the grid
    ('xs'), mean/upper/lower precision bands and mean/std average precision.
    """
    rc_pt = np.linspace(0, 1, 1001)
    # avoid recall exactly 0 (pr_interp divides by recall-derived terms)
    rc_pt[0] = 1e-16
    prs = []
    aps = []
    for lbl, scr in zip(lst_lbl, lst_scr):
        pr, rc, _ = precision_recall_curve(y_true=lbl, probas_pred=scr)
        aps.append(average_precision_score(y_true=lbl, y_score=scr))
        # sklearn returns the curve with decreasing recall; flip to ascending
        pr, rc = pr[::-1], rc[::-1]
        prs.append(pr_interp(rc_pt, rc, pr))
    prs_mean = np.mean(prs, axis=0)
    prs_std = np.std(prs, axis=0)
    # clip the +/- 1 std band into the valid [0, 1] precision range
    prs_upper = np.minimum(prs_mean + prs_std, 1)
    prs_lower = np.maximum(prs_mean - prs_std, 0)
    aps_mean = np.mean(aps)
    aps_std = np.std(aps)
    # keep mean + std within [0, 1] for plotting/reporting
    aps_std = 1 - aps_mean if aps_mean + aps_std > 1 else aps_std
    rslt = {'xs': rc_pt,
            'ys_mean': prs_mean,
            'ys_upper': prs_upper,
            'ys_lower': prs_lower,
            'auc_mean': aps_mean,
            'auc_std': aps_std}
    return rslt
|
995,650 | 40f728d674d83e396b25cc06c08f6f8a5f6e0892 | # PYTHON2 file to be run on the Pi Zero
import RPi.GPIO as r
from time import sleep
from math import cos, radians
# Drive a software-PWM output on the first configured pin with a slowly
# oscillating duty cycle (cosine sweep). Runs forever until interrupted.
r.setmode(r.BOARD)
# Candidate output pins (physical BOARD numbering); only pins[0] is used here.
pins = [11, 12, 13, 15, 16, 18,
        22, 7, 3, 5, 24, 26,
        19, 21, 23, 8, 10]
r.setup(pins[0], r.OUT)
# 50 Hz PWM carrier on the first pin.
pwm = r.PWM(pins[0], 50)
i = 0
while True:
    # Duty cycle sweeps 0..100 as i (degrees) advances; pwm.start() here
    # re-starts the PWM each tick with the new duty value.
    pwm.start(50 * (1 - cos(radians(i))))
    i += 1
    sleep(0.01)
|
995,651 | d9d0c1a7ce6e26342a9e7becc7c75792fbe16940 | __source__ = 'https://leetcode.com/problems/unique-email-addresses/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 929. Unique Email Addresses
#
# Every email consists of a local name and a domain name, separated by the @ sign.
#
# For example, in alice@leetcode.com, alice is the local name, and leetcode.com is the domain name.
#
# Besides lowercase letters, these emails may contain '.'s or '+'s.
#
# If you add periods ('.') between some characters in the local name part of an email address,
# mail sent there will be forwarded to the same address without dots in the local name.
# For example, "alice.z@leetcode.com" and "alicez@leetcode.com" forward to the same email address.
# (Note that this rule does not apply for domain names.)
#
# If you add a plus ('+') in the local name, everything after the first plus sign will be ignored.
# This allows certain emails to be filtered, for example m.y+name@email.com will be forwarded to my@email.com.
# (Again, this rule does not apply for domain names.)
#
# It is possible to use both of these rules at the same time.
#
# Given a list of emails, we send one email to each address in the list.
# How many different addresses actually receive mails?
#
# Example 1:
#
# Input: ["test.email+alex@leetcode.com","test.e.mail+bob.cathy@leetcode.com","testemail+david@lee.tcode.com"]
# Output: 2
# Explanation: "testemail@leetcode.com" and "testemail@lee.tcode.com" actually receive mails
#
# Note:
#
# 1 <= emails[i].length <= 100
# 1 <= emails.length <= 100
# Each emails[i] contains exactly one '@' character.
#
import unittest
# Approach 1: Canonical Form
# 32ms 98.92%
class Solution(object):
    def numUniqueEmails(self, emails):
        """Count distinct delivery addresses after applying the '.'/'+' rules.

        Dots in the local name are ignored, and everything from the first '+'
        onward is dropped; the domain part is left untouched.
        """
        canonical = set()
        for address in emails:
            local, _, domain = address.partition('@')
            # everything after the first '+' in the local name is ignored
            local = local.split('+', 1)[0]
            # dots in the local name are insignificant
            canonical.add(local.replace('.', '') + '@' + domain)
        return len(canonical)
class TestMethods(unittest.TestCase):
    # Placeholder suite: keeps the file runnable under unittest; the real
    # verification of numUniqueEmails lives in the LeetCode judge.
    def test_Local(self):
        self.assertEqual(1, 1)
# Run the placeholder unittest suite when executed directly.
if __name__ == '__main__':
    unittest.main()
Java = '''
#Thought: https://leetcode.com/problems/unique-email-addresses/solution/
# Approach 1: Canonical Form
# Time Complexity: O(C), where C is the total content of emails.
# Space Complexity: O(C).
#
# 22ms 84.02%
class Solution {
public int numUniqueEmails(String[] emails) {
Set<String> res = new HashSet<>();
for (String email: emails) {
int i = email.indexOf('@');
String local = email.substring(0, i);
String rest = email.substring(i);
if (local.contains("+")) {
local = local.substring(0, local.indexOf('+'));
}
local = local.replace(".", ""); //local = local.replaceAll("\\.", ""); //37ms, 59.93%
res.add(local + rest);
}
return res.size();
}
}
#Cheating
# 13ms 97.81%
class Solution {
public int numUniqueEmails(String[] emails) {
Set<String> seen = new HashSet();
for(String email:emails){
int i = email.indexOf('@');
String rest = email.substring(i);
seen.add(rest);
}
return seen.size();
}
}
'''
|
995,652 | 0ff4bbb05fa13b9e8f7911f4ae71e4d1af7e22a6 | # Create by CodeMeow
# -*- coding:utf-8 -*-
import urllib2
import logging
import logging.handlers
import sys, os, re
reload(sys)
sys.setdefaultencoding('utf8')
WEATHER_URI = 'http://vehiclenet-python-0-codemeow.myalauda.cn/carlink/weather/findWeather.htm?city=%s'
def fetch_weather(city_name):
logger = logging.getLogger('attacker')
print 'Try to fetch the weather in %s' % city_name
try:
#import pdb
#pdb.set_trace()
city_encode_name = city_name.encode('utf-8')
city_encode_name = urllib2.quote(city_encode_name)
response = urllib2.urlopen(WEATHER_URI % city_encode_name)
content = response.read()
if content == str(201) or content == str(501):
errorStr = 'Return error code: %s (%s)' % (content, city_name)
logger.error(errorStr)
print errorStr
return 1
except urllib2.HTTPError, e:
errorStr = 'Error: %s (%s)' % (e.code, city_name)
logger.error(errorStr)
print errorStr
finally:
return 0
if __name__ == '__main__':
    #import pdb
    #pdb.set_trace()
    # Read one city name per line from city_name.txt and query each one,
    # counting attempts and successes. Lines starting with '#' are skipped.
    print 'Begin read \'city_name.txt\' file'
    f = open('city_name.txt', 'r')
    i_total = 0
    i_success = 0
    # Rotate the attack log hourly under ./logs/ with a timestamped suffix.
    logdir = 'logs'
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
    formatter = logging.Formatter(fmt)
    handler = logging.handlers.TimedRotatingFileHandler(
        '%s/logging' % logdir, 'H', 24, 0)
    handler.suffix = '%Y%m%d%H%M%S.log'
    handler.extMatch = re.compile(r'^\d{4}\d{2}\d{2}\d{2}\d{2}\d{2}')
    handler.setFormatter(formatter)
    logger = logging.getLogger('attacker')
    logger.addHandler(handler)
    for line in f.readlines():
        line = line.strip()
        # lines contain escaped unicode (e.g. \uXXXX); decode before use
        line = line.decode('unicode_escape')
        if not len(line) or line.startswith('#'):
            continue
        i_success += fetch_weather(line)
        i_total += 1
    print 'Finished (Total: %s, Success: %s)' % (i_total, i_success)
    f.close()
995,653 | 63aecb1297120a78f0b5ebde5642f4e74ffa055a | import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/../..'))
import unittest
from tests.cases.google_test_case import GoogleTestCaseSimple
class TestSuite1(unittest.TestCase):
    """Aggregates the Google test cases into one suite and executes it."""

    def test_main(self):
        # suite of TestCases
        self.suite = unittest.TestSuite()
        self.suite.addTests([
            unittest.defaultTestLoader.loadTestsFromTestCase(GoogleTestCaseSimple),
        ])
        # Bug fix: the runner was only instantiated before, never run, so the
        # suite's tests were silently skipped.
        unittest.TextTestRunner().run(self.suite)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
995,654 | 7a92a6b22c31be1a14f6f2da5143e757fb0c8ebe | from mock_api.response_rules import response_rules_provider
class ResponseResolver(object):
    """Select the response for a request by evaluating an endpoint's rules.

    Rules are checked in order; the first matching rule wins, otherwise the
    endpoint's default response is returned.
    """

    # Default provider; can be overridden per instance via the constructor.
    rules_provider = response_rules_provider

    def __init__(self, api_endpoint, response_rule_provider=None):
        self.api_endpoint = api_endpoint
        if response_rule_provider:
            self.rules_provider = response_rule_provider

    def resolve(self, request):
        """Return the first rule-matched response, or the endpoint default."""
        for rule in self.api_endpoint.response_rules.all():
            matcher = self.rules_provider.get_matcher(
                rule.rule, rule.param_name, rule.param_value)
            if matcher.match(request):
                return rule.response
        return self.api_endpoint.response
|
995,655 | bde478969734138ba60464de702a1538cba2c946 | # class Test:
# def __init__(self):
# print("This is in __init__")
# pass
# def __call__(self, func):
# print("This is in __call__")
# def inter(a, b):
# print("This is in inter")
# ret = func(a, b)
# return a + b - ret
# return inter
# @Test()
# def deb(a, b):
# return 1
# ##############
# class Test:
# def __init__(self, func):
# print("This is in __init__")
# self.func = func
# def __call__(self, a, b):
# print("This is in __call__")
# ret = self.func(a, b)
# return a + b - ret
# @Test
# def deb(a, b):
# return 1
# #############
# class Test:
# def __init__(self, par1):
# print("This is in __init__")
# self.par1 = par1
# def __call__(self, func):
# print("This is in __call__")
# if self.par1:
# print("This is par1 = True")
# def inter1(a, b):
# print("This is in inter1")
# ret = func(a, b)
# return a + b - ret
# return inter1
# else:
# print("This is par1 = False")
# def inter2(a, b):
# print("This is in inter2")
# ret = func(a, b)
# return a + b - ret + 1
# return inter2
# @Test(True)
# def deb(a, b):
# return 1
# @Test(False)
# def deb1(a, b):
# return 1
|
995,656 | fbafc641bf18a43595f7177625059eb317272506 | import math
# Read a query point (x, y) and a list of devices (cx, cy, radius), then
# print the integer distance from the point to the edge of one device.
x, y, n_devices = map(int, input().split())
devices = []
for _ in range(n_devices):
    devices.append(tuple(map(int, input().split())))
# Sort devices by distance from (x, y) to the device's coverage edge
# (center distance minus radius); negative means the point is inside.
close_devices = sorted(devices, key=lambda dev:math.sqrt((x-dev[0])**2 + (y-dev[1])**2)-dev[2])
# NOTE(review): index 2 selects the THIRD-closest device — presumably the
# problem statement asks for it; confirm against the task description.
dev = close_devices[2]
dist = math.sqrt((x - dev[0]) ** 2 + (y - dev[1]) ** 2) - dev[2]
# Distance to the edge, floored; 0 when the point lies inside the coverage.
print(math.floor(dist)) if dist > 0 else print(0)
|
995,657 | c590b59aa142fa4e412c08cb6b25b0a7d94f9b35 | from Queue import Queue
class IterableQueue(Queue):
    """A Queue that can be iterated until close() is called.

    Iterating yields items in FIFO order; consuming the sentinel pushed by
    close() ends the iteration with StopIteration.
    """

    # Unique marker object; identity comparison means it can never collide
    # with a user-supplied item.
    _sentinel = object()

    def __iter__(self):
        return self

    def close(self):
        """Signal end-of-stream to consumers of this queue."""
        self.put(self._sentinel)

    def next(self):
        item = self.get()
        if item is self._sentinel:
            raise StopIteration
        else:
            return item

    # Fix: only the Python 2 iterator protocol (next) was implemented; alias
    # it so Python 3's `for`/`list()` iteration works too.
    __next__ = next
|
995,658 | 118055756466c23917ade71f27e35933122d827b | import math
import random
LEVELS = ['user', 'easy', 'medium', 'hard']
class TicTacToe:
    """3x3 tic-tac-toe board; cells hold 'X', 'O' or '_' for empty."""

    def __init__(self):
        self.board = self.make_board()
        self.current_winner = None  # set to 'X'/'O' once someone wins

    @staticmethod
    def make_board():
        # Nine empty cells in row-major order.
        return ['_'] * 9

    def print_board(self):
        """Print the board with empty cells shown as spaces."""
        print('---------')
        for start in range(0, 9, 3):
            row = self.board[start:start + 3]
            print('|', ' '.join(row).replace('_', ' '), '|')
        print('---------')

    def make_move(self, square, player):
        """Place player's mark on square if it is free; True on success."""
        if self.board[square] != '_':
            return False
        self.board[square] = player
        if self.winner(square, player):
            self.current_winner = player
        return True

    def winner(self, square, player):
        """True if the move just made on `square` completed a line."""
        row_start = (square // 3) * 3
        if all(cell == player for cell in self.board[row_start:row_start + 3]):
            return True
        col = square % 3
        if all(self.board[col + offset] == player for offset in (0, 3, 6)):
            return True
        # Diagonals only pass through even-numbered squares (corners/center).
        if square % 2 == 0:
            if all(self.board[i] == player for i in (0, 4, 8)):
                return True
            if all(self.board[i] == player for i in (2, 4, 6)):
                return True
        return False

    def empty_squares(self):
        """True while at least one cell is still empty."""
        return '_' in self.board

    def num_empty_squares(self):
        return self.board.count('_')

    def available_moves(self):
        """Indices of all empty cells."""
        return [i for i, cell in enumerate(self.board) if cell == '_']
class Player:
    """Base player; concrete subclasses must implement get_move()."""

    def __init__(self, player):
        # 'X' or 'O'
        self.player = player

    def get_move(self, game):
        # Overridden by subclasses; base implementation does nothing.
        pass
class Human(Player):
    # Interactive player: reads board coordinates from stdin.
    def __init__(self, player):
        super().__init__(player)
    def get_move(self, game):
        """Prompt until the user enters a free cell as 'x y' with x,y in 1..3."""
        valid_square = False
        val = None
        while not valid_square:
            coord = input('Enter the coordinates: ').split()
            try:
                x, y = int(coord[0]), int(coord[1])
                if not (x in (1, 2, 3) and y in (1, 2, 3)):
                    print('Coordinates should be from 1 to 3')
                    continue
                # Map (x, y) with origin at the bottom-left to the 0..8
                # row-major board index: x - 3*y + 8.
                val = x - 3 * y + 8
                if val not in game.available_moves():
                    print('This cell is occupied! Choose another one!')
                    continue
                valid_square = True
            except (ValueError, IndexError):
                # non-numeric input or fewer than two tokens
                print('You should enter numbers!')
        return val
class EasyAI(Player):
    """AI that always plays a uniformly random free square."""

    def __init__(self, player):
        super().__init__(player)
        self.player = player

    def get_move(self, game):
        print('Making move level "easy"')
        return random.choice(game.available_moves())
class MediumAI(Player):
    """Placeholder 'medium' AI; currently plays randomly like EasyAI."""

    def __init__(self, player):
        super().__init__(player)
        self.player = player

    def get_move(self, game):
        print('Making move level "medium"')
        rival = 'X' if self.player == 'O' else 'O'  # computed but never used
        return random.choice(game.available_moves())
class HardAI(Player):
    # Perfect player: uses minimax over the full game tree (random opening).
    def __init__(self, player):
        super().__init__(player)
    def get_move(self, game):
        """Return the minimax-optimal square (random on an empty board)."""
        print('Making move level "hard"')
        if len(game.available_moves()) == 9:
            # all openings are equivalent by symmetry, so pick randomly
            square = random.choice(game.available_moves())
        else:
            square = self.minimax(game, self.player)['position']
        return square
    def minimax(self, state, player):
        """
        Recursive minimax; returns {'position': best move, 'score': value}.

        Scores are +/-(empty squares + 1) so earlier wins score higher and
        later losses score less badly; 0 means a draw.

        :type state: TicTacToe
        """
        max_player = self.player
        other_player = 'X' if player == 'O' else 'O'
        # Base case: the PREVIOUS mover (other_player) just won.
        if state.current_winner == other_player:
            return {'position': None, 'score': 1 * (state.num_empty_squares() + 1) if other_player == max_player else -1 * (state.num_empty_squares() + 1)}
        elif not state.empty_squares():
            return {'position': None, 'score': 0}
        if player == max_player:
            best = {'position': None, 'score': -math.inf}  # Maximize
        else:
            best = {'position': None, 'score': math.inf}  # Minimize
        for possible_move in state.available_moves():
            state.make_move(possible_move, player)
            sim_score = self.minimax(state, other_player)  # Simulate one more move
            # Undo move (minimax mutates the shared board in place)
            state.board[possible_move] = '_'
            state.current_winner = None
            sim_score['position'] = possible_move  # Optimal next move
            if player == max_player:  # X max player
                if sim_score['score'] > best['score']:
                    best = sim_score
            else:
                if sim_score['score'] < best['score']:
                    best = sim_score
        return best
def play(game, x_player, o_player, print_game=True):
    """
    Run one game to completion; X always moves first.

    Returns the winning player's letter, or None on a draw (the final
    'Draw' branch falls through without an explicit return).

    :type o_player: Player
    :type x_player: Player
    :type game: TicTacToe
    """
    if print_game:
        game.print_board()
    player = 'X'
    while game.empty_squares():
        # ask the side to move for its square
        if player == 'O':
            square = o_player.get_move(game)
        else:
            square = x_player.get_move(game)
        if game.make_move(square, player):
            if print_game:
                # print(f'{player} makes a move to square {square}')
                game.print_board()
                # print('')
            if game.current_winner:
                if print_game:
                    print(player + ' wins')
                return player
            # move accepted: hand the turn to the other side
            player = 'X' if player == 'O' else 'O'
    if print_game:
        print('Draw')
def get_command():
    """
    REPL entry point: read 'start <x-level> <o-level>' or 'exit' from stdin,
    build the two players and run one game. Levels: user/easy/medium/hard.
    """
    while True:
        command = input('Input command: ').split()
        if command[0] == 'exit':
            return
        elif len(command) == 3 and command[0] == 'start' and command[1] in LEVELS and command[2] in LEVELS:
            break
        else:
            print('Bad parameters!')
    # NOTE(review): both lines key off command[1], so the O player ignores
    # command[2]; looks like a copy/paste bug — confirm intended behavior.
    x_player = HardAI('X') if command[1] == 'hard' else EasyAI('X') if command[1] == 'easy' else MediumAI('X') if command[1] == 'medium' else Human('X')
    o_player = HardAI('O') if command[1] == 'hard' else EasyAI('O') if command[1] == 'easy' else MediumAI('O') if command[1] == 'medium' else Human('O')
    t = TicTacToe()
    play(t, x_player, o_player)
# Start the interactive loop on import/run.
get_command()
|
995,659 | 0a4e959da401c83fd86ca2302732de272218be69 | a, b = list(map(int, input().split()))
# Print "Even" if the product a*b is even, otherwise "Odd".
seki = a*b
seki = seki % 2
if seki == 0:
    print("Even")
else:
    print("Odd")
995,660 | f18e3baf5757219110b9e64e4bd180921782a7a7 | import numpy as np
import tensorflow as tf
from config import PREDICT_THRESHOLD
IOU_METRIC_THRESHOLDS = list(np.arange(0.5, 1, 0.05))
def _seg_iou(seg1, seg2):
batch_size = seg1.shape[0]
metrics = []
for idx in range(batch_size):
ans1 = seg1[idx] > 0
ans2 = seg2[idx] > 0
intersection = np.logical_and(ans1, ans2)
union = np.logical_or(ans1, ans2)
iou = (np.sum(intersection > 0) + 1e-10 ) / (np.sum(union > 0) + 1e-10)
scores = [iou > threshold for threshold in IOU_METRIC_THRESHOLDS]
metrics.append(np.mean(scores))
return np.mean(metrics)
def iou_metric(y_true, y_pred):
    # Keras/TF metric wrapper: thresholds predictions at PREDICT_THRESHOLD and
    # evaluates _seg_iou as a py_func (returns float64, not differentiable).
    return tf.py_func(_seg_iou, [y_true, y_pred > PREDICT_THRESHOLD], np.float64)
|
995,661 | 2f97593d020e0e931c4072607fac5a384e24c982 | from geopy.geocoders import Nominatim #Used for grabbing geocode info from open street maps
from geopy.exc import GeocoderTimedOut
import time
import datetime
def do_geocode(street, city, state, unit_number=''):
    """
    Geocode a street address via OpenStreetMap's Nominatim service.

    Returns a dict with the normalized address parts plus place_id/osm_id,
    country_code/county/postcode, latitude/longitude, a display 'address'
    and a URL 'slug'. Retries indefinitely on geocoder timeouts.
    """
    street = street.title().strip()
    city = city.title().strip()
    state = state.upper().strip()
    address = '"' + street + ' ' + city + ', ' + state + '"'
    try:
        addresses = dict()
        # throttle requests to respect Nominatim's usage policy
        time.sleep(0.5 ) # delays for 1 seconds. You can Also Use Float Value.
        geolocator = Nominatim(scheme='http', user_agent='user-Bot-parcelminer')
        geo = geolocator.geocode(address, addressdetails=True, timeout=50)
        print(datetime.datetime.now(), ': ', geo)
        # NOTE(review): street.isdigit() is applied to the WHOLE street string,
        # so street_number is almost always None — confirm intended behavior.
        addresses['street_number'] = street.split(' ')[0].split(',')[0] if (street != '' and street.isdigit()) else None
        addresses['street'] = street
        addresses['city'] = city
        addresses['state'] = state
        addresses['unit_number'] = unit_number
        if geo is not None:
            for key, value in geo.raw.items():
                if key == 'place_id':
                    addresses['place_id'] = value
                elif key == 'osm_id':
                    addresses['osm_id'] = value
                elif key == 'address':
                    # NOTE(review): each non-matching subKey resets the other
                    # fields to '' on every iteration, so the final values
                    # depend on dict ordering — looks buggy; verify.
                    for subKey, subValue in value.items():
                        if subKey == 'country_code':
                            addresses['country_code'] = subValue
                        else:
                            addresses['country_code'] = ''
                        if subKey == 'county':
                            addresses['county'] = subValue
                        else:
                            addresses['county'] = ''
                        if subKey == 'postcode':
                            addresses['postcode'] = subValue
                        else:
                            addresses['postcode'] = ''
            addresses['longitude'] = geo.longitude
            addresses['latitude'] = geo.latitude
        else:
            # no geocoder hit: fill identification fields with empty strings
            addresses.update({'place_id' : '',
                              'osm_id' : '',
                              'country_code' : '',
                              'county' : '',
                              'postcode' : '',
                              'latitude' : '',
                              'longitude' : ''})
        if addresses['unit_number'] != '':
            street = street + ', ' + addresses['unit_number']
        if addresses['postcode'] != '':
            addresses['address'] = street +' '+ city +', '+ state + ' '+ addresses['postcode']
        else:
            addresses['address'] = street +' '+ city +', ' + state
        # URL-friendly slug derived from the display address
        addresses['slug'] = addresses['address'].replace(' ', '-').replace(',', '')
        return addresses
    except GeocoderTimedOut:
        # retry on timeout; NOTE(review): unit_number is dropped on retry
        return do_geocode(street, city, state)
|
995,662 | d42ae44baf1a3415d04522ba209c27dd54944407 | from django.shortcuts import render, redirect
from student.models import StudentCourses, Student
from course.models import Catalog, Prerequisites
from authentication.models import UserType
from registrar.models import Constraints
from django.contrib import messages
from history.models import StudentCourseHistory
import ast
#constarints = {'min-credits':12,'max-credits':30,'max-enroll-inst':3,'max-enroll-reg':10}
def view(request):
    """Render the student home page: enrolled courses plus available catalog."""
    total_credits_registered = 0
    codelist = StudentCourses.objects.filter(UserId=request.user.id).values_list('UserId', 'courseid')
    # sum the credits of everything the student is currently registered for
    for item in codelist:
        total_credits_registered = total_credits_registered + Catalog.objects.get(id=item[1]).credits
    data = StudentCourses.objects.filter(UserId=request.user.id)
    d1 = []
    codelistid = []
    # build [code, name, instructor, credits, tag, prereqs] rows for enrolled courses
    for item in data:
        d = []
        d.append(item.courseid.code)
        d.append(item.courseid.name)
        d.append(item.courseid.instructor)
        d.append(item.courseid.credits)
        d.append(item.courseid.coursetag)
        d.append(Prerequisites.objects.filter(cid=item.courseid.id).values_list('prereq', flat=True))
        d1.append(d)
    codelist = StudentCourses.objects.filter(UserId=request.user.id).values_list('UserId', 'courseid')
    for item in codelist:
        codelistid.append(int(item[1]))
    # catalog entries the student is NOT already enrolled in
    courses = Catalog.objects.exclude(id__in = codelistid).values_list('id','code','name','instructor','credits','coursetag')
    coursedata = []
    for i, elem in enumerate(courses):
        coursedata.append([elem, Prerequisites.objects.filter(cid=elem[0]).values_list('prereq', flat=True)])
    min_credits = Constraints.objects.get().min_credits
    # warn (but still render) when below the registrar's credit floor
    if (total_credits_registered<min_credits):
        messages.add_message(request,messages.INFO,"Minimum number of credits needed are "+str(min_credits),extra_tags='viewerror')
    return render(request, 'StudentView.html', {'user': request.user.first_name, 'data': d1, 'courses':coursedata, 'mode': request.session['mode'], 'nbar': 'home'})
def delete(request):
    """Drop the selected courses, promoting waitlisted students into freed seats.

    Refuses the whole drop when it would take the student below min_credits.
    """
    credits_to_delete = 0
    success = 0
    total_credits_registered = 0
    enroll_limit_status_reg = ""
    enroll_limit_status_inst = ""
    max_enroll_inst = 0
    max_enroll_reg = 0
    min_credits = Constraints.objects.get().min_credits
    rcodelist = request.POST.getlist('code')
    codelist = StudentCourses.objects.filter(UserId=request.user.id).values_list('UserId', 'courseid')
    for item in codelist:
        total_credits_registered = total_credits_registered + Catalog.objects.get(id=item[1]).credits
    for i in rcodelist:
        credits_to_delete = credits_to_delete + Catalog.objects.get(code=i).credits
    # only proceed if the student stays at/above the credit floor afterwards
    if ((total_credits_registered-credits_to_delete)>=min_credits):
        for item in rcodelist:
            id1 = Catalog.objects.get(code=item).id
            max_enroll_inst = Catalog.objects.get(id=id1).max_enroll_limit
            max_enroll_reg = Constraints.objects.get().max_enroll_limit_reg
            number_of_course_reg = StudentCourses.objects.filter(courseid_id=id1).count()
            myobj = StudentCourses.objects.filter(UserId=request.user.id,courseid_id=id1).values_list('UserId','courseid','enroll_limit_status_inst','enroll_limit_status_reg')
            # 'C' = confirmed seat: promote the first waitlisted student
            if str(myobj[0][2]) == "C":
                #code for confirming next student from waitlist
                print "before modifying" , myobj
                if (number_of_course_reg > max_enroll_inst):
                    list_of_waiting_students = StudentCourses.objects.filter(courseid_id=id1,enroll_limit_status_inst="W")[:1].get()
                    list_of_waiting_students.enroll_limit_status_inst = "C"
                    list_of_waiting_students.enroll_limit_status_reg = "A"
                    #print "new" , list_of_waiting_students
                    list_of_waiting_students.save()
                if (number_of_course_reg > max_enroll_reg):
                    list_of_waiting_students = StudentCourses.objects.filter(courseid_id=id1,enroll_limit_status_inst="W",enroll_limit_status_reg="NA")[:1].get()
                    list_of_waiting_students.enroll_limit_status_inst = "W"
                    list_of_waiting_students.enroll_limit_status_reg = "A"
                    list_of_waiting_students.save()
            # 'W'/'A' = waitlisted-but-admitted: free a registrar slot only
            if str(myobj[0][2]) == "W":
                if str(myobj[0][3]) == "A":
                    if (number_of_course_reg > max_enroll_reg):
                        list_of_waiting_students = StudentCourses.objects.filter(courseid_id=id1,enroll_limit_status_inst="W",enroll_limit_status_reg="NA")[:1].get()
                        list_of_waiting_students.enroll_limit_status_inst = "W"
                        list_of_waiting_students.enroll_limit_status_reg = "A"
                        list_of_waiting_students.save()
            obj = StudentCourses.objects.get(courseid=id1, UserId=request.user.id)
            obj.delete()
            success = 1
    if success :
        successmsg = "Courses deleted successfully"
        messages.add_message(request,messages.INFO,successmsg,extra_tags='deletesuccess')
    else:
        errormsg = "Minimum number of credits needed are " + str(min_credits)
        messages.add_message(request,messages.INFO,errormsg,extra_tags='deleteerror')
    return redirect('/student/')
def add(request):
    """Enroll the student in each selected course whose prerequisites are met."""
    rcodelist = request.POST.getlist('code')
    """Check Prerequisites are met or not"""
    # courses the student has already completed (from history)
    stu_courses = StudentCourseHistory.objects.filter(user=request.user.id).values_list('course', flat=True)
    for item in rcodelist:
        course_prereq = Prerequisites.objects.filter(cid=item).values_list('prereq', flat=True)
        print course_prereq , " :: " , stu_courses
        # count how many prerequisites appear in the student's history
        c = 0
        for i in course_prereq:
            if i in stu_courses:
                c += 1
        if c == len(course_prereq):
            """Prerequisites satisfied, add the course"""
            add_the_course(request, [item])
        else:
            """Course not added"""
            errormsg = "Course prerequisites not satisfied"
            messages.add_message(request,messages.INFO,errormsg,extra_tags='adderror')
    return redirect('/student/')
def add_the_course(request, code):
    """Register the student for the given course ids, assigning seat status.

    Seat status per course: instructor status 'C' (confirmed) or 'W'
    (waitlisted); registrar status 'A' (admitted) or 'NA'. The whole add is
    rejected when it would exceed the registrar's max_credits cap.
    """
    credits_to_add = 0
    total_credits_registered = 0
    success = 0
    enroll_limit_status_reg = ""
    enroll_limit_status_inst = ""
    max_enroll_reg = 0
    max_enroll_inst = 0
    rcodelist = code
    codelist = StudentCourses.objects.filter(UserId=request.user.id).values_list('UserId', 'courseid')
    for item in codelist:
        total_credits_registered = total_credits_registered + Catalog.objects.get(id=int(item[1])).credits
    for i in rcodelist:
        credits_to_add = credits_to_add + Catalog.objects.get(id=int(i)).credits
    max_credits = Constraints.objects.get().max_credits
    if ((total_credits_registered+credits_to_add)<=max_credits):
        for item in rcodelist:
            id1 = Catalog.objects.get(id=int(item)).id
            max_enroll_inst = Catalog.objects.get(id=int(item)).max_enroll_limit
            max_enroll_reg = Constraints.objects.get().max_enroll_limit_reg
            number_of_course_reg = StudentCourses.objects.filter(courseid_id=id1).count()
            z = StudentCourses.objects.filter(UserId=request.user.id).values_list('UserId','courseid','enroll_limit_status_inst','enroll_limit_status_reg')
            # registrar-level cap decides admitted vs not-admitted
            if number_of_course_reg >= max_enroll_reg:
                enroll_limit_status_reg = "NA"
            else:
                enroll_limit_status_reg = "A"
            # instructor-level cap decides confirmed vs waitlisted
            if number_of_course_reg >= max_enroll_inst:
                enroll_limit_status_inst = "W"
            else:
                enroll_limit_status_inst = "C"
            obj = StudentCourses(UserId_id=int(request.user.id),courseid_id=int(id1),enroll_limit_status_inst=enroll_limit_status_inst,enroll_limit_status_reg=enroll_limit_status_reg)
            success = 1
            obj.save()
    if success :
        successmsg = "Courses added successfully"
        messages.add_message(request,messages.INFO,successmsg,extra_tags='addsuccess')
    else:
        errormsg = "Maximum number of credits can be added are " + str(max_credits)
        messages.add_message(request,messages.INFO,errormsg,extra_tags='adderror')
def req_instr_prev(request):
    """Switch the signed-in user's account into instructor-preview mode ('P')."""
    profile = UserType.objects.get(UserId=request.user.id)
    profile.Type = "P"
    profile.save()
    # keep the session in sync with the persisted mode
    request.session['mode'] = 'P'
    return redirect('/student/')
def can_req_prev(request):
    """Cancel preview mode: restore the user's account to student mode ('S')."""
    profile = UserType.objects.get(UserId=request.user.id)
    profile.Type = "S"
    profile.save()
    # keep the session in sync with the persisted mode
    request.session['mode'] = 'S'
    return redirect('/student/')
def edit_profile(request):
    """Show/update the student's profile (roll number and department).

    POST replaces the existing Student row with the submitted values; GET
    (or the fall-through after POST) renders the current profile.
    """
    if request.method == 'POST':
        uid = request.user.id
        rn = request.POST['rn']
        dept = request.POST['dept']
        try:
            # replace-by-delete: remove the old row, then save the new one
            sd_new = Student(rollno=rn, department=dept, UserId_id=uid)
            sd_old = Student.objects.filter(UserId=uid)
            sd_old.delete()
            sd_new.save()
            msg = "Details successfully added!"
            messages.add_message(request,messages.INFO,msg,extra_tags='adderror')
        except:
            # NOTE(review): bare except hides the real failure cause
            errormsg = "Error while adding details"
            messages.add_message(request,messages.ERROR,errormsg,extra_tags='adderror')
    try:
        sd = Student.objects.filter(UserId=request.user.id).values_list('rollno', 'department')
        return render(request, 'profile.html', {'uid': request.user.id, 'rn': sd[0][0], 'nm': request.user.first_name, 'dept': sd[0][1]})
    except:
        # no Student row yet: render the form without profile details
        return render(request, 'profile.html', {'uid': request.user.id, 'nm': request.user.first_name})
|
995,663 | 323f7018608aa58238bab93ea33bbdada7728159 | class SwordMeta(type):
"""docstring for ClassName"""
def __instancecheck__(cls, instance):
return cls.__subclasscheck__(type(instance))
def __subclasscheck__(cls, sub):
return (hasattr(sub, 'swipe') and callable(sub.swipe) and hasattr(sub, 'sharpen') and callable(sub.sharpen))
class Sword(metaclass = SwordMeta):
    # Virtual base via SwordMeta: subclass/instance checks look only for
    # swipe/sharpen, so thrust() is NOT inherited by duck-typed "swords".
    def thrust(self):
        print("Thrusting....")
class BroadSword:
    """Duck-typed sword: provides swipe/sharpen, so SwordMeta accepts it."""

    def swipe(self):
        print('Swoosh!')

    def sharpen(self):
        print('Shink!')
class SamuraiSword:
    """Another duck-typed sword recognized by SwordMeta's structural check."""

    def swipe(self):
        print('Slice!')

    def sharpen(self):
        print('Shink!')
class Rifle:
    """Counter-example: lacks swipe/sharpen, so SwordMeta rejects it."""

    def fire(self):
        print('Bang!')
if __name__ == '__main__':
    # Structural checks: True, True, False per SwordMeta's rules.
    print(issubclass(BroadSword, Sword))
    print(issubclass(SamuraiSword, Sword))
    print(issubclass(Rifle, Sword))
    samurai_sword = SamuraiSword()
    print(isinstance(samurai_sword, Sword)) # will result in False if __instancecheck__() is not implemented
    # Non-transitive Subclass relationship
    from collections.abc import Hashable
    print(issubclass(object, Hashable))
    print(issubclass(list, object))
    print(issubclass(list, Hashable)) # no transitive relation as Hashable-->object-->list
    # further investigation
    print(object.__hash__)
    print(list.__hash__) # List class set __hash__ to none
    print("-------------------------------")
    # further explanation
    broad_sword = BroadSword()
    print(isinstance(broad_sword, Sword))
    print(broad_sword.swipe())
    # Intentional demo failure: BroadSword is a virtual Sword only, so it
    # has no thrust() — this line raises AttributeError.
    print(broad_sword.thrust()) # will result in error
995,664 | e86a0c1b25a706ec04447bf940f69533144d5072 | # -*- coding: utf-8 -*-
import os
import urllib2
import unittest
import simplejson as json
import polls
from codecs import open
TEST_WIKIPEDIA_PAGE = 'tests/resources/full.html'
LATEST_TEMP_FILE = "tests/latest_temp.json"
__author__ = 'Simao Mata'
class TestPolls(unittest.TestCase):
    # Tests the polls scraper against a saved Wikipedia page snapshot
    # (tests/resources/full.html) by monkey-patching urllib2.urlopen.
    def setUp(self):
        self.maxDiff = None
        # expected newest-poll parse result for the snapshot page
        self.latest_poll = {
            "date" : u"7-12 May",
            "source" : {
                "href" : u"http://www.publico.pt/Pol%C3%ADtica/ps-passa-psd-e-cds-dispara-para-os-134_1494074?all=1",
                "name" : u"INTERCAMPUS",
            },
            "parties" : {
                u"Socialist" : u"36.8",
                u"Social Democratic" : u"33.9",
                u"People's Party" : u"13.4",
                u"Left Bloc" : u"6.0",
                u"Green-Communist" : u"7.4",
                u"Others / undecided" : u"2.4",
            }
        }
    def tearDown(self):
        # remove the temp output file written by polls.main, if any
        if os.path.exists(LATEST_TEMP_FILE):
            os.unlink(LATEST_TEMP_FILE)
    def test_latest_main(self):
        # end-to-end: main() fetches (patched), parses and writes JSON
        self.monkey_patch_urlopen("http://localhost/wikipediapolls")
        polls.main("http://localhost/wikipediapolls", LATEST_TEMP_FILE, "tests/all_temp.json")
        with open(LATEST_TEMP_FILE, 'r') as fd:
            latest_poll = json.load(fd)
        self.assertDictEqual(self.latest_poll, latest_poll)
    def test_get_newest_poll(self):
        # parse-only: feed the snapshot file object directly
        file_path = TEST_WIKIPEDIA_PAGE
        with open(file_path, 'r', 'utf-8') as fd:
            poll_stats = polls.get_poll_newest(fd)
        self.assertDictEqual(self.latest_poll, poll_stats)
    def monkey_patch_urlopen(self, expected_url):
        '''
        Substitute urllib2.urlopen by a custom function that checks that the url is equal to expected_url
        and returns the content of the resources/full.html file
        TODO: This method of testing assumes the code will use urlopen, we shouldn't care about what lib is being
        used to get the url. We should setup a small stub http server to serve the page
        '''
        #noinspection PyUnusedLocal
        def monkey_url_open(request, *args, **kwargs):
            self.assertEquals(expected_url, request.get_full_url())
            return open(TEST_WIKIPEDIA_PAGE)
        urllib2.urlopen = monkey_url_open
    def test_get_newest_from_url(self):
        # URL-based entry point, still served from the snapshot via the patch
        self.monkey_patch_urlopen("http://en.wikipedia.org/wiki/Portuguese_legislative_election,_2011")
        poll_stats = polls.get_poll_newest_from_url("http://en.wikipedia.org/wiki/Portuguese_legislative_election,_2011")
        self.assertDictEqual(self.latest_poll, poll_stats)
|
995,665 | a19c87c68344794f7434119fd1372e22034a5d6c |
from bs4 import BeautifulSoup
import requests
import re
import random
# Fetch one Baidu Baike article page and dump its raw HTML.
base_url = 'https://baike.baidu.com'
# visited-page history; seeded with the URL-encoded "web crawler" article path
his=['/item/%E7%BD%91%E7%BB%9C%E7%88%AC%E8%99%AB']
url = base_url + his[-1]
html = requests.get(url).text
soup=BeautifulSoup(html,'lxml')
# print(soup.find('h1').get_text(),' url:',his[-1])
print(html)
995,666 | ef6340aad9699af5fea2451df95bfbf78c9adfef | # -*- coding: utf-8 -*-
import re
import unittest
from aliyunsdkrds.request.v20140815.DescribeSlowLogRecordsRequest import DescribeSlowLogRecordsRequest
from ali_rds import AliRds
class AliRdsTestCase(unittest.TestCase):
    # Integration tests for the AliRds wrapper around the Aliyun RDS SDK.
    # Methods prefixed with t_ are deliberately NOT discovered by unittest.
    def setUp(self):
        #print("init")
        pass
    def tearDown(self):
        #print("teardown")
        pass
    def test_get_result(self):
        # valid request: a successful response always carries a RequestId
        request=DescribeSlowLogRecordsRequest()
        request.set_DBInstanceId('rdsx2y9yieqqer90yl2z')
        request.set_EndTime('2016-12-25T15:01Z')
        request.set_StartTime('2016-12-25T15:00Z')
        result=AliRds.get_result(request)
        print result
        self.assertEquals(True,result.has_key("RequestId"))
    def t_test_get_result(self):
        # disabled (t_ prefix): also exercises malformed/missing parameters
        request=DescribeSlowLogRecordsRequest()
        request.set_DBInstanceId('rdsx2y9yieqqer90yl2z')
        request.set_EndTime('2016-12-25T15:01Z')
        request.set_StartTime('2016-12-25T15:00Z')
        result=AliRds.get_result(request)
        print result
        self.assertEquals(True,result.has_key("RequestId"))
        #for (k,v) in result.items():
        # print k
        request=DescribeSlowLogRecordsRequest()
        request.set_DBInstanceId('rdsx2y9yieqqer90yl2z')
        request.set_EndTime('2016-12-25T15:01Z')
        # malformed start time -> API error code
        request.set_StartTime(1)
        result=AliRds.get_result(request)
        self.assertEquals(u'InvalidStartTime.Malformed',result["Code"])
        request=DescribeSlowLogRecordsRequest()
        result=AliRds.get_result(request)
        self.assertEquals(u'MissingParameter',result["Code"])
    def t_test_result_is_ok(self):
        # disabled (t_ prefix): checks the wrapper's own error envelopes
        result=None
        (ok,message)=AliRds.result_is_ok(result)
        self.assertEquals(u"ResultIsNone_My_Defined",message["Code"])
        result=1
        (ok,message)=AliRds.result_is_ok(result)
        self.assertEquals(u"ResultIsNotDict_My_Defined",message["Code"])
        request=DescribeSlowLogRecordsRequest()
        request.set_DBInstanceId('rdsx2y9yieqqer90yl2z')
        request.set_EndTime('2016-12-25T15:01Z')
        request.set_StartTime(1)
        result=AliRds.get_result(request)
        (ok,message)=AliRds.result_is_ok(result)
        #print message
        self.assertEquals(False,ok)
# Detect hands in a static image using Google's pretrained MediaPipe model.
import cv2
import mediapipe as mp

img = cv2.imread("hands.jfif", -1)
if img is None:
    # BUG FIX: imread returns None (no exception) for a missing/unreadable file.
    raise SystemExit("could not read hands.jfif")

mpHands = mp.solutions.hands
hands = mpHands.Hands()
drawTools = mp.solutions.drawing_utils

# MediaPipe expects RGB; OpenCV loads BGR.
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
results = hands.process(imgRGB)

# BUG FIX: multi_hand_landmarks is None when no hand is detected;
# iterating it unguarded raised TypeError.
if results.multi_hand_landmarks:
    for handlms in results.multi_hand_landmarks:
        drawTools.draw_landmarks(img, handlms, mpHands.HAND_CONNECTIONS)
print(results.multi_hand_landmarks)

cv2.imshow("hands", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
995,668 | 7653ac525960586985fc2345d8e966c83c722547 | from pynta.apps import PyntaApp
from pynta.apps.decorators import require_method
from pynta.core.paginator import Paginator
from pynta.storage.base import Anydbm
class CRUDApp(PyntaApp):
    """
    Generic CRUD application: wires five actions (`create`, `list`, `detail`,
    `update`, `delete`) onto a pluggable storage backend so any data set gets
    a full Create/Read/Update/Delete web interface.
    """

    urls = (
        (r'^$', 'self', {'_action': 'list'}, ''),
        (r'^(?P<_action>create)$', 'self', {}, ''),
        (r'^(?P<slug>\w+)$', 'self', {'_action': 'detail'}, ''),
        (r'^(?P<slug>\w+)/(?P<_action>(update|delete))$', 'self', {}, ''),
    )

    object_name = 'object'
    storage = Anydbm
    list_page_size = 20

    # --- storage-facing helpers ---------------------------------------

    def create_object(self, object_data):
        """Persist *object_data* under a freshly allocated key; return the stored object."""
        fresh_key = self.storage.get_free_key(self.object_name)
        self.storage.put(self.object_name, fresh_key, object_data)
        return self.storage.get(self.object_name, fresh_key)

    def get_dataset(self):
        """Return the whole stored collection for this object type."""
        return self.storage.get_dataset(self.object_name)

    def get_object(self, object_id):
        """Fetch a single stored object by id."""
        return self.storage.get(self.object_name, object_id)

    def update_object(self, object_id, object_data):
        """Overwrite the object at *object_id*; return the stored result."""
        self.storage.put(self.object_name, object_id, object_data)
        return self.storage.get(self.object_name, object_id)

    def delete_object(self, object_id):
        """Remove the object at *object_id* from storage."""
        self.storage.delete(self.object_name, object_id)

    # --- request-facing actions ---------------------------------------

    @require_method('POST')
    def do_create(self):
        """POST handler: create an object from the posted form data."""
        created = self.create_object(self.request.POST)
        self.context.update({self.object_name: created})
        return self.context

    def do_list(self):
        """Render one page of the collection (?page=N, defaulting to 1)."""
        paginator = Paginator(self.get_dataset(), self.list_page_size)
        current_page = paginator.get_page(self.request.GET.get('page', 1))
        self.context.update({'%s_list' % self.object_name: current_page})
        return self.context

    def do_detail(self, slug):
        """Show the single object addressed by *slug*."""
        self.context.update({self.object_name: self.get_object(slug)})
        return self.context

    @require_method('POST')
    def do_update(self, slug):
        """POST handler: replace the object at *slug* with the posted data."""
        updated = self.update_object(slug, self.request.POST)
        self.context.update({self.object_name: updated})
        return self.context

    @require_method('POST')
    def do_delete(self, slug):
        """POST handler: delete the object at *slug*."""
        self.delete_object(slug)
        return self.context
995,669 | 7258e1fc714b6d1fccbaf2ef91bf08b96dd32cc1 | import socket
def client():
    """Connect to the local demo server, send a greeting, print the reply.

    BUG FIX: the socket was never closed (resource leak); the ``with``
    block guarantees release even if connect/send/recv raises.
    """
    HOST = "127.0.0.1"
    PORT = 6666
    with socket.socket() as s:
        s.connect((HOST, PORT))
        s.send(b'hello word')
        msg = s.recv(1024)
    print("form server %s " % msg)
# Run the demo client only when executed as a script (not on import).
if __name__=='__main__':
    client()
|
995,670 | 12af87a2756d8f86cb688eaa0617114e3b7b5b92 | from flask import abort
from flask import Blueprint
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import Response
from flask import session
from flask import url_for
from flask_login import current_user
from pingpong.decorators.LoginRequired import loginRequired
from pingpong.forms.CourtesyForm import CourtesyForm
from pingpong.services.CourtesyService import CourtesyService
# Blueprint plus module-level service/form singletons shared by every route below.
courtesyController = Blueprint("courtesyController", __name__)
courtesyService = CourtesyService()
courtesyForm = CourtesyForm()
@courtesyController.route("/courtesies", methods = ["GET"])
def index():
    """Render the courtesies page; logged-in users see every entry,
    anonymous visitors only the approved ones."""
    office_id = session["office"]["id"]
    if current_user.is_authenticated:
        courtesies = courtesyService.select(office_id)
    else:
        courtesies = courtesyService.selectApproved(office_id)
    return render_template("courtesies/index.html", courtesies = courtesies)
@courtesyController.route("/courtesies.json", methods = ["GET"])
def index_json():
    """JSON variant of index(): same visibility rules, serialized payload."""
    office_id = session["office"]["id"]
    if current_user.is_authenticated:
        courtesies = courtesyService.select(office_id)
    else:
        courtesies = courtesyService.selectApproved(office_id)
    payload = courtesyService.serialize(courtesies)
    return Response(payload, status = 200, mimetype = "application/json")
@courtesyController.route("/courtesies/new", methods = ["GET"])
def new():
    """Show the blank courtesy creation form."""
    return render_template("courtesies/new.html", courtesy = courtesyService.new())
@courtesyController.route("/courtesies", methods = ["POST"])
def create():
    """Create a courtesy from the posted form, or re-render the form with 400
    when validation reports errors."""
    if courtesyForm.validate(request.form):
        # validation errors: echo the submitted values back into a fresh object
        courtesy = courtesyService.new()
        courtesyForm.load(courtesy, request.form)
        return render_template("courtesies/new.html", courtesy = courtesy), 400
    courtesy = courtesyService.create(session["office"]["id"], request.form)
    flash("Courtesy '{}' has been successfully created.".format(courtesy.text), "success")
    return redirect(url_for("courtesyController.index"))
@courtesyController.route("/courtesies/<int:id>/edit", methods = ["GET"])
def edit(id):
    """Show the edit form for courtesy *id*; 404 when it does not exist."""
    courtesy = courtesyService.selectById(id)
    if courtesy is None:  # idiomatic identity test instead of == None
        abort(404)
    return render_template("courtesies/edit.html", courtesy = courtesy)
@courtesyController.route("/courtesies/<int:id>", methods = ["POST"])
def update(id):
    """Validate and persist edits to courtesy *id*; 404 when missing,
    400 with the re-rendered form when validation fails."""
    courtesy = courtesyService.selectById(id)
    if courtesy is None:  # idiomatic identity test instead of == None
        abort(404)
    hasErrors = courtesyForm.validate(request.form)
    if hasErrors:
        courtesyForm.load(courtesy, request.form)
        return render_template("courtesies/edit.html", courtesy = courtesy), 400
    else:
        courtesy = courtesyService.update(id, request.form)
        flash("Courtesy '{}' has been successfully updated.".format(courtesy.text), "success")
        return redirect(url_for("courtesyController.index"))
@courtesyController.route("/courtesies/<int:id>/approve", methods = ["POST"])
@loginRequired("courtesyController.index")
def approve(id):
    """Mark courtesy *id* as approved (login required); 404 when missing."""
    courtesy = courtesyService.selectById(id)
    if courtesy is None:  # idiomatic identity test instead of == None
        abort(404)
    courtesyService.approve(courtesy)
    flash("Courtesy '{}' has been approved.".format(courtesy.text), "success")
    return redirect(url_for("courtesyController.index"))
@courtesyController.route("/courtesies/<int:id>/reject", methods = ["POST"])
@loginRequired("courtesyController.index")
def reject(id):
    """Mark courtesy *id* as rejected (login required); 404 when missing."""
    courtesy = courtesyService.selectById(id)
    if courtesy is None:  # idiomatic identity test instead of == None
        abort(404)
    courtesyService.reject(courtesy)
    flash("Courtesy '{}' has been rejected.".format(courtesy.text), "success")
    return redirect(url_for("courtesyController.index"))
@courtesyController.route("/courtesies/<int:id>/delete", methods = ["POST"])
@loginRequired("courtesyController.index")
def delete(id):
    """Delete courtesy *id* (login required); 404 when missing, warning flash
    when the service refuses the deletion."""
    courtesy = courtesyService.selectById(id)
    if courtesy is None:  # idiomatic identity test instead of == None
        abort(404)
    courtesy, success = courtesyService.delete(courtesy)
    if success:
        flash("Courtesy '{}' has been successfully deleted.".format(courtesy.text), "success")
    else:
        flash("Courtesy '{}' could not be deleted.".format(courtesy.text), "warning")
    return redirect(url_for("courtesyController.index"))
|
995,671 | 3243d512dfd11d5667a64ab1473c1dc4d476548f | /usr/share/pyshared/openerp/addons/document/document.py |
995,672 | ed614ecdd646f3b58916ba648c93d27d7f2f3e05 | # Generated by Django 3.1.4 on 2020-12-28 05:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the gestion app (auto-generated by Django 3.1.4).

    Creates objetivos, then Palancas (FK to objetivos) and Experimentos
    (FK to Palancas); both foreign keys cascade on delete.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='objetivos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(blank=True, max_length=600, null=True)),
                ('descripcion', models.CharField(blank=True, max_length=600, null=True)),
                ('valor', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Palancas',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(blank=True, max_length=600, null=True)),
                ('descripcion', models.CharField(blank=True, max_length=600, null=True)),
                ('valor', models.IntegerField()),
                ('objetivoId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gestion.objetivos')),
            ],
        ),
        migrations.CreateModel(
            name='Experimentos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(blank=True, max_length=600, null=True)),
                ('descripcion', models.CharField(blank=True, max_length=600, null=True)),
                ('valor', models.IntegerField()),
                ('PalancaId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gestion.palancas')),
            ],
        ),
    ]
995,673 | 8e3a3245eb53935b2ac1edf402a6fd0c933c6cf8 | import numpy as np
from tabulate import tabulate
def iteration(alpha, beta, x, eps):
    """Fixed-point (simple) iteration x <- alpha @ x + beta.

    Stops once the update residual drops to *eps* or after 500 steps,
    then applies one final update and returns (x, step_count).
    """
    step = 1
    residual = eps + 1
    while residual > eps and step < 500:
        updated = np.dot(alpha, x) + beta
        residual = np.linalg.norm(updated - x)
        x = updated
        step += 1
    return np.dot(alpha, x) + beta, step
def zeidel(A, b, eps):
    """Gauss-Seidel solver for A x = b, starting from the zero vector.

    Iterates until successive iterates differ by less than *eps* in the
    2-norm; returns (solution, iteration_count).
    """
    n = A.shape[0]
    x = np.zeros(b.shape[0])
    count = 0
    diff = eps + 1
    while diff > eps:
        nxt = x.copy()
        for row in range(n):
            # lower part uses already-updated entries, upper part the old ones
            lower = sum(A[row][col] * nxt[col] for col in range(row))
            upper = sum(A[row][col] * x[col] for col in range(row + 1, n))
            nxt[row] = (b[row] - lower - upper) / A[row][row]
        diff = np.linalg.norm(nxt - x)
        count += 1
        x = nxt
    return x, count
def calculate_alpha_beta(A, b):
    """Build the fixed-point form x = alpha @ x + beta of the system A x = b.

    Off-diagonal: alpha[i][j] = -A[i][j] / A[i][i]; diagonal entries are 0;
    beta[i] = b[i] / A[i][i].  (For a 1x1 system beta stays zero, mirroring
    the element-wise construction of the original code.)
    """
    n = A.shape[0]
    alpha = np.zeros((n, n))
    beta = np.zeros(b.shape[0])
    for row in range(n):
        for col in range(n):
            if row == col:
                alpha[row][col] = 0
            else:
                alpha[row][col] = -A[row][col] / A[row][row]
                beta[row] = b[row] / A[row][row]
    return alpha, beta
def iter_form(A):
    """Overwrite *A* in place with the Hilbert matrix H[i][j] = 1/(i+j+1).

    Returns the same array object for call-chaining convenience.
    """
    n = A.shape[0]
    for row in range(n):
        for col in range(n):
            A[row][col] = 1 / (row + col + 1)
    return A
# Test systems of sizes 2, 3 and 2; each is replaced by the Hilbert matrix
# of the same size before solving (iter_form overwrites in place).
A2 = np.array([[1, 1 / 2,],
               [1 / 2, 1 / 3]], dtype=float)
A3 = np.array([[1, 1 / 2, 1 / 3, ],
               [1 / 2, 1 / 3, 1 / 4],
               [1 / 3, 1 / 4, 1 / 5]], dtype=float)
A4 = np.array([[-500.7, 120.7],
               [890.3, -550.6]], dtype=float)

# Random exact solutions; b is manufactured so x* is known.
x2 = np.random.uniform(0, 100, size=A2.shape[0])
x4 = np.random.uniform(0, 100, size=A4.shape[0])
x3 = np.random.uniform(0, 100, size=A3.shape[0])

# BUG FIX: iter_form mutates its argument in place, so plain aliases
# (p_A2 = A2) ended up printing the Hilbert matrix instead of the
# original test matrix.  Copies preserve the originals for display.
p_A2 = A2.copy()
p_A3 = A3.copy()
p_A4 = A4.copy()

A2 = iter_form(A2)
A4 = iter_form(A4)
A3 = iter_form(A3)

b2 = np.dot(A2, x2)
b4 = np.dot(A4, x4)
b3 = np.dot(A3, x3)

alpha2, beta2 = calculate_alpha_beta(A2, b2)
alpha4, beta4 = calculate_alpha_beta(A4, b4)
alpha3, beta3 = calculate_alpha_beta(A3, b3)


def _report(matrix, alpha, beta, A, b, x_true):
    """Print *matrix* and a convergence table for both iterative solvers,
    one row per target tolerance (solvers run once per cell instead of the
    previous duplicated calls)."""
    rows = []
    for eps in (10**(-5), 10**(-8), 10**(-11), 10**(-14)):
        x_it, k_it = iteration(alpha, beta, beta, eps)
        x_ze, k_ze = zeidel(A, b, eps)
        rows.append([eps, k_it, k_ze,
                     np.linalg.norm(x_true - x_it),
                     np.linalg.norm(x_true - x_ze)])
    print(matrix)
    print(tabulate(rows,
                   headers=['Погрешность', '#Итерации простого',
                            '#Итерации Зейделя', '|x-x_pr|', '|x-x_zei|'],
                   tablefmt='orgtbl'))


_report(p_A4, alpha4, beta4, A4, b4, x4)
_report(p_A3, alpha3, beta3, A3, b3, x3)
_report(p_A2, alpha2, beta2, A2, b2, x2)
995,674 | bb874ec373c77789bbda7c76ed5c9f237084d0a7 |
class Protocol:
    """Base class for message codecs: subclasses override the two hooks below."""
    def __init__(self):
        pass
    def decodeMessage(self,*args,**kwargs):
        """Parse raw input into a message; no-op placeholder here."""
        pass
    def encodeMessage(self,*args,**kwargs):
        """Render a message into raw output; no-op placeholder here."""
        pass
995,675 | 7e09be1aa3185300cdcace9302d1910b17e370d4 | print “https://dataplatform.cloud.ibm.com/analytics/notebooks/v2/b98b218f-4492-4a29-842a-6eddc479001b/view?access_token=a68970d0e2205f734085612c6998864010927d9f79f6a8867b8d6aad5a266b10"
|
995,676 | fbeb50f795133a4fcc20f51215a3614a79ef1736 | # -*- coding: utf-8 -*-
from django.core.serializers import serialize
from django.db import models
from django.conf import settings
import json
from django.core.serializers.json import DjangoJSONEncoder
# Create your models here.
def upload_file(instance, filename):
    """Build the upload path for a person image: persons/<user>/<original name>.

    BUG FIX: the format string had lost its "{filename}" placeholder, so the
    passed *filename* keyword was ignored and every upload for a user mapped
    to the same literal path.
    """
    return "persons/{user}/{filename}".format(user=instance.UserName, filename=filename)
class PersonQuerySet(models.QuerySet):
    """QuerySet able to dump its rows as a JSON array."""

    def serialize(self):
        """Return the selected person fields as an indented JSON string
        (also echoes the raw row list to stdout, as before)."""
        fields = ('UserName', 'PersonId', 'PersonName',
                  'Person_Image', 'Person_sex', 'Person_BDate')
        rows = list(self.values(*fields))
        print(rows)
        return json.dumps(rows, sort_keys=True, indent=1, cls=DjangoJSONEncoder)
class PersonManager(models.Manager):
    """Default manager whose querysets expose the serialize() helper."""
    def get_queryset(self):
        # Return the custom queryset class bound to this manager's db alias.
        return PersonQuerySet(self.model,using=self._db)
class Person(models.Model):
    """A person record owned by an auth user, with optional photo."""
    # Owning user; deleting the auth user cascades to their Person rows.
    UserName = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,)
    PersonId = models.AutoField(primary_key=True)
    PersonName = models.CharField("person's first name", max_length=30,null=False)
    # Optional photo; target path is built by the module-level upload_file().
    Person_Image = models.ImageField(upload_to=upload_file,null=True, blank=True)
    SEX = (('M','Male'),('F','Female'), ('N','None'), )
    Person_sex = models.CharField(max_length=1,choices=SEX,null=False)
    Person_BDate = models.DateField(null=False)
    # Creation date, set automatically on first save only (auto_now_add).
    Person_CDate = models.DateField(null=False,auto_now_add=True)
    objects = PersonManager()
    def __str__(self):
        # Display name; falls back to "" when the name is empty.
        return str(self.PersonName) or ""
    def serialize(self):
        """Serialize this person to an indented JSON string.

        NOTE(review): UserName is a User instance and Person_Image a
        FieldFile; DjangoJSONEncoder has no handler for either, so
        json.dumps likely raises TypeError for saved rows -- confirm and
        convert (e.g. to pk / .name) if so.  The output key 'Person_Bdate'
        also differs in case from the field name Person_BDate; left as-is
        since consumers may depend on it.
        """
        data={
            'UserName': self.UserName,
            'PersonId': self.PersonId,
            'PersonName': self.PersonName,
            'Person_Image':self.Person_Image,
            'Person_sex': self.Person_sex,
            'Person_Bdate': self.Person_BDate
        }
        data = json.dumps(data,sort_keys=True,indent=1,cls=DjangoJSONEncoder)
        return data
    @property
    def owner(self):
        """Convenience alias for the owning user."""
        return self.UserName
995,677 | 03d876ed6018f9b83e2fd065ed871c804dbf65ff | import torch.nn as nn
import torch.nn.functional as F
from layers import GraphConvolution, InnerProductDecoder
import torch
class CONN(nn.Module):
    """Graph model that jointly embeds nodes and attributes and scores links
    with an MLP over layer-wise cross features.

    forward() returns nlayer+1 L2-normalized embeddings per endpoint (the raw
    embedding plus one per GraphConvolution layer); pred_logits() crosses the
    two endpoint lists pairwise, hence the (nlayer+1)^2 factor in the MLP
    input width.
    """
    def __init__(self, nfeat, nnode, nattri, nlayer, dropout, drop, hid1=512, hid2=128, act='relu'):
        super(CONN, self).__init__()
        self.latent_dim = nfeat  # embedding size shared by nodes and attributes
        self.decoder = InnerProductDecoder(nfeat, dropout)
        # In __dropout_x an edge is kept when rand() + dropout >= 1, so this
        # value effectively acts as the keep probability.
        self.dropout = dropout
        self.nlayer = nlayer  # number of stacked GraphConvolution layers
        self.drop = drop  # whether to sparsify the adjacency during training
        self.hid1 = hid1
        self.hid2 = hid2
        if act == 'relu':
            self.act = nn.ReLU()
        else:
            self.act = nn.PReLU()
        layer = []
        for i in range(self.nlayer):
            layer.append(GraphConvolution(nfeat, nfeat))
        self.gc1 = nn.ModuleList(layer)
        self.num_node = nnode
        self.num_attri = nattri
        # One trainable table per entity type; rows are concatenated in
        # forward(), so node ids come first and attribute ids follow.
        self.embedding_node = torch.nn.Embedding(
            num_embeddings=self.num_node, embedding_dim=self.latent_dim)
        self.embedding_attri = torch.nn.Embedding(
            num_embeddings=self.num_attri, embedding_dim=self.latent_dim)
        # prediction layer
        n_layer = (self.nlayer + 1) * (self.nlayer + 1)
        self.mlp1 = nn.Linear(n_layer * self.latent_dim, self.hid1, bias=False)
        self.mlp2 = nn.Linear(self.hid1, self.hid2, bias=False)
        self.mlp3 = nn.Linear(self.hid2, 1, bias=True)
        self.reset_parameters()
    def weights_init(self, m):
        # Xavier init helper for Linear layers (intended for self.apply()).
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)
    def reset_parameters(self):
        # Small-variance normal init for both embedding tables.
        torch.nn.init.normal_(self.embedding_node.weight, std=0.1)
        torch.nn.init.normal_(self.embedding_attri.weight, std=0.1)
    def forward(self, adj, pos_src, pos_dst, neg_src, neg_dst):
        """Return four lists of per-layer normalized embeddings for the
        positive and negative (src, dst) index tensors."""
        if self.training:
            if self.drop:
                adj = self.__dropout(adj)
        # Full embedding matrix: node rows stacked above attribute rows.
        x1 = torch.cat([self.embedding_node.weight, self.embedding_attri.weight], dim=0)
        src_emb = []
        dst_emb = []
        src_neg_emb = []
        dst_neg_emb = []
        # Layer 0: the raw (un-convolved) embeddings.
        src_emb.append(F.normalize(x1[pos_src], p=2, dim=1))
        dst_emb.append(F.normalize(x1[pos_dst], p=2, dim=1))
        src_neg_emb.append(F.normalize(x1[neg_src], p=2, dim=1))
        dst_neg_emb.append(F.normalize(x1[neg_dst], p=2, dim=1))
        for i, layer in enumerate(self.gc1):
            x1 = layer(x1, adj)
            src_emb.append(F.normalize(x1[pos_src], p=2, dim=1))
            dst_emb.append(F.normalize(x1[pos_dst], p=2, dim=1))
            src_neg_emb.append(F.normalize(x1[neg_src], p=2, dim=1))
            dst_neg_emb.append(F.normalize(x1[neg_dst], p=2, dim=1))
        return src_emb, dst_emb, src_neg_emb, dst_neg_emb
    def comute_hop_emb(self, src_adj, dst_adj, src_neg_adj, dst_neg_adj):
        # NOTE(review): self.gc2 is never created in __init__, so calling
        # this raises AttributeError -- appears to be dead/legacy code.
        if self.training:
            if self.drop:
                src_adj = self.__dropout(src_adj)
                dst_adj = self.__dropout(dst_adj)
                src_neg_adj = self.__dropout(src_neg_adj)
                dst_neg_adj = self.__dropout(dst_neg_adj)
        x1 = torch.cat([self.embedding_node.weight, self.embedding_attri.weight], dim=0)
        src_emb_2 = self.gc2(x1, src_adj)
        dst_emb_2 = self.gc2(x1, dst_adj)
        src_neg_emb_2 = self.gc2(x1, src_neg_adj)
        dst_neg_emb_2 = self.gc2(x1, dst_neg_adj)
        return [src_emb_2], [dst_emb_2], [src_neg_emb_2], [dst_neg_emb_2]
    def get_emb(self, node_index, adj):
        """Return the normalized embedding of *node_index* after the FIRST
        GC layer only (node_emb[1]); layer 0 and deeper layers are dropped."""
        x1 = torch.cat([self.embedding_node.weight, self.embedding_attri.weight], dim=0)
        node_emb = []
        node_emb.append(F.normalize(x1[node_index], p=2, dim=1))
        for i, layer in enumerate(self.gc1):
            x1 = layer(x1, adj)
            node_emb.append(F.normalize(x1[node_index], p=2, dim=1))
        return node_emb[1]
    def bi_cross_layer(self, x_1, x_2):
        # All pairwise element-wise products between the two embedding lists.
        bi_layer = []
        for i in range(len(x_1)):
            xi = x_1[i]
            for j in range(len(x_2)):
                xj = x_2[j]
                bi_layer.append(torch.mul(xi, xj))
        return bi_layer
    def cross_layer(self, src_x, dst_x):
        # Concatenate all pairwise products along the feature dimension.
        bi_layer = self.bi_cross_layer(src_x, dst_x)
        bi_layer = torch.cat(bi_layer, dim=1)
        return bi_layer
    def compute_logits(self, emb):
        # Three-layer MLP producing one raw logit per candidate pair.
        emb = self.mlp1(emb)
        emb = self.act(emb)
        emb = self.mlp2(emb)
        emb = self.act(emb)
        preds = self.mlp3(emb)
        return preds
    def pred_logits(self, src_emb, dst_emb, src_neg_emb, dst_neg_emb):
        """Score positive and negative pairs from their per-layer embeddings."""
        emb_pos = self.cross_layer(src_emb, dst_emb)
        emb_neg = self.cross_layer(src_neg_emb, dst_neg_emb)
        logits_pos = self.compute_logits(emb_pos)
        logits_neg = self.compute_logits(emb_neg)
        return logits_pos, logits_neg
    def pred_score(self, input_emb):
        # Inner-product decoder score squashed to (0, 1).
        preds = self.decoder(input_emb)
        return torch.sigmoid(preds)
    def __dropout(self, graph):
        graph = self.__dropout_x(graph)
        return graph
    def __dropout_x(self, x):
        """Randomly drop entries of a sparse tensor; an entry survives when
        rand() + self.dropout >= 1 (kept values are NOT rescaled)."""
        x = x.coalesce()
        size = x.size()
        index = x.indices().t()
        values = x.values()
        random_index = torch.rand(len(values)) + self.dropout
        # random_index = random_index.int().bool()
        random_index = random_index.int().type(torch.bool)
        index = index[random_index]
        # values = values[random_index]/self.dropout
        values = values[random_index]
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g
    def reg_loss(self):
        # NOTE(review): operator precedence divides only the attribute term
        # by (num_node + num_attri); dividing the whole sum may have been
        # intended -- confirm before changing.
        reg_loss = (1 / 2) * (self.embedding_node.weight.norm(2).pow(2) +
                              self.embedding_attri.weight.norm(2).pow(2) / float(self.num_node + self.num_attri))
        return reg_loss
class connTune(nn.Module):
    """Fine-tuning variant of CONN: identical GC stack and scoring MLP, but
    the embedding matrix x1 is supplied by the caller to forward()/get_emb()
    instead of being owned by this module.

    NOTE(review): comute_hop_emb, get_emb2 and reset_parameters reference
    self.embedding_node / self.embedding_attri / self.gc2, none of which is
    created in this class's __init__ -- calling them raises AttributeError;
    they appear to be copied-over dead code.
    """
    def __init__(self, nfeat, nnode, nattri, nlayer, dropout, drop, hid1=512, hid2=128, act='relu'):
        super(connTune, self).__init__()
        self.latent_dim = nfeat  # embedding size expected of the supplied x1
        self.decoder = InnerProductDecoder(nfeat, dropout)
        # Acts as the keep probability in __dropout_x (rand() + dropout >= 1).
        self.dropout = dropout
        self.nlayer = nlayer
        self.drop = drop
        self.hid1 = hid1
        self.hid2 = hid2
        if act == 'relu':
            self.act = nn.ReLU()
        else:
            self.act = nn.PReLU()
        layer = []
        for i in range(self.nlayer):
            layer.append(GraphConvolution(nfeat, nfeat))
        self.gc1 = nn.ModuleList(layer)
        self.num_node = nnode
        self.num_attri = nattri
        # (nlayer+1)^2 pairwise crosses of per-layer embeddings feed the MLP.
        n_layer = (self.nlayer + 1) * (self.nlayer + 1)
        self.mlp1 = nn.Linear(n_layer * self.latent_dim, self.hid1, bias=False)
        self.mlp2 = nn.Linear(self.hid1, self.hid2, bias=False)
        self.mlp3 = nn.Linear(self.hid2, 1, bias=True)
    def weights_init(self, m):
        # Xavier init helper for Linear layers (intended for self.apply()).
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)
    def reset_parameters(self):
        # NOTE(review): these embedding tables do not exist on connTune.
        torch.nn.init.normal_(self.embedding_node.weight, std=0.1)
        torch.nn.init.normal_(self.embedding_attri.weight, std=0.1)
    def forward(self, x1, adj, pos_src, pos_dst, neg_src, neg_dst):
        """As CONN.forward, but *x1* (the full embedding matrix) is an input."""
        if self.training:
            if self.drop:
                adj = self.__dropout(adj)
        src_emb = []
        dst_emb = []
        src_neg_emb = []
        dst_neg_emb = []
        # Layer 0: the raw supplied embeddings.
        src_emb.append(F.normalize(x1[pos_src], p=2, dim=1))
        dst_emb.append(F.normalize(x1[pos_dst], p=2, dim=1))
        src_neg_emb.append(F.normalize(x1[neg_src], p=2, dim=1))
        dst_neg_emb.append(F.normalize(x1[neg_dst], p=2, dim=1))
        for i, layer in enumerate(self.gc1):
            x1 = layer(x1, adj)
            src_emb.append(F.normalize(x1[pos_src], p=2, dim=1))
            dst_emb.append(F.normalize(x1[pos_dst], p=2, dim=1))
            src_neg_emb.append(F.normalize(x1[neg_src], p=2, dim=1))
            dst_neg_emb.append(F.normalize(x1[neg_dst], p=2, dim=1))
        return src_emb, dst_emb, src_neg_emb, dst_neg_emb
    def comute_hop_emb(self, src_adj, dst_adj, src_neg_adj, dst_neg_adj):
        # NOTE(review): dead code -- self.embedding_node/self.gc2 undefined here.
        if self.training:
            if self.drop:
                src_adj = self.__dropout(src_adj)
                dst_adj = self.__dropout(dst_adj)
                src_neg_adj = self.__dropout(src_neg_adj)
                dst_neg_adj = self.__dropout(dst_neg_adj)
        x1 = torch.cat([self.embedding_node.weight, self.embedding_attri.weight], dim=0)
        src_emb_2 = self.gc2(x1, src_adj)
        dst_emb_2 = self.gc2(x1, dst_adj)
        src_neg_emb_2 = self.gc2(x1, src_neg_adj)
        dst_neg_emb_2 = self.gc2(x1, dst_neg_adj)
        return [src_emb_2], [dst_emb_2], [src_neg_emb_2], [dst_neg_emb_2]
    def get_emb(self, x1, node_index, adj):
        """Return the FULL list of per-layer normalized embeddings of
        *node_index* (unlike CONN.get_emb, which returns only layer 1)."""
        node_emb = []
        node_emb.append(F.normalize(x1[node_index], p=2, dim=1))
        for i, layer in enumerate(self.gc1):
            x1 = layer(x1, adj)
            node_emb.append(F.normalize(x1[node_index], p=2, dim=1))
        return node_emb
    def get_emb2(self, adj):
        # NOTE(review): dead code -- self.embedding_node/self.gc2 undefined here.
        xx = torch.cat([self.embedding_node.weight, self.embedding_attri.weight], dim=0)
        node_emb = self.gc2(xx, adj)
        return node_emb
    def bi_cross_layer(self, x_1, x_2):
        # All pairwise element-wise products between the two embedding lists.
        bi_layer = []
        for i in range(len(x_1)):
            xi = x_1[i]
            for j in range(len(x_2)):
                xj = x_2[j]
                bi_layer.append(torch.mul(xi, xj))
        return bi_layer
    def cross_layer(self, src_x, dst_x):
        # Concatenate all pairwise products along the feature dimension.
        bi_layer = self.bi_cross_layer(src_x, dst_x)
        bi_layer = torch.cat(bi_layer, dim=1)
        return bi_layer
    def compute_logits(self, emb):
        # Three-layer MLP producing one raw logit per candidate pair.
        emb = self.mlp1(emb)
        emb = self.act(emb)
        emb = self.mlp2(emb)
        emb = self.act(emb)
        preds = self.mlp3(emb)
        return preds
    def pred_logits(self, src_emb, dst_emb, src_neg_emb, dst_neg_emb):
        """Score positive and negative pairs from their per-layer embeddings."""
        emb_pos = self.cross_layer(src_emb, dst_emb)
        emb_neg = self.cross_layer(src_neg_emb, dst_neg_emb)
        logits_pos = self.compute_logits(emb_pos)
        logits_neg = self.compute_logits(emb_neg)
        return logits_pos, logits_neg
    def pred_score(self, input_emb):
        # Inner-product decoder score squashed to (0, 1).
        preds = self.decoder(input_emb)
        return torch.sigmoid(preds)
    def __dropout(self, graph):
        graph = self.__dropout_x(graph)
        return graph
    def __dropout_x(self, x):
        """Randomly drop entries of a sparse tensor; an entry survives when
        rand() + self.dropout >= 1 (kept values are NOT rescaled)."""
        x = x.coalesce()
        size = x.size()
        index = x.indices().t()
        values = x.values()
        random_index = torch.rand(len(values)) + self.dropout
        # random_index = random_index.int().bool()
        random_index = random_index.int().type(torch.bool)
        index = index[random_index]
        # values = values[random_index]/self.dropout
        values = values[random_index]
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g
    def reg_loss(self):
        # NOTE(review): precedence divides only the attribute term by
        # (num_node + num_attri), and both embedding tables referenced here
        # are undefined on connTune -- dead code, confirm before use.
        reg_loss = (1 / 2) * (self.embedding_node.weight.norm(2).pow(2) +
                              self.embedding_attri.weight.norm(2).pow(2) / float(self.num_node + self.num_attri))
        return reg_loss
995,678 | 79351e1edf1c5cbff2addef6a7e1cf0a187ce63e | # %%
import math


def calcular(t):
    """Evaluate g(t) = exp(-t) * sin(pi*t) and print both the exponential
    factor and the product (returns None; output goes to stdout)."""
    decay = math.exp(-t)
    wave = math.sin(math.pi * t)
    print("EXPONENTE: ", decay)
    print("g(t): ", decay * wave)
    return


calcular(0)
calcular(1)

'''EXPONENTE: 1.0
g(t): 0.0
EXPONENTE: 0.36787944117144233
g(t): 4.505223801027239e-17'''
#%%
995,679 | 3a1439ed4d74d6a5ed1b7f84ce99cc8dd0249444 | """
This file builds the figures which demonstrate the training of a Gaussian process.
"""
# Figure geometry used for both plots below.
figwidth = 6 # 2.5
figheight = 6/1.616
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
cm = cmap.inferno
plt.style.use("../thesis-style.mpl")
import numpy as np
import scipy as sp
import theano
import theano.tensor as tt
import theano.tensor.nlinalg
import sys
#sys.path.insert(0, "../../..")
import pymc3 as pm
# Fixed seed so the synthetic data and figures are reproducible.
np.random.seed(200)
n = 150
# n sorted inputs on [0, 40), shaped (n, 1) for the GP covariance functions.
X = np.sort(40*np.random.rand(n))[:,None]
# define gp, true parameter values
with pm.Model() as model:
    l_per_true = 2
    cov_per = pm.gp.cov.Cosine(1, l_per_true)
    l_drift_true = 4
    cov_drift = pm.gp.cov.Matern52(1, l_drift_true)
    s2_p_true = 0.3
    s2_d_true = 1.5
    s2_w_true = 0.3
    # Signal = scaled periodic + scaled drift kernels; white noise on the diagonal.
    periodic_cov = s2_p_true * cov_per
    drift_cov = s2_d_true * cov_drift
    signal_cov = periodic_cov + drift_cov
    noise_cov = s2_w_true**2 * tt.eye(n)
    # Evaluate the symbolic covariance to a concrete matrix, then draw the
    # training targets from the corresponding multivariate normal.
    K = theano.function([], signal_cov(X, X) + noise_cov)()
    y = np.random.multivariate_normal(np.zeros(n), K)
# Plot and save the synthetic training data.
fig = plt.figure(figsize=(figwidth,figheight)); ax = fig.add_subplot(111)
#ax.plot(X, y, '--', color=cm(0.4))
ax.plot(X, y, '.');
ax.set_xlabel("x");
ax.set_ylabel("f(x)");
plt.tight_layout()
fig.savefig("../figures/gp-training-data.pdf")
# Inference model: uniform priors on lengthscales, log-uniform on variances.
with pm.Model() as model:
    # prior for periodic lengthscale, or frequency
    l_per = pm.Uniform('l_per', lower=1e-5, upper=10)
    # prior for the drift lengthscale hyperparameter
    l_drift = pm.Uniform('l_drift', lower=1e-5, upper=10)
    # uninformative prior on the periodic amplitude
    log_s2_p = pm.Uniform('log_s2_p', lower=-10, upper=5)
    s2_p = pm.Deterministic('s2_p', tt.exp(log_s2_p))
    # uninformative prior on the drift amplitude
    log_s2_d = pm.Uniform('log_s2_d', lower=-10, upper=5)
    s2_d = pm.Deterministic('s2_d', tt.exp(log_s2_d))
    # uninformative prior on the white noise variance
    log_s2_w = pm.Uniform('log_s2_w', lower=-10, upper=5)
    s2_w = pm.Deterministic('s2_w', tt.exp(log_s2_w))
    # the periodic "signal" covariance
    signal_cov = s2_p * pm.gp.cov.Cosine(1, l_per)
    # the "noise" covariance
    drift_cov = s2_d * pm.gp.cov.Matern52(1, l_drift)
    y_obs = pm.gp.GP('y_obs', cov_func=signal_cov + drift_cov, sigma=s2_w, observed={'X':X, 'Y':y})
# Sample with Metropolis across 4 chains; first 1000 draws are discarded below.
with model:
    trace = pm.sample(10000, tune=1000, step=pm.Metropolis(), njobs=4)#2000, step=pm.NUTS())#integrator="two-stage"))#, init=None)
# Trace plot with the known true values overlaid for comparison.
pm.traceplot(trace[1000:], varnames=['l_per', 'l_drift', 's2_d', 's2_p', 's2_w'],
             lines={"l_per": l_per_true,
                    "l_drift": l_drift_true,
                    "s2_d": s2_d_true,
                    "s2_p": s2_p_true,
                    "s2_w": s2_w_true});
#plt.show()
# Posterior predictive draws on a regular grid of test inputs.
Z = np.linspace(0, 40, 100).reshape(-1, 1)
with model:
    gp_samples = pm.gp.sample_gp(trace[1000:], y_obs, Z, samples=50, random_seed=42, progressbar=False)
fig, ax = plt.subplots(figsize=(figwidth,figheight))
[ax.plot(Z, x, color=cm(0.3), alpha=0.3) for x in gp_samples]
# overlay the observed data
ax.plot(X, y, '.');
ax.set_xlabel("x");
ax.set_ylabel("f(x)");
ax.set_title("Posterior predictive distribution");
plt.tight_layout()
plt.savefig("gp-posterior.pdf")
995,680 | 468761c107bcb50f86fbb1b64c8296be8793d72c | from pylab import plot, show, bar
# Sample series; plot() uses the list indices 0..len(y)-1 as x values,
# and show() blocks until the figure window is closed.
y = [1,3,6,9,11,21,5,8,3]
plot(y)
show()
"""
The board is 50x50 with the origin (0, 0) at the top-left corner.  We need
a function taking a direction and a step size that moves a game piece; the
piece's new coordinate also depends on where it was before, so a closure is
used to keep the current position alive between moves.
"""
# (memory leak: the system can no longer reclaim memory you hold;
#  memory overflow: you were granted 100 MB and asked for 101 MB)


def start_game(direction, step):
    """Create the mover closure, apply the initial (direction, step) move,
    and return the mover so later moves share the same position.

    *direction* is a 2-vector such as [1, 0] (x axis) or [0, 1] (y axis);
    -1 reverses an axis.  BUG FIX: previously the returned closure was never
    invoked, so calling start_game printed nothing and moved nothing.
    """
    origin_position = [0, 0]
    # Board bounds (informational only -- no clamping is performed yet).
    x_max_position = [0, 50]
    y_max_position = [0, 50]

    def player(direction, step):
        # New coordinate = previous coordinate + direction * step.
        new_x = origin_position[0] + direction[0] * step
        new_y = origin_position[1] + direction[1] * step
        origin_position[0] = new_x
        origin_position[1] = new_y
        print(origin_position)

    player(direction, step)
    return player


if __name__ == '__main__':
    # Reuse the returned closure so the second move continues from the first.
    player = start_game(direction=[1, 0], step=1)
    player(direction=[0, 1], step=2)
995,682 | 4fba6d82c6478bf4770cfff4b346032fce183b49 |
def logger(func):
    """Decorator: echo the call arguments to stdout, append args, kwargs and
    the result to text.txt, and pass the wrapped function's return value
    through unchanged."""
    def wrapper(*args, **kwargs):
        print('args: ', args, 'kwargs: ', kwargs)
        result = func(*args, **kwargs)
        with open('text.txt', 'a') as log_file:
            log_file.write(f'{args} \n')
            log_file.write(f'{kwargs} \n')
            log_file.write(f'result: {result} \n')
        return result
    return wrapper
@logger
def sum(a, b, c):
    """Return a + b + c.  (Shadows the builtin ``sum``; name kept for
    compatibility with existing callers.)"""
    subtotal = a + b
    return subtotal + c
@logger
def divide(c, d):
    """Return c / d; raises ZeroDivisionError when d is zero."""
    quotient = c / d
    return quotient
print('ededlerin cemi: ', sum(1, 2, c=4))
# BUG FIX: divide(c=15, d=0) raised an unhandled ZeroDivisionError and
# aborted the script before the final call; report the error instead.
try:
    print('ededlerin qistemi: ', divide(c=15, d=0))
except ZeroDivisionError as err:
    print('ededlerin qistemi: ', err)
print('ededlerin qistemi: ', divide(1, 2))
995,683 | 38b43a2fcdb74621a72f408c00a3ad5e131e99ae | '''Write a python program to find if a given year is a leap year or not'''
def leapYear(y):
    """Print whether *y* is a leap year and return the verdict as a bool.

    Gregorian rule: divisible by 4, except century years not divisible
    by 400.  Returning the boolean (previously an implicit None) keeps
    existing callers working while making the function usable in code.
    """
    is_leap = y % 4 == 0 and not (y % 100 == 0 and y % 400 != 0)
    if is_leap:
        print("{} is a leap year".format(y))
    else:
        print("{} is not a leap year".format(y))
    return is_leap
# Interactive entry point: prompt for a year and report the verdict.
year = int(input("Please enter the year:"))
leapYear(year)
|
995,684 | 3b9b3dfbd2af2b0333840d2c14e4c9c29e9d6895 | import sys
from collections import namedtuple
# Lightweight geometric records used by the overlap checks below.
Point = namedtuple("Point", "x y")
Rectangle = namedtuple("Rectangle", "left top right bottom")
def outSideCheck(a, b):
    """Return True when rectangles *a* and *b* are horizontally disjoint
    (one lies entirely to the left of the other)."""
    if b.right < a.left:
        return True
    return a.right < b.left
def outBottomUpCheck(a, b):
    """Return True when rectangles *a* and *b* are vertically disjoint
    (a's top below b's bottom, or a's bottom above b's top)."""
    if a.top < b.bottom:
        return True
    return a.bottom > b.top
def overlapRect(a, b):
    """Rectangles overlap exactly when they are separated on neither axis."""
    return not outSideCheck(a, b) and not outBottomUpCheck(a, b)
# Input file (first CLI argument): one test case per line, eight
# comma-separated ints -- x1,y1,x2,y2 of rectangle A then B, each given as
# upper-left corner followed by lower-right corner.
path = sys.argv[1]
with open(path, 'r') as inFile:
    for line in inFile:
        coord = list(map(int, line.rstrip().split(',')))
        upLeftA = Point(x=coord[0], y=coord[1])
        lowRightA = Point(x=coord[2], y=coord[3])
        rectA = Rectangle(left=upLeftA.x, top=upLeftA.y,
                          right=lowRightA.x, bottom=lowRightA.y)
        upLeftB = Point(x=coord[4], y=coord[5])
        lowRightB = Point(x=coord[6], y=coord[7])
        rectB = Rectangle(left=upLeftB.x, top=upLeftB.y,
                          right=lowRightB.x, bottom=lowRightB.y)
        # One boolean per line: do the two rectangles overlap?
        print(overlapRect(rectA, rectB))
|
995,685 | a26aa54ae77622cf3897cb358f6c3cab30f8f59f | import os
from datetime import datetime
import numpy as np
from msl.io import JSONWriter, read
from .. import __version__
from ..log import log
from ..constants import REL_UNC, DELTA_STR, SUFFIX, MU_STR
def num_to_eng_format(num):
    """Format *num* with an engineering-style suffix taken from SUFFIX.

    Divides by each SUFFIX scale in turn and returns the first rescaled
    value whose magnitude is below 1000, rounded to 3 places (e.g. "1.234 m").
    NOTE(review): correctness relies on SUFFIX iterating from the smallest
    scale upward — confirm the ordering in constants.SUFFIX. If no suffix
    brings the value below 1000 the function implicitly returns None.
    """
    for key, val in SUFFIX.items():
        renum = num/val
        if abs(renum) < 1000:
            eng_num = "{} {}".format(round(renum, 3), key)
            return eng_num
def filter_mass_set(masses, inputdata):
    """Takes a set of masses and returns a copy with only the masses included in the data which will be
    input into the final mass calculation.
    Uses Set type key to determine which other keys are present in the masses dictionary.
    Parameters
    ----------
    masses : dict
        mass set as stored in the Configuration class object (from AdminDetails)
    inputdata : numpy structured array
        use format np.asarray(<data>, dtype =[('+ weight group', object), ('- weight group', object),
        ('mass difference (g)', 'float64'), ('balance uncertainty (ug)', 'float64')])
    Returns
    -------
    dict of only the masses which appear in inputdata, or None if the set type is not recognised
    """
    # Flatten every weight group (e.g. "A+B") into its individual weight IDs.
    weightgroups = []
    for group in np.append(inputdata['+ weight group'], inputdata['- weight group']):
        weightgroups.extend(group.split('+'))
    # Shallow copy of the set-level metadata; the per-mass lists are rebuilt below.
    masses_new = dict(masses)
    if masses['Set type'] in ('Standard', 'Check'):
        to_append = ['Shape/Mark', 'Nominal (g)', 'Weight ID', 'mass values (g)', 'u_cal', 'uncertainties (' + MU_STR + 'g)', 'u_drift']
    elif masses['Set type'] == 'Client':
        to_append = ['Weight ID', 'Nominal (g)', 'Shape/Mark', 'Container',
                     'u_mag (mg)', 'Density (kg/m3)', 'u_density (kg/m3)']
    else:
        # BUG FIX: the original message said "must be 'std' or 'client'",
        # which are not the values actually tested above.
        log.error("Mass Set type not recognised: must be 'Standard', 'Check' or 'Client'")
        return None
    for key in to_append:
        masses_new[key] = []
    # add info for included masses only
    for i, item in enumerate(masses['Weight ID']):
        if item in weightgroups:
            for key in to_append:
                masses_new[key].append(masses[key][i])
    return masses_new
class FinalMassCalc(object):
    """Matrix least-squares calculation of mass values, following the Mathcad
    example in Tech proc MSLT.M.001.008. Each calculation step lazily runs
    its prerequisite step if that has not happened yet."""
    # relative uncertainty (ppm) applied when no buoyancy correction is made
    # (used in cal_rel_unc); module constant captured as a class attribute
    REL_UNC = REL_UNC
    def __init__(self, folder, client, client_masses, check_masses, std_masses, inputdata, nbc=True, corr=None):
        """Initialises the calculation of mass values using matrix least squares methods
        Parameters
        ----------
        folder : url
            folder in which to save json file with output data; ideally an absolute path
        client : str
            name of client
        client_masses : dict
            dict of client weights
            Weight IDs are the strings used in the circular weighing scheme
        check_masses : dict or None
            dict of check weights as for std_masses, or None if no check weights are used
        std_masses : dict
            keys: 'MASSREF file', 'Sheet name', 'Set name', 'Set type', 'Set identifier', 'Calibrated',
            'Shape/Mark', 'Nominal (g)', 'Weight ID', 'mass values (g)', 'u_cal', 'uncertainties (' + MU_STR + 'g)',
            'u_drift'
            Weight ID values must match those used in the circular weighing scheme
        inputdata : numpy structured array
            use format np.asarray(<data>, dtype =[('+ weight group', object), ('- weight group', object),
            ('mass difference (g)', 'float64'), ('balance uncertainty (ug)', 'float64')])
        Returns
        -------
        json file containing structured array of weight IDs, mass values, and uncertainties,
        along with a record of the input data and other relevant information
        """
        self.folder = folder
        self.client = client
        self.filesavepath = os.path.join(folder, client + '_finalmasscalc.json')
        metadata = {
            'Program Version': __version__,
            'Timestamp': datetime.now().isoformat(sep=' ', timespec='minutes'),
            "Client": client
        }
        self.finalmasscalc = JSONWriter(metadata=metadata)
        self.structure_jsonfile()
        self.client_masses = client_masses
        self.client_wt_IDs = client_masses["Weight ID"]
        self.check_masses = check_masses
        self.std_masses = std_masses
        self.inputdata = inputdata
        self.nbc = nbc    # True -> include the 'no buoyancy correction' uncertainty term
        self.corr = corr  # optional correlation matrix between the standards
        # Results below are populated progressively by the calculation steps.
        self.num_client_masses = None
        self.num_check_masses = None
        self.num_stds = None
        self.num_unknowns = None
        self.allmassIDs = None
        self.num_obs = None
        self.leastsq_meta = {}
        self.differences = np.empty(len(inputdata))
        self.uncerts = np.empty(len(inputdata))
        self.designmatrix = None
        self.inputdatares = None
        self.b = None
        self.psi_bmeas = None
        self.std_uncert_b = None
        self.summarytable = None
    def structure_jsonfile(self):
        """Create the '1: Mass Sets' group with Client/Check/Standard subgroups."""
        mass_sets = self.finalmasscalc.require_group('1: Mass Sets')
        mass_sets.require_group('Client')
        mass_sets.require_group('Check')
        mass_sets.require_group('Standard')
    def import_mass_lists(self, ):
        """Record the client/check/standard mass sets in the JSON root and derive
        the counts and combined ID list used to set up the least squares system."""
        # import lists of masses from supplied info
        log.info('Beginning mass calculation for the following client masses:\n' + str(self.client_wt_IDs))
        # get client Weight IDs for metadata
        self.num_client_masses = len(self.client_wt_IDs)
        self.finalmasscalc['1: Mass Sets']['Client'].add_metadata(**{
            'Number of masses': self.num_client_masses,
            'Weight ID': self.client_wt_IDs
        })
        # get number of check masses, if used, and save as dataset
        if not self.check_masses:
            self.num_check_masses = 0
            check_wt_IDs = []
            self.finalmasscalc['1: Mass Sets']['Check'].add_metadata(**{
                'Number of masses': self.num_check_masses,
                'Set identifier': 'No check set'})
            log.info('Checks: None')
        else:
            check_wt_IDs = self.check_masses['Weight ID']
            self.num_check_masses = make_stds_dataset('Checks', self.check_masses, self.finalmasscalc['1: Mass Sets']['Check'])
        # get number of standards, and save as dataset
        self.num_stds = make_stds_dataset('Standards', self.std_masses, self.finalmasscalc['1: Mass Sets']['Standard'])
        self.num_unknowns = self.num_client_masses + self.num_check_masses + self.num_stds
        log.info('Number of unknowns = '+str(self.num_unknowns))
        self.allmassIDs = np.append(np.append(self.client_wt_IDs, check_wt_IDs), self.std_masses['Weight ID'])
        # note that stds are grouped last
        self.num_obs = len(self.inputdata) + self.num_stds
        self.leastsq_meta['Number of observations'] = self.num_obs
        self.leastsq_meta['Number of unknowns'] = self.num_unknowns
        self.leastsq_meta['Degrees of freedom'] = self.num_obs - self.num_unknowns
    def parse_inputdata_to_matrices(self, ):
        """Build the design matrix and the difference/uncertainty vectors from
        inputdata, appending one extra row per standard carrying its reference value."""
        if self.allmassIDs is None:
            self.import_mass_lists()
        # Create design matrix and collect relevant data into differences and uncerts arrays
        designmatrix = np.zeros((self.num_obs, self.num_unknowns))
        rowcounter = 0
        log.debug('Input data: \n+ weight group, - weight group, mass difference (g), balance uncertainty (' + MU_STR + 'g)'
                  '\n' + str(self.inputdata))
        for entry in self.inputdata:
            log.debug("{} {} {} {}".format(entry[0], entry[1], entry[2], entry[3]))
            grp1 = entry[0].split('+')
            for m in range(len(grp1)):
                try:
                    log.debug('mass ' + grp1[m] + ' is in position ' + str(np.where(self.allmassIDs == grp1[m])[0][0]))
                    designmatrix[rowcounter, np.where(self.allmassIDs == grp1[m])] = 1
                except IndexError:
                    # a weight ID not present in allmassIDs leaves its column untouched
                    log.error("Index error raised at mass {}".format(grp1[m]))
            grp2 = entry[1].split('+')
            for m in range(len(grp2)):
                log.debug('mass ' + grp2[m] + ' is in position ' + str(np.where(self.allmassIDs == grp2[m])[0][0]))
                designmatrix[rowcounter, np.where(self.allmassIDs == grp2[m])] = -1
            self.differences[rowcounter] = entry[2]
            self.uncerts[rowcounter] = entry[3]
            rowcounter += 1
        for std in self.std_masses['Weight ID']:
            designmatrix[rowcounter, np.where(self.allmassIDs == std)] = 1
            rowcounter += 1
        self.differences = np.append(self.differences, self.std_masses['mass values (g)'])  # corresponds to Y, in g
        self.uncerts = np.append(self.uncerts, self.std_masses['uncertainties (' + MU_STR + 'g)'])  # balance uncertainties in ug
        log.debug('differences:\n' + str(self.differences))
        log.debug('uncerts:\n' + str(self.uncerts))
        self.designmatrix = designmatrix
    def check_design_matrix(self,):
        """Return True when every unknown takes part in at least one observation,
        i.e. no column of the design matrix is entirely zero."""
        if self.designmatrix is None:
            self.parse_inputdata_to_matrices()
        # double checks that all columns in the design matrix contain at least one non-zero value
        error_tally = 0
        for i in range(self.num_unknowns):
            sum = 0  # NOTE: shadows the builtin `sum`, but only inside this loop
            for r in range(self.num_obs):
                sum += self.designmatrix[r, i] ** 2
            if not sum:
                log.error(f"No comparisons in design matrix for {self.allmassIDs[i]}")
                error_tally += 1
        if error_tally > 0:
            return False
        return True
    def do_least_squares(self):
        """Solve the weighted least squares system for the mass values (self.b)
        and the measured variance-covariance matrix (self.psi_bmeas); also tabulate
        the input data with residuals. Returns False if the design matrix is invalid."""
        if not self.check_design_matrix():
            log.error("Error in design matrix. Calculation aborted")
            return False
        # Calculate least squares solution, following the mathcad example in Tech proc MSLT.M.001.008
        x = self.designmatrix
        xT = self.designmatrix.T
        # Hadamard product: element-wise multiplication
        uumeas = np.vstack(self.uncerts) * np.hstack(self.uncerts)  # becomes square matrix dim num_obs
        rmeas = np.identity(self.num_obs)
        if type(self.corr) == np.ndarray:  # Add off-diagonal terms for correlations
            for mass1 in self.std_masses['Weight ID']:
                i = np.where(self.std_masses['Weight ID'] == mass1)
                for mass2 in self.std_masses['Weight ID']:
                    j = np.where(self.std_masses['Weight ID'] == mass2)
                    rmeas[len(self.inputdata)+i[0], len(self.inputdata)+j[0]] = self.corr[i, j]
            log.debug(f'rmeas matrix includes correlations for stds:\n{rmeas[:, len(self.inputdata)-self.num_obs:]}')
        psi_y_hadamard = np.zeros((self.num_obs, self.num_obs))  # Hadamard product is element-wise multiplication
        for i in range(self.num_obs):
            for j in range(self.num_obs):
                if not rmeas[i, j] == 0:
                    psi_y_hadamard[i, j] = uumeas[i, j] * rmeas[i, j]
        psi_y_inv = np.linalg.inv(psi_y_hadamard)
        psi_bmeas_inv = np.linalg.multi_dot([xT, psi_y_inv, x])
        self.psi_bmeas = np.linalg.inv(psi_bmeas_inv)
        self.b = np.linalg.multi_dot([self.psi_bmeas, xT, psi_y_inv, self.differences])
        log.debug('Mass values before corrections:\n'+str(self.b))
        r0 = (self.differences - np.dot(x, self.b))*1e6  # residuals, converted from g to ug
        sum_residues_squared = np.dot(r0, r0)
        self.leastsq_meta['Sum of residues squared (' + MU_STR + 'g^2)'] = np.round(sum_residues_squared, 6)
        log.debug('Residuals:\n'+str(np.round(r0, 4)))  # also save as column with input data for checking
        inputdata = self.inputdata
        inputdatares = np.empty((self.num_obs, 5), dtype=object)
        # dtype =[('+ weight group', object), ('- weight group', object), ('mass difference (g)', object),
        # ('balance uncertainty (ug)', 'float64'), ('residual (ug)', 'float64')])
        inputdatares[0:len(inputdata), 0] = inputdata['+ weight group']
        inputdatares[len(inputdata):, 0] = self.std_masses['Weight ID']
        inputdatares[0:len(inputdata), 1] = inputdata['- weight group']
        inputdatares[:, 2] = self.differences
        inputdatares[:, 3] = self.uncerts
        inputdatares[:, 4] = np.round(r0, 3)
        self.inputdatares = inputdatares
    def check_residuals(self):
        """Record any observations whose residual exceeds twice the balance uncertainty."""
        if self.inputdatares is None:
            self.do_least_squares()
        # check that the calculated residuals are less than twice the balance uncertainties in ug
        flag = []
        for entry in self.inputdatares:
            if np.absolute(entry[4]) > 2 * entry[3]:
                flag.append(str(entry[0]) + ' - ' + str(entry[1]))
                # NOTE(review): logging's warn() is deprecated in favour of warning()
                log.warn(f"A residual for {entry[0]} - {entry[1]} is too large")
        if flag:
            self.leastsq_meta['Residuals greater than 2 balance uncerts'] = flag
    def cal_rel_unc(self, ):
        """Add buoyancy and magnetic uncertainty contributions to the measured
        variance-covariance matrix and compute the final standard uncertainties."""
        if self.b is None:
            self.do_least_squares()
        # Note: the 'psi' variables in this method are variance-covariance matrices
        ### Uncertainty due to buoyancy ###
        psi_buoy = np.zeros((self.num_unknowns, self.num_unknowns))
        # uncertainty due to no buoyancy correction
        if self.nbc:
            cmx1 = np.ones(self.num_client_masses + self.num_check_masses)  # from above, stds are added last
            cmx1 = np.append(cmx1, np.zeros(self.num_stds))  # 1's for unknowns, 0's for reference stds
            reluncert = self.REL_UNC  # relative uncertainty in ppm for no buoyancy correction: typ 0.03 or 0.1 (ppm)
            unbc = reluncert * self.b * cmx1  # weighing uncertainty in ug as vector of length num_unknowns.
            # Note: TP has * 1e-6 for ppm which would give the uncertainty in g
            uunbc = np.vstack(unbc) * np.hstack(unbc)  # square matrix of dim num_obs
            rnbc = np.identity(self.num_unknowns)  # TODO: add off-diagonals for any correlations
            # psi_nbc_hadamard = np.zeros((self.num_unknowns, self.num_unknowns))
            for i in range(self.num_unknowns):  # Here the Hadamard product is taking the diagonal of the matrix
                for j in range(self.num_unknowns):
                    if not rnbc[i, j] == 0:
                        psi_buoy[i, j] = uunbc[i, j] * rnbc[i, j]  # psi_nbc_hadamard in TP Mathcad calculation
            # TODO: buoyancy correction (not currently implemented)
            # The uncertainty in the buoyancy correction to a measured mass difference due to an
            # uncertainty uV in the volume of a weight is ρa*uV, where the ambient air density ρa is assumed
            # to be 1.2 kg m-3 for the purposes of the uncertainty calculation. TP9, p7, item 4
        else:
            reluncert = 0
        self.leastsq_meta['Relative uncertainty for no buoyancy correction (ppm)'] = reluncert
        ### Uncertainty due to magnetic effects ###
        # magnetic uncertainty
        psi_mag = np.zeros((self.num_unknowns, self.num_unknowns))
        for i, umag in enumerate(self.client_masses['u_mag (mg)']):
            if umag is not None:
                psi_mag[i, i] = (umag*1000)**2  # convert u_mag from mg to ug
                log.info(f"Uncertainty for {self.client_wt_IDs[i]} includes magnetic uncertainty of {umag} mg")
        ### Total uncertainty ###
        # Add all the squared uncertainty components and square root them to get the final std uncertainty
        psi_b = self.psi_bmeas + psi_buoy + psi_mag
        self.std_uncert_b = np.sqrt(np.diag(psi_b))  # there should only be diagonal components anyway
        # (TODO: check if valid with correlations)
        # det_varcovar_bmeas = np.linalg.det(psi_bmeas)
        # det_varcovar_nbc = np.linalg.det(psi_nbc_hadamard)
        # det_varcovar_b = np.linalg.det(psi_b)
    def make_summary_table(self, ):
        """Assemble the per-mass summary table: nominal, weight ID, set, mass value,
        uncertainty, 95 % CI, coverage factor, and reference-value comparison."""
        if self.std_uncert_b is None:
            self.cal_rel_unc()
        summarytable = np.empty((self.num_unknowns, 8), object)
        cov = 2  # coverage factor for the 95 % confidence interval
        for i in range(self.num_unknowns):
            summarytable[i, 1] = self.allmassIDs[i]
            if i < self.num_client_masses:
                summarytable[i, 2] = 'Client'
                summarytable[i, 7] = ""
            elif i >= self.num_client_masses + self.num_check_masses:
                summarytable[i, 2] = 'Standard'
                delta = self.b[i] - self.std_masses['mass values (g)'][i - self.num_client_masses - self.num_check_masses]
                summarytable[i, 7] = '{} g; {} {}'.format(
                    self.std_masses['mass values (g)'][i - self.num_client_masses - self.num_check_masses],
                    DELTA_STR,
                    num_to_eng_format(delta),
                )
            else:
                summarytable[i, 2] = 'Check'
                delta = self.b[i] - self.check_masses['mass values (g)'][i - self.num_client_masses]
                summarytable[i, 7] = '{} g; {} {}'.format(
                    self.check_masses['mass values (g)'][i - self.num_client_masses],
                    DELTA_STR,
                    num_to_eng_format(delta),
                )
            summarytable[i, 3] = np.round(self.b[i], 9)
            if self.b[i] >= 1:
                nom = str(int(round(self.b[i], 0)))
            else:
                nom = "{0:.1g}".format(self.b[i])
                if 'e-' in nom:
                    nom = 0
            summarytable[i, 0] = nom
            summarytable[i, 4] = np.round(self.std_uncert_b[i], 3)
            summarytable[i, 5] = np.round(cov * self.std_uncert_b[i], 3)
            summarytable[i, 6] = cov
        log.info('Found least squares solution')
        log.debug('Least squares solution:\nWeight ID, Set ID, Mass value (g), Uncertainty (' + MU_STR + 'g), 95% CI\n' + str(
            summarytable))
        self.summarytable = summarytable
    def add_data_to_root(self, ):
        """Write the least squares inputs/residuals and the summary table into the JSON root."""
        if self.summarytable is None:
            self.make_summary_table()
        leastsq_data = self.finalmasscalc.create_group('2: Matrix Least Squares Analysis', metadata=self.leastsq_meta)
        leastsq_data.create_dataset('Input data with least squares residuals', data=self.inputdatares,
                                    metadata={'headers':
                                              ['+ weight group', '- weight group', 'mass difference (g)',
                                               'balance uncertainty (' + MU_STR + 'g)', 'residual (' + MU_STR + 'g)']})
        leastsq_data.create_dataset('Mass values from least squares solution', data=self.summarytable,
                                    metadata={'headers':
                                              ['Nominal (g)', 'Weight ID', 'Set ID',
                                               'Mass value (g)', 'Uncertainty (' + MU_STR + 'g)', '95% CI', 'Cov',
                                               "Reference value (g)",
                                               ]})
    def save_to_json_file(self, filesavepath=None, folder=None, client=None):
        """Back up any previous output file, then save the JSON root.
        Arguments default to the values captured at construction time."""
        if not filesavepath:
            filesavepath = self.filesavepath
        if not folder:
            folder = self.folder
        if not client:
            client = self.client
        # make a backup of any previous version, then save root object to json file
        make_backup(folder, client, filesavepath, )
        self.finalmasscalc.save(filesavepath, mode='w')
        log.info('Mass calculation saved to {!r}'.format(filesavepath))
def make_backup(folder, client, filesavepath, ):
    """Back up an existing final-mass-calc JSON file before it is overwritten.

    If *filesavepath* exists, its contents are re-saved into
    <folder>/backups/<client>_finalmasscalc_backup<N>.json, where N is the
    number of files already present in the backup folder.
    """
    back_up_folder = os.path.join(folder, "backups")
    if os.path.isfile(filesavepath):
        existing_root = read(filesavepath)
        log.debug(back_up_folder)
        if not os.path.exists(back_up_folder):
            os.makedirs(back_up_folder)
        new_index = len(os.listdir(back_up_folder))  # counts number of files in backup folder
        new_file = os.path.join(back_up_folder, client + '_finalmasscalc_backup{}.json'.format(new_index))
        existing_root.read_only = False  # allow the loaded root to be written out again
        root = JSONWriter()
        root.set_root(existing_root)
        root.save(root=existing_root, file=new_file, mode='w', ensure_ascii=False)
        log.info('Backup of previous Final Mass Calc saved as {}'.format(new_file))
def make_stds_dataset(set_type, masses_dict, scheme):
    """Store a reference (standard or check) mass set as a dataset on *scheme*.

    Builds a structured array of weight IDs, nominal values, mass values and
    standard uncertainties, attaches set-level metadata, and returns the
    number of masses in the set.
    """
    weight_ids = masses_dict['Weight ID']
    num_masses = len(weight_ids)
    u_header = 'std uncertainties (' + MU_STR + 'g)'
    dataset = np.empty(num_masses, dtype=[
        ('Weight ID', object),
        ('Nominal (g)', float),
        ('mass values (g)', float),
        (u_header, float)
    ])
    dataset['Weight ID'] = weight_ids
    dataset['Nominal (g)'] = masses_dict['Nominal (g)']
    dataset['mass values (g)'] = masses_dict['mass values (g)']
    dataset[u_header] = masses_dict['uncertainties (' + MU_STR + 'g)']
    scheme.add_metadata(**{
        'Number of masses': num_masses,
        'Set identifier': masses_dict['Set identifier'],
        'Calibrated': masses_dict['Calibrated'],
        'Weight ID': weight_ids,
    })
    scheme.create_dataset('mass values', data=dataset)
    log.info(f"{set_type}: {weight_ids}")
    return num_masses
'''Extra bits that aren't used at the moment
var = np.dot(r0.T, r0) / (num_obs - num_unknowns)
log.debug('variance, \u03C3\u00b2, is:'+str(var.item(0)))
stdev = "{0:.5g}".format(np.sqrt(var.item(0)))
log.debug('residual standard deviation, \u03C3, is:'+stdev)
varcovar = np.multiply(var, np.linalg.inv(np.dot(xT, x)))
log.debug('variance-covariance matrix, C ='+str(varcovar))
det_varcovar = np.linalg.det(varcovar)
log.debug('determinant of variance-covariance matrix, det C ='+str(det_varcovar))
''' |
995,686 | 5eccae6e643f27e3e5224e8004205c0b1fc63c52 | from fastapi import FastAPI, Form
from fastapi.testclient import TestClient
app = FastAPI()
@app.post("/form/python-list")
def post_form_param_list(items: list = Form()):
return items
@app.post("/form/python-set")
def post_form_param_set(items: set = Form()):
return items
@app.post("/form/python-tuple")
def post_form_param_tuple(items: tuple = Form()):
return items
client = TestClient(app)
def test_python_list_param_as_form():
    """A repeated form field round-trips through /form/python-list unchanged."""
    payload = {"items": ["first", "second", "third"]}
    response = client.post("/form/python-list", data=payload)
    assert response.status_code == 200, response.text
    assert response.json() == ["first", "second", "third"]
def test_python_set_param_as_form():
    """Repeated form fields become a set via /form/python-set (order-insensitive)."""
    payload = {"items": ["first", "second", "third"]}
    response = client.post("/form/python-set", data=payload)
    assert response.status_code == 200, response.text
    assert set(response.json()) == {"first", "second", "third"}
def test_python_tuple_param_as_form():
    """Repeated form fields round-trip through /form/python-tuple as a JSON array."""
    payload = {"items": ["first", "second", "third"]}
    response = client.post("/form/python-tuple", data=payload)
    assert response.status_code == 200, response.text
    assert response.json() == ["first", "second", "third"]
|
995,687 | 4e0e4e1b2b711581ba826c730a7299a637726383 |
class update:
    """Plain namespace of configuration flags for an update run.
    NOTE(review): flag semantics are inferred from their names only —
    confirm against the code that reads them."""
    startOver = False   # presumably: restart processing instead of resuming
    fctDataPath = None  # presumably: path to the FCT data; None = not configured
    useCache = False    # presumably: reuse previously cached data when True
995,688 | 86f11623b798c7d0f907610ac9728210e63f422e | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Purpose: normalize irregularly-cased English names entered by the user
# into the standard form: first letter upper-case, the rest lower-case.
def normalize(name):
    """Return *name* with its first letter upper-cased and the rest lower-cased."""
    return name.capitalize()

L1 = ['adam', 'LISA', 'barT']
L2 = [normalize(entry) for entry in L1]
print(L2)
995,689 | fde31ae3a78158facefe00e237326ffe3cee6bff | # list
# Small tutorial script demonstrating basic list operations.
My_list = [1,2,3,4,56,67]
my_list2 = list(range(1,7))
print(My_list)
print(my_list2)
my_list3 = list(range(1, 50, 10))
# range(1, 50, 10) counts from 1 up to (not including) 50 in steps of 10
print(my_list3)
# operations on list :
# indexing: my_list2[4] is the fifth element; negative indices count from the end
print(my_list2[4])
print(my_list2[-2])
# slicing: elements at indices 2, 3 and 4 (the end index is exclusive)
print(my_list2[2:5])
# length of a list
print(len(my_list3))
# max / min of a list
print(max(My_list))
print(min(My_list))
# add element on to the end of our list
My_list.append(38)
print(My_list)
# reversing our list in place (reverse() mutates and returns None)
My_list.reverse()
print(My_list)
# concatenating lists with + builds a new list
print(My_list + my_list2 + my_list3)
print(len(my_list3))
|
995,690 | 90d53d2c2cd074080287f7449c8b3e91143fbc5d | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple
from ...types.querylang.queryset.dunderkey import dunder_get
from .. import QuerySetReader, BaseRecursiveDriver
if False:
from ...types.sets import DocumentSet
class SortQL(QuerySetReader, BaseRecursiveDriver):
    """Sorts the incoming documents by the value of a given field.
    It can also work in reverse mode
    Example::
        - !ReduceAllDriver
            with:
                traversal_paths: ['m']
        - !SortQL
            with:
                reverse: true
                field: 'score__value'
                traversal_paths: ['m']
        - !SliceQL
            with:
                start: 0
                end: 50
                traversal_paths: ['m']
    `SortQL` will ensure that only the documents are sorted by the score value before slicing the first top 50 documents
    """
    def __init__(self, field: str, reverse: bool = False, traversal_paths: Tuple[str] = ('r',), *args, **kwargs):
        """
        :param field: the value of the field drives the sort of the iterable docs
        :param reverse: sort the value from big to small
        :param traversal_paths: the traversal paths
        :param *args: *args
        :param **kwargs: **kwargs
        """
        super().__init__(traversal_paths=traversal_paths, *args, **kwargs)
        # Stored with a leading underscore; NOTE(review): `self.field` /
        # `self.reverse` used below are presumably exposed by QuerySetReader
        # from these attributes — confirm in QuerySetReader.
        self._reverse = reverse
        self._field = field
    def _apply_all(self, docs: 'DocumentSet', *args, **kwargs) -> None:
        """Sort *docs* in place by the dunder-resolved field value."""
        docs.sort(key=lambda x: dunder_get(x, self.field), reverse=self.reverse)
|
995,691 | cdb05864bebbe10ffa563542dd51d889af85b4c1 | # !/usr/bin/env python
"""
CLPSO.py
Description: the implemention of CLPSO
Refrence paper:
Liang, Jing J., et al. "Comprehensive learning particle swarm optimizer for global optimization of multimodal functions."
IEEE transactions on evolutionary computation 10.3 (2006): 281-295.
Member variables:
Name: CLPSO
FERuntime: time for fitness evaluation
FENum: number of fitness evaluation
runtime: time for whole algorithm
optimalX: optimal solution for problem
optimalY: optimal value for problem
convergeCurve: the procedure of convergence
convergeCurveInterval: inverval between two saved points
w(weight): default w_max = 0.9, w_min = 0.4
learningRate: default = 1.4995
refreshingGap: default = 7
Member function:
setParameters(weight, learningRate): setting parameters
optimize(cfp, ap, printLog): the main process of optimization
cfp: config for continue function parameters
ap: config for algorithm parameters
printLog: determine whether to print Log after opitmization
(true default)
Example:
agent = CLPSO()
agent.optimize(cfp, ap, printLog=True) # cfp ap need to config at first
"""
from function import continueFunction as cF
import numpy as np
import time
import sys
import copy
class CLPSO:
    """Comprehensive Learning Particle Swarm Optimizer (Liang et al., 2006).

    See the module docstring for attribute descriptions and a usage example.
    """
    def __init__(self):
        self.Name = "CLPSO"
        self.FERuntime = 0  # accumulated time spent in fitness evaluations (s)
        self.FENum = 0      # number of fitness evaluations performed
        self.setParameters()
    def setParameters(self, weight=(0.9, 0.4), learningRate=1.4995, refreshingGap=7):
        """Configure inertia weight bounds (w_max, w_min), acceleration constant c,
        and refreshing gap m.

        The default weight is an immutable tuple (the original used a mutable
        list default, a classic Python pitfall); callers may still pass a list.
        """
        self.w = weight
        self.learningRate = learningRate
        self.refreshingGap = refreshingGap
    def optimize(self, cfp, ap, printLog=True):
        """Run the optimization; *cfp* holds function settings, *ap* algorithm settings."""
        # BUG FIX: time.clock() was removed in Python 3.8; time.perf_counter()
        # is the documented replacement for elapsed-time measurement.
        runtimeStart = time.perf_counter()
        self.mainLoop(cfp, ap, printLog)
        self.runtime = time.perf_counter() - runtimeStart
    def mainLoop(self, cfp, ap, printLog):
        """Core CLPSO loop: initialise the swarm, then iterate until ap.FEMax
        fitness evaluations have been spent."""
        np.random.seed(ap.initialSeed)
        popSize = ap.populationSize
        Dim = cfp.funcDim
        function = getattr(cF, cfp.funcName)
        # replicate the per-dimension bounds for every particle
        lowerBoundX = np.kron(np.ones((popSize, 1)), cfp.funcLowerBound)
        upperBoundX = np.kron(np.ones((popSize, 1)), cfp.funcUpperBound)
        lowerInitBoundX = np.kron(np.ones((popSize, 1)), cfp.funcInitLowerBound)
        upperInitBoundX = np.kron(np.ones((popSize, 1)), cfp.funcInitUpperBound)
        upperBoundV = 0.2 * (upperBoundX - lowerBoundX)
        lowerBoundV = -1 * upperBoundV
        # initial X position and velocity
        X = (upperInitBoundX - lowerInitBoundX) * \
            np.random.random_sample((popSize, Dim)) + lowerInitBoundX
        V = (upperBoundV - lowerBoundV) * \
            np.random.random_sample((popSize, Dim)) + lowerBoundV
        start = time.perf_counter()
        y = function(X)
        self.FERuntime += (time.perf_counter() - start)
        self.FENum += popSize
        # initialize personal best X and y
        personBestX, personBestY = copy.deepcopy(X), copy.deepcopy(y)
        # initialize global best X and y
        gBestX, gBestY = X[np.argmin(y), :], np.min(y)
        self.convergeCurve = [y[0], gBestY]
        # initialize exemplar positions learned by each particle
        learnX = copy.deepcopy(personBestX)
        # learning probability Pc_i: small for the first particle, approaching
        # 0.5 for the last (eq. from the reference paper)
        learnProbability = 0.05 + 0.45 * \
            (np.exp(10 * (np.array(range(1, popSize+1)) - 1) /
                    (popSize - 1)) - 1) / (np.exp(10) - 1)
        refreshGaps = np.zeros((popSize, 1))
        maxGen, gen = ap.iterationMax, 0
        while self.FENum < ap.FEMax:
            # linearly decreasing inertia weight
            wk = self.w[0] - (self.w[0] - self.w[1]) * gen / maxGen
            for pi in range(popSize):
                # allow the particle to learn from the exemplar until the particle
                # stops improving for a certain number of generations
                if refreshGaps[pi] >= self.refreshingGap:
                    refreshGaps[pi] = 0
                    learnFlag = False  # True once any dimension learns from another particle
                    for fd in range(Dim):
                        if np.random.random() < learnProbability[pi]:
                            # tournament of two random particles: learn from the fitter
                            aOrb = np.random.permutation(popSize)
                            if personBestY[aOrb[1]] < personBestY[aOrb[0]]:
                                learnX[pi, fd] = personBestX[aOrb[1], fd]
                            else:
                                learnX[pi, fd] = personBestX[aOrb[0], fd]
                            # BUG FIX: the original assigned `learningFlag = True`
                            # (a typo), so learnFlag stayed False and the fallback
                            # below always overwrote one learned dimension.
                            learnFlag = True
                        else:
                            learnX[pi, fd] = personBestX[pi, fd]
                    # make sure the particle learns from at least one other
                    # particle for one random dimension
                    if not learnFlag:
                        fd = np.random.randint(Dim)
                        aOrb = np.random.permutation(popSize)
                        if aOrb[0] == pi:
                            exemplar = aOrb[1]
                        else:
                            exemplar = aOrb[0]
                        learnX[pi, fd] = personBestX[exemplar, fd]
                # update and clamp velocity
                V[pi, :] = wk * V[pi, :] + self.learningRate * \
                    np.random.random_sample(
                        (1, Dim)) * (learnX[pi, :] - X[pi, :])
                V[pi, :][V[pi, :] < lowerBoundV[pi, :]
                         ] = lowerBoundV[pi, :][V[pi, :] < lowerBoundV[pi, :]]
                V[pi, :][V[pi, :] > upperBoundV[pi, :]
                         ] = upperBoundV[pi, :][V[pi, :] > upperBoundV[pi, :]]
                # update position
                X[pi, :] = X[pi, :] + V[pi, :]
                # evaluate (and update bests) only if the particle stayed in bounds
                if (X[pi, :] > lowerBoundX[pi, :]).all() & (X[pi, :] < upperBoundX[pi, :]).all():
                    start = time.perf_counter()
                    y[pi] = function(X[pi, :][np.newaxis, :])
                    self.FERuntime += (time.perf_counter() - start)
                    self.FENum += 1
                    if y[pi] < personBestY[pi]:
                        personBestX[pi, :] = X[pi, :]
                        personBestY[pi] = y[pi]
                        refreshGaps[pi] = 0
                        if personBestY[pi] < gBestY:
                            gBestX = personBestX[pi, :]
                            gBestY = personBestY[pi]
                    else:
                        refreshGaps[pi] += 1
                    if self.FENum % popSize == 0:
                        self.convergeCurve.append(gBestY)
            gen = gen + 1
        self.optimalX = gBestX
        self.optimalY = gBestY
        # keep the original (misspelled) attribute for backward compatibility
        # and also expose the name used in the module docstring
        self.convergeCurveIntrval = popSize
        self.convergeCurveInterval = popSize
        if printLog:
            # summary
            print('$--------Result--------$\n')
            print('*Function: {0}\tDimension: {1}\t FEMax: {2}\n'.format(cfp.funcName, cfp.funcDim, self.FENum))
            print('Optimal Y : {0} \n'.format(self.optimalY))
|
995,692 | 91a30f195f7f1f9d88f447b050188196f5b507d6 | #! /usr/bin/python
# Filename: Str_cont.py
#
# Story from paultyma.blogspot.jp/2010/11/google-interviewing-story.html
#
# Example: String1: ABCDEFGHLMNOPQRS String1: ABCDEFGHLMNOPQRS
# String2: DCGSRQPOM String2: DCGSRQPOZ
# Output: true Output: false
#
#
# Best complexity: o(m+n) m:len(String1) & n:len(String2) and it is much better than hash-table solution when there are repeated letters...
#
# Notice: you must input the string in upper case!
# Each upper-case letter maps to a distinct prime, so a product of
# letter-primes uniquely encodes a multiset of letters (handles repeats,
# unlike a plain set-based check).
_PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
           53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101]
dst = dict(zip('ABCDEFGHIJKLMNOPQRSTUVWXYZ', _PRIMES))


def contains_all(big, small):
    """Return True when every letter of *small* (with multiplicity) occurs in *big*.

    Both strings must be upper-case A-Z. O(m+n) multiplications/divisions.
    """
    pro = 1
    for ch in big:
        pro *= dst[ch]
    for ch in small:
        # BUG FIX: the original divided first and THEN tested divisibility on
        # the already-divided (floored) value, which reported 'false' even for
        # genuine containment. Divisibility must be checked BEFORE dividing.
        if pro % dst[ch] != 0:
            return False
        pro //= dst[ch]
    return True


String1 = 'ABCDEFGHLMNOPQRS'
String2 = 'DCGSRQPOZ'

if contains_all(String1, String2):
    print('true')
else:
    print('false')
|
995,693 | 522153553cb78501acc788a70c839d9abd8987e3 | import pymysql
# MySQL connection demo: fetch rows from the `users` table in several ways.
conn = pymysql.connect(host='localhost', user='python', password='qwer1234',
db='python_app1',charset='utf8')
try:
    with conn.cursor() as c: # conn.cursor(pymysql.cursors.DictCursor) would return rows as dicts
        c.execute("select * from users")
        # one row
        # print(c.fetchone())
        # a chosen number of rows
        # print(c.fetchmany(3))
        # all rows
        # print(c.fetchall())
        # iterate (ascending)
        c.execute("select * from users order by id ASC")
        rows = c.fetchall()
        for row in rows:
            print('usage1 > ', row)
        # iterate (descending)
        c.execute("select * from users order by id DESC")
        rows = c.fetchall()
        for row in rows:
            print('usage2 > ', row)
        # parameterized query 1: tuple parameter (safe placeholder style)
        param1 = (1,)
        c.execute("select * from users where id=%s", param1)
        print('param1',c.fetchall())
        # query 2: string interpolation into SQL
        # SECURITY NOTE: %-formatting values into SQL is vulnerable to
        # injection for untrusted input — prefer the %s placeholder form above.
        param2 = 1
        c.execute("select * from users where id='%d'" %param2)
        print('param1',c.fetchall())
        # parameterized query 3: multiple placeholders
        param3 = 4,5
        c.execute("select * from users where id in(%s,%s)", param3)
        print('param1',c.fetchall())
        # query 4: interpolated IN list (same injection caveat as above)
        c.execute("select * from users where id in('%d','%d')" %(4,5))
        print('param1',c.fetchall())
finally:
    conn.close()
|
995,694 | 3ec6370865d157326eb2459c286496cc28244e0b | import subprocess
import os
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--files', type=str, help='Comma separated test filenames')
args = parser.parse_args()
test_files = {x for x in os.listdir() if re.search(r'.*_test.py', x)}
if args.files:
arg_files = set(args.files.split(','))
test_files = test_files.intersection(arg_files)
if test_files != arg_files:
print('Invalid Files:', ', '.join(arg_files-test_files))
parser.print_help()
else:
for test in test_files:
subprocess.Popen(f'pytest {test}', subprocess.STARTF_USESTDHANDLES | subprocess.STARTF_USESHOWWINDOW).communicate()
else:
subprocess.Popen(f'pytest', subprocess.STARTF_USESTDHANDLES | subprocess.STARTF_USESHOWWINDOW).communicate()
|
995,695 | 959bcff418c448d478f9b1f22a75ab6e25fde2d5 | import time
import torch
import numpy as np
import pandas as pd
from torch.optim import Adam
from options import *
from Encoder import *
from Decoder import *
from util import *
def inference(opt, encoder, decoder, test_loader):
    """Run beam-search decoding over *test_loader* and return predicted strings.

    Puts both networks in eval mode, encodes each utterance, flattens the
    encoder's final hidden/cell states for the decoder, beam-searches a label
    sequence and maps it to characters via `letter_list`.
    NOTE(review): appends exactly one prediction per batch — presumably relies
    on batch_size == 1 as configured in __main__; confirm before changing.
    """
    encoder.eval()
    decoder.eval()
    result = []
    for batch_idx, (utterances, u_lens) in enumerate(test_loader):
        # swap the first two tensor dimensions before encoding
        # (presumably batch-first -> time-first for the RNN — confirm Encoder)
        utterances = utterances.permute(1, 0, 2)
        utterances = utterances.to(opt.device)
        u_lens = u_lens.to(opt.device)
        outs, out_lens, hidden = encoder(utterances, u_lens)
        # move the batch dimension first, then flatten the remaining two
        # dimensions of each hidden/cell state into one vector per batch item
        hidden = (hidden[0].permute(1, 0, 2), hidden[1].permute(1, 0, 2))
        hidden = (hidden[0].reshape(hidden[0].size(0), -1), hidden[1].reshape(hidden[1].size(0), -1))
        outs = outs.permute(1, 0, 2)
        predict_labels = decoder.BeamSearch(outs, lens = out_lens, hidden = hidden)
        # map predicted label indices to characters
        tmp_res = ''
        for i in predict_labels:
            tmp_res += letter_list[i]
        print(batch_idx, tmp_res)
        result.append(tmp_res)
        # free per-batch tensors eagerly to limit GPU memory growth
        del utterances
        del u_lens
        del outs
        del out_lens
        torch.cuda.empty_cache()
    return result
# def inference(opt, encoder, decoder, test_loader):
# encoder.eval()
# decoder.eval()
# result = []
# for batch_idx, (utterances, u_lens) in enumerate(test_loader):
# utterances = utterances.permute(1, 0, 2)
# utterances = utterances.to(opt.device)
# u_lens = u_lens.to(opt.device)
# outs, out_lens, hidden = encoder(utterances, u_lens)
# hidden = (hidden[0].permute(1, 0, 2), hidden[1].permute(1, 0, 2))
# hidden = (hidden[0].reshape(hidden[0].size(0), -1), hidden[1].reshape(hidden[1].size(0), -1))
# outs = outs.permute(1, 0, 2)
# predict_labels = decoder.Greedy(outs, lens = out_lens, hidden = hidden)
# predict_labels = predict_labels.permute(0, 2, 1)
# result.append(predict_labels)
# del utterances
# del u_lens
# del outs
# del out_lens
# torch.cuda.empty_cache()
# return result
if __name__ == '__main__':
    # Load options, test data, and the latest saved encoder/decoder weights,
    # run inference, and write the predictions to submission.csv.
    opt = BaseOptions().parser.parse_args()
    speech_test = np.load(opt.dataroot + 'test_new.npy', allow_pickle=True, encoding='bytes')
    encoder = Encoder(opt)
    decoder = Decoder(opt)
    encoder.load_state_dict(torch.load('./' + opt.model_name + '/encoder_latest.pt'))
    decoder.load_state_dict(torch.load('./' + opt.model_name + '/decoder_latest.pt'))
    encoder.to(opt.device)
    decoder.to(opt.device)
    # NOTE(review): criterion is constructed but never used during inference
    criterion = nn.CrossEntropyLoss(reduction = 'none')
    criterion.to(opt.device)
    test_data = TestDataset(speech_test)
    # batch_size=1: inference() appends one prediction per batch
    test_loader_args = dict(shuffle=False, batch_size = 1, pin_memory=True, collate_fn = collate_fn_test)
    test_loader = Data.DataLoader(test_data, **test_loader_args)
    result = inference(opt, encoder, decoder, test_loader)
    # tmp_result = transform_index_to_letter(tmp_result)
    # result = []
    # for utterance in tmp_result:
    #     for word_idx in range(len(utterance)):
    #         if utterance[word_idx] == '<':
    #             break
    #     result.append(utterance[:word_idx - 1])
    dataframe = pd.DataFrame({'Id':[i for i in range(len(result))],'Predicted':result})
    dataframe.to_csv("submission.csv", index=False)
|
995,696 | 60579df55b7342f11938e9150d04a57ee8c6474c | from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import sys
import pickle
# split a string into tokens based on a given character
def parse(str1, char):
    """Split *str1* on *char* and return the list of tokens."""
    return str1.split(char)
# Build the training inputs (x, y) and targets z.
# Values are read from x.csv, y.csv and z.csv as flat, comma-separated floats.

def _read_floats(path):
    """Return every comma-separated float found in *path* as a flat list."""
    values = []
    # `with` guarantees the file handle is closed (the original left three
    # near-identical read loops with explicit open/close).
    with open(path, 'r') as fh:
        for line in fh:
            for num in parse(line, ','):
                values.append(float(num))
    return values

xs = _read_floats('x.csv')
ys = _read_floats('y.csv')
Z = _read_floats('z.csv')
size = len(xs)

# Normalize the inputs to a 0-1 scale.
# NOTE(review): assumes xs lies in [-1, 3] and ys in [-3, 1] — confirm
# against the data. A scikit-learn scaler would also work, but this keeps
# the original behavior.
for i in range(size):
    xs[i] = (xs[i] + 1.) / 4.0
    ys[i] = (ys[i] + 3.) / 4.0

# Cartesian product of xs and ys: one [x, y] pair per grid point.
# The original built this with repeated list concatenation (`XY = XY + [...]`),
# which copies the whole list each step (O(n^2)); a comprehension produces
# the identical list in one pass.
XY = [[xs[i], ys[k]] for i in range(size) for k in range(size)]

XY_train, XY_test, Z_train, Z_test = train_test_split(XY, Z, test_size=0.5, random_state=42)

model = MLPRegressor(hidden_layer_sizes=(15, 7), solver='adam', learning_rate='invscaling',
                     random_state=42, max_iter=500, early_stopping=True)
model.fit(XY_train, Z_train)

# Persist the fitted model so it can be reloaded without retraining.
filename = 'mlpModel.sav'
with open(filename, 'wb') as fh:
    pickle.dump(model, fh)

# One batched predict call is equivalent to the original per-sample loop
# but avoids len(XY_test) separate predict() invocations.
Z_predicted = list(model.predict(XY_test))
print('r2_score on test data: ')
print(r2_score(Z_test, Z_predicted))
|
995,697 | b4a93369065aab23a7134539b9b56b6301558ed0 | from hmmlearn.hmm import GaussianHMM
import matplotlib.pyplot as plt
import numpy as np
# Build a 3-state hidden Markov model with 2-D Gaussian emissions whose
# parameters are set by hand (no fitting), then sample and plot a trajectory.
# Here n_components correspond to number of states in the hidden
# variables.
model_gaussian = GaussianHMM(n_components=3, covariance_type='full')
# Transition probability as specified above (each row sums to 1).
transition_matrix = np.array([[0.2, 0.6, 0.2],
                              [0.4, 0.3, 0.3],
                              [0.05, 0.05, 0.9]])
# Setting the transition probability directly on the model
# (bypasses fit(); all of transmat_/startprob_/means_/covars_ must be set).
model_gaussian.transmat_ = transition_matrix
# Initial state probability
initial_state_prob = np.array([0.1, 0.4, 0.5])
# Setting initial state probability
model_gaussian.startprob_ = initial_state_prob
# As we want to have a 2-D gaussian distribution the mean has to
# be in the shape of (n_components, 2)
mean = np.array([[0.0, 0.0],
                 [0.0, 10.0],
                 [10.0, 0.0]])
# Setting the mean
model_gaussian.means_ = mean
# As emission probability is a 2-D gaussian distribution, the covariance
# matrix for each state is a 2x2 matrix, so overall the covariance array
# has shape (n_components, 2, 2); here every state gets isotropic 0.5 * I.
covariance = 0.5 * np.tile(np.identity(2), (3, 1, 1))
model_gaussian.covars_ = covariance
# model.sample returns both observations as well as hidden states:
# the first return value is the observations, the second the hidden
# state sequence.
Z, X = model_gaussian.sample(100)
# Plotting the observations (note: unseeded — each run samples differently).
plt.plot(Z[:, 0], Z[:, 1], "-o", label="observations",
         ms=6, mfc="orange", alpha=0.7)
# Annotate each component's mean location with its state number.
for i, m in enumerate(mean):
    plt.text(m[0], m[1], 'Component %i' % (i + 1),
             size=17, horizontalalignment='center',
             bbox=dict(alpha=.7, facecolor='w'))
plt.legend(loc='best')
plt.show()
|
995,698 | aa4f681c011f798ef93bf77b8db4e5a46758297d | # -*- coding: utf-8 -*-
"""XDCGAN-MINST-RVL-FVL.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1UBOzY5n5KsevH-j3znDhaFUcSDXg66Bp
"""
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Activation, Flatten, Reshape
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D, Convolution2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
import random
import datetime
from tqdm import tqdm_notebook
import pickle
# Dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images.
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
#print(X_train.shape)
z_dim = 100  # size of the generator's input noise vector
import cv2
# Reshape to (N, 28, 28, 1) and rescale pixel values to [0, 1].
X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255
X_train = X_train[0:30000]  # use only half of the training set
nch = 20  # NOTE(review): unused below — kept for compatibility
g_input = Input(shape=[100])  # NOTE(review): unused below — kept for compatibility
# Generator: z (100) -> 7x7x112 -> 14x14x56 -> 28x28x1 via transposed convolutions.
adam = Adam(lr=0.0002, beta_1=0.5)
g = Sequential()
g.add(Dense(7*7*112, input_dim=z_dim))
g.add(Reshape((7, 7, 112)))
g.add(BatchNormalization())
g.add(Activation(LeakyReLU(alpha=0.2)))
g.add(Conv2DTranspose(56, 5, strides=2, padding='same'))
g.add(BatchNormalization())
g.add(Activation(LeakyReLU(alpha=0.2)))
g.add(Conv2DTranspose(1, 5, strides=2, padding='same', activation='sigmoid'))
g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
g.summary()
# Discriminator: 28x28x1 -> three strided conv blocks -> real/fake probability.
d = Sequential()
d.add(Conv2D(56, 5, strides=2, padding='same', input_shape=(28, 28, 1), activation=LeakyReLU(alpha=0.2)))
d.add(Conv2D(112, 5, strides=2, padding='same'))
# BUG FIX: the following BatchNormalization/Activation layers (and the one
# after the 224-filter conv) were originally added to the already-compiled
# generator `g` instead of the discriminator `d` being built here.
d.add(BatchNormalization())
d.add(Activation(LeakyReLU(alpha=0.2)))
d.add(Conv2D(224, 5, strides=2, padding='same'))
d.add(Activation(LeakyReLU(alpha=0.2)))
d.add(Flatten())
d.add(Dense(112, activation=LeakyReLU(alpha=0.2)))
d.add(Dense(1, activation='sigmoid'))
d.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
d.summary()
# Stacked GAN: freeze the discriminator so gan.train_on_batch only
# updates the generator's weights.
d.trainable = False
inputs = Input(shape=(z_dim, ))
hidden = g(inputs)
output = d(hidden)
gan = Model(inputs, output)
gan.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
gan.summary()
def plot_loss(losses):
    """Plot the discriminator/generator loss curves and save them to loss.png.

    Each entry of losses["D"] / losses["G"] is a (loss, accuracy) pair;
    only the loss component (index 0) is plotted.
    """
    disc_curve = [entry[0] for entry in losses["D"]]
    gen_curve = [entry[0] for entry in losses["G"]]
    plt.figure(figsize=(6.4, 4.8))
    plt.plot(disc_curve, color='red', label="Discriminator loss")
    plt.plot(gen_curve, color='green', label="Generator loss")
    plt.title("GAN : MNIST dataset")
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('loss.png')
def plot_generated(n_ex=20, dim=(2, 10), figsize=(48, 8)):
    """Sample n_ex noise vectors, run the module-level generator `g`, and
    lay the outputs out on a dim[0] x dim[1] grid of subplots.

    Note: the imshow/imsave calls are commented out in the original, so the
    subplots are rendered blank; the structure is kept as-is.
    """
    noise = np.random.normal(0, 1, size=(n_ex, z_dim))
    images_2d = g.predict(noise).reshape(-1, 28, 28)
    plt.figure(figsize=figsize)
    for idx in range(images_2d.shape[0]):
        plt.subplot(dim[0], dim[1], idx + 1)
        #plt.imshow(images_2d[idx, :, :], interpolation='nearest', cmap='gray_r')
        name = str(idx)  # only used by the commented-out imsave call below
        #plt.imsave(name, images_2d[idx, :, :], cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.plot()
    plt.show()
# Containers for per-epoch training history, filled in by train().
losses = {"D":[], "G":[]}  # (loss, accuracy) pairs from the final batch of each epoch
samples = []   # generated image batches collected during training
pic_val = []   # autoencoder val-loss per epoch (printed as "FVL" in train())
pic_val2 = []  # second autoencoder val-loss per epoch (printed as "RVL" in train())
def train(d,epochs=1, plt_frq=1, BATCH_SIZE=128):
    """Adversarially train the module-level generator `g` against
    discriminator `d`, while tracking two dense-autoencoder validation
    losses per epoch (printed as FVL and RVL).

    Args:
        d: the compiled discriminator model.
        epochs: number of training epochs.
        plt_frq: plot/print every plt_frq epochs (and on epoch 1).
        BATCH_SIZE: samples per discriminator/generator batch.

    Returns:
        The generated-image batch from the final training step.
    """
    autoencoder =''
    autoencoder2 =''
    # Dense 784 -> 128 -> 64 -> 32 -> 64 -> 128 -> 784 autoencoder topology.
    input_img = Input(shape=(784,))
    encoded = Dense(128, activation='relu')(input_img)
    encoded = Dense(64, activation='relu')(encoded)
    encoded = Dense(32, activation='relu')(encoded)
    decoded = Dense(64, activation='relu')(encoded)
    decoded = Dense(128, activation='relu')(decoded)
    decoded = Dense(784, activation='sigmoid')(decoded)
    # NOTE(review): both models wrap the SAME layer graph, so they share
    # weights — fitting either one also updates the other. Confirm this is
    # intentional.
    autoencoder = Model(input_img, decoded)
    autoencoder2 = Model(input_img, decoded)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    autoencoder2.compile(optimizer='adadelta', loss='binary_crossentropy')
    #batchCount = int(X_train.shape[0] / BATCH_SIZE)
    batchCount=100
    print('Epochs:', epochs)
    print('Batch size:', BATCH_SIZE)
    print('Batches per epoch:', batchCount)
    d_v=[]  # discriminator references appended once per batch (see commented restore below)
    ttt=[]  # per-epoch elapsed-time samples
    for e in tqdm_notebook(range(1, epochs+1)):
        a222 = datetime.datetime.now()  # epoch start timestamp
        if e == 1 or e%plt_frq == 0:
            print('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in range(batchCount): # tqdm_notebook(range(batchCount), leave=False):
            # Create a batch by drawing random index numbers from the training set
            image_batch = X_train[np.random.randint(0, X_train.shape[0], size=BATCH_SIZE)]
            image_batch = image_batch.reshape(image_batch.shape[0], image_batch.shape[1], image_batch.shape[2], 1)
            #print(image_batch.shape)
            # Create noise vectors for the generator
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))
            # Generate the images from the noise
            generated_images = g.predict(noise)
            # Flatten real and fake batches to 784-vectors for the autoencoders.
            temp_test = image_batch.reshape(BATCH_SIZE,784)
            temp = generated_images.reshape(BATCH_SIZE,784)
            # Autoencoder 1: trained on fakes, validated on reals (-> FVL).
            history2=autoencoder.fit(temp ,temp,
                        epochs=1,
                        batch_size=256,
                        shuffle=True,verbose=0,
                        validation_data=(temp_test, temp_test))
            # Autoencoder 2: trained on reals, validated on fakes (-> RVL).
            history20=autoencoder2.fit(temp_test ,temp_test,
                        epochs=1,
                        batch_size=256,
                        shuffle=True,verbose=0,
                        validation_data=(temp, temp))
            val_loss = history2.history['val_loss']
            val_loss2 = history20.history['val_loss']
            samples.append(generated_images)
            # Stack reals and fakes into one discriminator batch.
            X = np.concatenate((image_batch, generated_images))
            # Create labels: reals get 0.9, fakes get 0.
            y = np.zeros(2*BATCH_SIZE)
            y[:BATCH_SIZE] = 0.9 # One-sided label smoothing
            # Train discriminator on real + generated images
            d.trainable = True
            d_loss = d.train_on_batch(X, y)
            d_v.append(d)  # NOTE(review): appends a reference, not a copy
            tempi=random.randint(0,e-1)
            #d=d_v[tempi]
            # Train generator: fakes labelled as real through the frozen discriminator.
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))
            y2 = np.ones(BATCH_SIZE)
            d.trainable = False
            g_loss = gan.train_on_batch(noise, y2)
        # Only store losses from final batch of epoch
        print('RVL : ' + str(val_loss2[len( val_loss)-1]) + '---' + 'FVL : ' + str(val_loss[len( val_loss)-1]) )
        b222 = datetime.datetime.now()
        c=b222-a222
        # NOTE(review): .microseconds is only the sub-second component of the
        # timedelta, not the total duration — confirm this is intended.
        ttt.append(c.microseconds)
        pic_val.append(val_loss[len( val_loss)-1])
        pic_val2.append(val_loss2[len( val_loss2)-1])
        losses["D"].append(d_loss)
        losses["G"].append(g_loss)
        # Update the plots
        if e == 1 or e%plt_frq == 0:
            plot_generated()
    print('time= ' + str(np.mean(ttt)))
    plot_loss(losses)
    return generated_images
# Run training, then persist the two validation-loss histories.
b = train(d, epochs=10, plt_frq=20, BATCH_SIZE=128)

# NOTE(review): pic_val2 holds the values printed as "RVL" in train() but is
# written to the *FVL* file, and vice versa — same as the original code;
# confirm which labelling is correct before relying on these files.
# `with` replaces the original open/dump/close triplets so the handles are
# closed even if pickling fails.
with open('XDC_FVL_ite_234', 'ab') as dbfile:
    pickle.dump(pic_val2, dbfile)
with open('XDC_RVL_ite_234', 'ab') as dbfile:
    pickle.dump(pic_val, dbfile)

# Print the generator loss recorded for each epoch.
for entry in losses["G"]:
    print(entry[0])
995,699 | 69b5916108712cd0e56e09501991abbd4792b53b | from typing import Tuple
import os
from PIL import Image
import imagehash
import math
import random
# Source directory with the compressed training images (Windows-style paths).
INPUT_DATA_DIR = "d:\\ml\\goat-data\\goat-train-compressed"
# Destination directory for the augmented PNG outputs.
OUTPUT_DATA_DIR = "d:\\ml\\goat-data\\goat-train-input"
def to_ratio(image: Image, ratio: Tuple[int, int] = (4, 3)) -> Image:
    """Return *image* adjusted to the given width:height aspect ratio.

    The height is kept fixed; the width is either cropped symmetrically
    (image too wide) or padded symmetrically with blank edges (image too
    narrow) so the result is new_width x height with new_width chosen to
    match *ratio*.
    """
    width, height = image.size
    wanted_ratio = ratio[0] / ratio[1]
    new_width = int(height * wanted_ratio)
    if new_width == width:
        return image
    if width > new_width:
        # Too wide: crop equal strips off both sides.
        # BUG FIX: the original pasted the crop back onto a canvas of the
        # ORIGINAL size (blanking the edges but keeping the old ratio),
        # which contradicted both the function's purpose and the padding
        # branch below, which does return a new_width x height image.
        left_corner = (width - new_width) // 2
        return image.crop((left_corner, 0, left_corner + new_width, height))
    # Too narrow: center the image on a blank new_width x height canvas.
    padded = Image.new(image.mode, (new_width, height))
    left_corner = (new_width - width) // 2
    padded.paste(image, (left_corner, 0, left_corner + width, height))
    return padded
def rotate(image: Image, angle: int = 0) -> Image:
    """Rotate *image* counter-clockwise by *angle* degrees, keeping the
    original canvas size and filling exposed corners with black (0)."""
    rotated = image.rotate(angle, expand=False, fillcolor=0)
    return rotated
def puzzle(image: Image, size: Tuple[int, int], rotate_parts: bool = False, random_seed: int = 0):
    """Shuffle a centered size[0] x size[1] region of *image* as square tiles.

    The region is cut into gcd(size[0], size[1])-sized squares, the squares
    are shuffled (and optionally rotated) with a deterministic RNG, and
    pasted back in reading order onto a blank canvas of the original size.

    Args:
        image: source image; must be at least size[0] x size[1].
        size: (width, height) of the centered region to shuffle.
        rotate_parts: if True, each tile also gets a random 0/90/180/270 turn.
        random_seed: RNG seed; 0 means derive a per-image seed from its hash.
    """
    seed = random_seed
    if seed == 0:
        # Derive a deterministic per-image seed from its perceptual hash so
        # the same image always produces the same shuffle.
        seed = hash(imagehash.dhash(image))
    width = image.size[0]
    height = image.size[1]
    # Tile edge: the largest square that tiles the region exactly.
    square_length = math.gcd(size[0], size[1])
    redundant_width = (width - size[0])
    start_point_width = redundant_width // 2
    start_point_height = (height - size[1]) // 2
    # Cut the centered region into square tiles, row by row.
    parts = list()
    for h in range(size[1] // square_length):
        h_current = (h * square_length) + start_point_height
        for w in range(size[0] // square_length):
            w_current = (w * square_length) + start_point_width
            square = (w_current, h_current, w_current + square_length, h_current + square_length)
            parts.append(image.crop(square))
    rnd = random.Random(seed)
    rnd.shuffle(parts)
    if rotate_parts:
        parts = [i.rotate(rnd.sample([0, 90, 180, 270], 1)[0], expand=False, fillcolor=0) for i in parts]
    # Paste the shuffled tiles back in reading order.
    # NOTE(review): the canvas starts blank, so pixels outside the shuffled
    # region are NOT copied from the source image — confirm that is intended.
    new_image = Image.new(image.mode, (width, height))
    h = w = 0
    for i in parts:
        left_corner = (w * square_length) + start_point_width
        left_height = (h * square_length) + start_point_height
        square = (left_corner, left_height, left_corner + square_length, left_height + square_length)
        new_image.paste(i, square)
        w += 1
        # Wrap to the next row once the row is full:
        # w*square_length + redundant_width >= width  <=>  w*square_length >= size[0].
        if (w * square_length) + redundant_width >= width:
            w = 0
            h += 1
    return new_image
if __name__ == '__main__':
    # One augmentation per slot; files are assigned round-robin, so every
    # len(actions)-th file receives the same transformation.
    actions = [lambda x: x, lambda x: to_ratio(x, (4, 3)), lambda x: rotate(x, 90), lambda x: rotate(x, 180),
               lambda x: rotate(x, 270), lambda x: puzzle(x, (450, 270)), lambda x: puzzle(x, (450, 270), True)]
    files = os.listdir(INPUT_DATA_DIR)
    for idx, name in enumerate(files):
        action = actions[idx % len(actions)]
        # os.path.join replaces manual "\\" concatenation; `with` guarantees
        # the source handle is released even if the transform or save raises.
        with Image.open(os.path.join(INPUT_DATA_DIR, name)) as im:
            result = action(im)
            result.save(os.path.join(OUTPUT_DATA_DIR, name.replace(".jpg", ".png")))
            result.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.