index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
2,300 | 5d3f7d74cf1cc2612d599c65393abed11181c981 | team = input("Wymien wszystkich czlonkow swojego zespolu: ").split(",")
for member in team:
print("Hello, " + member)
|
2,301 | 400f9b6fb0ab73a920e6b73373615b2f8d1103bb | #!/usr/bin/env python3
#coding=utf-8
"""
dfsbuild.py
单Git仓库多Dockerfile构建工具,提高了构建效率
快速使用:
chmod +x ./dfsbuild.py
只构建Git最近一次修改的Dockerfile
./dfsbuild.py -a auto -r registry.cn-shanghai.aliyuncs.com/userename
构建所有的Dockerfile
./dfsbuild.py -a all -r registry.cn-shanghai.aliyuncs.com/userename
构建特定的Dockerfile
./dfsbuild.py -a dfs -r registry.cn-shanghai.aliyuncs.com/userename nginx
解决的问题:
通常我们用大量的基础Dockerfile需要维护
很多时候这些大量的Dockerfile会放在同一个Git仓库当中
当Git push时Git server的webhook功能去触发CI(Jenkins等)系统
CI系统会去自动docker build镜像
产生的问题是每次都会docker build全部的Dockerfile文件
构建的过程中虽然会使用缓存,但实际的构建时间还是不能接受的
本工具可以自动处理只构建Git最近一次修改的Dockerfile
从而大大提高了单Git仓库多Dockerfile的docker build构建速度
关键点:
git最近一次修改的Dockerfile
git --no-pager whatchanged --name-only --oneline -1
参看gitLastDockerFiles函数实现
"""
import os
import argparse
import datetime
def walkDockerfiles(path,splitFirt=True):
    """Recursively collect every Dockerfile path under *path*.

    Arguments:
        path {string} -- directory to scan
    Keyword Arguments:
        splitFirt {bool} -- strip the leading *path* prefix from each result (default: {True})
    Returns:
        array -- list of Dockerfile paths; empty when *path* does not exist
    """
    files_list = []
    if not os.path.exists(path):
        # BUGFIX: previously returned -1 here, which crashed callers that do
        # len(result) (see main's "all" branch); an empty list keeps the
        # return type consistent.
        return files_list
    for root, sub_dirs, files in os.walk(path):
        for filename in files:
            if isDockerfile(filename):
                fullFileName = os.path.join(root, filename)
                if splitFirt:
                    # drop the search-root prefix so results are repo-relative
                    fullFileName = fullFileName.replace(path,"")
                files_list.append(fullFileName) # join path and file name into a full path
    return files_list
def isDockerfile(filename):
    """Return True when *filename* looks like a Dockerfile (name contains 'Dockerfile')."""
    return "Dockerfile" in filename
def gitLastDockerFiles():
    """Return the Dockerfile paths touched by the most recent git commit.

    Runs ``git whatchanged`` from this script's directory and keeps only the
    listed file names that look like Dockerfiles.
    Returns:
        array -- Dockerfiles changed in the latest commit
    """
    # run git from the repository directory that contains this script
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    pipe = os.popen("git --no-pager whatchanged --name-only --oneline -1")
    output = pipe.read()
    pipe.close()
    # split into lines and keep only Dockerfile entries
    return [entry.strip('\n') for entry in output.split('\n') if isDockerfile(entry)]
def dockerDo(df="", action="build", registry=""):
    """Run one docker command (build or push) for a single Dockerfile.

    Arguments:
        df {string} -- Dockerfile path relative to the repo root
        action {string} -- "build" or "push"; anything else runs a harmless `docker info`
        registry {string} -- registry prefix used to compose the image tag
    """
    if df == "" or registry == "":
        printMsg("E","输入的参数不完整")
        # BUGFIX: previously fell through and still ran docker with a bogus
        # tag; bail out when required arguments are missing.
        return
    # Tag strategy (two directory levels supported):
    #   nginx/Dockerfile              >> registry/nginx:latest
    #   nginx/alpine/Dockerfile       >> registry/nginx:alpine
    #   php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine
    dfpath = df.replace('/Dockerfile','')
    tagArr = dfpath.split('/')
    tagArrLen = len(tagArr)
    if 1 == tagArrLen:
        tag = registry + "/" + tagArr[0] + ":latest"
    elif 2 <= tagArrLen:
        tag = registry + "/" + tagArr[0] + ":" + tagArr[1]
    cmd = "docker info"  # harmless fallback for unknown actions
    if action == "build":
        cmd = 'docker build -t ' + tag + ' ./' + dfpath
    elif action == "push":
        cmd = 'docker push ' + tag
    os.system(cmd)
def scan_files(directory,prefix=None,postfix=None):
    """Walk *directory* and return file paths filtered by suffix or prefix.

    *postfix* takes precedence over *prefix*; with neither set, every file
    found is returned.
    """
    matches = []
    for folder, _dirs, names in os.walk(directory):
        for name in names:
            if postfix:
                keep = name.endswith(postfix)
            elif prefix:
                keep = name.startswith(prefix)
            else:
                keep = True
            if keep:
                matches.append(os.path.join(folder, name))
    return matches
def _parse_args():
    """Define and parse the command-line interface for this tool."""
    parser = argparse.ArgumentParser()
    # positional: explicit Dockerfile paths (used together with -a dfs)
    parser.add_argument(
        'dfs',
        nargs='*',
        help='Dockerfile文件相对路径支持多个,用空格分割',
        metavar='dfs'
    )
    # -a: which Dockerfiles to build (auto / all / dfs)
    parser.add_argument(
        '-a', '--action',
        default='auto',
        help="设置build Dockerfile的范围 \
auto(默认)为自动模式取git最后一次修改的Dockerfile \
all全部的Dockerfile \
dfs指定的Dockerfile",
        metavar='action',
    )
    # -r: registry prefix used when composing image tags
    parser.add_argument(
        '-r', '--registry',
        default='index.docker.io',
        help="定义docker仓库地址",
        metavar='registry',
    )
    # -p: whether to docker push after a successful build
    # NOTE(review): argparse delivers any explicit "-p VALUE" as a *string*,
    # so main's `True == push` check only passes when -p is omitted — confirm
    # this is the intended behavior.
    parser.add_argument(
        '-p', '--push',
        default=True,
        help="build完成是否运行docker push",
        metavar='push',
    )
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s 1.0.0',
    )
    return parser.parse_args()
def printMsg(level="I",msg=""):
    """Print *msg* prefixed with an ISO timestamp and a level letter (I=info, E=error)."""
    timestamp = datetime.datetime.now().isoformat()
    print(timestamp + " ["+level+"] "+msg)
def main():
    """Entry point: choose the Dockerfile set per --action, then build (and optionally push) each."""
    parser = _parse_args()
    dfs = parser.dfs
    registry = parser.registry
    push = parser.push
    action = parser.action
    if action == "auto":
        # build only the Dockerfiles touched by the latest git commit
        dfs = gitLastDockerFiles()
        if len(dfs) < 1:
            printMsg("I", "最近1次无Dockerfile修改")
    elif action == "all":
        # build every Dockerfile found under the current directory
        dfs = walkDockerfiles("./")
    elif action == "dfs":
        # use the Dockerfile list passed on the command line as-is
        pass
    else:
        printMsg("E","-a 错误,输入的参数,未定义")
    if len(dfs) > 0:
        for df in dfs:
            dockerDo(df, 'build', registry)
            # NOTE(review): push defaults to True; any explicit -p value arrives
            # as a string, so this branch is skipped whenever -p is given — confirm.
            if True == push:
                dockerDo(df, 'push', registry)
    else:
        printMsg("E", "Dockerfile未找到")


# Script entry point
if __name__ == '__main__':
    main()
|
2,302 | 3ba9ff00b0d6a2006c714a9818c8b561d884e252 | import boto3
import pprint
import yaml
# initialize empty dictionaries to hold the current and merged properties
new_dict = {}
count = 0
new_dict2 = {}
# dev = boto3.session.Session(profile_name='shipt')
mybatch = boto3.client('batch')
# load config properties
with open('config.yml') as f:
    # BUGFIX: yaml.load() without an explicit Loader is deprecated and can
    # construct arbitrary Python objects; safe_load builds plain data only.
    content = yaml.safe_load(f)
# pprint.pprint(content) #to print config properties in file
# get current job definition
response = mybatch.describe_job_definitions(
    jobDefinitions = [
        'axiom-staging-abcfinewine:1'
        # 'axiom-staging-costco:1'
    ],
    status='ACTIVE'
)
# print(type(response))
for k, v in response.items():
    if k == 'jobDefinitions':
        # pprint.pprint(v) #to print container properties
        # pprint.pprint(v[0]['containerProperties'])
        new_dict = v[0]['containerProperties']
# check if config properties match with current job definition properties
# for key in new_dict.keys():
#     if key in content.keys():
#         count = count + 1
#         if content[key] == new_dict[key]:
#             new_dict2[key] == content[key]
print(content.items())
# new_dict2 = dict(content.items() & new_dict.items())
print(new_dict2)
# if v == new_dict[k]:
#     # print('woooh00!')
#     print(content[k])
#     print(v)
#     print(new_dict[k])
# for k,v in new_dict.items():
#     print(v)
# if content != new_dict:
#     print('\n\n\n\twooohooo!')
# print(response)
# pp = pprint.PrettyPrinter(indent = 4)
# pp.pprint(response)
|
2,303 | 255cdbce1f9f7709165b1a29362026ad92ba4712 | #day11
# Compute the n-th Fibonacci number iteratively (F(0)=0, F(1)=1).
n = int(input("Enter a number: "))
c = 0
a,b = 0, 1
# BUGFIX: renamed "list" -> "fib" so the builtin list type is not shadowed
fib = [a, b]
for i in range(2,n+1):
    c = a+b
    fib.append(c)
    a,b = b, c
print(n,"th fibonacci number is ",fib[n])
|
2,304 | a6d5552fa0648fcf9484a1498e4132eb80ecfc86 | import sys, warnings
# Version guard: warn (but keep running) when the interpreter is older than Python 3.
if sys.version_info[0] >= 3:
    print('Normal continuation')
else:
    warnings.warn("At least Python 3.0 is required to run this program", RuntimeWarning)
|
2,305 | 502f405f48df92583757ebc9edb4b15910c1f76a | # Copyright (c) Facebook, Inc. and its affiliates.
from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip
from .backbone import Backbone
from .fpn import FPN
from .resnet import ResNet, ResNetBlockBase, build_resnet_backbone, make_stage
__all__ = [k for k in globals().keys() if not k.startswith("_")]
# TODO can expose more resnet blocks after careful consideration
|
2,306 | d81e8478d60c9ee778e1aeb0dd7b05f675e4ecad | import pymarc
from pymarc import JSONReader, Field, JSONWriter, XMLWriter
import psycopg2
import psycopg2.extras
import time
import logging
import json
#WRITTEN W/PYTHON 3.7.3
print("...starting export");
# constructing file and log name — this timestamp is reused for every output chunk
timestr = time.strftime("%Y%m%d-%H%M%S")
logging.basicConfig(filename=timestr + "-export.log")
#LOCAL DB connection settings (values redacted)
DATABASE_HOST = "redacted"
DATABASE_USERNAME = "redacted"
DATABASE_PASSWORD = "redacted"
DATABASE_PORT = 5432
DATABASE_NAME = "redacted"
TENANT = "redacted"
# counter used to number the JSON output files
count = 0
folio_db = psycopg2.connect(
    user=DATABASE_USERNAME,
    password=DATABASE_PASSWORD,
    host=DATABASE_HOST,
    port=DATABASE_PORT,
    database=DATABASE_NAME
)
#init a list of material types (id -> name)
materialTypeLookup = {}
matCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
select_all_mat = '''
select id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type'''.format(TENANT)
matCursor.execute(select_all_mat)
materialTypes = matCursor.fetchall()
for m in materialTypes:
    materialTypeLookup[m['id']] = m['name']
#init a list of locations (id -> name)
locLookup = {}
locCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
select_all_loc = '''
select id, jsonb->>'name' as name from {}_mod_inventory_storage.location'''.format(TENANT)
locCursor.execute(select_all_loc)
locations = locCursor.fetchall()
for l in locations:
    locLookup[l['id']] = l['name']
#init a list of call number types (id -> name)
callNoTypeLookup = {}
callNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
select_all_call_no_types = '''
select id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type'''.format(TENANT)
callNoTypeCursor.execute(select_all_call_no_types)
callNoTypes = callNoTypeCursor.fetchall()
for c in callNoTypes:
    callNoTypeLookup[c['id']] = c['name']
# named (server-side) cursor so the large result set streams in chunks
# instead of being loaded into memory all at once
cursor = folio_db.cursor(name='folio',cursor_factory=psycopg2.extras.DictCursor)
#THIS COULD BE MODIFIED TO RETREIVE X NUMBER OF RECORDS PER FILE
cursor.itersize=300000
#from {}_mod_marc_storage.marc_record'''.format(TENANT)
select_ids_sql = '''
select
id,
instance_id
from {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)'''.format(TENANT,"'ACTUAL'")
print("executing query")
cursor.execute(select_ids_sql)
# Main export loop: fetch id/instance_id pairs in chunks, enrich each MARC
# record with 998 (holdings) and 952 (item) fields, and write one JSON file
# per fetched chunk.
while True:
    print("in the while true - fetching...")
    rows = cursor.fetchmany(cursor.itersize)
    print("fetch is done")
    marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
    if rows:
        # one numbered output file per fetched chunk
        save_file = timestr + "." + str(count) + ".json"
        writer = open(save_file,'wt')
        print("created the file: " + save_file)
        count += 1
        for row in rows:
            try:
                rowId = row['id'];
                rowInstanceId = row['instance_id'];
                if rowInstanceId == None:
                    logging.error("BAD RECORD: INSTANCE ID WAS NULL" + str(row))
                    continue
                # fetch the raw MARC JSON for this source record
                select_record_sql = '''
                select id,
                content as marc
                from {}_mod_source_record_storage.marc_records_lb where
                id = '{}' limit 1'''.format(TENANT, rowId)
                #print(select_record_sql)
                marcRecordCursor.execute(select_record_sql)
                marcRow = marcRecordCursor.fetchone()
                marcJsonAsString = json.dumps(marcRow['marc'])
                marcString = marcJsonAsString.encode('utf-8').strip()
                #print(marcJsonAsString);
                for record in JSONReader(marcJsonAsString):
                    #write MARC JSON to output file
                    #ADD A 998 FOR EACH HOLDING RECORD
                    # NOTE(review): record['6xx'] relies on pymarc field-tag lookup
                    # accepting the literal tag '6xx' — confirm this matches the
                    # intended "malformed tag" check.
                    if record['6xx'] is not None:
                        logging.error("BAD RECORD: 6xx" + str(row))
                        continue
                    if record['4xx'] is not None:
                        logging.error("BAD RECORD: 4xx" + str(row))
                        continue
                    # all non-suppressed holdings for this instance
                    select_holding_sql = '''
                    select id, creation_date, callnumbertypeid,
                    jsonb->>'permanentLocationId' as permanentlocationid,
                    jsonb->'holdingsStatements' as holdingstatements,
                    jsonb->>'callNumber' as callNumber from
                    {}_mod_inventory_storage.holdings_record
                    where instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowInstanceId)
                    #print(select_holding_sql)
                    marcRecordCursor.execute(select_holding_sql)
                    holdingRows = marcRecordCursor.fetchall()
                    for holding in holdingRows:
                        #print(holding['callnumber'])
                        holdingsStatements = holding['holdingstatements']
                        rowHoldingsId = holding['id']
                        # 998 field: call number ($a) and permanent location name ($l)
                        newField = Field(tag = '998',
                        indicators = [' ',' '],
                        subfields = ['a',holding.get('callnumber',''),
                        'l',locLookup.get(holding.get('permanentlocationid',''),'')])
                        # each holdings statement becomes a $s subfield
                        for statement in holdingsStatements:
                            if statement is not None:
                                newField.add_subfield('s',statement.get('statement','').replace('Extent of ownership:',''));
                        record.add_field(newField)
                        #ADD AN 952 FOR EACH ITEM
                        select_item_sql = '''
                        select id, materialtypeid,
                        jsonb->>'effectiveLocationId' as effectivelocationid,
                        jsonb->>'barcode' as barcode,
                        jsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,
                        jsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,
                        jsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber
                        from {}_mod_inventory_storage.item where
                        holdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowHoldingsId)
                        #print(select_item_sql)
                        marcRecordCursor.execute(select_item_sql)
                        itemRows = marcRecordCursor.fetchall()
                        for item in itemRows:
                            callNoToUse = item.get('callnumber','na')
                            #print(callNoToUse)
                            prefix = item.get('prefix',None)
                            if (prefix is not None):
                                callNoToUse = prefix + " " + callNoToUse
                            # 952 field: barcode ($m), call-number type ($j),
                            # effective location ($d), material type ($i),
                            # effective call number ($e)
                            record.add_field(
                            Field(tag = '952',
                            indicators = [' ',' '],
                            subfields = ['m',item.get('barcode',''),
                            'j',callNoTypeLookup.get(item.get('callnotype',''),''),
                            'd',locLookup.get(item.get('effectivelocationid'),''),
                            'i',materialTypeLookup.get(item.get('materialtypeid'),''),
                            'e',callNoToUse]))
                    # pad a short leader to the 24 characters MARC requires
                    if (len(record.leader) < 24):
                        logging.error("BAD LEADER" + record.leader + " " + str(row))
                        record.leader = "{:<24}".format(record.leader)
                    writer.write(record.as_json())
                    writer.write('\n')
            except Exception as e:
                # best-effort: log the failure and keep exporting the rest
                print("ERROR PROCESSING ROW:" + str(row))
                print(e)
                if rowInstanceId == None:
                    rowInstanceId = "None" #FOR LOGGING
                logging.error("UNABLE TO WRITE TO FILE: " + rowInstanceId)
                logging.error(e)
                continue
        writer.close()
    else:
        print("in the else --> finishing")
        break
# clean up database resources
if (folio_db):
    cursor.close()
    marcRecordCursor.close()
    folio_db.close()
print("complete")
|
2,307 | 8db952ba5bf42443da89f4064caf012036471541 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 11:51:49 2019
@author: Christian Post
"""
# TODO: row index as an attribute of Data?
# make iterrows return a row object to access column names for each row
import csv
import os
import datetime
def euro(number):
    """Format *number* as a euro amount with a decimal comma, e.g. 3.5 -> '3,50 €'."""
    formatted = f'{number:.2f} €'
    return formatted.replace('.',',')
def date_s(date):
    """Format a date/datetime as the German-style string DD.MM.YYYY."""
    return date.strftime("%d.%m.%Y")
def convert_to_date(date):
    """Return a datetime.date unchanged; collapse a datetime.datetime to its date part.

    Uses an exact type check on purpose: datetime.datetime is a subclass of
    datetime.date, so isinstance would not distinguish the two.
    """
    return date if type(date) == datetime.date else date.date()
class Data():
    """A minimal DataFrame-like container: column-oriented storage with CSV and HTML I/O."""

    def __init__(self, data=None, columns=None):
        """Build the container from a list of rows.

        data -- list of rows (each row a list of cell values), or None for empty
        columns -- list of column names; generated as "0", "1", ... when omitted
        """
        # BUGFIX: the old signature used columns=[] — a mutable default shared
        # by every instance, so generated column names leaked between objects.
        self.data = {}
        self.columns = columns if columns is not None else []
        self.shape = (0, 0)
        if data:
            if self.columns:
                for i in range(len(data[0])):
                    self.data[self.columns[i]] = []
            else:
                for i in range(len(data[0])):
                    self.columns.append(str(i))
                    self.data[str(i)] = []
            for i, row in enumerate(data):
                for j, col in enumerate(row):
                    self.data[self.columns[j]].append(col)
            self.shape = (len(data), len(data[0]))
            # (removed a leftover debug print of self.data)
            # expose each column as an attribute
            for col in self.columns:
                setattr(self, col, self.data[col])

    def write_csv(self, filename, decimal=',', sep=';', head=True):
        """Write self.data to *filename* as CSV, replacing '.' with *decimal* in values."""
        with open(filename, 'w+', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=sep)
            if head:
                writer.writerow(self.columns)
            for i, row in self.iterrows():
                str_row = [str(r).replace('.', decimal) for r in row]
                writer.writerow(str_row)

    def read_csv(self, filename, head=True, column_names=None,
                 decimal=',', parse_dates=None, date_parser=None):
        """Load *filename* (';'-separated CSV) into self.data.

        head -- whether the first row holds column names
        column_names -- explicit column names (replace the header when given)
        decimal -- decimal separator used in the file, converted to '.'
        parse_dates -- column names whose values go through *date_parser*
        """
        # BUGFIX: mutable list defaults replaced with None sentinels
        if column_names is None:
            column_names = []
        if parse_dates is None:
            parse_dates = []
        if not os.path.isfile(filename):
            # BUGFIX: the f-strings below had lost their {filename} placeholder
            print(f'Error: "{filename}" does not exist.')
            return
        file_data = []
        try:
            with open(filename, 'r') as csvfile:
                reader = csv.reader(csvfile, delimiter=';')
                for row in reader:
                    file_data.append(row)
        except csv.Error:
            print(f'Error: Could not read "{filename}"')
            return
        if len(file_data) == 0:
            print(f'Error: "{filename}" does not contain any data.')
            return
        self.shape = (len(file_data), len(file_data[0]))
        if column_names and len(column_names) != self.shape[1]:
            print('Error: Mismatching length of column names ' +
                  f'(Got {len(column_names)} instead of {self.shape[1]}).')
            return
        if head and not column_names:
            # first row provides the column names
            self.columns = file_data[0]
            file_data = file_data[1:]
            for col in self.columns:
                self.data[col] = []
        elif head and column_names:
            # header row present but explicitly overridden
            self.columns = list(column_names)
            file_data = file_data[1:]
            for col in self.columns:
                self.data[col] = []
        elif not head and column_names:
            self.columns = list(column_names)
            for col in self.columns:
                self.data[col] = []
        else:
            # no header, no names: number the columns "0", "1", ...
            for i in range(len(file_data[0])):
                self.columns.append(str(i))
                self.data[str(i)] = []
        for i, row in enumerate(file_data):
            for j, col in enumerate(row):
                # booleans are stored as real bools
                if col == 'True':
                    self.data[self.columns[j]].append(True)
                    continue
                elif col == 'False':
                    self.data[self.columns[j]].append(False)
                    continue
                # date columns go through the user-supplied parser
                if parse_dates and self.columns[j] in parse_dates:
                    self.data[self.columns[j]].append(date_parser(col))
                    continue
                # numbers become int when integral, float otherwise
                value = col.replace(decimal, '.')
                try:
                    value = float(value)
                    if value.is_integer():
                        self.data[self.columns[j]].append(int(value))
                    else:
                        self.data[self.columns[j]].append(value)
                except ValueError:
                    # data is not a number: keep the raw string
                    self.data[self.columns[j]].append(col)
        # set attributes of the data object based on column names
        for col in self.columns:
            setattr(self, col, self.data[col])

    class Row():
        """Lightweight view of one row, addressable by column name or position."""

        def __init__(self, data, columns):
            self.data = data
            self.columns = columns
            for i, col in enumerate(self.columns):
                setattr(self, col, data[i])

        def __getitem__(self, key):
            return self.data[self.columns.index(key)]

        def __iter__(self):
            return iter(self.data)

    def iterrows(self):
        """Yield (index, Row) pairs, similar to pandas' iterrows."""
        v = list(self.data.values())
        if len(v) == 0:
            return
        i = 0
        while i < len(v[0]):
            data = []
            for col in v:
                data.append(col[i])
            row = self.Row(data, self.columns)
            yield i, row
            i += 1

    def sort(self, by=None, reverse=False):
        """Sort rows in place; *by* must be a column name (falls back to the first column)."""
        temp_data = [list(row) for i, row in self.iterrows()]
        if not by or by not in self.columns:
            i = 0
        else:
            i = self.columns.index(by)
        temp_data = sorted(temp_data, key=lambda x: x[i], reverse=reverse)
        # write the sorted rows back into the column-oriented store
        for i, row in enumerate(temp_data):
            for j, col in enumerate(row):
                self.data[self.columns[j]][i] = col

    def to_html(self, filename, format_values=None, rename_columns=None,
                css=None, column_align=None, caption=None,
                format_columns=None):
        """Render this object's data as an HTML table written to *filename*.

        filename -- a valid *.html or *.htm file name
        format_values -- {column: callable} mapping a cell value to its display form
        rename_columns -- {current column name: new column name}
        css -- list of css elements inserted into the <style> tag
        column_align -- {column name: 'left' | 'right' | 'center'}
        caption -- the table's caption
        format_columns -- {column: extra attribute string for its <td> tags}
        """
        # BUGFIX: mutable dict/list defaults replaced with None sentinels
        format_values = format_values if format_values is not None else {}
        rename_columns = rename_columns if rename_columns is not None else {}
        css = css if css is not None else []
        column_align = column_align if column_align is not None else {}
        format_columns = format_columns if format_columns is not None else {}
        if len(self.data) == 0:
            # return if this has no data
            print('HTML building aborted: No data')
            return
        if filename[-4:] != 'html' and filename[-3:] != 'htm':
            print(f'Error: "{filename}" is not a valid html file')
            return
        strTable = '<html><head><style>'
        # css table style: add classes for alignment
        strTable += ('.right {text-align: right;} ' +
                     '.left {text-align: left;} ' +
                     '.center {text-align: center;}')
        for style in css:
            # add user css elements to the style tag
            strTable += style
        strTable += '</style></head><body><table>'
        if caption:
            strTable += f'<caption>{caption}</caption>'
        strTable += '<tr>'
        for col in self.columns:
            # add (possibly renamed) column names to the table header
            if col in rename_columns.keys():
                col = rename_columns[col]
            strTable += f'<th>{col}</th>'
        strTable += '</tr>'
        for i, row in self.iterrows():
            # add rows to the table
            strRW = '<tr>'
            for col in self.columns:
                strTD = '<td '
                value = row[col]
                if col in format_values.keys():
                    value = format_values[col](value)
                if col in format_columns.keys():
                    strTD += format_columns[col]
                if col in column_align.keys():
                    strTD += f' class=\"{column_align[col]}\">{value}'
                else:
                    strTD += f'>{value}'
                strTD += '</td>'
                strRW += strTD
            strRW += '</tr>'
            strTable += strRW
        strTable += '</table></body></html>'
        with open(filename, 'w') as html_file:
            html_file.write(strTable)
if __name__ == '__main__':
    # demo: load the staff CSV and render it as a styled HTML table
    file_path = os.path.dirname(os.path.abspath(__file__))
    filename = os.path.join(file_path, 'exported_csv', 'staff.csv')
    data = Data()
    # NOTE(review): column_names here rename the columns to A-E, yet
    # parse_dates and to_html reference 'date'/'payment' — these settings
    # look inconsistent with each other; confirm against the CSV's header.
    data.read_csv(filename,
                  head=True,
                  column_names = ['A', 'B', 'C', 'D', 'E'],
                  parse_dates=['date'],
                  date_parser=lambda x: datetime.datetime.strptime(x, '%d.%m.%Y').date())
    table_css = [
        'table {border-collapse: collapse;}',
        'table, th, td {border: 1px solid black;}',
        'th, td {text-align: left; padding: 2px 6px 2px 6px;}'
    ]
    data.to_html('temp/test.html',
                 format_values={'payment': euro,
                                'date': date_s},
                 format_columns={'payment': 'width=400px;'},
                 rename_columns={'number': 'Number',
                                 'name': 'Name',
                                 'date': 'Date',
                                 'payment': 'Payment'},
                 css=table_css,
                 column_align={'payment': 'right'})
    #data.write_csv('test.csv')
|
2,308 | c2069113f322c97e953fba6b9d21b90a8b13a066 | from django.apps import AppConfig
class BoletoGerenciaNetConfig(AppConfig):
    """Django app configuration for the boletogerencianet app."""
    name = 'boletogerencianet'
2,309 | e4a66617adbe863459e33f77c32c89e901f66995 |
import numpy as np
class settings:
    """Normalization parameters produced by mapminmax (mirrors MATLAB's settings struct)."""

    def __init__(self, xmax, xmin, ymax, ymin, yrange, xrange):
        # per-row input extrema and ranges, plus the target output range
        self.xmax = xmax
        self.xmin = xmin
        self.ymax = ymax
        self.ymin = ymin
        self.yrange = yrange
        self.xrange = xrange
def mapminmax(x, ymin=-1.0, ymax=1.0):
    """Scale each row of x into [ymin, ymax] (MATLAB mapminmax clone); delegates to create()."""
    return create(x, ymin, ymax)
def create(x, ymin, ymax):
    """Compute per-row scaling of x into [ymin, ymax] plus the settings used.

    Returns [scaled_x, settings]. Presumably x is a pandas DataFrame/Series —
    mapminmax_apply dereferences .values — TODO confirm.
    """
    xrows = x.shape[0]
    xmin = x.min(1)
    xmax = x.max(1)
    xrange = xmax - xmin
    yrows = xrows
    yrange = ymax - ymin
    gain = yrange / xrange
    # rows with zero or non-finite range would otherwise divide by zero
    fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
    # NOTE(review): `not all(fix)` on the tuple returned by np.nonzero looks
    # inverted / ill-typed — the gain/xmin repair below may never (or always)
    # run; confirm against MATLAB's mapminmax semantics.
    if(not all(fix)):
        None
    else:
        gain[fix] = 1
        xmin[fix] = ymin
    return [mapminmax_apply(x, xrange, xmin, yrange, ymin),
            settings(xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin, yrange=yrange, xrange=xrange)]
def mapminmax_apply(x, xrange, xmin, yrange, ymin):
    """Apply y = (x - xmin) * (yrange / xrange) + ymin row-wise.

    Assumes xmin and gain expose .values (pandas Series) — TODO confirm.
    """
    gain = yrange / xrange
    fix = np.nonzero(~np.isfinite(xrange) | (xrange == 0))
    # NOTE(review): same suspect `all(fix)` guard as in create(); confirm intent.
    if(not all(fix)):
        None
    else:
        gain[fix] = 1
        xmin[fix] = ymin
    # broadcast the per-row xmin and gain across each row's columns
    cd = np.multiply((np.ones((x.shape[0], x.shape[1]))), xmin.values.reshape(x.shape[0], 1))
    a = x - cd
    b = np.multiply((np.ones((x.shape[0], x.shape[1]))), gain.values.reshape(x.shape[0], 1))
    return np.multiply(a, b) + ymin
class MapMinMaxApplier(object):
    """Callable linear map y = slope * x + intercept, with an inverse transform."""

    def __init__(self, slope, intercept):
        self.slope = slope
        self.intercept = intercept

    def __call__(self, x):
        """Forward transform."""
        return self.slope * x + self.intercept

    def reverse(self, y):
        """Invert the transform: recover x from y."""
        return (y - self.intercept) / self.slope
def mapminmax_rev(x, ymin=-1, ymax=+1):
    """Rescale each row of x into [ymin, ymax]; return (scaled, applier).

    Raises ValueError when any row is constant (zero range), since the
    linear map would be undefined there.
    """
    x = np.asanyarray(x)
    xmax = x.max(axis=-1)
    xmin = x.min(axis=-1)
    if (xmax==xmin).any():
        raise ValueError("some rows have no variation")
    span = ymax - ymin
    # per-row slope/intercept, shaped (rows, 1) so they broadcast over columns
    slope = (span / (xmax - xmin))[:, np.newaxis]
    intercept = (-xmin * span / (xmax - xmin))[:, np.newaxis] + ymin
    ps = MapMinMaxApplier(slope, intercept)
    return ps(x), ps
2,310 | e5e516b6a39a6df03f1e5f80fe2d9e3978e856aa | # What is the 10 001st prime number?
primes = [2]  # seed list of known primes, grown by the search loop below


def is_prime(a, primes):
    """Return True when *a* is divisible by none of the known *primes*.

    BUGFIX: the original returned after testing only the FIRST prime
    (the return True sat inside the loop's else branch), so any odd
    composite, e.g. 9, was reported prime. Now every prime up to
    sqrt(a) is tried before declaring a prime.
    """
    for x in primes:
        if x * x > a:
            # no divisor found up to sqrt(a): a is prime
            break
        if a % x == 0:
            return False
    return True
# Search loop (NOTE: Python 2 print statements) — grow the prime list until
# the 10,001st prime is known, then print it.
a = 3
while len(primes) <= 10001:
    # There's something faster than just checking all of them, but this
    # will do for now.
    if is_prime(a, primes):
        primes.append(a)
        print a
    a += 1
# primes[10000] is the 10,001st prime (0-indexed)
print primes[10000]
|
2,311 | 49f1b4c9c6d15b8322b83396c22e1027d241da33 | from tkinter import *
# Tiny Tk demo: a text entry plus a button that echoes the entry as a new label.
root = Tk()
ent = Entry(root)
ent.pack()


def click():
    """Append a label showing the entry's current text."""
    ent_text = ent.get()
    lab = Label(root, text=ent_text)
    lab.pack()


btn = Button(root, text="Click Me!", command=click)
btn.pack()
root.mainloop()
|
2,312 | 454fd88af552d7a46cb39167f21d641420973959 | # python2.7
#formats for oracle lists
import pyperclip

# read the clipboard: one value per line
text = str(pyperclip.paste()).strip()
# quote each value, stripping stray CR/LF left over from Windows line endings
quoted = ["'" + line.replace("\r", "").replace("\n", "") + "'"
          for line in text.split('\n')]
# assemble an Oracle IN-list: ('a',\n'b',\n'c') — same output as the old
# index-based loop, just built with a comprehension and a join
text = '(' + ',\n'.join(quoted) + ')'
pyperclip.copy(text)
|
2,313 | bc536440a8982d2d4a1bc5809c0d9bab5ac6553a | import os
import time
import uuid
import subprocess
# Global variables. ADJUST THEM TO YOUR NEEDS
chia_executable = os.path.expanduser('~')+"/chia-blockchain/venv/bin/chia" # path of the chia binary file
numberOfLogicalCores = 16 # number of logical cores that you want to use overall
run_loop_interval = 10 # seconds of delay before this algorithm executes another loop
refresh_logs_interval = 10 # seconds of delay before this algorithm will try to re-read all logs after adding plot
logs_location = os.path.expanduser('~')+"/.chia/mainnet/plotter/" # location of the log files. Remove all corrupted and interrupted log files!
string_contained_in_all_logs = ".txt" # shared part of the name of all the log files (all logfiles must have it!)
phase_one_finished = "Time for phase 1 =" # part of the log file that means 1/2 core should be freed
phase_four_finished = "Time for phase 4 =" # part of the log file that means 2/2 core should be freed
temporary_directory = "/srv/chia/plots/" # plotting scratch/temporary directory
final_directory = "/mnt/chia/plots/" # final plot destination
farmer_public_key = "8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2" # change to your key
pool_public_key = "907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1" # change to your key
# Functions
def fetch_file_content(file_path):
    """Return the lines of *file_path* as a list; empty list when the file is missing.

    BUGFIX: previously this implicitly returned None for a missing file,
    which crashed count_used_cores when it iterated the log contents.
    """
    if not os.path.isfile(file_path):
        print('File does not exist.')
        return []
    with open(file_path) as file:
        return file.readlines()
def fetch_logs():
    """Read every plot log under logs_location and return a list of their contents."""
    entries = os.listdir(logs_location)
    full_paths = [logs_location + entry for entry in entries]
    # keep only paths carrying the shared log-file marker
    log_paths = [path for path in full_paths if string_contained_in_all_logs in path]
    return [fetch_file_content(path) for path in log_paths]
def count_used_cores(logs):
    """Estimate cores in use: each plot starts at 4, minus 2 after phase 1 and 2 after phase 4."""
    print("===START COUNTING===")
    total = 0
    for (index, log) in enumerate(logs):
        print(f"Starting log #{index}")
        print("Potentially it's still in phase one assigning 4 cores")
        total += 4
        for line in log:
            if phase_one_finished in line:
                print("Phase one was finished in the log, deallocating two cores")
                total -= 2
            if phase_four_finished in line:
                print("Phase four was finished in the log, deallocating two cores")
                total -= 2
    print(f"===FINISH COUNTING: {total} USED CORES===")
    return total
def use_all_cores():
    """Keep launching new plots while the core budget allows another 4-core start."""
    cores_used = count_used_cores(fetch_logs())
    while numberOfLogicalCores > cores_used +1:
        print("There are four cores free, adding new plot!")
        add_plot()
        # give the new plot time to write its log before recounting
        time.sleep(refresh_logs_interval)
        cores_used = count_used_cores(fetch_logs())
def add_plot():
    """Launch one background `chia plots create` and direct its output into a fresh log file."""
    unique_filename = str(uuid.uuid4())
    new_log_file_path = f"{logs_location}/{unique_filename}{string_contained_in_all_logs}"
    command = f"{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &"
    with open(new_log_file_path, "w") as file:
        subprocess.run(command, shell=True, stdout=file)
def run_loop():
    """Daemon loop: top up plots to fill the core budget, then sleep."""
    while True:
        use_all_cores()
        time.sleep(run_loop_interval)


# Entry point
run_loop()
|
2,314 | 23c75840efd9a8fd68ac22d004bfe3b390fbe612 | from connect_to_elasticsearch import *
# returns the name of all indices in the elasticsearch server
def getAllIndiciesNames():
    """Return the set of all index names on the elasticsearch server, printing each one."""
    found = set()
    for index in connect_to_elasticsearch().indices.get_alias( "*" ):
        found.add( index )
        print( index )
    return found
|
2,315 | 614d6484678890df2ae0f750a3cad51a2b9bd1c6 | from django.contrib import admin, messages
from django.conf.urls import url
from django.shortcuts import render
from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect, HttpResponse
from website_data.models import *
from website_data.forms import *
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class WebsiteDataAdmin(admin.ModelAdmin):
    """Admin wrapper whose only purpose is to expose custom maintenance views."""

    # URLs overwriting to add new admin views (with auth check and without cache)
    def get_urls(self):
        """Prepend the custom admin URLs to the default ModelAdmin URLs."""
        urls = super(WebsiteDataAdmin, self).get_urls()
        my_urls = [
            # url(r'^edit-site/(?:(?P<site_id>\d+)/)$', self.admin_site.admin_view(self.edit_site)),
            url(r'^create-defaults/$', self.admin_site.admin_view(self.create_defaults)),
        ]
        # return custom URLs with default URLs
        return my_urls + urls

    """
    def edit_site(self, request, site_id):
        ""Function to select a site to edit""
        WebsiteData_obj = WebsiteData()
        Site_obj = Site.objects.get(pk=site_id)
        if request.method == 'POST':
            form = EditTextSiteForm(request.POST)
            if form.is_valid():
                # TODO: salvo i valori delle relative chiavi
                WebsiteData_obj.set_all_keys_about_site(site_id=site_id, post=request.POST)
                # pagina di successo con i dati aggiornati precompilati
                messages.add_message(request, messages.SUCCESS, 'Dati salvati con successo.')
                return HttpResponseRedirect('/admin/website_data/websitedata/edit-site/' + str(site_id)) # Redirect after POST
        else:
            form = EditTextSiteForm() # An unbound form
            # precompilo la post con eventuali valori presenti
            request.POST = WebsiteData_obj.get_all_keys_about_site(site_domain=Site_obj.domain)
            # logger.info("chiavi salvate in db per il sito " + str(site_id) + ": " + str(request.POST))
        context = {
            'form' : form,
            'post': request.POST,
            'title': "Modifica informazioni sito: " + str(Site_obj.domain),
            'opts': self.model._meta,
            'app_label': self.model._meta.app_label,
            'has_permission': request.user.is_superuser,
            'site_url': '/',
        }
        return render(request, 'admin/custom_view/edit_site.html', context)
    """

    def create_defaults(self, request):
        """Function to create default keys and themes"""
        ThemeKeys_obj = ThemeKeys()
        ThemeKeys_obj.create_default_keys()
        WebsitePreferenceKeys_obj = WebsitePreferenceKeys()
        WebsitePreferenceKeys_obj.create_default_keys()
        context = {
            'title': "Creazione chiavi e temi di default",
            'opts': self.model._meta,
            'app_label': self.model._meta.app_label,
            'has_permission': request.user.is_superuser,
            'site_url': '/',
        }
        messages.add_message(request, messages.SUCCESS, 'Valori di default creati con successo.')
        return render(request, 'admin/custom_view/create_defaults.html', context)

    def get_model_perms(self, request):
        """
        https://stackoverflow.com/questions/2431727/django-admin-hide-a-model
        Return empty perms dict thus hiding the model from admin index.
        This keeps the website_data custom views working while hiding all
        models: the URLs still resolve, but no model appears in the admin
        index for adding/editing.
        """
        return {}
class CustomSiteInstanceInline(admin.StackedInline):
    """Stacked inline for editing a Site's CustomSites record."""
    model = CustomSites
class WebsitePreferencesInstanceInline(admin.TabularInline):
    """Tabular inline for editing a Site's WebsitePreferences rows."""
    model = WebsitePreferences
# Define a new Site admin
class SiteAdmin(admin.ModelAdmin):
    """Site admin extended with custom-site and preference inlines."""
    list_filter = ('domain', 'name')
    inlines = [CustomSiteInstanceInline, WebsitePreferencesInstanceInline]
# TODO: additional admin pages (to be used only for debugging or maintenance)
"""
admin.site.register(Themes)
admin.site.register(ThemeKeys)
admin.site.register(WebsitePreferences)
admin.site.register(WebsitePreferenceKeys)
admin.site.register(CustomSites)
"""
# replace the stock Site admin with the inline-enabled version above
admin.site.unregister(Site)
admin.site.register(Site, SiteAdmin)
admin.site.register(WebsiteData, WebsiteDataAdmin)
|
2,316 | 730fc527f3d2805559e8917e846b0b13f4a9f6ee | from django.apps import AppConfig
class QuadraticEquationsSolverConfig(AppConfig):
    """Django app configuration for the quadratic_equations_solver app."""
    name = 'quadratic_equations_solver'
|
2,317 | e6ac742eb74d5d18e4c304a8ea1331e7e16e403d | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Sum of all root-to-leaf numbers in a binary tree (LeetCode 129)."""
    # @param root, a tree node
    # @return an integer
    sum = 0  # kept for backward compatibility; mirrors the last computed total

    def sumNumbers(self, root):
        """Return the sum of the numbers formed along every root-to-leaf path.

        BUGFIX: the accumulator used to be a class attribute that was never
        reset, so repeated calls (or multiple instances) double-counted.
        A local total is now used and dfs closes over it.
        """
        if root is None:
            return 0
        total = 0

        def dfs(node, sofar):
            nonlocal total
            if node.left is None and node.right is None:
                # leaf: concatenate the digits along the path into one number
                total += int(''.join(map(str, sofar + [node.val])))
                return
            if node.left is not None:
                dfs(node.left, sofar + [node.val])
            if node.right is not None:
                dfs(node.right, sofar + [node.val])

        dfs(root, [])
        self.sum = total  # preserve the old attribute for external readers
        return total
|
2,318 | 0f03ff63662b82f813a18cc8ece3d377716ce678 | # -*- coding: utf-8 -*-
"""
@author: longshuicui
@date : 2021/2/4
@function:
32. Longest Valid Parentheses (Hard)
https://leetcode.com/problems/longest-valid-parentheses/
题目描述
在给的字符串里面找到 最大长度的 有效 括号字符串
输入输出示例
Input: s = ")()())"
Output: 4
Explanation: The longest valid parentheses substring is "()()".
题解
使用栈
"""
def longestValidParentheses(s):
    """Length of the longest well-formed parentheses substring of *s*.

    Stack-based scan: the stack holds indices, and its bottom entry is
    always the position just before the current balanced run.
    """
    best = 0
    indices = [-1]  # sentinel: index before the first character
    for pos, ch in enumerate(s):
        if ch == "(":
            indices.append(pos)
        else:
            # Only round brackets occur, so any '(' on top matches; just pop.
            indices.pop()
            if indices:
                best = max(best, pos - indices[-1])
            else:
                indices.append(pos)  # unmatched ')': new barrier index
    return best
# Quick manual check: ")()" contains one valid pair, so the answer is 2.
s = ")()"
l = longestValidParentheses(s)
print(l)
|
2,319 | 6f356840944e11f52a280262697d7e33b3cca650 | import cv2 as cv
# Demonstrate OpenCV's five basic global-threshold modes on a grayscale ramp.
img = cv.imread('images/gradient.png', 0)  # 0 = load as grayscale
_,th1 = cv.threshold(img, 127,255, cv.THRESH_BINARY)
_,th2 = cv.threshold(img, 127, 255, cv.THRESH_BINARY_INV)
_,th3 = cv.threshold(img, 127, 255, cv.THRESH_TRUNC) #freeze the pixel color after the threshold
_,th4 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO) #less to threshold will be zero
_,th5 = cv.threshold(img, 127, 255, cv.THRESH_TOZERO_INV) #if the value of the pixel is greater than threshold it will be zero
cv.imshow("Threshold Trunc", th3)
cv.imshow("Threshold2", th2)
cv.imshow("Threshold", th1)
cv.imshow("Image",img)
cv.imshow("th4", th4)
cv.imshow("th5", th5)
# Block until a key is pressed, then tear down all windows.
cv.waitKey(0)
cv.destroyAllWindows()
2,320 | 38be4e75c2311a1e5a443d39a414058dc4d1879b | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def distribution():
    """Confirmed-case rows that have an age, with id/date/geo columns dropped.

    Fetches the covid19za confirmed-cases CSV over the network on every call.
    """
    ##testing_results = pd.read_csv('https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_testing.csv')
    confirmed_results = pd.read_csv('https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_confirmed.csv')
    trial = pd.notnull(confirmed_results["age"])
    ##attempt = pd.isnull(confirmed_results["age"])
    return(confirmed_results[trial].drop(columns=['case_id', 'YYYYMMDD','geo_subdivision']))
def distribution_plot():
    """Interactive age-distribution histogram of confirmed COVID-19 cases.

    Prompts for a bin count and an xtick step, then draws a seaborn
    distplot of the age column.  NOTE(review): the `xticks` input is read
    but never used (the xtick step is hard-coded to 4) — confirm intent.
    """
    confirmed_results = pd.read_csv('https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_confirmed.csv')
    trial = pd.notnull(confirmed_results["age"])
    ##attempt = pd.isnull(confirmed_results["age"])
    print('Enter the number of bins between 0 and 100')
    n_of_bins = input(str())
    print('Enter the number of xticks between 0 and 4')
    xticks = input(str())
    plt.figure(figsize=(15,8)) #Set figure size
    plt.title('Distribution of Age of the COVID-19 Positive Cases in South Africa') #Set axis title
    plt.xticks(np.arange(confirmed_results[trial]['age'].min(), confirmed_results[trial]['age'].max(), step=4)) # Set label locations.
    plots = sns.distplot(confirmed_results[trial]['age'],
                         bins=int(n_of_bins),
                         kde=True,
                         rug=True) #"rug" will give the ticks on the x-axis
    print('The highest age of all COVID-19 patients is: ' + str(confirmed_results[trial]['age'].max()))
    return(plots)
def other_distributions():
    """Draw province countplot, gender countplot, and cumulative-cases line.

    Fetches the confirmed-cases CSV, prints dataframe shape info, and
    annotates each point of the cumulative line with its value.
    """
    confirmed_results = pd.read_csv('https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_confirmed.csv')
    trial = pd.notnull(confirmed_results["age"])
    ##attempt = pd.isnull(confirmed_results["age"])
    plt.figure(figsize=(15,8)) #Set figure size
    plt.title('Countplot of the COVID-19 Positive Cases in each South African Province')
    sns.countplot(confirmed_results[trial]['province'],
                  order = confirmed_results[trial]['province'].value_counts().index,
                  palette='RdBu')
    plt.figure(figsize=(15,8)) #Set figure size
    plt.title('Gender difference of the COVID-19 in South Africa')
    sns.countplot(confirmed_results[trial]['gender'])
    print('Number of rows and columns in the dataframe: ' + str(confirmed_results[trial].shape)) #"shape" will give this tupple of rows and columns
    print('Number of rows: ' + str(confirmed_results[trial].shape[0])) #you can index a tuple like a list!
    confirmed_results[trial][['date', 'country']].groupby('date').count()
    confirmed_results[trial][['date', 'country']].groupby('date').count().cumsum().reset_index().rename(columns={'country':'cumulative sum'}) # "cumsum()" will give the cumulative sum
    plt.figure(figsize=(25,8)) #Set figure size
    plt.title('The Number of patients infected with the COVID-19 in South Africa')
    cumulative_cases = confirmed_results[trial][['date', 'country']].groupby('date').count().cumsum().reset_index().rename(columns={'country':'cumulative sum'}) #create cumulative dataframe
    ax = sns.lineplot(data=cumulative_cases, x='date', y='cumulative sum',
                      marker='o',
                      dashes=False)
    # Label every point of the cumulative line with its integer value.
    for i in cumulative_cases.groupby('date'):
        #i[1] is a grouped data frame; looping through each data row in the cumulative dataframe
        for x,y,m in i[1][['date','cumulative sum','cumulative sum']].values: # x = x value; y = y_value ; m = marker value
            ax.text(x,y,f'{m:.0f}') #ax.text will
    return(plt.show())
def overall_data():
    """2x2 dashboard: age distplot, province countplot, gender countplot,
    cumulative-cases line — plus null/total row counts printed to stdout.

    Fetches the confirmed-cases CSV over the network on every call.
    """
    confirmed_results = pd.read_csv('https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_confirmed.csv')
    trial = pd.notnull(confirmed_results["age"])
    attempt = pd.isnull(confirmed_results["age"])
    cumulative_cases = confirmed_results[trial][['date', 'country']].groupby('date').count().cumsum().reset_index().rename(columns={'country':'cumulative sum'}) #create cumulative dataframe
    fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(35,10))
    graph1 = sns.distplot(confirmed_results[trial]['age'],
                          bins=20,
                          kde=True,
                          rug=True,
                          ax=ax[0,0])
    ax[0,0].title.set_text('Distribution of Age of the COVID-19 Positive Cases in South Africa')
    graph2 = sns.countplot(confirmed_results[trial]['province'],
                           order = confirmed_results[trial]['province'].value_counts().index,
                           palette='RdBu',
                           ax=ax[0,1])
    ax[0,1].title.set_text('Countplot of the COVID-19 Positive Cases in each South African Province')
    graph3 = sns.countplot(confirmed_results[trial]['gender'], ax=ax[1,0])
    ax[1,0].title.set_text('Gender difference of the patients infected with COVID-19 in South Africa')
    graph4 = sns.lineplot(data=cumulative_cases, x='date', y='cumulative sum',
                          marker='o',
                          dashes=False,
                          ax=ax[1,1])
    # Annotate every point of the cumulative line with its integer value.
    for i in cumulative_cases.groupby('date'):
        #i[1] is a grouped data frame; looping through each data row in the cumulative dataframe
        for x,y,m in i[1][['date','cumulative sum','cumulative sum']].values: # x = x value; y = y_value ; m = marker value
            ax[1,1].text(x,y,f'{m:.0f}') #ax.text will
    ax[1,1].title.set_text('The Number of patients infected with the COVID-19 in South Africa')
    ax[1,1].tick_params(labelrotation=45)
    print('Total Number of Cases without Null Values: ' + str(confirmed_results[trial].shape[0]))
    print('Total Number of Cases with Null Values: ' + str(confirmed_results[attempt].shape[0]))
    print('Total Number of Cases: ' + str(confirmed_results.shape[0]))
    return(graph1,graph2,graph3,graph4)
2,321 | 2251a6064998f25cca41b018a383053d73bd09eb | #!/usr/bin/env python2.7
# Google APIs
from oauth2client import client, crypt
CLIENT_ID = '788221055258-j59svg86sv121jdr7utnhc2rs9tkb9s4.apps.googleusercontent.com'
def fetchIdToken():
    """Fetch token info from Google's tokeninfo endpoint.

    Returns the raw response body, or None when the HTTP status is not 200.
    NOTE(review): the original passed CLIENT_ID where an id_token value is
    expected by the endpoint — presumably the caller should supply the
    token; confirm before shipping.
    """
    import urllib  # Python 2 stdlib; the original used it without importing
    url = 'https://www.googleapis.com/oauth2/v3/tokeninfo'
    # BUG FIXES: urlencode expects a mapping, not a bare string, and the
    # response method is getcode() (the original called nonexistent getCode()).
    f = urllib.urlopen(url + '?' + urllib.urlencode({'id_token': CLIENT_ID}))
    if f.getcode() != 200:
        return None
    return f.read()
def getIdInfo(token):
    """Verify a Google ID token and return its claims dict, or None.

    Rejects tokens whose audience is not our CLIENT_ID or whose issuer is
    not a Google accounts domain; verification failures are swallowed.
    """
    try:
        idinfo = client.verify_id_token(token, CLIENT_ID)
        if idinfo['aud'] not in [CLIENT_ID]:
            # raise crypt.AppIdentityError("Unrecognized client.")
            return None
        if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
            # raise crypt.AppIdentityError("Wrong issuer.")
            return None
    except crypt.AppIdentityError:
        return None
    return idinfo
|
2,322 | c30b0db220bdacd31ab23aa1227ce88affb79daa | from __future__ import absolute_import, division, print_function
import time
from flytekit.sdk.tasks import python_task, dynamic_task, inputs, outputs
from flytekit.sdk.types import Types
from flytekit.sdk.workflow import workflow_class, Input
from six.moves import range
@inputs(value1=Types.Integer)
@outputs(out=Types.Integer)
@python_task(cpu_request="1", cpu_limit="1", memory_request="5G")
def dynamic_sub_task(workflow_parameters, value1, out):
    """Load-test worker: holds resources for ~11 minutes, then emits value1*2."""
    # Stay alive for 11*60 one-second ticks so long-running-task behaviour
    # (scheduling, monitoring, timeouts) can be observed on the platform.
    for i in range(11*60):
        print("This is load test task. I have been running for {} seconds.".format(i))
        time.sleep(1)
    output = value1*2
    print("Output: {}".format(output))
    out.set(output)
@inputs(tasks_count=Types.Integer)
@outputs(out=[Types.Integer])
@dynamic_task(cache_version='1')
def dynamic_task(workflow_parameters, tasks_count, out):
    """Fan out `tasks_count` sub-tasks and collect their outputs into `out`.

    NOTE(review): this def rebinds the imported `dynamic_task` decorator
    name; the decorator call above still resolves to the import, but later
    references to `dynamic_task` hit this function — confirm intended.
    """
    res = []
    for i in range(0, tasks_count):
        task = dynamic_sub_task(value1=i)
        yield task
        res.append(task.outputs.out)
    # Define how to set the final result of the task
    out.set(res)
@workflow_class
class FlyteDJOLoadTestWorkflow(object):
    # Workflow input: how many load-test sub-tasks to fan out.
    tasks_count = Input(Types.Integer)
    dj = dynamic_task(tasks_count=tasks_count)
|
2,323 | 4bb973b598a9c35394a0cd78ed9ba807f3a595d7 | from celery_app import celery_app
@celery_app.task
def demo_celery_run():
    """Smoke-test task: returns a constant so broker/worker wiring can be verified."""
    return 'result is ok'
2,324 | d6a73365aa32c74798b6887ff46c0ed2323ed1a6 | import glob
# Rewrite sibling-module imports in every local .py file as explicit
# relative imports, writing each result to a 'new_'-prefixed copy.
pyfiles = glob.glob('*.py')
modulenames = [f.split('.')[0] for f in pyfiles]
# print(modulenames)
for f in pyfiles:
    with open(f) as src:  # the original leaked the file handle
        contents = src.read()
    for m in modulenames:
        v1 = "import " + m
        v2 = "from " + m
        # BUG FIX: the original tested `if v1 or v2 in contents`, which is
        # always true (a non-empty string is truthy); test both substrings.
        if v1 in contents or v2 in contents:
            contents = contents.replace(v1, "import ."+m)
            contents = contents.replace(v2, "from ."+m)
    with open('new_'+f, 'w') as outf:
        outf.write(contents)
|
2,325 | bf73e2109f11b2214fae060bc343b01091765c2a | from ..IReg import IReg
class RC165(IReg):
    """SPED register C165 layout: field headers plus its hierarchy level."""

    def __init__(self):
        # Field names of the C165 record, in file order.
        self._header = [
            'REG',
            'COD_PART',
            'VEIC_ID',
            'COD_AUT',
            'NR_PASSE',
            'HORA',
            'TEMPER',
            'QTD_VOL',
            'PESO_BRT',
            'PESO_LIQ',
            'NOM_MOT',
            'CPF',
            'UF_ID',
        ]
        # Depth of this register within the block hierarchy.
        self._hierarchy = "3"
|
2,326 | a47ffd5df49ec627442a491f81a117b3e68ff50b | # Copyright (c) 2019 NVIDIA Corporation
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core.neural_types import *
from nemo.core import DeviceType
import torch
from .datasets import BertPretrainingDataset
class BertPretrainingDataLayer(DataLayerNM):
    """Data layer wrapping BertPretrainingDataset for BERT pretraining.

    Exposes masked-LM inputs/outputs and next-sentence labels as
    (batch, time)-shaped neural-type ports.
    """

    @staticmethod
    def create_ports():
        """Declare the module's (empty) input ports and six output ports."""
        input_ports = {}
        output_ports = {
            "input_ids":
            NeuralType({
                0: AxisType(BatchTag),
                1: AxisType(TimeTag)
            }),
            "input_type_ids":
            NeuralType({
                0: AxisType(BatchTag),
                1: AxisType(TimeTag)
            }),
            "input_mask":
            NeuralType({
                0: AxisType(BatchTag),
                1: AxisType(TimeTag)
            }),
            "output_ids":
            NeuralType({
                0: AxisType(BatchTag),
                1: AxisType(TimeTag)
            }),
            "output_mask":
            NeuralType({
                0: AxisType(BatchTag),
                1: AxisType(TimeTag)
            }),
            "labels":
            NeuralType({0: AxisType(BatchTag)}),
        }
        return input_ports, output_ports

    def __init__(self, *, tokenizer, dataset, name, max_seq_length,
                 sentence_indices_filename=None, mask_probability=0.15,
                 **kwargs):
        """Build the underlying BertPretrainingDataset and choose a device."""
        DataLayerNM.__init__(self, **kwargs)
        # Use CUDA when the framework placed this module on a GPU.
        self._device = torch.device(
            "cuda" if self.placement in [DeviceType.GPU, DeviceType.AllGpu]
            else "cpu"
        )
        self._dataset = BertPretrainingDataset(
            tokenizer=tokenizer,
            dataset=dataset,
            name=name,
            sentence_indices_filename=sentence_indices_filename,
            max_length=max_seq_length,
            mask_probability=mask_probability)

    def __len__(self):
        # Size of the wrapped dataset.
        return len(self._dataset)

    @property
    def dataset(self):
        # The framework consumes the dataset directly...
        return self._dataset

    @property
    def data_iterator(self):
        # ...so no custom iterator is provided.
        return None
|
2,327 | 7336b8dec95d23cbcebbff2a813bbbd5575ba58f | from collections import namedtuple
from os import getenv
from pathlib import Path
TMP = getenv("TMP", "/tmp")
# Cache directory: override with PYBITES_FAKER_DIR, else the system temp dir.
PYBITES_FAKER_DIR = Path(getenv("PYBITES_FAKER_DIR", TMP))
CACHE_FILENAME = "pybites-fake-data.pkl"
FAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME
# Upstream feeds the fake data is scraped from.
BITE_FEED = "https://codechalleng.es/api/bites/"
BLOG_FEED = "https://pybit.es/feeds/all.rss.xml"
# Lightweight record types exposed to consumers of the fake data.
Bite = namedtuple("Bite", "number title level")
Article = namedtuple("Article", "author title tags")
|
2,328 | e690587c9b056f8d5a1be6dd062a2aa32e215f50 | import os
import json
import requests
from fin import myBuilder, myParser
import time
def open_config():
    """Load fin/config.json.

    Returns the parsed dict, or the string 'no config found' when the
    file is missing (return value kept for backward compatibility —
    existing callers compare against it).
    """
    # Idiom fix: the original tested `os.path.isfile(...) != True`.
    if not os.path.isfile('fin/config.json'):
        return ('no config found')
    print('config found')
    with open('fin/config.json') as conf:
        conf = json.load(conf)
    return conf
# Module-level configuration loaded at import time.
# NOTE(review): when the config file is missing, open_config returns a str
# and the .get calls below raise AttributeError — confirm this is intended.
conf = open_config()
logfile = conf.get('game_path')
database_path = conf.get('database_path')
application_id = str(conf.get('application_id'))
url = str(conf.get('url'))
def get_local_date():
    """Read the game's dateTime stamp from the local log file.

    Returns the stamp, or None (implicitly) when the log is missing or
    unreadable — failures are reported on stdout, never raised.
    """
    try:
        with open(logfile) as Log:
            LogJSON = json.load(Log)
        # NOTE: the original also called Log.close() inside the with block;
        # the context manager already closes the file.
        LocalDateTime = LogJSON['dateTime']
        print('LocalDateTime:', LocalDateTime)
        return LocalDateTime
    except Exception:  # narrowed from a bare except (still best-effort)
        print('no logfile found')
def get_remote_date():
    """GET the server-side date marker; returns parsed JSON or None."""
    try:
        r = requests.get(url)
        answer = r.json()
        if answer is not None:
            print('RemoteDate:', answer)
            return answer
        else:
            print('no remote date found')
    except:
        # best effort: any network/JSON failure is reported, not raised
        print('no remote connection found')
def build_exportData(LocalDate):
    """Build the export payload for *LocalDate* via the myBuilder module."""
    print('exportData:')
    exportData = myBuilder.build_export(LocalDate)
    return (exportData)
def post_Result(Result):
    """POST the result payload to the configured endpoint (best effort)."""
    try:
        res = requests.post(url, json=Result)
        if res.ok:
            print(res.json())
    except:
        # connectivity problems are reported on stdout, never raised
        print('error POST request')
def compare_dates():
    """One sync step: when local and remote date markers differ, re-parse,
    rebuild and upload the export, then back off for 10 seconds."""
    RemoteDate = str(get_remote_date())
    LocalDate = str(get_local_date())
    if LocalDate == RemoteDate:
        print('dates match')
    else:
        print('no match')
        print('LocalDate:', LocalDate)
        print('RemoteDate:', RemoteDate)
        try:
            print(myParser.main_update())
            Result = build_exportData(LocalDate)
            post_Result(Result)
            time.sleep(10)
        except:
            # best effort: parsing/upload failures are logged and skipped
            print('error parsing')
def loop():
    """Poll forever, comparing local vs remote markers every 5 seconds."""
    while True:
        compare_dates()
        time.sleep(5)
# def main():
loop()
|
2,329 | 044e3479c32357e22ca3165d8601d8bd2a439fcb | from django.forms import ModelForm, ChoiceField, Form, FileField, ModelChoiceField, HiddenInput, ValidationError
from market.models import *
class OrderForm(ModelForm):
    """Order form used in trader view."""
    # from http://stackoverflow.com/questions/1697702/how-to-pass-initial-parameter-to-djangos-modelform-instance/1697770#1697770
    # price from http://stackoverflow.com/questions/6473895/how-to-restrict-values-in-a-django-decimalfield
    # restricts prices to 0.0 through 2.0
    PRICE_CHOICES = [(i*.01, str(i*.01)) for i in range(1,201)]
    price = ChoiceField(choices=PRICE_CHOICES)
    # trader/market are fixed by the view, so they render as hidden fields
    trader = ModelChoiceField(label='', queryset=Trader.objects.all(), widget=HiddenInput())
    market = ModelChoiceField(label='', queryset=Market.objects.all(), widget=HiddenInput())
    def clean(self):
        """Validates the data. Ensures the trader has enough cash or shares
        to complete the requested order."""
        cleaned_data = self.cleaned_data
        if cleaned_data.get('order') and cleaned_data.get('stock') \
            and cleaned_data.get('volume') and cleaned_data.get('price'):
            t = cleaned_data['trader']
            if cleaned_data['order'] == 'B': # buy order
                # cash must cover this order plus all still-open buy orders
                open_orders = Order.objects.filter(trader=t,
                        order='B', completed=False)
                open_order_value = float(sum([o.volume * o.price for o in open_orders]))
                open_order_value += int(cleaned_data['volume']) * float(cleaned_data['price'])
                if open_order_value > t.cash:
                    raise ValidationError("You don't have enough cash!")
            elif cleaned_data['order'] == 'S': # sell order!
                # shares must cover this order plus open sells of this stock
                open_orders = sum(Order.objects.filter(trader=t, order='S',
                        stock=cleaned_data['stock'],
                        completed=False).values_list('volume', flat=True))
                open_orders += cleaned_data['volume']
                if open_orders > t.holding_set.get(stock=cleaned_data['stock']).shares:
                    raise ValidationError("You don't have enough shares!")
        return cleaned_data
    class Meta:
        model = Order
        fields = ('stock', 'order', 'volume', 'price', 'trader', 'market')
class UploadFileForm(Form):
    # Minimal single-file upload form.
    file = FileField()
|
2,330 | fab15d34d29301e53a26577725cdd66dca7507bc | # PySNMP SMI module. Autogenerated from smidump -f python DS0BUNDLE-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:37 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( InterfaceIndex, ifIndex, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( Bits, Integer32, ModuleIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, transmission, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "ModuleIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "transmission")
( DisplayString, RowStatus, TestAndIncr, ) = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TestAndIncr")
# Objects
ds0Bundle = ModuleIdentity((1, 3, 6, 1, 2, 1, 10, 82)).setRevisions(("1998-07-16 16:30","1998-05-24 20:10",))
if mibBuilder.loadTexts: ds0Bundle.setOrganization("IETF Trunk MIB Working Group")
if mibBuilder.loadTexts: ds0Bundle.setContactInfo(" David Fowler\n\nPostal: Newbridge Networks Corporation\n 600 March Road\n Kanata, Ontario, Canada K2K 2E6\n\n Tel: +1 613 591 3600\n Fax: +1 613 599 3619\n\nE-mail: davef@newbridge.com")
if mibBuilder.loadTexts: ds0Bundle.setDescription("The MIB module to describe\nDS0 Bundle interfaces objects.")
dsx0BondingTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 1))
if mibBuilder.loadTexts: dsx0BondingTable.setDescription("The DS0 Bonding table.")
dsx0BondingEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 1, 1)).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: dsx0BondingEntry.setDescription("An entry in the DS0 Bonding table. There is a\nrow in this table for each DS0Bundle interface.")
dsx0BondMode = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 1), Integer().subtype(subtypeSpec=SingleValueConstraint(1,5,6,3,4,2,)).subtype(namedValues=NamedValues(("none", 1), ("other", 2), ("mode0", 3), ("mode1", 4), ("mode2", 5), ("mode3", 6), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BondMode.setDescription("This object indicates which BONDing mode is used,\nif any, for a ds0Bundle. Mode0 provides parameter\nand number exchange with no synchronization. Mode\n1 provides parameter and number exchange. Mode 1\nalso provides synchronization during\ninitialization but does not include inband\nmonitoring. Mode 2 provides all of the above plus\ninband monitoring. Mode 2 also steals 1/64th of\nthe bandwidth of each channel (thus not supporting\nn x 56/64 kbit/s data channels for most values of\nn). Mode 3 provides all of the above, but also\nprovides n x 56/64 kbit/s data channels. Most\ncommon implementations of Mode 3 add an extra\nchannel to support the inband monitoring overhead.\nModeNone should be used when the interface is not\nperforming bandwidth-on-demand.")
dsx0BondStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(1,3,2,)).subtype(namedValues=NamedValues(("idle", 1), ("callSetup", 2), ("dataTransfer", 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx0BondStatus.setDescription("This object indicates the current status of the\nbonding call using this ds0Bundle. idle(1) should\nbe used when the bonding mode is set to none(1).")
dsx0BondRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BondRowStatus.setDescription("This object is used to create new rows in this\ntable, modify existing rows, and to delete\nexisting rows.")
dsx0BundleNextIndex = MibScalar((1, 3, 6, 1, 2, 1, 10, 82, 2), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dsx0BundleNextIndex.setDescription("This object is used to assist the manager in\nselecting a value for dsx0BundleIndex. Because\nthis object is of syntax TestAndIncr (see the\nSNMPv2-TC document, RFC 1903) it can also be used\nto avoid race conditions with multiple managers\ntrying to create rows in the table.\n\nIf the result of the SET for dsx0BundleNextIndex\nis not success, this means the value has been\nchanged from index (i.e. another manager used the\nvalue), so a new value is required.\n\nThe algorithm is:\ndone = false\nwhile done == false\n index = GET (dsx0BundleNextIndex.0)\n SET (dsx0BundleNextIndex.0=index)\n if (set failed)\n done = false\n else\n SET(dsx0BundleRowStatus.index=createAndGo)\n if (set failed)\n done = false\n else\n done = true\n other error handling")
dsx0BundleTable = MibTable((1, 3, 6, 1, 2, 1, 10, 82, 3))
if mibBuilder.loadTexts: dsx0BundleTable.setDescription("There is an row in this table for each ds0Bundle\nin the system. This table can be used to\n(indirectly) create rows in the ifTable with\nifType = 'ds0Bundle(82)'.")
dsx0BundleEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 82, 3, 1)).setIndexNames((0, "DS0BUNDLE-MIB", "dsx0BundleIndex"))
if mibBuilder.loadTexts: dsx0BundleEntry.setDescription("There is a row in entry in this table for each\nds0Bundle interface.")
dsx0BundleIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: dsx0BundleIndex.setDescription("A unique identifier for a ds0Bundle. This is not\nthe same value as ifIndex. This table is not\nindexed by ifIndex because the manager has to\nchoose the index in a createable row and the agent\nmust be allowed to select ifIndex values.")
dsx0BundleIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dsx0BundleIfIndex.setDescription("The ifIndex value the agent selected for the\n(new) ds0Bundle interface.")
dsx0BundleCircuitIdentifier = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BundleCircuitIdentifier.setDescription("This variable contains the transmission vendor's\ncircuit identifier, for the purpose of\nfacilitating troubleshooting.")
dsx0BundleRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 82, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: dsx0BundleRowStatus.setDescription("This object is used to create and delete rows in\nthis table.")
ds0BundleConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4))
ds0BundleGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 1))
ds0BundleCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 82, 4, 2))
# Augmentions
# Groups
ds0BondingGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 1)).setObjects(*(("DS0BUNDLE-MIB", "dsx0BondMode"), ("DS0BUNDLE-MIB", "dsx0BondStatus"), ("DS0BUNDLE-MIB", "dsx0BondRowStatus"), ) )
if mibBuilder.loadTexts: ds0BondingGroup.setDescription("A collection of objects providing\nconfiguration information applicable\nto all DS0 interfaces.")
ds0BundleConfigGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 10, 82, 4, 1, 2)).setObjects(*(("DS0BUNDLE-MIB", "dsx0BundleIfIndex"), ("DS0BUNDLE-MIB", "dsx0BundleRowStatus"), ("DS0BUNDLE-MIB", "dsx0BundleCircuitIdentifier"), ("DS0BUNDLE-MIB", "dsx0BundleNextIndex"), ) )
if mibBuilder.loadTexts: ds0BundleConfigGroup.setDescription("A collection of objects providing the ability to\ncreate a new ds0Bundle in the ifTable as well as\nconfiguration information about the ds0Bundle.")
# Compliances
ds0BundleCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 10, 82, 4, 2, 1)).setObjects(*(("DS0BUNDLE-MIB", "ds0BundleConfigGroup"), ("DS0BUNDLE-MIB", "ds0BondingGroup"), ) )
if mibBuilder.loadTexts: ds0BundleCompliance.setDescription("The compliance statement for DS0Bundle\ninterfaces.")
# Exports
# Module identity
mibBuilder.exportSymbols("DS0BUNDLE-MIB", PYSNMP_MODULE_ID=ds0Bundle)
# Objects
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0Bundle=ds0Bundle, dsx0BondingTable=dsx0BondingTable, dsx0BondingEntry=dsx0BondingEntry, dsx0BondMode=dsx0BondMode, dsx0BondStatus=dsx0BondStatus, dsx0BondRowStatus=dsx0BondRowStatus, dsx0BundleNextIndex=dsx0BundleNextIndex, dsx0BundleTable=dsx0BundleTable, dsx0BundleEntry=dsx0BundleEntry, dsx0BundleIndex=dsx0BundleIndex, dsx0BundleIfIndex=dsx0BundleIfIndex, dsx0BundleCircuitIdentifier=dsx0BundleCircuitIdentifier, dsx0BundleRowStatus=dsx0BundleRowStatus, ds0BundleConformance=ds0BundleConformance, ds0BundleGroups=ds0BundleGroups, ds0BundleCompliances=ds0BundleCompliances)
# Groups
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0BondingGroup=ds0BondingGroup, ds0BundleConfigGroup=ds0BundleConfigGroup)
# Compliances
mibBuilder.exportSymbols("DS0BUNDLE-MIB", ds0BundleCompliance=ds0BundleCompliance)
|
2,331 | b5ec6e0fc4239a53a882b455a113eaac4db6cef5 | from Graph import *
from PrioQueue import *
from GShortestPath import *
from GSpanTree import *
from User import *
infinity = float("inf")
# 这是根据关键字找地点的方法,已经形成了某个依据属性的表后,通过关键词匹配来解决问题
# 最终输出一个yield出的迭代器,将其list化后就可以向末端输出了
def find_by_word(lst, word):
    """Match place names in *lst* against the user's input *word*.

    Exact matches are trusted and returned immediately.  Otherwise every
    contiguous slice of *word* (shorter slices first, so closer matches
    surface earlier) is tried as a substring of each candidate name.
    Place names are short, so the extra cost is acceptable.

    Raises ValueError for words longer than 20 characters, which we
    assume cannot be real city/town names.
    """
    ans = []
    for x in lst:
        if word == x:
            ans.append(x)
    if len(word) > 20:
        # BUG FIX: the original raised the misspelled name `ValuError`,
        # which itself raised NameError at runtime.
        raise ValueError("in find_by_word, we don't think it's possible for a city or a town\
 to own a name longer than 20")
    # If the user's input is a known name verbatim, trust it.
    if ans != []:
        return ans
    slices = []
    for i in range(len(word)):
        # Inner-loop ordering keeps shorter slices first in `slices`.
        for j in range(0, len(word) - i + 1):
            slices.append(word[j:j + i])
    for x in lst:
        for i in range(1, len(word)):
            if slices[-i] in x:
                ans.append(x)
    return ans
# Closed set of landscape categories accepted by landscape.set_category.
categorys = {"历史文化", "现代都市", "山区", "海景", "综合"}
# Edge weight meaning "no direct connection" in the travel graphs.
infnum = float("inf")
class web:
    """Travel network: a list of landscapes plus three parallel adjacency
    graphs weighting each connection by money, time and line length."""

    # BUG FIX: the original declared mutable default arguments (one shared
    # list and shared GraphAL instances for every default-constructed web);
    # defaults are now created per instance.
    def __init__(self, lnum=0, land_list=None, graph_money=None,
                 graph_time=None, graph_line=None):
        self.graph_money = GraphAL() if graph_money is None else graph_money
        self.graph_time = GraphAL() if graph_time is None else graph_time
        self.graph_line = GraphAL() if graph_line is None else graph_line
        self.lnum = lnum
        self.land_list = [] if land_list is None else land_list

    def is_empty(self):
        """True when the network holds no landscapes."""
        return self.lnum == 0

    def _get_name(self):
        """All landscape names, in list order.  (BUG FIX: the original
        called self.land_list() — a list is not callable.)"""
        if self.is_empty():
            raise WebLandsError("in 'get_all_position'")
        namee = []
        for x in self.land_list:
            namee.append(x.name)
        return namee

    def lst_pos(self, land):
        """Vertex index of *land* in the graphs (its position in land_list)."""
        return self.land_list.index(land)

    def _get_position(self):
        """All landscape positions, in list order."""
        if self.is_empty():
            raise WebLandsError("in 'get_all_position'")
        positionn = []
        for x in self.land_list:
            positionn.append(x.position)
        return positionn

    def add_land(self, landscape):
        """Append a landscape and grow every adjacency graph by one vertex."""
        self.land_list.append(landscape)
        self.graph_money.add_vertex()
        self.graph_time.add_vertex()
        self.graph_line.add_vertex()
        self.lnum += 1

    # If money/time/line are left unset, no usable edge connects the spots.
    def set_all(self, land1, land2, money=float("inf"), time=float("inf"), line=1):
        """Set all three edge weights at once.  (BUG FIX: the original
        referenced the bare names graph_money/... and called
        self.land_list(); both raised at runtime.)"""
        vi = self.land_list.index(land1)
        vj = self.land_list.index(land2)
        self.graph_money.add_edge(vi, vj, money)
        self.graph_time.add_edge(vi, vj, time)
        self.graph_line.add_edge(vi, vj, line)

    # The per-weight setters/getters below back the Dijkstra-based
    # shortest-path helpers defined later in the module.
    def set_money(self, land1, land2, money):
        self.graph_money.add_edge(self.land_list.index(land1),
                                  self.land_list.index(land2), money)

    def get_money(self, land1, land2):
        return self.graph_money.get_edge(self.land_list.index(land1),
                                         self.land_list.index(land2))

    def set_time(self, land1, land2, time):
        # BUG FIX: the original wrote time weights into graph_money,
        # so get_time could never observe them.
        self.graph_time.add_edge(self.land_list.index(land1),
                                 self.land_list.index(land2), time)

    def get_time(self, land1, land2):
        return self.graph_time.get_edge(self.land_list.index(land1),
                                        self.land_list.index(land2))

    def set_line(self, land1, land2, line):
        self.graph_line.add_edge(self.land_list.index(land1),
                                 self.land_list.index(land2), line)

    def get_line(self, land1, land2):
        return self.graph_line.get_edge(self.land_list.index(land1),
                                        self.land_list.index(land2))
# shortestmoney等开始
def shortest_money(web, land1, land2):
    """Cheapest path between two landscapes via Dijkstra on the money graph.

    Returns (list of vertex indices along the path, total money cost).
    BUG FIX: the original raised the misspelled `ValuError`.
    """
    vi = web.lst_pos(land1)
    vj = web.lst_pos(land2)
    if vi == vj:
        raise ValueError("in shortest_money,\
 if the begining is the same as the ending, you don't have to pay anything")
    path = dijkstra_shortest_paths(web.graph_money, vi)
    path_list = [vi]
    while vi != path[vj][0]:
        path_list.append(path[vj][0])
        vi = path[vj][0]
    return path_list, path[vj][1]
def shortest_money_str(web, land1, land2):
    """Render the cheapest path as 'name1->name2->...->end' plus its cost."""
    str_ = ""
    path, pay = shortest_money(web, land1, land2)
    for i in range(len(path)):
        str_ += str(web.land_list[path[i]].name)
        str_ += "->"
    # the destination itself is not in path, append it explicitly
    str_ += land2.name
    return "所求的最短路money路径为", str_, "总money代价为", pay
def shortest_time(web, land1, land2):
    """Fastest path between two landscapes via Dijkstra on the time graph.

    Returns (list of vertex indices along the path, total time cost).
    BUG FIXES vs the original: `ValuError` -> ValueError, and
    `web.graph_time()` -> `web.graph_time` (it is an attribute, exactly as
    shortest_money uses web.graph_money — not a callable).
    """
    vi = web.lst_pos(land1)
    vj = web.lst_pos(land2)
    if vi == vj:
        raise ValueError("in shortest_time,\
 if the begining is the same as the ending, you don't have to pay anything")
    path = dijkstra_shortest_paths(web.graph_time, vi)
    path_list = [vi]
    while vi != vj:
        path_list.append(path[vj][0])
        vi = path[vj][0]
    return path_list, path[vj][1]
def shortest_time_str(web, land1, land2):
    """Render the fastest path's vertex indices plus the total time cost."""
    str_ = ""
    path, pay = shortest_time(web, land1, land2)
    for i in range(len(path)):
        str_ += str(path[i])
    return "所求的最短路time路径为", str_, "总time代价为", pay
def shortest_line(web, land1, land2):
    """Shortest path by line length via Dijkstra on the line graph.

    Returns (list of vertex indices along the path, total line cost).
    BUG FIXES vs the original: `ValuError` -> ValueError, and
    `web.graph_line()` -> `web.graph_line` (attribute, not a callable).
    """
    vi = web.lst_pos(land1)
    vj = web.lst_pos(land2)
    if vi == vj:
        raise ValueError("in shortest_line,\
 if the begining is the same as the ending, you don't have to pay anything")
    path = dijkstra_shortest_paths(web.graph_line, vi)
    path_list = [vi]
    while vi != vj:
        path_list.append(path[vj][0])
        vi = path[vj][0]
    return path_list, path[vj][1]
def shortest_line_str(web, land1, land2):
    """Render the shortest line path's vertex indices plus its total cost.

    BUG FIX: this was a copy-paste still named `shortest_time_str`, which
    shadowed the real time version defined above; renamed to match the
    shortest_line helper it wraps (its return strings already said 'line').
    """
    str_ = ""
    path, pay = shortest_line(web, land1, land2)
    for i in range(len(path)):
        str_ += str(path[i])
    return "所求的最短路line路径为", str_, "总line代价为", pay
# shortest等结束
class landscape:  # a single scenic spot; `position` is a number identifying it
    """One scenic spot: name, numeric position, optional category and a
    popularity score (`hot`)."""

    def __init__(self, name, position, category=None, hot=0):
        self.name = name
        self.position = position
        self.category = category
        self.hot = hot

    # NOTE: the original defined accessor methods named position/category/
    # name/hot, but the instance attributes assigned in __init__ shadow
    # them, and their bodies read nonexistent self._* names — they were
    # unreachable dead code and have been removed; use the attributes.

    def set_category(self, sorts):
        """Assign a category after validating it against the module-level
        `categorys` set.  (BUG FIX: the original raised the misspelled
        `ValuError`.)"""
        if sorts not in categorys:
            raise ValueError("in set_category, we do not have {}".format(sorts))
        self.category = sorts
# For multi-target trips, first build a sub-web holding all target
# landscapes with the existing methods; the minimum-spanning-tree step
# (Prim's algorithm) is meant to solve the multi-target routing problem.
def muti_aim_solve(land_list):
    # NOTE(review): this function appears unfinished — vi/vj are computed
    # but never used, `web.land_list()` calls a list attribute on the
    # class, and `lst_pos` is invoked as a free function.  Confirm before
    # relying on it.
    sub_web = web()
    for x in land_list:
        sub_web.add_land(x)
    lanst = web.land_list().copy()
    for x in lanst:
        for y in lanst:
            if x == y:
                continue
            vi = lst_pos(web, x)
            vj = lst_pos(web, y)
# Demo setup for a three-spot network.
# NOTE(review): `Edges` is not defined or imported anywhere visible in this
# module — this line raises NameError as written; confirm its origin.
a, b, c = Edges([0, 2, 4])
lst = ["东方明珠", "西湖", "迪士尼"]
china = web(3, lst, a, b, c)
|
2,332 | 582f2e6972bad85c2aaedd248f050f708c61973b | from django.contrib import admin
from students.models import Child_detail
class ChildAdmin(admin.ModelAdmin):
    def queryset(self, request):
        """
        Filter the Child objects to only
        display those for the currently signed in user.
        """
        qs = super(ChildAdmin, self).queryset(request)
        if request.user.is_superuser:
            return qs
        # Scope rows to the block/school/district the account is tied to.
        if request.user.user_category == 'block':
            return qs.filter(block=request.user.account.associated_with)
        if request.user.user_category == 'school':
            return qs.filter(school=request.user.account.associated_with)
        if request.user.user_category == 'district':
            return qs.filter(district=request.user.account.associated_with)
        # NOTE(review): any other user_category falls through to an
        # implicit None — confirm that is intended.
# Register your models here.
admin.site.register(Child_detail,ChildAdmin)
|
2,333 | edd98e3996b0fce46d33dd33340018ab5b029637 | import csv
import os
from collections import namedtuple
from typing import List, Dict
from config import *
HEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue']
# Record type for one assembly's statistics; field order follows HEADER.
Assembly_Stats = namedtuple('Assembly_Stats', HEADER)
# NOTE(review): `dir` shadows the builtin and looks like a leftover debug path.
dir = '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'
def read_assembly_file(file: str) -> List:
    """Parse LKH and AP objective values, gap counts and run times from a
    .assembly file.

    A missing file yields six -1 sentinels; an empty contig section yields
    -1 for the corresponding gap count.
    """
    if not os.path.isfile(file):
        return [-1, -1, -1, -1, -1, -1]
    with open(file, 'r') as handle:
        text = handle.read()

    # Gap count = newline count of the contig list between the two markers.
    if 'LKH_Contigs:\nLKH_Objective' in text:
        lkh_gaps = -1
    else:
        lkh_gaps = text.split('LKH_Contigs:\n')[1].split('\nLKH_Objective')[0].count('\n')
    lkh_value = int(text.split('LKH_Objective_Value: ')[1].split('\n')[0])
    lkh_time = float(text.split('LKH_Time: ')[1].split('\n')[0])

    if 'AP_Contigs:\nAP_Objective' in text:
        ap_gaps = -1
    else:
        ap_gaps = text.split('AP_Contigs:\n')[1].split('\nAP_Objective')[0].count('\n')
    ap_value = int(text.split('AP_Objective_Value: ')[1].split('\n')[0])
    ap_time = float(text.split('AP_Time: ')[1].split('\n')[0])

    return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]
def read_fasta_stats_file(file: str) -> List:
    """Parse a `fasta.stat` file.

    Returns ``[no_of_reads, actual_objective_value, actual_gaps]``.
    Fix: the return annotation said ``Dict`` but the function has always
    returned a list; the annotation is corrected to ``List``.
    """
    with open(file, 'r') as f:
        file_content_string = f.read()
    # Each value sits between its label and the following newline.
    actual_objective_value = int(file_content_string.split('Objective function value: ')[1].split('\n')[0])
    actual_gaps = int(file_content_string.split('Actual gaps: ')[1].split('\n')[0])
    no_of_reads = int(file_content_string.split('Number of reads: ')[1].split('\n')[0])
    return [no_of_reads, actual_objective_value, actual_gaps]
# def write_assembly_stats(assembly_stats_list: List[Assembly_Stats]) -> None:
# with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:
# f_csv = csv.writer(f, delimiter=',')
# f_csv.writerow(
# ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue'])
# for elem in assembly_stats_list:
# f_csv.writerow(elem)
def write_assembly_stats(statsdict: Dict) -> None:
    """Write one CSV row per (genome, coverage, length) run with the actual
    stats and the three Calign variants' results.

    Fix: the function previously ignored its ``statsdict`` parameter and
    read the module-global ``stats_dict``; it now uses the argument
    (callers pass the same object, so behaviour is unchanged).
    """
    with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:
        f_csv = csv.writer(f, delimiter=',')
        f_csv.writerow(
            ['Genome', 'Coverage', 'AvgLength', 'Reads', 'ActualValue', 'ActualGaps',
             'CalignLKHValue', 'CalignLKHGaps', 'CalignLKHTime',
             'CalignAPValue', 'CalignAPGaps', 'CalignAPTime',
             'CalignALKHValue', 'CalignALKHGaps', 'CalignALKHTime',
             'CalignAAPValue', 'CalignAAPGaps', 'CalignAAPTime',
             'CalignBLKHValue', 'CalignBLKHGaps', 'CalignBLKHTime',
             'CalignBAPValue', 'CalignBAPGaps', 'CalignBAPTime',
             ])
        # ref*_name, coverages and average_length_list come from `config`.
        for ref_name in [ref1_name, ref2_name, ref3_name]:
            for c in coverages:
                for length in average_length_list:
                    val = statsdict[(ref_name, c, length)]
                    row = [ref_name, c, length]
                    row += val['Actual']
                    row += val['Calign']
                    row += val['Calign25']
                    row += val['Calign50']
                    f_csv.writerow(row)
def write_assembly_stats_tex(statsdict: Dict) -> None:
    """Write the same statistics as write_assembly_stats as LaTeX table rows.

    Fix: the function previously ignored its ``statsdict`` parameter and
    read the module-global ``stats_dict``; it now uses the argument.
    """
    with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex', 'w') as f:
        for ref_name in [ref1_name, ref2_name, ref3_name]:
            # A dashed rule separates genomes (not before the first one).
            if ref1_name == ref_name:
                dashline_active = ''
            else:
                dashline_active = '\\hdashline\n'
            f.write('{}\\bfseries {}\\\\\n'.format(dashline_active, ref_name))
            for c in coverages:
                f.write('$c = {}$\\\\\n'.format(c))
                for length in average_length_list:
                    val = statsdict[(ref_name, c, length)]
                    row = [length]
                    row += [val['Actual'][0]]
                    row += ['']
                    row += val['Actual'][1:]
                    row += ['']
                    # Indices 2 and 5 are times; render them with two decimals.
                    row += [*val['Calign'][0:2], '{0:.2f}'.format(val['Calign'][2]), *val['Calign'][3:5],
                            '{0:.2f}'.format(val['Calign'][5])]
                    row += ['']
                    row += [*val['Calign25'][0:2], '{0:.2f}'.format(val['Calign25'][2]), *val['Calign25'][3:5],
                            '{0:.2f}'.format(val['Calign25'][5])]
                    row += ['']
                    row += [*val['Calign50'][0:2], '{0:.2f}'.format(val['Calign50'][2]), *val['Calign50'][3:5],
                            '{0:.2f}'.format(val['Calign50'][5])]
                    f.write(' & '.join([str(x) for x in row]) + '\\\\\n')
def write_assembly_stats2(statsdict: Dict) -> None:
    """Write a transposed stats table for the first two genomes: one row per
    metric, one column per (genome, coverage, length) combination.

    Fix: uses its ``statsdict`` parameter instead of the module-global
    ``stats_dict`` it silently depended on before; the throwaway loop
    variable ``foo`` is renamed to ``variant``.
    """
    with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv', 'w') as f:
        f_csv = csv.writer(f, delimiter=',')
        refs = [ref1_name, ref2_name]
        f_csv.writerow(range(len(refs) * 9))  # numeric header row
        # Actual reads / objective value / gaps, one row each.
        f_csv.writerow(
            [statsdict[(ref_name, c, l)]['Actual'][0] for ref_name in refs for c in
             coverages for l in average_length_list])
        f_csv.writerow(
            [statsdict[(ref_name, c, l)]['Actual'][1] for ref_name in refs for c in
             coverages for l
             in average_length_list])
        f_csv.writerow(
            [statsdict[(ref_name, c, l)]['Actual'][2] for ref_name in refs for c in
             coverages for l
             in average_length_list])
        for variant in ['Calign', 'Calign25', 'Calign50']:
            for i in range(6):
                if i in [2, 5]:
                    # Indices 2 and 5 are times; format with two decimals.
                    f_csv.writerow(
                        ['{0:.2f}'.format(statsdict[(ref_name, c, l)][variant][i]) for ref_name in refs for c in
                         coverages
                         for l in average_length_list])
                else:
                    f_csv.writerow(
                        [statsdict[(ref_name, c, l)][variant][i] for ref_name in refs for c in
                         coverages
                         for l in average_length_list])
assembly_stats_list = []
# Maps (genome name, coverage, average read length) -> per-variant stats lists.
stats_dict = {}
# for dir in sorted(glob.glob('/home/andreas/GDrive/workspace/sparsedata/ref[1,2,3]_c[5,20,40]*/')):
for ref_number in [1, 2, 3]:
    for coverage in coverages:
        for length in average_length_list:
            # file_sub_dir = dir.split('/')[-2] # example ref1_c5_l100
            # ref_number = int(file_sub_dir.split('ref')[1].split('_')[0])
            ref_name = references[ref_number - 1]
            # coverage = int(file_sub_dir.split('_c')[1].split('_')[0])
            # length = int(file_sub_dir.split('_l')[1])
            # Each run directory is named ref<N>_c<coverage>_l<length>.
            dir = '/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'.format(ref_number, coverage, length)
            # 'Calign25'/'Calign50' read the runs whose extra parameter is a
            # quarter / half of the average read length.
            stats_dict[(ref_name, coverage, length)] = {'Actual': read_fasta_stats_file(dir + 'fasta.stat'),
                                                        'Calign': read_assembly_file(dir + 'calign.assembly'),
                                                        'Calign25': read_assembly_file(
                                                            dir + 'calign_0_{}.assembly'.format(length // 4)),
                                                        'Calign50': read_assembly_file(
                                                            dir + 'calign_0_{}.assembly'.format(length // 2))}
            # dir = '{}-{}-{}'.format(references[ref_number - 1], coverage, length)
            # assembly_stats_list.append(
            #     Assembly_Stats(dir, len(lkh_contigs), lkh_value, lkh_time, len(ap_contigs), ap_value, ap_time,
            #                    actual_Objective_value))
def write_whole_stats() -> None:
    """Tally agreement counts from assembly_stats.csv into complete_stats.csv.

    For every solver variant, counts how often it reproduced the true
    objective value, the true gap count, and both; plus how often the LKH
    and AP answers of each Calign variant agreed with each other.  Output
    is '&'-delimited (LaTeX-ready).  Reads ``DIR`` from config.
    """
    headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP', 'CalignBLKH',
               'CalignBAP']
    # variant matched the true objective value
    vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
            'CalignBAP': 0}
    # variant matched the true gap count
    gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
            'CalignBAP': 0}
    # variant matched both value and gaps
    both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
            'CalignBAP': 0}
    # LKH and AP agreed on the value (credited to both members of the pair)
    atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
                   'CalignBAP': 0}
    # LKH and AP agreed on value AND gaps
    atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
                'CalignBAP': 0}
    with open(DIR + 'assembly_stats.csv', 'r') as f:
        f_csv = csv.DictReader(f, delimiter=',')
        for row in f_csv:
            for elem in headers:
                # String comparison is fine: both cells come from the same CSV.
                if row['ActualValue'] == row[elem + 'Value']:
                    vals[elem] += 1
                if row['ActualGaps'] == row[elem + 'Gaps']:
                    gaps[elem] += 1
                if row['ActualValue'] == row[elem + 'Value'] and row['ActualGaps'] == row[elem + 'Gaps']:
                    both[elem] += 1
            if row['CalignLKHValue'] == row['CalignAPValue']:
                atspvsapval['CalignLKH'] += 1
                atspvsapval['CalignAP'] += 1
            if row['CalignALKHValue'] == row['CalignAAPValue']:
                atspvsapval['CalignALKH'] += 1
                atspvsapval['CalignAAP'] += 1
            if row['CalignBLKHValue'] == row['CalignBAPValue']:
                atspvsapval['CalignBLKH'] += 1
                atspvsapval['CalignBAP'] += 1
            if row['CalignLKHValue'] == row['CalignAPValue'] and row['CalignLKHGaps'] == row['CalignAPGaps']:
                atspvsap['CalignLKH'] += 1
                atspvsap['CalignAP'] += 1
            if row['CalignALKHValue'] == row['CalignAAPValue'] and row['CalignALKHGaps'] == row['CalignAAPGaps']:
                atspvsap['CalignALKH'] += 1
                atspvsap['CalignAAP'] += 1
            if row['CalignBLKHValue'] == row['CalignBAPValue'] and row['CalignBLKHGaps'] == row['CalignBAPGaps']:
                atspvsap['CalignBLKH'] += 1
                atspvsap['CalignBAP'] += 1
    with open(DIR + 'complete_stats.csv', 'w') as g:
        g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)
        g_csv.writeheader()
        g_csv.writerow(vals)
        g_csv.writerow(gaps)
        g_csv.writerow(both)
        g_csv.writerow(atspvsapval)
        g_csv.writerow(atspvsap)
# Emit the CSV / transposed CSV / TeX reports from the stats gathered above,
# then the aggregate comparison table (which re-reads assembly_stats.csv).
write_assembly_stats(stats_dict)
write_assembly_stats2(stats_dict)
write_assembly_stats_tex(stats_dict)
write_whole_stats()
|
2,334 | 6d032df195854703f36dce7d27524c8f5089c04d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import config
import web
import hashlib
import sys
db = web.database(dbn="mysql", db=config.db, user=config.user, pw=config.passwd)
def signIn(user, pw):
    """Insert a new user row with an MD5-hashed password; return the new id.

    NOTE(review): despite the name this *registers* a user (INSERT), it
    does not authenticate one.  MD5 is not a safe password hash --
    flagging rather than changing behaviour; consider bcrypt/PBKDF2.
    """
    pwhash = hashlib.md5(pw).hexdigest()
    uid = db.insert("users", uname=user, passwd=pwhash)
    return uid
# def select():
# db.select(, )
def main():
    """Register a user when run as: script.py <username> <password>.

    Does nothing (and prints no usage message) when arguments are missing;
    raises IndexError if only a username is given.
    """
    if len(sys.argv) > 1:
        user = sys.argv[1]
        pw = sys.argv[2]
        signIn(user, pw)
if __name__ == "__main__":
    main()
    # Dump every stored user name after the optional insert
    # (Python 2 print statement -- this file targets Python 2).
    r = db.select("users")
    for i in r:
        print i.uname
# conn = MySQLdb.connect(host=config.host, user=config.user, passwd=config.passwd,
#                        db=config.db, port=config.port, charset=config.charset)
# conn
|
2,335 | b5e9af166f3b55e44d9273077e5acd05b1fd68fa | import random #importing the random library from python
answers = ["It is certain", "Without a doubt", "Yes, definitely",
           "You may rely on it", "As I see it, yes", "Most likely",
           "Outlook good", "Yes", "Signs point to yes", "Reply hazy, try again",
           "Ask again later", "Better not tell you now", "Cannot predict now",
           "Concentrate and ask again", "Don't count on it", "My reply is no",
           "My sources say no", "Outlook not so good", "Very doubtful"]  # the classic Magic 8-Ball replies
ans = '!'  # any non-empty value so the while condition is true on the first pass
while ans:  # an empty string (just pressing Enter) is falsy and ends the loop
    ans = input("Ask the magic 8 ball a question. (Press enter to leave): \n")
    # NOTE(review): a reply is printed even for the final empty input,
    # because the loop condition is only re-checked after the print.
    print(random.choice(answers))  # pick and show one reply at random
|
2,336 | 151cc71ff1a63897238e2cc55269bd20cc6ee577 | import logging
from typing import List, Optional
import uuid
from pydantic import BaseModel
from obsei.payload import TextPayload
from obsei.preprocessor.base_preprocessor import (
BaseTextPreprocessor,
BaseTextProcessorConfig,
)
logger = logging.getLogger(__name__)
class TextSplitterPayload(BaseModel):
    """Metadata describing one chunk produced by TextSplitter."""

    # The chunk text itself.
    phrase: str
    # 0-based position of this chunk within its document.
    chunk_id: int
    # Length of `phrase` in characters.
    chunk_length: int
    # Start/end character offsets of the chunk in the source text.
    start_index: int
    end_index: int
    # Identifier of the source document (caller-supplied via meta, or a uuid4 hex).
    document_id: str
    # Length of the whole source text in characters.
    text_length: int
    # Filled in after splitting finishes, once the chunk count is known.
    total_chunks: Optional[int]
class TextSplitterConfig(BaseTextProcessorConfig):
    """Tuning knobs for TextSplitter."""

    # Maximum characters per chunk (splits are snapped back to whitespace).
    max_split_length: int = 512
    split_stride: int = 0  # overlap length in characters; 0 disables overlap
    document_id_key: Optional[str]  # meta key holding the document id, if any
class TextSplitter(BaseTextPreprocessor):
    """Split each payload's text into whitespace-aligned chunks of at most
    ``config.max_split_length`` characters, optionally overlapping
    consecutive chunks by roughly ``config.split_stride`` characters."""

    def preprocess_input(  # type: ignore[override]
        self, input_list: List[TextPayload], config: TextSplitterConfig, **kwargs
    ) -> List[TextPayload]:
        """Return one TextPayload per chunk; the per-chunk metadata is
        attached under ``meta["splitter"]`` as a TextSplitterPayload."""
        text_splits: List[TextPayload] = []
        for idx, input_data in enumerate(input_list):
            # Reuse the caller-supplied document id when configured, else mint one.
            if (
                config.document_id_key
                and input_data.meta
                and config.document_id_key in input_data.meta
            ):
                document_id = str(input_data.meta.get(config.document_id_key))
            else:
                document_id = uuid.uuid4().hex
            start_idx = 0
            split_id = 0
            document_splits: List[TextSplitterPayload] = []
            document_length = len(input_data.processed_text)
            while start_idx < document_length:
                if config.split_stride > 0 and start_idx > 0:
                    # Back up by the stride, snapped to whitespace, so chunks overlap.
                    start_idx = (
                        self._valid_index(
                            input_data.processed_text, start_idx - config.split_stride
                        )
                        + 1
                    )
                # End on a whitespace boundary at or before max_split_length.
                end_idx = self._valid_index(
                    input_data.processed_text,
                    min(start_idx + config.max_split_length, document_length),
                )
                phrase = input_data.processed_text[start_idx:end_idx]
                document_splits.append(
                    TextSplitterPayload(
                        phrase=phrase,
                        chunk_id=split_id,
                        chunk_length=len(phrase),
                        start_index=start_idx,
                        end_index=end_idx,
                        document_id=document_id,
                        text_length=document_length,
                    )
                )
                # +1 skips the whitespace character the chunk ended on.
                start_idx = end_idx + 1
                split_id += 1
            # total_chunks is only known once the whole document is split.
            total_splits = len(document_splits)
            for split in document_splits:
                split.total_chunks = total_splits
                payload = TextPayload(
                    processed_text=split.phrase,
                    source_name=input_data.source_name,
                    segmented_data=input_data.segmented_data,
                    meta={**input_data.meta, **{"splitter": split}}
                    if input_data.meta
                    else {"splitter": split},
                )
                text_splits.append(payload)
        return text_splits

    @staticmethod
    def _valid_index(document: str, idx: int):
        """Clamp ``idx`` to [0, len(document)] and walk left to the nearest
        space/newline/tab so splits do not cut words (0 if none found)."""
        if idx <= 0:
            return 0
        if idx >= len(document):
            return len(document)
        new_idx = idx
        while new_idx > 0:
            if document[new_idx] in [" ", "\n", "\t"]:
                break
            new_idx -= 1
        return new_idx
|
2,337 | 49cdeb59e75ed93122b3a62fbdc508b7d66166d6 | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
# add DenseNet structure
class Net(nn.Module):
    """Fully convolutional residual network: 3-channel image in, 1-channel
    sigmoid map out.  All convolutions are padded and there is no pooling,
    so the spatial size is preserved end to end."""

    def __init__(self):
        super(Net, self).__init__()
        # self.x = x
        self.block0 = nn.Sequential(
            # input image 96x96
            # NOTE(review): ReLU is applied to the raw input *before* the
            # first conv, clamping negative pixel values -- verify intended.
            nn.ReLU(),
            nn.Conv2d(3, 64, (5, 5), (1, 1), (2, 2)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(64),
        )
        # Two 3x3 conv stages at 64 channels, joined by residual sums in forward().
        self.block1 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(64),
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(64),
        )
        # Channel reduction 64 -> 32 -> 4.
        self.block3 = nn.Sequential(
            nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 4, (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(4),
        )
        # 1x1 side branches projecting each 64-channel stage down to 4
        # channels so they can be summed with block3's output.
        self.side0_3 = nn.Sequential(
            nn.Conv2d(64, 4, (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(4),
        )
        self.side1_3 = nn.Sequential(
            nn.Conv2d(64, 4, (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(4),
        )
        self.side2_3 = nn.Sequential(
            nn.Conv2d(64, 4, (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(4),
        )
        # Final 4 -> 1 channel projection, squashed to (0, 1) by the sigmoid.
        self.fc = nn.Sequential(
            nn.Conv2d(4, 1, (1, 1), (1, 1)),
            nn.LeakyReLU(0.1),
            nn.BatchNorm2d(1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x=x.float()
        out = self.block0(x) # 64x96x96
        res0_1 = out
        res0_2 = out
        res0_3 = self.side0_3(out)
        out = self.block1(out) # 64x96x96
        res1_2 = out
        res1_3 = self.side1_3(out)
        out = out + res0_1
        out = self.block2(out) # 64x96x96
        res2_3 = self.side2_3(out)
        out = out + res0_2 + res1_2
        out = self.block3(out) # 4x96x96
        # Sum the three 4-channel side projections into the main path.
        out = out + res0_3 + res1_3 + res2_3
        out = self.fc(out)
        return out

    def _initialize_weights(self):
        # Intentionally a no-op: default PyTorch initialisation is used.
        pass
2,338 | 4b622c7f9b5caa7f88367dd1fdb0bb9e4a81477b | from StringIO import StringIO
import gzip
import urllib2
import urllib
url="http://api.syosetu.com/novelapi/api/"
# Build the query string for the Syosetu novel API (Python 2: urllib/urllib2).
get={}
get["gzip"]=5  # ask the server for a gzip-compressed response
get["out"]="json"  # response format
get["of"]="t-s-w"  # fields to return -- presumably title/story/writer; verify against API docs
get["lim"]=500  # maximum number of results
get["type"]="er"
url_values = urllib.urlencode(get)
request = urllib2.Request(url+"?"+url_values)
response = urllib2.urlopen(request)
# Transparently decompress when the server honoured the gzip request.
if response.info().get('Content-Type') == 'application/x-gzip':
    buf = StringIO( response.read())
    f = gzip.GzipFile(fileobj=buf)
    data = f.read()
else:
    data = response.read()
# Persist the raw payload and echo it to stdout.
f = open('text.txt', 'w')
f.write(data)
f.close()
print(data)
2,339 | 73bf31e43394c3f922b00b2cfcd5d88cc0e01094 | import cv2 as cv
from threading import Thread
class Reader(Thread):
    """Background video-capture reader.

    Continuously grabs frames from an OpenCV capture device on a daemon
    thread so that `read()` always returns the most recent frame without
    blocking the caller.
    """

    def __init__(self, width, height, device=0):
        """Open `device`, request the given resolution, and start grabbing.

        Note: OpenCV may ignore the requested width/height if the device
        does not support them.
        """
        super().__init__(daemon=True)
        self._stream = cv.VideoCapture(device)
        self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)
        self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)
        self._frame = None
        self._running = True  # cleared by stop() to end the grab loop
        self.start()

    def stop(self):
        """Ask the grab loop to exit; safe to call multiple times."""
        self._running = False

    def __del__(self):
        # Fix: previously the capture was released while the daemon thread
        # could still be calling read() on it; signal the loop to stop first.
        self.stop()
        self._frame = None
        self._stream.release()

    def run(self):
        while self._running:
            ret, frame = self._stream.read()
            if not ret:
                # Device failed or stream ended: expose "no frame" and quit.
                self._frame = None
                break
            self._frame = frame

    def read(self):
        """Return the most recently grabbed frame, or None if unavailable."""
        return self._frame
|
2,340 | c7dacdb53efb6935314c5e3718a4a2f1d862b07d | from .file_uploader_routes import FILE_UPLOADER_BLUEPRINT |
2,341 | e5f8301ae22e99c967b2ff3d791379deba7d154a | # module for comparing stats and making recommendataions
"""
Read team names from user input, retrieve features of teams from MySQL DB, compute odds of winning and recommend features to care
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pymysql as mdb
def FeatureImprove(tgtName, yourName):
    """Estimate your team's chance of beating the target team.

    Pulls both teams' pre-ranked feature tables and match tables from the
    local MySQL database, compares your team's average stats with the
    target's winning/losing match averages, and returns
    (ratioBWin, diffB2ALose): the win-ratio estimate and the normalised
    per-feature gap between your averages and the target's lost matches.

    NOTE(review): uses DataFrame.ix, which was removed in modern pandas --
    this code targets an old pandas version.
    """
    con = mdb.connect('localhost', 'root', '000000', 'data') #host, user, password, #database
    with con:
        cur = con.cursor()
        #cur.execute("drop table you_rtablename")
        # Table names are the team names with spaces replaced by underscores.
        featureAPipe = pd.io.sql.read_sql(sql = "SELECT * FROM " + tgtName.replace(' ', '_') + '_featureRanked', con = con)
        featureBPipe = pd.io.sql.read_sql(sql = "SELECT * FROM " + yourName.replace(' ', '_') + '_featureRanked', con = con)
        teamAPipe = pd.io.sql.read_sql(sql = "SELECT * FROM " + tgtName.replace(' ', '_'), con = con)
        teamBPipe = pd.io.sql.read_sql(sql = "SELECT * FROM " + yourName.replace(' ', '_'), con = con)
        #df2 = pd.io.sql.read_sql(sql = "SELECT * FROM yourtablename", con = con)
    # Get stats of Team A's win and lose matches (top 10 ranked features).
    featureA = list(featureAPipe['index'][:10])
    dfA = teamAPipe.ix[:, ['y'] + featureA]
    aWin = dfA[dfA['y'] == 1]
    aLose = dfA[dfA['y'] == 0]
    # Get stats of Team B: invert A's features ('_op' suffix toggles between
    # a team's own stat and its opponent's stat).
    featureB = []
    for ii in featureA:
        if '_op' in ii:
            featureB.append(ii[:-3])
        else:
            featureB.append(ii + '_op')
    dfB = teamBPipe.ix[:, ['y'] + featureB]
    # Invert again so A's opponent stats line up with B's own stats and
    # vice versa, e.g. pass_op in dfB is actually pass for B.
    dfB.columns = [['y'] + featureA]
    # Max of each stat over both teams, used for normalisation below.
    maxStats = dfA.append(dfB).describe().ix['max', 1:11]
    # Mean stats for Team A's win and lose matches, and Team B's all matches.
    meanAWin = aWin.describe().ix['mean', 1:11]
    meanALose = aLose.describe().ix['mean', 1:11]
    meanB = dfB.describe().ix['mean', 1:11]
    # Normalised Euclidean distance of B's averages to A's win/lose profiles.
    AwinSim = np.sqrt(((meanB - meanAWin) ** 2 / maxStats ** 2).sum())
    BwinSim = np.sqrt(((meanB - meanALose) ** 2 / maxStats ** 2).sum())
    ratioBWin = (1 / BwinSim) / ((1 / AwinSim) + (1 / BwinSim)) # The smaller BwinSim, the larger Chance B wins
    # Feature gap used downstream to recommend what to focus on.
    diffB2ALose = meanB / maxStats - meanALose / maxStats
    return ratioBWin, diffB2ALose
def PredictWin(tgtName, yourName):
    """Average the win ratio computed in both directions and print the odds.

    Symmetrizes FeatureImprove: your win ratio and (1 - the opponent's win
    ratio) should agree, so the two estimates are averaged.
    """
    ratioBWin, diffB2ALose = FeatureImprove(tgtName, yourName)
    ratioBWinRev, diffB2ALoseRev = FeatureImprove(yourName, tgtName)
    odds = (ratioBWin + 1 - ratioBWinRev) / 2
    print "The odds of your team winning is " + str(odds)
    return diffB2ALose, odds
def MakeRecommendation(diffB2ALose):
    """Turn the feature-gap vector into human-readable advice.

    Prints and returns (yourTeamAct, tgtTeamAct): stats your team should
    increase, and opponent stats to be careful of, largest gaps first.

    NOTE(review): Series.sort() was removed in modern pandas (use
    sort_values) -- this code targets an old version.
    NOTE(review): the featureB / featureB_op naming looks inverted
    ('_op' entries land in featureB); the features were already inverted
    upstream in FeatureImprove -- verify before renaming.
    """
    absDf = diffB2ALose.abs()
    absDf.sort(ascending = False)
    featureB = []
    featureB_op = []
    # Partition into opponent-suffixed ('_op') features and plain ones.
    for ii in absDf.index:
        if "_op" in ii:
            featureB.append(ii)
        else:
            featureB_op.append(ii)
    print "To increase Your Team's odds of winning:"
    yourTeamAct = []
    for ii in featureB:
        printII = ii[:-3]
        #if diffB2ALose[ii] > 0:
        #    print "You may want to have less " + printII
        if diffB2ALose[ii] < 0:
            print "You want to have more " + printII
            yourTeamAct.append(printII)
    tgtTeamAct = []
    for ii in featureB_op:
        printII = ii
        if diffB2ALose[ii] > 0:
            print "Be careful of Target Team's " + printII
            tgtTeamAct.append(printII)
    #if diffB2ALose[ii] < 0:
    #    print "Allow the Target Team to have more " + printII
    return yourTeamAct, tgtTeamAct
def TmpMain(tgtName, yourName):
    """Entry point: predict the odds and, when two different teams are
    given, also produce the feature recommendations.

    Returns (odds rounded to 2 decimals, yourTeamAct, tgtTeamAct).
    """
    diffB2ALose, odds = PredictWin(tgtName, yourName)
    yourTeamAct = []
    tgtTeamAct = []
    if tgtName != yourName:
        yourTeamAct, tgtTeamAct = MakeRecommendation(diffB2ALose)
    return round(odds, 2), yourTeamAct, tgtTeamAct
#TmpMain('Real Madrid', 'Atletico Madrid')
|
2,342 | 5f0e6f6dc645996b486f1292fe05229a7fae9b17 | import unittest
import achemkit.properties_wnx
class TestDummy(unittest.TestCase):
    """Placeholder test case; contains no tests yet."""
    pass
|
2,343 | 98db990f406cc6815480cca33011c8b0b2ad67c7 | # fabric이 실행할 대상을 제어.
from fabric.api import *
AWS_EC2_01 = 'ec2-52-78-143-155.ap-northeast-2.compute.amazonaws.com' # Running
PROJECT_DIR = '/var/www/kamper'
APP_DIR = '%s/app' % PROJECT_DIR
"""
# the user to use for the remote commands
env.user = 'appuser'
# the servers where the commands are executed
env.hosts = ['server1.example.com', 'server2.example.com']
"""
env.user = 'kamper'
env.hosts = [AWS_EC2_01]
env.key_filename = '/Users/Mac/Desktop/Genus/1.제품_서비스/KAMP/dev/flask_kamper_package/KAMPERKOREA.pem'
def pack():
    """Commit local changes so they can be deployed.

    NOTE(review): despite the original comment no tarball is created --
    this only runs `git checkout` and commits everything with a canned
    message; `git push` is left commented out.
    """
    # create a new source distribution as tarball
    local('git checkout')
    # local('git add *')
    local('git commit -a -s -m "Fabric Pack Commit"')
    # local('git push origin master', capture=False)
def deploy():
    """Deploy to the configured hosts -- currently a stub that only prints.

    The real remote deploy (running deploy.sh in APP_DIR) is commented out.
    """
    print('deploying')
    pass
    # with settings(warn_only=True):
    #     with cd(APP_DIR):
    #         run('sudo ./deploy.sh')
|
2,344 | bfc4f5e90b7c22a29d33ae9b4a5edfb6086d79f4 | # Представлен список чисел.
# Необходимо вывести элементы исходного списка,
# значения которых больше предыдущего элемента.
from random import randint
list = []
y = int(input("Введите количество элементов в списке>>> "))
for i in range(0, y):
list.append(randint(1, 10))
new = [el for num, el in enumerate(list) if list[num - 1] < list[num]]
print(f"Исходный список: {list}")
print(f"Новый список список: {new}")
|
2,345 | 360813a573f672e3ec380da4237a6e131dbcb7e6 | """
Users model
"""
# Django
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
class User(AbstractUser):
    """Custom user that logs in with email and stores a phone number."""

    # Email is the login identifier, so it must be unique.
    email = models.EmailField(
        'email address',
        unique=True,
        error_messages={
            'unique': 'A user with that email already exists'
        }
    )
    # Validates 9-15 digits with an optional leading '+'.
    phone_regex = RegexValidator(
        regex=r'\+?1?\d{9,15}$',
        message='Phone number must be entered in the right format'
    )
    phone_number = models.CharField(
        validators=[phone_regex],
        max_length=17
    )
    # Authenticate with email; the listed fields are still prompted for by
    # management commands such as createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username', 'first_name', 'last_name', 'phone_number']

    def __str__(self):
        return self.username
class Profile(models.Model):
    """Extra per-user data, linked one-to-one to the auth user."""

    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
    description = models.TextField('user description', max_length=255)
    picture = models.ImageField(
        upload_to='users/pictures',
        blank=True,
        null=True
    )
    # NOTE(review): not Django's auth `is_authenticated` -- per the verbose
    # name this flags some profile verification state; confirm the intent.
    is_authenticated = models.BooleanField('user is autheticated', default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username
|
2,346 | 66e77b8237850a29127402310bfab3061f7ebca4 | # Comic Downloader
#! python3
# Fix: `import urllib` alone does not expose urllib.request in Python 3,
# so the urlretrieve call below raised AttributeError.
import urllib.request
import bs4, requests

url = 'http://explosm.net/comics/39/'
base_url = 'http://explosm.net'
# Follow the "next comic" links starting from comic 39, saving each image
# to a file named after the loop counter (no extension, as before).
for i in range(1,4000):
    req = requests.get(url)
    req.raise_for_status()  # abort on any HTTP error
    soup = bs4.BeautifulSoup(req.text, "lxml")
    comic = soup.select('#main-comic')
    comicUrl = 'http:' + comic[0].get('src')  # src is protocol-relative (//...)
    urllib.request.urlretrieve(comicUrl, str(i))
    print(str(i) + ' done')
    next_comic = soup.select('.next-comic')
    url = base_url + next_comic[0].get('href')
2,347 | 25fcf162306b3d6d6307e703a7d829754cba2778 | """
Constant types in Python.
定数上書きチェック用
"""
import os
from common import const
from datetime import timedelta
from linebot.models import (
TemplateSendMessage, CarouselTemplate, CarouselColumn, MessageAction,
QuickReplyButton, CameraAction, CameraRollAction, LocationAction
)
const.API_PROFILE_URL = 'https://api.line.me/v2/profile'
const.API_NOTIFICATIONTOKEN_URL = 'https://api.line.me/message/v3/notifier/token' # noqa: E501
const.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'
const.API_SENDSERVICEMESSAGE_URL = 'https://api.line.me/message/v3/notifier/send?target=service' # noqa 501
const.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify'
const.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'
const.DATA_LIMIT_TIME = 60 * 60 * 12
const.ONE_WEEK = timedelta(days=7)
const.JST_UTC_TIMEDELTA = timedelta(hours=9)
const.FLEX = {
"type": "flex",
"altText": "Flex Message",
"contents": {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901", # noqa:E501
"size": "full",
"aspectRatio": "1:1",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "UseCase Cafe",
"uri": "https://line.me/ja/"
}
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "LINE Cafe",
"size": "xl",
"weight": "bold"
},
{
"type": "box",
"layout": "baseline",
"margin": "md",
"contents": [
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "text",
"text": "4.0",
"flex": 0,
"margin": "md",
"size": "sm",
"color": "#999999"
}
]
},
{
"type": "box",
"layout": "vertical",
"spacing": "sm",
"margin": "lg",
"contents": [
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"contents": [
{
"type": "text",
"text": "Place",
"flex": 1,
"size": "sm",
"color": "#AAAAAA"
},
{
"type": "text",
"text": "Miraina Tower, 4-1-6 Shinjuku, Tokyo", # noqa:E501
"flex": 5,
"size": "sm",
"color": "#666666",
"wrap": True
}
]
},
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"contents": [
{
"type": "text",
"text": "Time",
"flex": 1,
"size": "sm",
"color": "#AAAAAA"
},
{
"type": "text",
"text": "10:00 - 23:00",
"flex": 5,
"size": "sm",
"color": "#666666",
"wrap": True
}
]
}
]
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"flex": 0,
"spacing": "sm",
"contents": [
{
"type": "button",
"action": {
"type": "uri",
"label": "WEBサイト",
"uri": "https://line.me/ja/"
},
"height": "sm",
"style": "link"
},
{
"type": "button",
"action": {
"type": "datetimepicker",
"label": "予約",
"data": "action=reserve",
"mode": "datetime",
"initial": "2020-01-01t00:00",
"max": "2020-12-31t23:59",
"min": "2020-01-01t00:00"
},
"height": "sm",
"style": "link"
},
{
"type": "button",
"action": {
"type": "postback",
"label": "クイックアクション",
"data": "action=quick_reply",
},
"height": "sm",
"style": "link"
},
{
"type": "spacer",
"size": "sm"
}
]
}
}
}
const.CAROUSEL = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url='https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186', # noqa:E501
title='最大80%OFF',
text='期間限定SALE',
actions=[
MessageAction(
label='Go to SALE',
text='Choose SALE'
)
]
),
CarouselColumn(
thumbnail_image_url='https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654', # noqa:E501
title='今月のおススメ商品',
text='これがあれば困らない!',
actions=[
MessageAction(
label='Recommended',
text='Choose Recommended'
)
]
),
CarouselColumn(
thumbnail_image_url='https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694', # noqa:E501
title='スッキリ収納特集',
text='大切なお洋服をスッキリ簡単に収納します',
actions=[
MessageAction(
label='To receive clothes',
text='Choose receive clothes'
)
]
)
]
)
)
const.QUICK_REPLY_ITEMS = [
QuickReplyButton(action=LocationAction(label='位置情報')),
QuickReplyButton(action=CameraAction(label='カメラ起動')),
QuickReplyButton(action=CameraRollAction(label='カメラロール起動')),
]
const.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),
'carousel': os.getenv('RICH_MENU_CAROUSEL', None),
'flex': os.getenv('RICH_MENU_FLEX', None)
}
|
2,348 | 57935b560108ef0db59de9eee59aa0c908c58b8f | from __future__ import annotations
from abc import ABC, abstractmethod
class AbstractMoviment(ABC):
    """Strategy interface for movement behaviours."""

    @abstractmethod
    def move(self, dt) -> None:
        """Advance the movement; concrete subclasses widen this signature."""
        pass
class Mov_LinearFall(AbstractMoviment):
    """Straight vertical fall at constant speed; x is left untouched."""

    def move(self, coordinates, speed, lastcoordinate, dt):
        # Only the y coordinate advances, by speed * elapsed time.
        dy = speed * dt
        coordinates[1] = round(coordinates[1] + dy)
        return coordinates, speed
class Mov_ZigZag(AbstractMoviment):
    """Fall while zig-zagging horizontally around the spawn column."""

    # Class-level flag: True while drifting right.  The first flip assigns
    # self.direct, creating a per-instance attribute that shadows this one.
    direct = True

    def move(self, coordinates, speed, startcoordinate, dt):
        ZigZageamento = 100  # maximum horizontal deviation from the start column
        coordinates[1] = round(coordinates[1] + speed * dt)
        # Drift right until 100px past the start column, then left until
        # 100px before it; flip direction whenever a bound is crossed.
        # NOTE(review): on the flip frame no horizontal motion occurs.
        if (startcoordinate[0] + ZigZageamento >= coordinates[0]) and (
                self.direct):  # was on the left, move right
            coordinates[0] = round(coordinates[0] + speed * dt)
        elif (startcoordinate[0] - ZigZageamento <= coordinates[0]) and (not self.direct):
            coordinates[0] = round(coordinates[0] - speed * dt)
        else:
            self.direct = not self.direct
        return coordinates, speed
class Mov_DiagRight(AbstractMoviment):
    """Fall while drifting right; horizontal speed is a fraction of the fall speed."""

    def __init__(self, x_speed):
        # x_speed is the sine of the drift angle; ~0.17 (10 degrees) works well.
        self.x_speed = x_speed

    def move(self, coordinates, speed, startcoordinate, dt):
        """Advance one step; returns the mutated coordinates and the speed.

        Fix: removed the unused ZigZageamento local (copy-paste leftover
        from Mov_ZigZag).
        """
        coordinates[1] = round(coordinates[1] + speed * dt)
        # sin(10 degrees) = .17
        coordinates[0] = round(coordinates[0] + speed * self.x_speed * dt)
        return coordinates, speed
class Mov_DiagLeft(AbstractMoviment):
    """Fall while drifting left; horizontal speed is a fraction of the fall speed."""

    def __init__(self, x_speed):
        # x_speed is the sine of the drift angle; ~0.17 (10 degrees) works well.
        self.x_speed = x_speed

    def move(self, coordinates, speed, startcoordinate, dt):
        """Advance one step; returns the mutated coordinates and the speed.

        Fix: removed the unused ZigZageamento local (copy-paste leftover
        from Mov_ZigZag).
        """
        coordinates[1] = round(coordinates[1] + speed * dt)
        # sin(10 degrees) = .17
        coordinates[0] = round(coordinates[0] - speed * self.x_speed * dt)
        return coordinates, speed
2,349 | 94100d0253ee82513fe024b2826e6182f852db48 | import os.path
class State:
    """Container pairing a sequence of states with the actions taken."""

    def __init__(self):
        # Parallel lists; NOTE(review): nothing here enforces that pairing.
        self.states=[]
        self.actions=[]
class Candidate:
    """A weight vector for a board evaluator together with its measured fitness."""

    def __init__(self, height, lines, holes, bump, fit):
        self.heightWeight = height
        self.linesWeight = lines
        self.holesWeight = holes
        self.bumpinessWeight = bump
        self.fitness = fit

    def __str__(self):
        # Render all five numbers with %f, preserving the original spacing.
        fields = (self.heightWeight, self.linesWeight, self.holesWeight,
                  self.bumpinessWeight, self.fitness)
        return "%f , %f , %f , %f, %f " % fields
if __name__=="__main__":
    # NOTE(review): this block is broken as written -- `naming_file` is not
    # defined anywhere in this module, and `print x` is Python 2 syntax.
    s = Candidate(None,None,None,None,None)
    file = open("gen4.txt", "a")  # opened for append and never closed
    print naming_file(2)
|
2,350 | ae72d832039f36149988da02d8a4174d80a4ecfb |
# __ __ __ ______ __
# / | / | / | / \ / |
# $$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______
# $$ \/$$// $$ | / \ / \ / \ $$ | $$/ / \ / |/ \ / |
# $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/
# $$$$ \ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \
# $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \__/ |$$ \__$$ |$$ |$$ | $$ | $$$$$$ |
#$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/
#$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/
#made with http://patorjk.com/software/taag/
# Xtern Intern Techincal interview
# Josh Martin
# contact@cjoshmartin.com
# 2016
import json
import uuid
import random
import time
## Location of file
filename ="data.json"
def newGuess():
    """Return a fresh secret number in the inclusive range 0..10."""
    return random.randrange(11)
# init guess
correctGuess = newGuess()
def newUser():
    """Create a user with 0 coins/guesses, write the JSON store, return the id.

    NOTE(review): opening `filename` with 'w' discards every previously
    stored user, so the store only ever holds one user.
    """
    userid = str(uuid.uuid1())
    data={userid:{'coins':0,'guess':0}}
    with open(filename,'w') as f:
        json.dump(data,f)
    return userid
def OpenJson():
    """Load and return the entire JSON store from the module-level `filename`."""
    with open(filename, 'r+') as handle:
        return json.load(handle)
def AddACoin(userid):
    """Increment `userid`'s coin counter and rewrite the whole store.

    Read-modify-write of the entire JSON file; not safe under concurrency.
    """
    data = OpenJson()
    tmp=data[userid]['coins']
    tmp+=1
    data[userid]['coins']=tmp
    JsonFile=open(filename,"w+")
    JsonFile.write(json.dumps(data))
    JsonFile.close()
def GuessCount(userid):
    """Increment `userid`'s guess counter, persist it, and report the total.

    Same read-modify-write pattern as AddACoin (Python 2 print statement).
    """
    data = OpenJson()
    tmp=data[userid]['guess']
    tmp+=1
    data[userid]['guess']=tmp
    JsonFile=open(filename,"w+")
    JsonFile.write(json.dumps(data))
    JsonFile.close()
    print 'that is {} trys in total.'.format(tmp)
def GetCoins(userid):
    """Return the coin balance currently stored for `userid`."""
    return OpenJson()[userid]['coins']
def HandleGuess(userid,guess):
    """Return True when `guess` equals the module-global correctGuess.

    Logs the guess and the outcome either way (Python 2 print statements).
    """
    print 'the current user, "{}" has guessed: {}'.format(userid,guess)
    if guess == correctGuess:
        print 'the user,"{}" has guessed correctly and now has {} XternCoins.'.format(userid,(GetCoins(userid)+1))
        return True
    print 'the user has nt guessed right, please try again.'
    return False
def StartGuessing():
    """Main loop: create a user, then auto-guess random numbers forever."""
    user =newUser()
    while True:
        print("""
 __ __ __ ______ __
 / | / | / | / \ / |
 $$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______
 $$ \/$$// $$ | / \ / \ / \ $$ | $$/ / \ / |/ \ / |
 $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/
 $$$$ \ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \
 $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \__/ |$$ \__$$ |$$ |$$ | $$ | $$$$$$ |
 $$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/
 $$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/
 """) #cheap "gui" to clear the screen a bit and look pretty
        print 'the current user, "{}" has {} XternCoins'.format(user,OpenJson()[user]['coins'])
        # `guess` here is the boolean outcome, not the guessed number.
        guess =HandleGuess(user,random.randint(0,10))
        if guess :
            AddACoin(user)
            # NOTE(review): this creates a *local* correctGuess; the global
            # one that HandleGuess compares against never actually changes.
            correctGuess=newGuess() # makes a new number to guess
        GuessCount(user)
        time.sleep(3) # makes program readable to humans not just computers
|
2,351 | 1e83fedb8a5ed51704e991aeaa4bde20d5316d11 | # Generated by Django 3.0.3 on 2020-04-27 07:06
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the `favorites` field from account.Profile."""

    dependencies = [
        ('account', '0002_profile_favorites'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='favorites',
        ),
    ]
|
class ListNode:
    """Singly linked list node."""

    def __init__(self, value = 0, next = None):
        # `next` references the following node, or None at the tail.
        self.value = value
        self.next = next
def count(node: "ListNode") -> int:
    """Return the number of nodes in the linked list starting at *node*.

    ``None`` counts as an empty list (0). Implemented iteratively so very
    long lists do not hit Python's recursion limit, and the annotation is
    quoted so the function no longer requires ListNode to be defined first.
    """
    total = 0
    while node is not None:
        total += 1
        node = node.next
    return total
# Test Cases
LL1 = ListNode(1, ListNode(4, ListNode(5)))  # three-node list 1 -> 4 -> 5
print(count(None)) # 0
print(count(LL1)) # 3
print(count(ListNode())) # 1
|
2,353 | 2d65ffa3fc8a5360702337d749884903b2cb0423 | from django.shortcuts import render, HttpResponse
from django.views.generic import TemplateView
from .models import Person, Stock_history
from django.http import Http404, HttpResponseRedirect
from .forms import NameForm, UploadFileForm
from .back import handle_uploaded_file, read_file
class IndexView(TemplateView):
    """Landing page: lists every stored stock price history entry."""

    def get(self, request):
        context = {'entry': Stock_history.objects.all()}
        return render(request, 'budget/index.html', context)
class DetailView(TemplateView):
    """Detail page for one person, with the full person list for navigation.

    Raises Http404 when the requested person id does not exist.
    """

    def get(self, request, person_id):
        try:
            persons = Person.objects.all()
            person = Person.objects.get(id=person_id)
        except Person.DoesNotExist:
            raise Http404("Person does not exist")
        context = dict(
            persons=persons,
            person=person,
            first_name=person.first_name,
            last_name=person.last_name,
            income=person.income,
        )
        return render(request, 'budget/detail.html', context)
class PersonView(TemplateView):
    """Overview page listing all persons."""

    def get(self, request):
        context = {'persons': Person.objects.all()}
        return render(request, 'budget/person.html', context)
class AddView(TemplateView):
    """Add-person form: GET shows an empty form, POST creates a Person."""
    template = 'budget/add.html'

    def get(self, request):
        # Instantiate the form; the original put the class itself in the
        # context, which only worked because templates call callables.
        form = NameForm()
        context = {'form': form}
        return render(request, self.template, context)

    def post(self, request):
        form = NameForm(request.POST)
        # Bug fix: `text` was only bound when the form validated, so an
        # invalid submission raised NameError while building the context.
        text = None
        if form.is_valid():
            text = form.cleaned_data
            form = NameForm()
            p = Person(first_name=text['first_name'], last_name=text['last_name'], income = text['income'])
            p.save()
        context = {
            'form': form,
            'text': text,
        }
        return render(request, self.template, context)
class UploadView(TemplateView):
    """File-upload form: GET renders the form, POST stores and parses the file."""
    template_name = 'budget/upload.html'

    def get(self, request):
        form = UploadFileForm()
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        # post() is only dispatched for POST requests, so the original
        # `if request.method == 'POST'` guard was dead code and is removed.
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            handle_uploaded_file(request.FILES['file'])
            read_file(request.FILES['file'])
            return HttpResponseRedirect('/upload')
        # Invalid form: re-render so validation errors are shown.
        return render(request, self.template_name, {'form': form})
|
2,354 | 488d20a86c5bddbca2db09b26fb8df4b6f87a1dc | import warnings
from re import *
from pattern import collection
warnings.filterwarnings("ignore")
def test():
    """Smoke-test the "js_var" pattern from pattern.collection against a
    scraped page fragment: print the pattern, then substitute every match.
    """
    raw_text = "通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»"
    pattern = collection.pattern_test("js_var")
    print(f"匹配模式为:{pattern}")
    print("----------------------------------------------")
    #return_text = findall(pattern, raw_text)
    # Replace every match rather than just listing matches.
    pattern = compile(pattern)
    return_text = sub(pattern, "替换成功", raw_text)
    print(return_text)
    ''' if(return_text):
        for i, each in enumerate(return_text):
            print(f"第{i+1}个匹配结果:{each}")
    else:
        print("Not Found pattern-like string!") '''
if __name__ == "__main__":
    test()
|
2,355 | 91ac4a23573abcb0ab024830dbc1daebd91bd40d | """ OCR that converts images to text """
from pytesseract import image_to_string
from PIL import Image
# Python 2 script: run Tesseract OCR on a fixed screenshot and print the text.
print image_to_string(Image.open('/Users/williamliu/Desktop/Screen Shot 2014-09-27 at 11.45.34 PM.png'))
#print image_to_string(Image.open('/Users/williamliu/Desktop/Screen Shot 2014-09-27 at 11.45.34 PM.png'))
#print image_to_string(Image.open('test-european.jpg'), lang='fra')
|
2,356 | f2ad95574b65b4d3e44b85c76f3a0150a3275cec | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 10:04:05 2019
@author: cristina
"""
import numpy as np
from itertools import chain
from numpy import linalg as LA
diag = LA.eigh
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 13})
import time
pi = np.pi
exp = np.exp
t1 = time.time()
# Model parameters for a 1D Bogoliubov-de Gennes tight-binding chain:
# two superconducting segments of N sites separated by M empty sites.
# Energies are in Hartree (the 27211.6 factors convert from meV).
N = 2000 #number of sites
M = 200 #number of empty sites
m = 1.0 #effective mass
delta =1.35/27211.6 #SC gap
mu = 1.0/27211.6 #chemical potential
# NOTE(review): mu, phi and hopping below are immediately overwritten --
# the earlier assignments look like kept-around parameter toggles.
mu = 0.0
a = 4.98/0.529 ##lattice constant
phi = pi/2.0#phase of second SC
phi = 0.0
# H: full 2x2-Nambu Hamiltonian; h: site-indexed blocks [i, j, tau_i, tau_j].
H = np.zeros([2*(2*N + M), 2*(2*N + M)], dtype=complex)
h = np.zeros([2*N + M, 2*N + M, 2, 2], dtype=complex)
factor = 1/(m*a**2) - mu
factor_2 = -1/(2*m*a**2)
hopping = factor_2*10
hopping = 0.0
#diagonal terms
range1_diagonal = range(N)
range2_diagonal = range(N+M, 2*N+M - 1)
for i in range1_diagonal:
    g_i = i
    h[g_i, g_i, 0, 1] = delta
    h[g_i, g_i, 1, 0] = delta
    h[g_i, g_i, 0, 0] = factor
    h[g_i, g_i, 1, 1] = - factor
for i in range2_diagonal:
    g_i = i
    # Second superconductor carries the phase phi on its pairing terms.
    h[g_i, g_i, 0, 1] = delta*exp(1j*phi)
    h[g_i, g_i, 1, 0] = delta*exp(-1j*phi)
    h[g_i, g_i, 0, 0] = factor
    h[g_i, g_i, 1, 1] = - factor
#off - diagonal terms
range1_offdiagonal = range(N - 1)
range2_offdiagonal = range(N+M, 2*N+M - 1)
range_offdiagonal = chain(range1_offdiagonal, range2_offdiagonal)
for i in range_offdiagonal:
    g_i = i
    g_j = i + 1
    h[g_i, g_j, 0, 0] = factor_2
    h[g_i, g_j, 1, 1] = - factor_2
    h[g_j, g_i, 0, 0] = factor_2
    h[g_j, g_i, 1, 1] = - factor_2
#hopping between the 2 Chains
h[N - 1, N + M, 0, 0] = hopping
h[N - 1, N + M, 1, 1] = - hopping
h[N + M, N - 1, 0, 0] = hopping
h[N + M, N - 1, 1, 1] = - hopping
# Flatten the site/Nambu block structure into the full matrix H.
for i in range(2*N + M):
    for j in range(2*N + M):
        for t_i in range(2):
            for t_j in range(2):
                H[(i) * 2 + t_i, (j) * 2 + t_j] = h[i, j, t_i, t_j]
# NOTE(review): np.matrix is deprecated in modern NumPy; .getH() could be
# replaced with H.conj().T on a plain ndarray.
H = np.matrix(H)
T = np.allclose(H, H.getH())###check if Hermitian
print('Is H an Hermitian matrix?', T)
(E, psi) = diag(H)####diagonalize H
def LDOS_up(omega, E, u, Damping):
    """Electron-component local density of states at energy *omega*.

    Sums the Lorentzian-broadened weights u_n^2 / (omega - E_n + i*Damping)
    over all eigenvalues and returns -Im(.)/pi.
    """
    green = sum(u**2 / (omega - E + 1j*Damping))
    return -np.imag(green) / np.pi
def LDOS_down(omega, E, v, Damping):
    """Hole-component local density of states at energy *omega*.

    Same Lorentzian sum as LDOS_up but with the hole pole at -E_n
    (note the ``omega + E`` denominator).
    """
    green = sum(v**2 / (omega + E + 1j*Damping))
    return -np.imag(green) / np.pi
#### u and v components in the Nth atom
# Extract u (electron) and v (hole) amplitudes at four probe sites:
# the inner edges of both superconductors and the middle of each one.
u_borde1 = np.zeros(len(E))
v_borde1 = np.zeros(len(E))
I = N - 1
u_borde2 = np.zeros(len(E))
v_borde2 = np.zeros(len(E))
I2 = N + M - 1
u_bulk1 = np.zeros(len(E))
v_bulk1 = np.zeros(len(E))
I3 = int(N/2) - 1
u_bulk2 = np.zeros(len(E))
v_bulk2 = np.zeros(len(E))
I4 = N + M + int(N/2.0) - 1
# NOTE(review): this overwrites I = N - 1 above -- confirm which edge
# site is intended (2*I-2 below indexes site I-1 when I = N).
I = N
for i in range(len(E)):
    u_borde1[i] = psi[2*I-2,i]
    v_borde1[i] = psi[2*I-1,i]
    u_borde2[i] = psi[2*I2-2,i]
    v_borde2[i] = psi[2*I2-1,i]
    u_bulk1[i] = psi[2*I3-2,i]
    v_bulk1[i] = psi[2*I3-1,i]
    u_bulk2[i] = psi[2*I4-2,i]
    v_bulk2[i] = psi[2*I4-1,i]
###calculate LDOS
omega = np.linspace(-4*delta, 4*delta, 2000)#omega vector
LDOS_borde1_up = np.zeros(len(omega))
LDOS_borde1_down = np.zeros(len(omega))
LDOS_borde2_up = np.zeros(len(omega))
LDOS_borde2_down = np.zeros(len(omega))
LDOS_bulk1_up = np.zeros(len(omega))
LDOS_bulk1_down = np.zeros(len(omega))
LDOS_bulk2_up = np.zeros(len(omega))
LDOS_bulk2_down = np.zeros(len(omega))
D = 0.02/27211.6
# NOTE(review): every *_down curve is computed with LDOS_up on the
# v-components; LDOS_down (omega + E pole) is never called -- confirm
# whether that is intentional or the hole LDOS should use LDOS_down.
for i in range(len(omega)):
    LDOS_borde1_up[i] = LDOS_up(omega[i], E, u_borde1, D)
    LDOS_borde1_down[i] = LDOS_up(omega[i], E, v_borde1, D)
    LDOS_borde2_up[i] = LDOS_up(omega[i], E, u_borde2, D)
    LDOS_borde2_down[i] = LDOS_up(omega[i], E, v_borde2, D)
    LDOS_bulk1_up[i] = LDOS_up(omega[i], E, u_bulk1, D)
    LDOS_bulk1_down[i] = LDOS_up(omega[i], E, v_bulk1, D)
    LDOS_bulk2_up[i] = LDOS_up(omega[i], E, u_bulk2, D)
    LDOS_bulk2_down[i] = LDOS_up(omega[i], E, v_bulk2, D)
###plot LDOS
# Four figures: edge and bulk LDOS for each superconductor, with energies
# converted back to meV (factor 27211.6) on the x-axis.
plt.figure(1)
plt.plot(omega*27211.6, LDOS_borde1_up + LDOS_borde1_down)
plt.plot(omega*27211.6, LDOS_borde1_up, label = 'up')
plt.plot(omega*27211.6, LDOS_borde1_down, label = 'down')
plt.title('Borde SC 1')
#plt.title('Site %i' %I)
plt.legend()
plt.figure(2)
plt.plot(omega*27211.6, LDOS_borde2_up + LDOS_borde2_down)
plt.plot(omega*27211.6, LDOS_borde2_up, label = 'up')
plt.plot(omega*27211.6, LDOS_borde2_down, label = 'down')
plt.title('Borde SC 2')
#plt.title('Site %i' %I)
plt.legend()
plt.figure(3)
plt.plot(omega*27211.6, LDOS_bulk1_up + LDOS_bulk1_down)
plt.plot(omega*27211.6, LDOS_bulk1_up, label = 'up')
plt.plot(omega*27211.6, LDOS_bulk1_down, label = 'down')
plt.title('Bulk SC 1')
#plt.title('Site %i' %I)
plt.legend()
plt.figure(4)
plt.plot(omega*27211.6, LDOS_bulk2_up + LDOS_bulk2_down)
plt.plot(omega*27211.6, LDOS_bulk2_up, label = 'up')
plt.plot(omega*27211.6, LDOS_bulk2_down, label = 'down')
plt.title('Bulk SC 2')
#plt.title('Site %i' %I)
plt.legend()
t2 = time.time()
print('Program finished after', (t2 - t1)/60.0, 'mins')
|
2,357 | 4b8038ddea60f371aa8da168ea4456372d6f0388 | """
Subfunction A31 is responsible for inputting the component parameters
and then using the information about the component to determine
the pressure drop across that component
----------------------------------------------------------
Using data structure from /SysEng/jsonParameterFileFormat/ recall that each
cell is only present if there is data stored and thus
we can call "if "parameterName" in dict.keys()" to see if it is there.
"""
#Need math function
import math
class A31:
    """Compute the pressure drop across a single hydraulic component.

    The constructor takes the component's parameter dictionary (structured
    per /SysEng/jsonParameterFileFormat/), dispatches on the component ID
    ("CID") to the matching calculation, and stores the result in
    values.calculated.pressureDrop (value in Pa).

    Raises NotImplementedError for component types whose calculation has
    not been written yet (or for unknown CIDs).
    """
    def __init__(self,dict): #dict is for dictionary (name kept for callers)
        self.dict = dict
        # Shorthand views into the nested parameter dictionary.
        self.CID = self.dict["CID"]
        self.val = self.dict["values"]
        self.calc = self.val["calculated"]
        self.comp = self.val["component"]
        self.fluid = self.val["fluid"]
        # Create a new key for the pressure drop
        self.calc["pressureDrop"] = {}
        # Gravitational acceleration (SI).
        self.g = 9.81
        # Dispatch table replaces the long if/elif chain (which also had a
        # duplicate, unreachable 'BND' branch). CIDs mapped to None are
        # recognised but not yet implemented.
        handlers = {
            'LNE': self.lineCalc,
            'BND': self.bendCalc,
            'VLV': None,
            'ORF': None,
            'INJ': None,
            'CAT': None,
            'SPL': None,
            'JON': None,
            'EXP': self.expansionCalc,
            'CON': self.contractionCalc,
        }
        handler = handlers.get(self.CID)
        # Unknown CIDs now fall through to the sentinel instead of leaving
        # "value" unset (which used to raise KeyError below).
        self.calc['pressureDrop']["value"] = handler() if handler is not None else False
        # Identity check: a legitimate pressure drop of 0.0 must not be
        # mistaken for the "not implemented" sentinel (0.0 == False is True).
        if self.calc['pressureDrop']["value"] is False:
            raise NotImplementedError('Calculations for a ' +
                                      str(self.dict['CID']) + ' have not yet ' +
                                      'been implemented in this ' +
                                      'pre-alpha state.')
        else:
            self.calc["pressureDrop"]["unit"] = "Pa"
            self.dict["values"]["calculated"]["pressureDrop"] = self.calc["pressureDrop"]

    def expansionCalc(self):
        """Expansion: total-loss coefficient times dynamic pressure."""
        q = self.calc['dynamicPressure']
        kt = self.calc['ktLosses']
        return kt * q

    def contractionCalc(self):
        """Contraction: kt loss, plus a friction term over the mean
        diameter for gradual angled contractions (angle < pi/4)."""
        f = self.calc['frictionFactor']
        kt = self.calc['ktLosses']
        A1 = self.comp['upstreamArea']["value"]
        A2 = self.comp['downstreamArea']["value"]
        q = self.calc['dynamicPressure']
        D1 = 2 * math.sqrt(A1/math.pi)
        D2 = 2 * math.sqrt(A2/math.pi)
        # Consistency fix: read the numeric value like every other
        # parameter (the raw entry is a {value, unit} mapping).
        cL = self.comp['contractionLength']["value"]
        if self.comp['contractionAngledOrCurved']["value"] == 'angle':
            angle = self.comp['angle']["value"]
            if angle < math.pi/4:
                mean_diameter = (D1 + D2) / 2
                return (kt + 4*f * (cL / mean_diameter)) * q
            return kt * q
        return kt * q

    def lineCalc(self):
        """Straight line: hydrostatic head plus Darcy-type friction term."""
        rho = self.fluid["density"]["value"]
        q = self.calc["dynamicPressure"]
        z = self.comp["height"]["value"]
        f = self.calc["frictionFactor"]
        x = self.comp["length"]["value"]
        Dh = self.comp["hydraulicDiameter"]["value"]
        return rho*self.g*z + q * ((4*f*x)/Dh)

    def bendCalc(self):
        """Bend: hydrostatic head plus friction and kt losses."""
        rho = self.fluid['density']["value"]
        # Bug fix: q (dynamic pressure) was never read here, so evaluating
        # any bend raised NameError.
        q = self.calc['dynamicPressure']
        z = self.comp['height']["value"]
        f = self.calc['frictionFactor']
        x = self.comp['length']["value"]
        Dh = self.comp['hydraulicDiameter']["value"]
        kt = self.calc['ktLosses']
        return rho*self.g*z + q * (
            ((4*f*x)/Dh) + kt
        )
|
# Small demo of Python set operations: construction, the empty set,
# add/pop and membership tests. (First statement restored -- it had been
# fused with dataset-extraction residue.)
s1 = {10, 20, 30, 60, 70, 80, 90}
s2 = set()
print(s2)
s1.add(100)
print(s1.pop())  # pop removes and returns an arbitrary element
print(10 in s1)
print(10 not in s1)
2,359 | b4593b3229b88db26c5e200431d00838c357c8e0 | # MolecularMatch API (MM-DATA) Python Example Sheet
# Based on documentation at https://api.molecularmatch.com
# Author: Shane Neeley, MolecularMatch Inc., Jan. 30, 2018
import requests
import json
import numpy as np
import sys
# REST endpoints of the MolecularMatch API, keyed by capability name.
resourceURLs = {
    "trialSearch": "/v2/search/trials",
    "drugSearch": "/v2/search/drugs",
    "publicationSearch": "/v2/search/publications",
    "mutationGet": "/v2/mutation/get",
    "geneGet": "/v2/gene/get",
    "mutationClassify": "/v2/mutation/classify",
    "validateTerms": "/v2/validate/terms",
    "assertionSearch": "/v2/search/assertions",
    "assertionExport": "/v2/export/assertions"
}
# Base URL; joined with the paths above for every request.
mmService = "https://api.molecularmatch.com"
# CHANGE THIS TO YOUR KEY or use as parameter (e.g. $ python3 publicationsAPI.py key)
apiKey = '<your api key>'
# Bug fix: guard the argv length first -- the original indexed sys.argv[1]
# inside the condition and crashed with IndexError when run without args.
if apiKey == '<your api key>' and len(sys.argv) > 1:
    apiKey = sys.argv[1]
#// TODO: geolocation searches
#####################search trials##################################
url = mmService + resourceURLs["trialSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
    'apiKey': apiKey,
    'filters': filters
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
##################################################################
#####################SCENARIOS####################################
##################################################################
#### Clinical trial reporting
# When looking up trials for an actual patient, it is important to include the filters of Enrolling and Interventional
url = mmService + resourceURLs["trialSearch"]
filters = [
    {"facet":"CONDITION","term":"Colorectal cancer"},
    {"facet":"MUTATION","term":"BRAF V600E"},
    {"facet":"STATUS", "term":"Enrolling"},
    {"facet":"TRIALTYPE", "term":"Interventional"},
    {"facet":"COUNTRY", "term":"France"}
]
payload = {
    'apiKey': apiKey,
    'filters': filters
}
r = requests.post(url, json=payload)
# Question: how many trials for a patient with this mutation and disease are interventional and enrolling in France?
print(r.json()['total'])
# Answer: 4
# Question: what are these trials ClinicalTrials.gov IDs and titles and email addresses for contact?
for i in np.arange(0, len(r.json()['rows']) ):
    print(r.json()['rows'][i]['id'])
    print(r.json()['rows'][i]['briefTitle'])
    print(r.json()['rows'][i]['overallContact'])
# Answer:
# NCT02291289 - A Multi-Center Study of Biomarker-Driven Therapy in Metastatic Colorectal Cancer - global.rochegenentechtrials@roche.com
# NCT01677741 - A Study to Determine Safety, Tolerability and Pharmacokinetics of Oral Dabrafenib In Children and Adolescent Subjects - GSKClinicalSupportHD@gsk.com
# NCT02788279 - A Study to Investigate Efficacy and Safety of Cobimetinib Plus Atezolizumab and Atezolizumab Monotherapy Versus Regorafenib in Participants With Metastatic Colorectal Adenocarcinoma - global.rochegenentechtrials@roche.com
# NCT02751177 - Detection of KRAS, NRAS et BRAF Mutations in Plasma Circulating DNA From Patients With Metastatic Colorectal Cancer - v.gillon@nancy.unicancer.fr
# Question: what are all the mutations that are associated with trial NCT02291289?
filters = [
    {"facet":"ID","term":"NCT02291289"}
]
payload = {
    'apiKey': apiKey,
    'filters': filters
}
r = requests.post(url, json=payload)
# Note: must have tags activated on api key for this to work. Not all api key users get tags.
for tag in r.json()['rows'][0]['tags']:
    if tag['facet'] == "MUTATION":
        print(tag)
# Answer:
# 3 mutations are for inclusion criteria
# {'facet': 'MUTATION', 'term': 'EGFR P546S', 'alias': 'EGFR P546S', 'priority': '0', 'filterType': 'include'}
# {'facet': 'MUTATION', 'term': 'BRAF V600E', 'alias': 'BRAF V600E', 'priority': '0', 'filterType': 'include'}
# {'facet': 'MUTATION', 'term': 'Microsatellite instability', 'alias': 'Microsatellite instability', 'priority': '0', 'filterType': 'include'}
# 2 mutations are for exclusion criteria (filterType = 'exclude')
# {'facet': 'MUTATION', 'term': 'EGFR S492R', 'alias': 'EGFR S492R', 'priority': 1, 'filterType': 'exclude'}
# {'facet': 'MUTATION', 'term': 'BRAF G469L', 'alias': 'BRAF G469L', 'priority': 1, 'filterType': 'exclude'}
# See more about the trial data model at: https://api.molecularmatch.com/#trialDataModel
#### Mutation details lookup
# So you want to know everything there is to know about BRAF V600E?
url = mmService + resourceURLs["mutationGet"]
payload = {
    'apiKey': apiKey,
    'name': 'BRAF V600E'
}
r = requests.get(url, params=payload)
# Question: what databases have reported this mutation?
print(r.json()['sources'])
# Answer: 'COSMIC', 'CIViC', 'DoCM', 'cBioPortal', 'ClinVar'
# Question: is there a known protein domain this mutation is in?
for i in r.json()['parents']:
    if (i['type'] == 'domain'):
        print(i)
# Answer: BRAF Pkinase_Tyr domain (protein tyrosine kinase domain)
# What is the clinical interpretation of BRAF V600E? Are there trials, drugs, publications about it?
url = mmService + resourceURLs["mutationClassify"]
payload = {
    'apiKey': apiKey,
    'variant': 'BRAF V600E',
    'condition': 'Lung cancer'
}
r = requests.post(url, json=payload)
# Question: How does MolecularMatch classify this mutation in this condition?
print(r.json()['classifications'][0]['classification'])
# Answer: actionable
# Question: How many drugs approved and on label for the condition provided?
print(r.json()['classifications'][0]['drugsApprovedOnLabelCount'])
# Answer: 0
# Question: How many drugs approved but off-label for the condition provided?
print(r.json()['classifications'][0]['drugsApprovedOffLabelCount'])
# Answer: 6
# Question: What about experimental drugs?
print(r.json()['classifications'][0]['drugsExperimentalCount'])
# Answer: 4
# Question: How many clinical trials are open for this mutation and condition?
print(r.json()['classifications'][0]['trialCount'])
# Answer: 24
# Question: Is there a lot of research publications about this mutation in this condition?
print(r.json()['classifications'][0]['publicationCount'])
# Answer: 47
# Question: Ok, what are these 4 experimental drugs?
url = mmService + resourceURLs["drugSearch"]
# set geneExpand for Drug to False so drugs return only for V600E, not BRAF (see https://api.molecularmatch.com/#geneExpansion)
filters = [
    {'facet':'CONDITION','term':'Lung cancer'},
    {'facet':'MUTATION','term':'BRAF V600E', "geneExpand": {"Drug": False}}
]
payload = {
    'apiKey': apiKey,
    'filters': filters,
    'mode': 'discovery'
}
r = requests.post(url, json=payload)
# NOTE(review): `== False` works here but `is False` / `not drug['approved']`
# is the idiomatic spelling.
for drug in r.json()['rows']:
    print(drug)
    if drug['approved'] == False:
        print(drug['name'])
# Answer:
# Lgx818
# Plx8394
# BGB-283
# Cep-32496
##################################################################
#####################BASIC QUERIES################################
##################################################################
####################search drugs##################################
url = mmService + resourceURLs["drugSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
    'apiKey': apiKey,
    'filters': filters,
    'mode': 'discovery' # 'criteriaunmet' # multiple modes avaiable for drugsearch. see api docs.
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
#####################search trials##################################
url = mmService + resourceURLs["trialSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
    'apiKey': apiKey,
    'filters': filters
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
# Search trials by various ID types
filters = [
    {"facet":"ID","term":"EUDRACT2017-003305-18"}
]
payload = {
    'apiKey': apiKey,
    'filters': filters
}
r = requests.post(url, json=payload)
print('r here')
print(r.json())
#####################search publications#############################
url = mmService + resourceURLs["publicationSearch"]
filters = [{'facet':'CONDITION','term':'Lung cancer'}]
payload = {
    'apiKey': apiKey,
    'filters': filters
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
####################get mutation###################################
url = mmService + resourceURLs["mutationGet"]
payload = {
    'apiKey': apiKey,
    'name': 'BRAF V600E'
}
r = requests.get(url, params=payload)
print(json.dumps(r.json()))
######################get gene#################################
url = mmService + resourceURLs["geneGet"]
payload = {
    'apiKey': apiKey,
    'symbol': 'BRAF'
}
r = requests.get(url, params=payload)
print(json.dumps(r.json()))
######################classify mutation##############################
url = mmService + resourceURLs["mutationClassify"]
payload = {
    'apiKey': apiKey,
    'variant': 'EGFR T790M',
    'condition': 'Lung cancer'
}
r = requests.post(url, json=payload)
print(json.dumps(r.json()))
2,360 | 9c478c59398618d0e447276f9ff6c1c143702f12 | import pygame
import os
from network import Network
from card import Card
from game import Game, Player
pygame.font.init()
# Initializing window
WIDTH, HEIGHT = 700, 800
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Zole")
# All card sprites are scaled to this width (height follows aspect ratio).
CARD_WIDTH = 60
############################## Uploading cards
def get_card_size(card_width, image):
    """Return the card height (rounded) that keeps *image*'s aspect ratio
    when the card is scaled to *card_width* pixels wide."""
    scale = image.get_width() / card_width
    return round(image.get_height() / scale)
# Load the gray card back, derive the common card height from it, then
# scale the back to the standard card size.
CARD_IMAGE_BACK_GRAY = pygame.image.load(
    os.path.join("images", "gray_back.png"))
CARD_HEIGHT = get_card_size(CARD_WIDTH, CARD_IMAGE_BACK_GRAY)
# Uploading backside of cards
CARD_IMAGE_BACK_GRAY = pygame.transform.scale(
    CARD_IMAGE_BACK_GRAY, (CARD_WIDTH, CARD_HEIGHT))
# Uploading all the cards
def upload_card_images(card_name):
    # Load images/<card_name>.png and scale it to the standard card size
    # (module-level CARD_WIDTH x CARD_HEIGHT).
    card_n = pygame.image.load(os.path.join("images", card_name + ".png"))
    card_n = pygame.transform.scale(
        card_n, (CARD_WIDTH, CARD_HEIGHT))
    return card_n
# The 26-card Zole deck (rank + suit initial, matching the image filenames).
CARD_NAMES = ["AC", "AH", "AS", "AD", "KS", "KH", "KD", "KC", "QS", "QH", "QD", "QC", "JS", "JH", "JD", "JC", "10S", "10H", "10D",
              "10C", "9S", "9H", "9D", "9C", "8D", "7D"]
CARD_IMAGES = {}
# Uploading all card images in dictionary
for name in CARD_NAMES:
    CARD_IMAGES[name] = upload_card_images(name)
############################## Uploading cards End
# Card strengths
# Trump ranking (strongest first); non-trump suits rank by face value only.
STRENGTH_SCALE_TRUMPS = ["QC", "QS", "QH", "QD", "JC", "JS", "JH", "JD", "AD", "10D", "KD", "9D", "8D", "7D", "AC", "10C", "KC", "9C",
                         "AH", "10H", "KH", "9H", "AS", "10S", "KS", "9S", "None"]
STRENGTH_SCALE_NON_TRUMPS = ["A", "10", "K", "9", "None"]
def draw_player(win,x, y,width,height, cards, card_images):
    """Draw the local player's hand left-to-right and record hit boxes.

    Each card is blitted at (x + i*width, y) and its screen rectangle is
    stored on the card as position = (left, top, right, bottom) so click
    handling can hit-test it later.

    Uses enumerate instead of the original manually-incremented counter.
    """
    for i, card in enumerate(cards):
        left = x + i * width
        win.blit(card_images[card.name], (left, y))
        card.position = (left, y, left + width, y + height)
def draw_opponents(win,x, y,width,height,back_image,count, hor = True):
    """Draw *count* face-down cards for an opponent.

    A horizontal hand steps right by *width* per card; a vertical hand
    rotates the card back 90 degrees and steps down by *height*.
    """
    if hor:
        for idx in range(count):
            win.blit(back_image, (x + idx * width, y))
    else:
        for idx in range(count):
            win.blit(pygame.transform.rotate(back_image, 90), (x, y + idx * height))
def draw_played_cards(win, cards, card_images, turn_order):
    """Draw the cards played this trick at the three table positions.

    *turn_order* is the seat index (0-2) of the player who led the trick;
    each subsequent card is drawn at the next seat's position, wrapping.

    Bug fixes: the original always blitted cards[0] for every iteration,
    and assigned the advanced index to *turn_order* instead of the loop
    counter, so the seat never advanced.
    """
    positions = [(300, 300), (315, 260), (330, 300)]
    seat = turn_order
    for card in cards:
        win.blit(card_images[card.name], positions[seat])
        seat = (seat + 1) % 3
def main():
    """Client game loop: connect to the server, then draw the table at
    60 FPS and forward clicks on the player's own cards while it is the
    player's turn.
    """
    run = True
    clock = pygame.time.Clock()
    main_font = pygame.font.SysFont("comicsans", 30)
    n = Network()
    player = n.connect()
    def redraw_window(win):
        win.fill((53, 101, 77))
        draw_player(win, 60, 650, CARD_WIDTH, CARD_HEIGHT, player.cards,CARD_IMAGES)
        draw_opponents(win, 60, 150, CARD_WIDTH, CARD_WIDTH, CARD_IMAGE_BACK_GRAY, 8)
        draw_opponents(win, 550, 150, CARD_WIDTH, CARD_WIDTH, CARD_IMAGE_BACK_GRAY, 8, hor = False)
        draw_played_cards(win,game.played_cards_round, CARD_IMAGES, game.turn_order)
        if player.turn == True:
            # Bug fixes: the original iterated `player.Cards` (the attribute
            # is `cards`, so this raised AttributeError), used inverted
            # inequalities that could never match a click inside a card,
            # and mutated the list while iterating it.
            for card in list(player.cards):
                if (card.position[0] <= pos[0] <= card.position[2]
                        and card.position[1] <= pos[1] <= card.position[3]):
                    player.cards.remove(card)
                    player.played_card = True
                    player.last_played_card = card
                    player.turn = False
                    break  # play at most one card per click
        pygame.display.update()
    while run:
        pos = (-5, -5)  # off-screen sentinel: no click this frame
        clock.tick(60)
        game = n.send(player)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit()
            if event.type == pygame.MOUSEBUTTONDOWN and player.turn == True:
                pos = pygame.mouse.get_pos()
        redraw_window(WIN)
main()
|
2,361 | 041a5bf205c1b3b3029623aa93835e99104464b2 | n,k = map(int,raw_input().split())
nums = list(map(int,raw_input().split()))
if k==1:
print min(nums)
elif k==2:
print max(nums[0],nums[-1])
else:
print max(nums)
|
2,362 | 9bd1fd2df7da068ac8aa4e6e24fe14d163a7e6b3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 7/02/2014
@author: marco
Generador de ambientes FACIL 2014
'''
import wx
from formgenerador import FrameGeneral
from Dial_Pagina import ObjPagina
class IncioInterface(FrameGeneral):
    """Main window of the FACIL environment generator (Python 2 / wxPython).

    Hosts a scrollable, double-buffered drawing surface ("pizarra") that
    shows the current page outline at 72 points per inch.
    """
    def __init__(self):
        #self.log = ObLog('Inicio programa')
        #self.log.setNivel(0) #debug
        FrameGeneral.__init__(self,None)
        FrameGeneral.SetTitle(self,u"Administrador de Aplicacion FACIL")
        #iconFile = u"imagenes/2s.ico"
        #FrameGeneral.SetIcon(self,wx.Icon(iconFile, wx.BITMAP_TYPE_ICO))
        #self.Bind(wx.EVT_MENU, self.onConfig,self.f2s_mConfig)
        self.__inicio()
        self.dibujarPizarra()
        # Menu and paint event bindings
        self.Bind(wx.EVT_MENU,self.onDefPagina,self.f2s_menuTamPapel)
        self.f2s_Pizarra.Bind(wx.EVT_PAINT, self.onPaint)
    def __inicio(self):
        # Initialise instance-wide state: page defaults to US Letter
        # (8.5 x 11 inches at 72 pt/inch) with a 20 px border.
        self.Guadar=False
        self.borde=20
        self.AnchoPagina=8.5 * 72
        self.AltoPagina = 11 * 72
        self.objfacil=[]
        self.objFormatos=[]
        self._initBuffer()
    def onDefPagina(self,event):
        # Show the page-setup dialog and apply the chosen paper size and
        # orientation (0/2 = portrait, otherwise landscape).
        pagina= ObjPagina(self.Parent)
        if pagina.orientar==None :
            return
        print pagina.orientar
        print pagina.papel
        if pagina.orientar ==0 or pagina.orientar==2: #Vertical
            self.AnchoPagina=pagina.papel[0] * 72
            self.AltoPagina=pagina.papel[1] * 72
        else: #Horizontal
            self.AnchoPagina=pagina.papel[1] * 72
            self.AltoPagina=pagina.papel[0] * 72
        print self.AnchoPagina
        print self.AltoPagina
        self.dibujarPizarra()
        self.wrapDC = lambda dc: dc
    def dibujarPizarra(self):
        # Configure the drawing surface: white background and scrollbars
        # sized to the page plus border, in 20 px scroll units.
        print "dibujar Pizarra"
        self.f2s_Pizarra.SetBackgroundColour('white')
        self.f2s_Pizarra.EnableScrolling(True,True)
        self.f2s_Pizarra.SetScrollbars(20, 20, (self.AnchoPagina + self.borde *2) / 20, (self.AltoPagina + self.borde *2) / 20)
    def onPaint(self, event):
        print "onPaint"
        """
        Called when the window is exposed.
        """
        # Create a buffered paint DC. It will create the real
        # wx.PaintDC and then blit the bitmap to it when dc is
        # deleted.
        dc = wx.BufferedPaintDC(self.f2s_Pizarra, self.buffer)
        # On Windows, if that's all we do things look a little rough
        # So in order to make scrolling more polished-looking
        # we iterate over the exposed regions and fill in unknown
        # areas with a fall-back pattern.
        dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
        dc.DrawRectangle(self.borde, self.borde, self.AnchoPagina, self.AltoPagina)
        print self.borde, self.borde, self.AnchoPagina, self.AltoPagina
        if wx.Platform != '__WXMSW__':
            return
        print "Windows?"
        # First get the update rects and subtract off the part that
        # self.buffer has correct already
        region = self.f2s_Pizarra.GetUpdateRegion()
        panelRect = self.f2s_Pizarra.GetClientRect()
        offset = list(self.f2s_Pizarra.CalcUnscrolledPosition(0,0))
        offset[0] -= self.saved_offset[0]
        offset[1] -= self.saved_offset[1]
        region.Subtract(-offset[0],- offset[1],panelRect.Width, panelRect.Height)
        # Now iterate over the remaining region rects and fill in with a pattern
        rgn_iter = wx.RegionIterator(region)
        if rgn_iter.HaveRects():
            self.setBackgroundMissingFillStyle(dc)
            offset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)
        while rgn_iter:
            r = rgn_iter.GetRect()
            if r.Size != self.f2s_Pizarra.ClientSize:
                dc.DrawRectangleRect(r)
            rgn_iter.Next()
    #def onConfig(self,env):
        #self.log.logger.info('onCofig')
        #image=ObjConfig(self.Parent,self.log.getNivel())
    def _initBuffer(self):
        print "_initBuffer"
        """Initialize the bitmap used for buffering the display."""
        size = self.f2s_Pizarra.GetSize()
        self.buffer = wx.EmptyBitmap(max(1,size.width),max(1,size.height))
        dc = wx.BufferedDC(None, self.buffer)
        dc.SetBackground(wx.Brush(self.f2s_Pizarra.GetBackgroundColour()))
        dc.Clear()
        #self.drawContents(dc)
        del dc # commits all drawing to the buffer
        self.saved_offset = self.f2s_Pizarra.CalcUnscrolledPosition(0,0)
        self._reInitBuffer = False
class ObjInicio():
    """Application launcher: builds the wx app, shows the main frame and
    runs the event loop until the window is closed."""
    def __init__(self,ActDebug=False):
        # Launch the application. The ActDebug flag is currently unused;
        # the commented block below was an alternative debug launcher.
        #ActDebug=True
        #
        #print "inicio"
        #if ActDebug:
        #    pass
        #    aplicacion = ObjDebug(redirect=True)
        #else:
        #    aplicacion=wx.PySimpleApp()
        #    frame_usuario = IncioInterface()
        #    frame_usuario.Maximize()
        #    frame_usuario.Show()
        aplicacion=wx.PySimpleApp()
        frame_usuario = IncioInterface()
        #frame_usuario.Maximize()
        frame_usuario.Show()
        aplicacion.MainLoop()
        aplicacion.Destroy()
if __name__ == '__main__':
    # Launch the application (debug mode off).
    j=ObjInicio(False)
|
2,363 | 6d18aa585c656b244d1e4272caa8419c04b20b6c | #----------------------------
# |
# Instagram Bot- Devesh Kr. Verma
# instagram- @felon_tpf
# |
#----------------------------
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
import random
import string
from time import sleep
from selenium import webdriver
#Change this list to your wanted comments (what you wnat to comment on posts)
comments = ['Please Visite on my page take a look if you like please follow ', 'Nice post- just follow me @eyetunities ', 'loool very nice!-want to earn money just follow me @eyetunities ', 'I like it!-follow me for daily motivational post on your wall', 'Super ;)-follow me guys @eyetunities ', 'hmmm,interesting-follow me for daily money earning tips ', ' wow- follow me for online money earning tips ', 'amazing post dude-also check out my profile , for Online money earning tips ', 'learn something new - follow me @eyetunities ', 'Mind blowing - follow for money earning tips Online money ', 'I like it , great post- follow my page please -daily money earning tips ', ]
#This variables to keep tracking of the posts
posts=0
#Chromedriver path. Make sure to have the same Chromedriver version as your Google Chrome browser
browser = webdriver.Chrome(executable_path= r"D:\pythonlearn\python_projects\chromedriver.exe") # <----- ENTER PATH HERE
browser.get(('https://www.instagram.com/accounts/login/?source=auth_switcher'))
sleep(2)
def likeAndComm(): # Likes and Comments the first 9 posts
    """Like and comment the first 9 (3x3) posts on the Explore grid, then
    reload Explore and repeat indefinitely.

    Bug fix: the original called itself recursively at the end of every
    round, so a long-running session eventually died with RecursionError;
    the recursion is replaced by a loop.
    """
    global posts
    while True:
        for y in range (1,4):
            for x in range(1,4):
                # Open the post at grid cell (row y, column x).
                post = browser.find_element_by_xpath('/html/body/div[1]/section/main/div/div[1]/div/div['+str(y)+']/div['+str(x)+']')
                browser.implicitly_wait(1)
                post.click()
                sleep(2)
                postLike = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]').click()
                #postLike.click()
                print("Post liked")
                sleep(2)
                #comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click()
                print("click1")
                sleep(3)
                # Focus the comment form, then type a random canned comment.
                comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click()
                print("click2")
                comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea').send_keys(random.choice(comments))
                print("send1-Writing comment")
                sleep(3)
                sendComment = browser.find_element_by_xpath("//button[@type='submit']")
                sendComment.click()
                print("click3-Comment-posted")
                print("searching for new post, searching...")
                sleep(4)
                posts+=1
                closePost=browser.find_element_by_xpath('/html/body/div[4]/div[3]/button/div')
                closePost.click()
                sleep(3)
        print ('No. of posts: ' +str(posts))
        sleep(5)
        # Reload Explore so a fresh 3x3 grid is shown for the next round.
        browser.get('https://www.instagram.com/explore/')
        sleep(6)
def start():
    # Log in with the hard-coded credentials, dismiss the notification
    # prompt, open the Explore page and hand off to likeAndComm().
    username = browser.find_element_by_name('username')
    username.send_keys('Username') # <- INSERT YOUR INSTAGRAM USERNAME HERE
    password = browser.find_element_by_name('password')
    password.send_keys('Password') # <- INSERT YOUR INSTAGRAM PASSWORD HERE
    nextButton = browser.find_element_by_xpath("//button[@type='submit']")
    nextButton.click()
    sleep(4)
    notification = browser.find_element_by_xpath("//button[contains(text(), 'Not Now')]")
    notification.click()
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    likeAndComm() # likeAndComm function
    sleep(5)
#Start the programm
start()
|
2,364 | 3f22bf954a8c4608ec4bd4a28bea3679a664a99a | field = [['*', '1', '2', '3'], ['1', '-', '-', '-'], ['2', '-', '-', '-'], ['3', '-', '-', '-']]
def show(a):
    """Print the board `a` row by row; cells are followed by a single space."""
    for row in a:
        for cell in row:
            print(cell, end=' ')
        print()
def askUserZero():
    """Prompt until the user enters a free board cell for 'O'; return [row, col]."""
    while True:
        raw_row = input('Введите номер строки нолика')
        raw_col = input('Введите номер столбца нолика')
        # Reject non-numeric input first.
        if not (raw_row.isdigit() and raw_col.isdigit()):
            print("Значение должно принимать значения от 1 до 3. Попробуйте снова")
            continue
        row, col = int(raw_row), int(raw_col)
        if row not in (1, 2, 3) or col not in (1, 2, 3):
            print("Такой позиции не существует, попробуйте снова")
            continue
        if field[row][col] != '-':
            print("Позиция уже занята :( Попробуйте снова")
            continue
        return [row, col]
def askUserCross():
    """Prompt until the user enters a free board cell for 'X'; return [row, col]."""
    while True:
        raw_row = input('Введите номер строки крестика')
        raw_col = input('Введите номер столбца крестика')
        # Reject non-numeric input first.
        if not (raw_row.isdigit() and raw_col.isdigit()):
            print("Значение должно принимать значения от 1 до 3. Попробуйте снова")
            continue
        row, col = int(raw_row), int(raw_col)
        if row not in (1, 2, 3) or col not in (1, 2, 3):
            print("Такой позиции не существует, попробуйте снова")
            continue
        if field[row][col] != '-':
            print("Позиция уже занята :(\nПопробуйте снова")
            continue
        return [row, col]
def winCombo(a):
    """Check board `a` (header row/column at index 0) for three identical
    marks ('X' or '0') in any row, column or diagonal.

    Prints the winner and returns "Congratulations!" when a winning line is
    found, otherwise returns None.

    Bug fixed: the original accumulated its pair counters across different
    rows/columns, so one adjacent pair in row 1 plus one in row 2 was
    wrongly reported as a win.
    """
    size = len(a)  # 4 = header + 3 playable rows/columns
    lines = []
    # Rows and columns (skip index 0, the header).
    for i in range(1, size):
        lines.append([a[i][j] for j in range(1, size)])
        lines.append([a[j][i] for j in range(1, size)])
    # Main diagonal and anti-diagonal.
    lines.append([a[i][i] for i in range(1, size)])
    lines.append([a[i][size - i] for i in range(1, size)])
    for line in lines:
        if line[0] in ('X', '0') and all(cell == line[0] for cell in line):
            print("Выйграл", line[0])
            return "Congratulations!"
# Main game loop: X (human) moves first, then O; the board is checked for a
# winning line after every move.
# NOTE(review): a draw (full board, no winner) is never detected, so the loop
# would keep prompting forever in that case — confirm/fix upstream.
while True:
    show(field)
    crossPos = askUserCross()
    field[crossPos[0]][crossPos[1]]='X'
    show(field)
    result=winCombo(field)
    if result:
        show(field)
        break
    zeroPos = askUserZero()
    field[zeroPos[0]][zeroPos[1]]='0'
    result = winCombo(field)
    if result:
        show(field)
        break
# Announce the final result ("Congratulations!").
print(result)
|
2,365 | cb742701094a8060e524ba22a0af2f969bdbf3d9 | import vk_loader.vk_api as vk
from config import config
import uuid
import requests
from models import session, Meme
import os
PHOTO_URL_FIELDS = [
'photo_75',
'photo_130',
'photo_604',
'photo_807',
'photo_1280',
'photo_2560'
]
conf = config('loader', default={
'access_token': 'Enter VK access token here.',
'sources': [],
'load_limit_per_source': 20,
'remember_loaded_ids': 50,
'images_dir': 'img/'
})
def get_random_id():
    """Return a random 32-character hex string (uuid4) used as an image id."""
    random_uuid = uuid.uuid4()
    return random_uuid.hex
def is_post_meme(post):
    """Return True when a VK wall post looks like a meme: a regular
    (non-pinned, non-ad) post whose single attachment is a photo."""
    if 'id' not in post or 'attachments' not in post:
        return False
    # Skip pinned posts and advertisements.
    if post.get('is_pinned') == 1 or post.get('marked_as_ads') == 1:
        return False
    attachments = post['attachments']
    if not (type(attachments) == list and len(attachments) == 1):
        return False
    attachment = attachments[0]
    return attachment.get('type') == 'photo' and 'photo' in attachment
def get_last_loaded_ids(source_id):
    """Return previously loaded post ids for `source_id` (one int per line in
    its id file); an empty list when no id file exists yet."""
    try:
        with open('vk_loader/loaded_ids/' + str(source_id), 'r') as id_file:
            return [int(line.strip()) for line in id_file.readlines()]
    except IOError:
        return []
def save_loaded_ids(source_id, ids):
    """Prepend `ids` to the stored id list for `source_id`, keeping at most
    conf['remember_loaded_ids'] entries (newest first)."""
    combined = ids + get_last_loaded_ids(source_id)
    limit = conf['remember_loaded_ids']
    if len(combined) > limit:
        combined = combined[:limit]
    try:
        with open('vk_loader/loaded_ids/' + str(source_id), 'w') as id_file:
            id_file.write('\n'.join(str(i) for i in combined))
    except IOError:
        print('Can\'t save ids!')
def get_unique_post_id(source_id, post_id):
    """Combine source and post ids into a single unique key, e.g. '12_345'."""
    return '{}_{}'.format(source_id, post_id)
def get_new_posts():
    """Fetch wall posts from every configured source that have not been seen
    before, remembering their ids so later runs skip them.

    Returns a flat list of raw post dicts collected from all sources.
    """
    result = []
    for source_id in conf['sources']:
        loaded_ids = set(get_last_loaded_ids(source_id))
        to_save = []        # ids first seen during this run
        finished = False
        considered = 0      # posts examined so far; also used as the API offset
        while not finished:
            # Page through the wall using `considered` as the offset.
            posts = vk.get_posts(source_id, offset=considered)['items']
            count = len(posts)
            if count == 0:
                finished = True
                continue
            for item in posts:
                # Hard cap on how many posts to examine per source.
                if considered == conf['load_limit_per_source']:
                    finished = True
                    break
                considered += 1
                if 'id' not in item:
                    continue
                post_id = item['id']
                if post_id in loaded_ids:
                    # Reached an already-seen post: stop paging after this batch.
                    # NOTE(review): `continue` still examines the rest of the
                    # current batch, so posts older than a seen one may still be
                    # collected — confirm this is intended.
                    finished = True
                    continue
                to_save.append(post_id)
                result.append(item)
        if len(to_save) > 0:
            save_loaded_ids(source_id, to_save)
    return result
def download(url, filename):
    """Fetch `url` over HTTP and write the response body to `filename` (binary)."""
    with open(filename, "wb") as out_file:
        out_file.write(requests.get(url).content)
def __main__():
    """Entry point: download new meme photos from VK and register each one in
    the database as a Meme row keyed by a random image id."""
    os.makedirs(conf['images_dir'], exist_ok=True)
    os.makedirs('vk_loader/loaded_ids', exist_ok=True)
    posts = get_new_posts()
    posts = filter(is_post_meme, posts)
    for post in posts:
        photo = post['attachments'][0]['photo']
        # Pick the largest available size: scan PHOTO_URL_FIELDS from the end.
        ptr = len(PHOTO_URL_FIELDS) - 1
        while ptr >= 0 and PHOTO_URL_FIELDS[ptr] not in photo:
            ptr -= 1
        if ptr < 0:
            continue  # no usable photo size found
        photo_url = photo[PHOTO_URL_FIELDS[ptr]]
        # NOTE(review): assumes VK always serves .jpg — confirm (.png exists too).
        assert(photo_url.endswith('.jpg'))
        photo_id = get_random_id()
        try:
            print('loading', photo_id, photo_url)
            download(photo_url, conf['images_dir'] + photo_id + '.jpg')
        except IOError:
            print('Downloading/saving an image failed!')
            continue
        session.add(Meme(img=photo_id))
    # Commit all new Meme rows in one transaction.
    session.commit()
__main__()
|
2,366 | 7998c4e0ed2bb683f029342554730464f8ac2a09 | """
TODO
Chess A.I.
"""
import os, pygame, board, math, engine, sys, gSmart
from pygame.locals import *
import engine, board, piece, copy
class gSmart:
    """Chess move selector: scores positions with a weighted sum of engine
    metrics and picks moves via minimax over a game tree."""

    def __init__(self):
        self.e = engine.engine()
        # Metric weights used by evaluatePosition.
        self.mtrlW = .75     # material
        self.dvlpW = 2       # development
        self.aggnW = 2       # aggression
        self.defnW = .5      # defense
        self.thrndW = 2      # threatened pieces
        self.epW = 10        # en prise
        self.chkW = 50       # check
        self.chkmtW = 1000   # checkmate

    def getNextMove(self, b, n):
        """Return the best move for board `b`, searching `n` ply deep."""
        gt = gameTree(b, n)  # create a gameTree of n ply
        # NOTE(review): gameTree defines `minimax`, not `miniMax` — confirm
        # which spelling is intended before this path is exercised.
        return gt.miniMax()

    def getAllNextMoves(self, b):
        """Return [[future_board, [from_sqr, to_sqr]], ...] for every legal
        move of the side to play on `b` (tried on deep copies, `b` untouched)."""
        pcs = b.getPieces(b.turn)
        nextMoves = []
        for p in pcs:
            for x in range(8):
                for y in range(8):
                    futureB = copy.deepcopy(b)
                    success = futureB.movePiece(self.e, p.sqr, [x, y])
                    if success == True:
                        m = [p.sqr, [x, y]]
                        nextMoves.append([futureB, m])
        return nextMoves

    def evaluatePosition(self, b):
        """Return a single weighted score for board `b`.

        Each engine metric yields a two-element [side0, side1] pair; the score
        is weighted_side1 - weighted_side0 (presumably black/white — confirm
        against the engine module).
        """
        metrics = [
            b.getMaterialSums(),
            self.e.getDevelopment(b),
            self.e.getAggression(b),
            self.e.getDefense(b),
            self.e.getThreatened(b),
            self.e.getEnPrise(b),
            self.e.getCheck(b),
            self.e.getCheckmate(b),
        ]
        weights = [self.mtrlW, self.dvlpW, self.aggnW, self.defnW,
                   self.thrndW, self.epW, self.chkW, self.chkmtW]
        weightedMetrics = [[w * m[0], w * m[1]] for w, m in zip(weights, metrics)]
        weightedPosition = [0, 0]
        for pair in weightedMetrics:
            weightedPosition[0] += pair[0]
            weightedPosition[1] += pair[1]
        totalWeight = -1 * weightedPosition[0] + weightedPosition[1]
        # Bug fixed: the original concatenated str + float, raising TypeError.
        print("total weight: " + str(totalWeight))
        return totalWeight
class gameTree():
    """Game tree of board positions used for minimax search."""

    def __init__(self, b, n):
        """Build a game tree of `n` ply starting from board `b`."""
        # Bug fixed: was `gSmart.gameTree.tree` — gameTree is a module-level
        # class here, so that attribute path raised AttributeError.
        self.t = gameTree.tree(b)
        cur = self.t.getRoot()
        # Bug fixed: the ply depth was hard-coded to 3, ignoring `n`.
        self.addPly(cur, b, n)

    def addPly(self, curNode, b, ply):
        """Recursively expand `curNode` with all next moves, `ply` levels deep."""
        if ply == 0:  # base case
            return
        # NOTE(review): getAllNextMoves is a gSmart method, not a module-level
        # function — this call needs a gSmart instance; confirm intended wiring.
        moves = getAllNextMoves(curNode.board)
        for move in moves:
            # Bug fixed: node() takes (board, move); the original passed an
            # undefined third argument `mm`.
            temp = gameTree.tree.node(b, move)
            curNode.addChild(temp)
            self.addPly(temp, b, ply - 1)

    def getMinOrMax(self, b):
        """Return "max" when it is white's turn on `b`, else "min"."""
        # NOTE(review): getTurn is not called here (no parentheses) — if it is
        # a method this would always compare a bound method to "w"; confirm.
        if b.getTurn == "w":
            return "max"
        else:
            return "min"

    def minimax(self):
        """TODO: minimax over the tree; not implemented yet — returns None."""
        return None

    class tree:
        """N-ary tree of nodes holding (board, move) pairs."""

        def __init__(self, b=None, m=None):
            # Bug fixed: was `gSmart.gameTree.tree.node` (wrong attribute path).
            self.root = gameTree.tree.node(b, m)

        def getRoot(self):
            return self.root

        def addNode(self, parent, child):
            parent.addChild(child)

        def DFS(self, start):
            """Print every node reachable from `start`, depth first."""
            print(str(start))
            children = start.getChildren()
            if (len(children) == 0):
                return
            else:
                for child in children:
                    self.DFS(child)

        class node:
            """A tree node: a board, the move that produced it, and a value."""

            def __init__(self, b=None, m=None):
                self.children = []
                self.board = b
                self.move = m
                self.value = None

            def addChild(self, newChild):
                self.children.append(newChild)

            def getChildren(self):
                return self.children

            def getData(self):
                # NOTE(review): self.data is never assigned anywhere — this
                # raises AttributeError if called; confirm it is dead code.
                return self.data

            def setValue(self, v):
                if v == None:
                    self.value = self.getBoardValue()
                else:
                    self.value = v

            def getValue(self):
                return self.value

            def getBoardValue(self):
                # NOTE(review): nodes have no `gSmart` attribute and
                # evaluatePosition requires a board argument — confirm wiring.
                return self.gSmart.evaluatePosition()

            def isMaxNode(self):
                return self.board.isTurn() == "w"
# Smoke test: build a default board, grow a 3-ply game tree and dump it.
bd = board.Board()
bd.setupDefault()
# Bug fixed: gameTree is a module-level class (the class gSmart defined above
# has no gameTree attribute), and the traversed tree is gt.t — the bare name
# `t` was undefined (NameError).
gt = gameTree(bd, 3)
gt.t.DFS(gt.t.getRoot())
2,367 | 6657f0b51bc021e6b5867bbdd1a520c2b0cb92b3 | import logging.config
import os
import sys
import yaml
sys.path.append(os.path.join(os.path.abspath('.'), '..', '..'))
def setup_logging(default_path='common/config/logging.yaml',
                  default_level=logging.INFO):
    """Configure the logging package.

    Loads a dictConfig from the YAML file at `default_path`; when the file
    does not exist, falls back to basicConfig at `default_level`.

    Bug fixed: `default_level` was referenced in the fallback branch but never
    defined, so a missing config file raised NameError. It is now a keyword
    parameter (backward-compatible default: logging.INFO).
    """
    path = default_path
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level)
|
2,368 | ef6f91af5f500745fdcc23947a7e1764061c608c | import data
import sub_vgg19
import time
import tensorflow as tf
# Model variants defined in sub_vgg19: synchronous vs asynchronous VGG-19.
model_syn = sub_vgg19.vgg19_syn
model_asy = sub_vgg19.vgg19_asy
# Dataset splits prepared by the data module.
train_x = data.train_x
train_y = data.train_y
test_x = data.test_x
test_y = data.test_y
def input_fn(images, labels, epochs, batch_size):
    """Build a tf.data pipeline over (images, labels), repeated `epochs`
    times and batched by `batch_size`."""
    # Renamed the local from `data` so it no longer shadows the imported
    # `data` module.
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    dataset = dataset.repeat(epochs).batch(batch_size)
    return dataset
# Training hyper-parameters.
epochs = 30
batch_size = 32 * 8
# Choose the synchronous (Keras fit/evaluate) path vs the asynchronous
# (Estimator-style train/evaluate) path.
syn = True
time1 = time.time()  # wall-clock start, reported at the end
if syn :
    model_syn.fit(train_x, train_y, epochs=epochs, batch_size = batch_size)
    test_loss, test_acc = model_syn.evaluate(test_x, test_y, verbose=2, batch_size = 1000) # test batch size is fixed at 1000
else:
    model_asy.train(lambda: input_fn(train_x,
                                     train_y,
                                     epochs=epochs,
                                     batch_size=batch_size))
    acc = model_asy.evaluate(lambda: input_fn(test_x,
                                              test_y,
                                              epochs=epochs,
                                              batch_size=1000)) # test batch size is fixed at 1000
    print("acc", acc)
print("총 걸린 시간 :", time.time() - time1)
|
2,369 | 734fd4c492f2fd31a0459e90e5c4a7468120b4cd | # http://www.dalkescientific.com/writings/diary/archive/2007/10/07/wide_finder.html
'''
Making a faster standard library approach
As I was writing an email to Fredrik describing these results,
I came up with another approach to speeding up the performance, using only the standard library.
Fredrik showed that using a two-level filter, with a quick exclusion test using string operations followed by the regular expression test,
was faster than doing only the regular expression test. Quoting him:
The RE engine does indeed use special code for literal prefixes,
but the superlinear substring search algorithm that was introduced in 2.5 is a lot faster in cases like this, so this simple change gives a noticable speedup.
This works because the only about 20% of the lines in the input file matches the quick test and the simple string test is
% python -m timeit -s 's="This is a test. I was here."*4; t="testXYZ"' 't in s'
10000000 loops, best of 3: 0.194 usec per loop
% python -m timeit -s 'import re;s="This is a test. I was here."*4; t=re.compile("testXYZ")' 't.search(s)'
1000000 loops, best of 3: 0.98 usec per loop
% python -c 'print 0.98/0.194'
5.05154639175
%
roughly 5 times faster than the regular expression test.
My observation was that I can defer the regular expression test until later.
Use the quick string test to find all substrings starting with "GET /ongoing/When/" and ending with the " ".
This will include some extra substrings. Tally all of the substrings, including the false positives.
This will do extra work but the tallying code is very fast.
Once the file has been parsed, post-process the counts dictionary and remove those keys which are not allowed by the regular expression.
This works because there are many duplicate keys. Nearly 50% of the entries which pass the quick string test are duplicates.
The keys in the counts dictionary are unique, which mean only one regular expression test needs to be done, instead of one for each match.
If most of the entries were under /ongoing/When/ and most were unique then these optimizations would be a net slowdown.
You have to understand your data as well as the software in order to figure out how to improve things, and there will be tradeoffs.
Remember also I mentioned that string operations are available for buffer objects?
This means I can do the fast find directly on the memory-mapped file, rather than using a chunk reader.
I'll do the quick search for the leading part of the pattern to search for, then another search for the trailing " " (space) character.
'''
# dalke-wf-10.py fast string ops, mmap, post-process filter
# NOTE: Python 2 code (print statements, iteritems below).
import re, os, mmap
from collections import defaultdict
FILE = "o1000k.ap"
import time, sys
# Pick the best wall-clock timer per platform (Python 2 era: time.clock has
# higher resolution on Windows, time.time elsewhere).
if sys.platform == "win32":
    timer = time.clock
else:
    timer = time.time
t0, t1 = timer(), time.clock()
# Matches "GET /ongoing/When/<ddd>x/<yyyy/mm/dd/slug> " and captures the path.
pat = re.compile(r"GET /ongoing/When/\d\d\dx/(\d\d\d\d/\d\d/\d\d/[^ .]+) ")
search = pat.search
def count_file(filename):
    """Tally /ongoing/When/ request paths in the log file (Python 2).

    Strategy: fast substring scans over an mmap of the whole file collect
    every candidate hit (including false positives), then the regular
    expression filters the *unique* keys afterwards — far fewer regex calls
    because duplicate keys are tested only once.

    Returns a dict mapping the captured path (regex group 1) to its count.
    """
    count = defaultdict(int)
    # NOTE(review): the `filename` parameter is ignored; the module-level
    # FILE is opened instead — confirm before reusing with another path.
    fileobj = open(FILE)
    filemap = mmap.mmap(fileobj.fileno(), os.path.getsize(FILE), access=mmap.ACCESS_READ)
    i = j = 0
    # For the first pass, including everything which is a reasonable match.
    # It's faster to count everything and filter later than it is to do
    # the filtering now.
    while 1:
        i = filemap.find("GET /ongoing/When/", j)
        if i == -1:
            break
        # 19 = len("GET /ongoing/When/") + 1: skip past the prefix before
        # searching for the terminating space.
        j = filemap.find(' ', i+19)
        field = filemap[i:j]
        count[field] += 1
    # The previous code included fields which aren't allowed by the
    # regular expression. Filter those which don't match the regexp.
    new_count = {}
    for k, v in count.iteritems():
        # because of the way the key was saved, I didn't keep the
        # trailing space. Add it back here so the regexp can be used unchanged.
        k = k + " "
        m = pat.search(k)
        if m:
            new_count[m.group(1)] = v
    return new_count
count = count_file(FILE)
# Touch the 10 rarest keys with output suppressed — keeps timing comparable
# to the other wide-finder variants that printed them.
for key in sorted(count, key=count.get)[:10]:
    pass # print "%40s = %s" % (key, count[key])
print timer() - t0, time.clock() - t1
# sanity check
# Print the 10 most frequent paths (Python 2 print statement).
for key in sorted(count, key=count.get)[-10:]:
    print "%40s = %s" % (key, count[key])
'''
Variable lookups in module scope are slower than lookups in local scope so I introduced the count_file function to get a bit more speed.
I didn't generate numbers for this one but experience says it's nearly always a performance advantage.
The resulting dalke-wf-10 code finishes in 1.0s. Yes, you read that right. It's faster than the mmap/findall solution of dalke-wf-7.py, which took 1.3s.
Still not as fast as mxTextTools at 0.7s, but this solution uses only the standard library.
''' |
2,370 | 16738e7d89bee8074f39d0b3abc3fa786faf081f | import random
prime=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
t=100
print(t)
n=25
for _ in range(t):
a=random.randint(1,n)
b=random.choice(prime)
print(a,b)
for _ in range(a):
print(random.randint(1,n),end=" ")
print("")
|
2,371 | f96c9753f3cbb0e554f9f05591e23943009c8955 | from classifier import classifier
from get_input_args import get_input_args
from os import listdir
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/calculates_results_stats_hints.py
#
# PROGRAMMER:
# DATE CREATED:
# REVISED DATE:
# PURPOSE: This is a *hints* file to help guide students in creating the
# function calculates_results_stats that calculates the statistics
# of the results of the programrun using the classifier's model
# architecture to classify the images. This function will use the
# results in the results dictionary to calculate these statistics.
# This function will then put the results statistics in a dictionary
# (results_stats_dic) that's created and returned by this function.
# This will allow the user of the program to determine the 'best'
# model for classifying the images. The statistics that are calculated
# will be counts and percentages. Please see "Intro to Python - Project
# classifying Images - xx Calculating Results" for details on the
# how to calculate the counts and percentages for this function.
# This function inputs:
# - The results dictionary as results_dic within calculates_results_stats
# function and results for the function call within main.
# This function creates and returns the Results Statistics Dictionary -
# results_stats_dic. This dictionary contains the results statistics
# (either a percentage or a count) where the key is the statistic's
# name (starting with 'pct' for percentage or 'n' for count) and value
# is the statistic's value. This dictionary should contain the
# following keys:
# n_images - number of images
# n_dogs_img - number of dog images
# n_notdogs_img - number of NON-dog images
# n_match - number of matches between pet & classifier labels
# n_correct_dogs - number of correctly classified dog images
# n_correct_notdogs - number of correctly classified NON-dog images
# n_correct_breed - number of correctly classified dog breeds
# pct_match - percentage of correct matches
# pct_correct_dogs - percentage of correctly classified dogs
# pct_correct_breed - percentage of correctly classified dog breeds
# pct_correct_notdogs - percentage of correctly classified NON-dogs
#
##
# TODO 5: EDIT and ADD code BELOW to do the following that's stated in the
# comments below that start with "TODO: 5" for the calculates_results_stats
# function. Please be certain to replace None in the return statement with
# the results_stats_dic dictionary that you create with this function
#
def calculates_results_stats(results_dic):
    """
    Calculates statistics of the results of the program run using the
    classifier's model architecture to classify pet images, and returns them
    in a dictionary so the user can compare architectures.

    Parameters:
      results_dic - Dictionary with key as image filename and value as a List:
                    idx 0 = pet image label (string)
                    idx 1 = classifier label (string)
                    idx 2 = 1/0 (int), 1 = labels match
                    idx 3 = 1/0 (int), 1 = pet image is-a dog
                    idx 4 = 1/0 (int), 1 = classifier classifies image as-a dog
    Returns:
      results_stats_dic - Dictionary of counts (keys starting with 'n') and
                    percentages (keys starting with 'pct').

    Bug fixed: the percentage calculations divided unconditionally, raising
    ZeroDivisionError for an empty results_dic or one containing no dog
    images; those cases now yield 0.0.
    """
    results_stats_dic = {
        'n_dogs_img': 0,
        'n_match': 0,
        'n_correct_dogs': 0,
        'n_correct_notdogs': 0,
        'n_correct_breed': 0,
    }
    for key in results_dic:
        labels_match = results_dic[key][2] == 1
        pet_is_dog = results_dic[key][3] == 1
        classified_dog = results_dic[key][4] == 1
        if labels_match:
            results_stats_dic['n_match'] += 1
        # Correct breed: image is a dog AND labels match exactly.
        if pet_is_dog and labels_match:
            results_stats_dic['n_correct_breed'] += 1
        if pet_is_dog:
            results_stats_dic['n_dogs_img'] += 1
            if classified_dog:
                results_stats_dic['n_correct_dogs'] += 1
        elif not classified_dog:
            # Neither label is a dog: correctly classified NOT-a-dog image.
            results_stats_dic['n_correct_notdogs'] += 1
    results_stats_dic['n_images'] = len(results_dic)
    results_stats_dic['n_notdogs_img'] = (results_stats_dic['n_images'] -
                                          results_stats_dic['n_dogs_img'])

    def _pct(numerator, denominator):
        # Guard against empty categories (e.g. no dog images submitted).
        return (numerator / denominator) * 100.0 if denominator > 0 else 0.0

    results_stats_dic['pct_match'] = _pct(results_stats_dic['n_match'],
                                          results_stats_dic['n_images'])
    results_stats_dic['pct_correct_dogs'] = _pct(results_stats_dic['n_correct_dogs'],
                                                 results_stats_dic['n_dogs_img'])
    results_stats_dic['pct_correct_breed'] = _pct(results_stats_dic['n_correct_breed'],
                                                  results_stats_dic['n_dogs_img'])
    results_stats_dic['pct_correct_notdogs'] = _pct(results_stats_dic['n_correct_notdogs'],
                                                    results_stats_dic['n_notdogs_img'])
    return results_stats_dic
#----------------------------------------------------------------------------------------------------
# METHODS FROM OTHER LESSONS
#----------------------------------------------------------------------------------------------------
def adjust_results4_isadog(results_dic, dogfile):
    """
    Extends each value list in results_dic with two 1/0 flags:
      index 3 = pet image label is a dog name
      index 4 = classifier label is a dog name
    Dog names come from `dogfile` (one name per line, lowercase; classifier
    names may be a comma-separated string on a single line).

    Parameters:
      results_dic - dict of filename -> [pet_label, classifier_label, match];
                    mutated in place
      dogfile     - path to the dog-names text file
    Returns:
      None - results_dic is mutable so no return needed.
    """
    # Build the dog-name lookup table from the file.
    dognames_dic = dict()
    with open(dogfile, "r") as infile:
        for raw_line in infile:
            name = raw_line.strip('\n')
            if name not in dognames_dic:
                dognames_dic[name] = 1
    # Append the two is-a-dog flags for every image.
    for key in results_dic:
        pet_is_dog = results_dic[key][0] in dognames_dic
        classifier_is_dog = results_dic[key][1] in dognames_dic
        results_dic[key].extend((1 if pet_is_dog else 0,
                                 1 if classifier_is_dog else 0))
def classify_images(images_dir, results_dic, model):
    """
    Runs the classifier on every image and rewrites each value in results_dic
    as [pet_label, classifier_label, match_flag], where match_flag is 1 when
    the pet label appears inside the (lowercased, stripped) classifier label.

    Parameters:
      images_dir  - path (with trailing separator) to the images folder
      results_dic - dict of filename -> pet label (string); mutated in place
      model       - CNN architecture name: 'resnet', 'alexnet' or 'vgg'
    Returns:
      None - results_dic is mutable so no return needed.

    Bug fixed: the folder listing was hard-coded to "pet_images/" and ignored
    the images_dir argument.
    """
    # List the images, skipping hidden files such as .DS_Store.
    filename_list = [f for f in listdir(images_dir) if not f.startswith('.')]
    # NOTE(review): pairing results_dic keys with filename_list by position
    # assumes both iterate in the same order — using `key` itself as the
    # filename would be safer; confirm before relying on this.
    idx = 0
    for key in results_dic:
        pet_label = results_dic[key]
        path = images_dir + filename_list[idx]
        # Normalize the classifier output for comparison.
        model_label = classifier(path, model).lower().strip()
        truth = 1 if pet_label in model_label else 0
        results_dic[key] = [pet_label, model_label, truth]
        idx = idx + 1
def get_pet_label(pet_image):
    """Derive a lowercase pet label from an image filename.

    Underscore-separated tokens are kept only if they are purely
    alphabetic (numeric indices and the extension are dropped) and are
    joined with single spaces.
    """
    tokens = pet_image.lower().split("_")
    return " ".join(token for token in tokens if token.isalpha())
def print_dict(dict):
    """Print every (key, value) pair of *dict*, one tuple per line."""
    for pair in dict.items():
        print(pair)
def main():
    """Drive the pet-image classification pipeline end to end.

    Reads CLI arguments, labels every non-hidden file in pet_images/,
    classifies them, adjusts for is-a-dog, and prints the final stats.
    """
    in_arg = get_input_args()
    # Skip hidden files (e.g. .DS_Store) when listing the image folder.
    filename_list = [name for name in listdir("pet_images/")
                     if not name.startswith('.')]
    results_dic = dict()
    for name in filename_list:
        if name not in results_dic:
            results_dic[name] = get_pet_label(name)
    classify_images(in_arg.dir, results_dic, in_arg.arch)
    adjust_results4_isadog(results_dic, in_arg.dogfile)
    results_dic_output = calculates_results_stats(results_dic)
    print_dict(results_dic_output)
#----------------------------------------------------------------------------------------------------
main() |
2,372 | 80819ec83572737c89044936fc269154b190751a | import pymysql
def get_list(sql, args):
    """Run *sql* with *args* and return all rows as a list of dicts.

    BUGFIX: the connection and cursor were leaked if execute() raised;
    try/finally now guarantees cleanup on every path.
    """
    # NOTE(review): hard-coded credentials — move to configuration.
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='chen0918', db='web')
    try:
        cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql, args)
            result = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        conn.close()
    return result
def get_one(sql, args):
    """Run *sql* with *args* and return the first row as a dict (or None).

    BUGFIX: the connection and cursor were leaked if execute() raised;
    try/finally now guarantees cleanup on every path.
    """
    # NOTE(review): hard-coded credentials — move to configuration.
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='chen0918', db='web')
    try:
        cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql, args)
            result = cursor.fetchone()
        finally:
            cursor.close()
    finally:
        conn.close()
    return result
def modify(sql, args):
    """Execute a write statement (*sql* with *args*) and commit it.

    BUGFIX: the connection and cursor were leaked if execute()/commit()
    raised; try/finally now guarantees cleanup on every path.
    """
    # NOTE(review): hard-coded credentials — move to configuration.
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='chen0918', db='web')
    try:
        cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql, args)
            conn.commit()
        finally:
            cursor.close()
    finally:
        conn.close()
|
2,373 | b61bb47f3e059c607447cea92ce1712825735822 | # -*- coding:utf-8 -*-
from src.Client.Conf.config import *
class SaveConfigFile():
    """Persist single key/value updates into an INI configuration file.

    Errors are swallowed and appended to data/wrongMessage.dat (the
    original best-effort behaviour is preserved).
    """
    def __init__(self, fileName='../conf/main.ini'):
        # ConfigParser comes from the star-import in src.Client.Conf.config.
        self.config = ConfigParser.ConfigParser()
        self.fileName = fileName

    def saveConfigFile(self, configMainName, configSubName, value):
        """Write *value* under section *configMainName*, option *configSubName*.

        Returns None (also when the section/option names are missing).
        """
        try:
            # Defensive: nothing to do without a section and option name.
            if configMainName is None or configSubName is None:
                return None
            # Read-modify-write the configuration file.
            self.config.read(self.fileName)
            self.config.set(configMainName, configSubName, value)
            # BUGFIX: the file was previously opened "r+" and never truncated
            # or closed, so stale bytes remained whenever the rewritten
            # content was shorter.  "w" truncates and the with-block closes.
            with open(self.fileName, "w") as configFile:
                self.config.write(configFile)
            # Debug trace.
            if DEBUG and SYSTEM_TOOLS_DEBUG:
                print('{SYS}{MISSION_DEBUG} config has been save in file successfully')
        except Exception as e:
            # Timestamp for the error record.
            currentTime = str(
                datetime.datetime.strptime(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()), '%Y-%m-%d-%H-%M-%S'))
            # Structured error record, one per line in the log file.
            wrongMessage = {
                '|currentTime': currentTime,
                '|file': 'SystemTools-ConfFileRead-saveConfigFile',
                '|configMainName': configMainName,
                '|configSubName': configSubName,
                '|value': value,
                '|wrongMessage': str(e)
            }
            with open('data/wrongMessage.dat', 'a+') as wrongFile:
                wrongFile.write(str(wrongMessage))
                wrongFile.write('\n')
# Configuration-file write smoke test (machine-specific path).
# NOTE(review): the path literal contains unescaped backslashes (\p, \m …).
if __name__ == '__main__':
    s = SaveConfigFile(fileName='F:\python17\pythonPro\MemortAssit\conf\main.ini')
    print(s.saveConfigFile('VERSION', 'version', 'v1.0'))
|
2,374 | ff67ef77958e78335dc1dc2c7e08bf42998387c6 |
# RTTY/Baudot line states: SPACE (0) and MARK (1).
SPACE = 0
MARK = 1
def frame_to_bit_chunks(frame_values, baud_rate=45.45, start_bit=SPACE, stop_bit=MARK):
    """Turn per-frame tone magnitudes into decoded data-bit chunks.

    Pipeline: magnitudes -> binary states -> run lengths -> per-bit
    values -> framed 5-bit data chunks.
    """
    pipeline = frame_to_binary_values(frame_values)
    pipeline = binary_values_to_bit_duration(pipeline)
    pipeline = bit_duration_to_bit_values(pipeline, baud_rate)
    return bit_values_to_bit_chunks(pipeline, start_bit, stop_bit)
def frame_to_binary_values(frame_values, threshold=1.0):
    """Yield (state, time) pairs deciding MARK/SPACE per frame.

    The previous decision is kept (hysteresis) when neither tone
    magnitude exceeds the other by the *threshold* factor.
    """
    state = SPACE  # last confirmed line state
    for mark_value, space_value, time in frame_values:
        # MARK wins when its magnitude clearly dominates SPACE's ...
        if mark_value > space_value * threshold:
            state = MARK
        # ... and vice versa; otherwise the old state is kept.
        if space_value > mark_value * threshold:
            state = SPACE
        yield (state, time)
def binary_values_to_bit_duration(binary_values):
    """Run-length encode (value, time) samples into (value, duration) pairs."""
    # Value of the run currently being measured, and when it began.
    prev_value = SPACE
    prev_start = 0
    # Most recently seen sample (also used for the final flush).
    value = SPACE
    time = 0
    for value, time in binary_values:
        if value != prev_value:
            # The previous run ended here: emit it, start timing the new one.
            yield (prev_value, time - prev_start)
            prev_value = value
            prev_start = time
    # The last run is never closed inside the loop — flush it now.
    yield (value, time - prev_start)
def bit_duration_to_bit_values(bit_duration_values, baud_rate=45.45, minimum_bit_width=0.25):
    """Resample (value, duration) runs into per-bit (value, width) pairs.

    Runs shorter than ``minimum_bit_width`` bits are carried over into the
    following run; runs longer than one bit are emitted one bit at a time.
    """
    one_bit = 1 / baud_rate               # seconds per bit
    threshold = one_bit * minimum_bit_width
    pending = 0                           # accumulated, not-yet-emitted time
    for value, run_length in bit_duration_values:
        pending += run_length
        while pending > threshold:
            # Emit at most one full bit per iteration.
            emitted = pending if pending < one_bit else one_bit
            yield (value, emitted / one_bit)
            pending -= emitted
def bit_values_to_bit_chunks(bit_values, start_bit=SPACE, stop_bit=MARK, lsb_on_left=True):
    """Extract 5-bit data chunks from a stream of per-bit values.

    bit_index|role
    ---------|----------
        0    |start bit
      1 - 5  |data bits
        6    |stop bit

    Only bit_index 1-5 (the data bits) are emitted, joined as a string
    of '0'/'1' characters per chunk.
    """
    # Previous bit value; assume a start bit before the stream begins.
    previous_bit_value = start_bit
    # Position within the current character frame.
    # None until the first stop->start transition fixes the timing.
    bit_index = None
    # Data bits collected for the current character.
    chunk = []
    for current_bit_value, _ in bit_values:
        if bit_index is None:
            # Initial state: frame timing is still unknown.
            if previous_bit_value == stop_bit and current_bit_value == start_bit:
                # First stop-bit -> start-bit transition: timing is locked.
                bit_index = 0
        else:
            # Timing known: consume the next bit of the frame.
            bit_index += 1
            if bit_index <= 5:
                # Bits 1-5 are data bits.
                # This if supports both bit orders, 12345 and 54321.
                if lsb_on_left:
                    # list.append adds at the end
                    chunk.append(current_bit_value)
                else:
                    # list.insert(0) adds at the front
                    chunk.insert(0, current_bit_value)
            else:
                # Data bits done.
                if bit_index == 6:
                    # Stop bit expected; emit the collected chunk regardless.
                    yield ''.join(str(bit) for bit in chunk)
                    # Reset the accumulator.
                    chunk.clear()
                if previous_bit_value == stop_bit and current_bit_value == start_bit:
                    # A new start bit arrived: restart the frame.
                    bit_index = 0
        previous_bit_value = current_bit_value
|
2,375 | 6f951815d0edafb08e7734d0e95e6564ab1be1f7 | from __future__ import unicode_literals
import frappe, json
def execute():
    """Backfill insert_after/idx on Custom Fields from saved '_idx' orders."""
    prop_setters = frappe.get_all('Property Setter', filters={'property': '_idx'},
        fields = ['doc_type', 'value'])
    for ps in prop_setters:
        fields = frappe.get_all('Custom Field',
            filters = {'dt': ps.doc_type}, fields=['name', 'fieldname'])
        if not fields:
            continue
        order = json.loads(ps.value)
        for cf in fields:
            if cf.fieldname in order:
                position = order.index(cf.fieldname)
                # First field has no predecessor.
                previous = "" if position == 0 else order[position - 1]
            else:
                # Unknown fields are appended after the last ordered one.
                previous = order[-1]
                position = len(order)
            frappe.db.set_value('Custom Field', cf.name, 'insert_after', previous)
            frappe.db.set_value('Custom Field', cf.name, 'idx', position)
|
2,376 | bdf819d8a5bc3906febced785c6d95db7dc3a603 | import math
def solution(X, Y, D):
    """Return the minimal number of D-length jumps from X to reach at least Y."""
    distance = Y - X
    if distance == 0:
        return 0
    return math.ceil(distance / D)
|
2,377 | cc74163d5dbcc2b2ca0fe5222692f6f5e45f73fe | import os
from pathlib import Path
import shutil
from ament_index_python.packages import get_package_share_directory, get_package_prefix
import launch
import launch_ros.actions
def generate_launch_description():
    """Build a LaunchDescription that spawns the cart SDF model in Gazebo."""
    # Resolve the cart model shipped with the crs_support package.
    cart_sdf = os.path.join(get_package_share_directory('crs_support'), 'sdf', 'cart.sdf')
    # gazebo_ros spawn_entity.py places the entity at the given pose.
    cart_spawner = launch_ros.actions.Node(
        node_name='spawn_node',
        package='gazebo_ros',
        node_executable='spawn_entity.py',
        arguments=['-entity', 'cart', '-x', '0', '-y', '0.2', '-z', '0.05', '-file', cart_sdf])
    return launch.LaunchDescription([
        cart_spawner
    ])
|
2,378 | 4100415b0df52e8e14b00dd66c7c53cd46c0ea6e | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import re
def main():
    """Read a string of A/B/C, then print the pair count described below.

    'BC' pairs are collapsed to a placeholder; within each segment free of
    remaining B/C characters, every (A, later-character) pair is counted.
    """
    s = input().strip().replace('BC', 'X')
    total = 0
    for segment in re.split(r'[BC]+', s):
        a_positions = [i for i, ch in enumerate(segment) if ch == 'A']
        # Characters after each 'A', minus the A-after-A double counting.
        trailing = sum(len(segment) - 1 - pos for pos in a_positions)
        total += trailing - sum(range(len(a_positions)))
    print(total)


if __name__=='__main__':
    main()
|
2,379 | 65264f52f641b67c707b6a827ecfe1bf417748e8 | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Ui_MainWindow(object):
    """Qt-Designer-style UI builder for the digit-drawing main window."""
    def setupUi(self, MainWindow):
        """Create the central widget and the OK button on *MainWindow*."""
        # Window chrome: title object, icon and fixed initial size.
        MainWindow.setObjectName("MainWindow")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        MainWindow.setCentralWidget(self.centralwidget)
        MainWindow.setWindowIcon(QIcon('data/nn.png'))
        MainWindow.resize(800, 800)
        # OK push button with icon, styled background and fixed geometry.
        self.OK = QtWidgets.QPushButton(self.centralwidget)
        self.OK.setStyleSheet("background-color:#18BDFF; border-radius: 5px;");
        self.OK.setIcon(QIcon("data/ok.png"))
        self.OK.setIconSize(QSize(40, 40))
        self.OK.setGeometry(QtCore.QRect(375, 820, 150, 45))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.OK.setFont(font)
        self.OK.setAutoFillBackground(True)
        self.OK.setObjectName("OK")
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Apply translatable window title and button texts."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Drawing digits"))
        self.OK.setText(_translate("MainWindow", " OK"))
|
2,380 | 181e9ac4acf0e69576716f3589359736bfbd9bef | """
Ниже на четырёх языках программирования записана программа, которая вводит натуральное число 𝑥,
выполняет преобразования, а затем выводит результат. Укажите наименьшее значение 𝑥,
при вводе которого программа выведет число 10.
Тупо вручную ввёл. Крч 9. Хз, как на экзамене делать))
"""
x = int(input())
a = 3 * x + 23
b = 3 * x - 17
# Subtraction-based Euclid loop: terminates with a == b == gcd(3x+23, 3x-17).
while a != b:
    if a > b:
        a -= b
    else:
        b -= a
print(a)
print('---')

# Brute-force search: which inputs in [7, 100) make the program print 10?
number = 7
while number < 100:
    x = number
    a = 3 * x + 23
    b = 3 * x - 17
    while a != b:
        if a > b:
            a -= b
        else:
            b -= a
    if a == 10:
        print(x)
    # BUGFIX: the original incremented x instead of number, so the outer
    # loop never advanced and spun forever at number == 7.
    number += 1
|
2,381 | e7c454b2bf6cf324e1e318e374e07a83812c978b | a = ord(input().rstrip())
# Uppercase ASCII letters occupy code points 65-90, below ord('a') == 97,
# so any code point under 97 is reported as uppercase.
if a < 97:
    print('A')
else:
    print('a')
'''
ord(A)=65
ord(Z)=90
ord(a)=97
ord(z)=122
'''
|
2,382 | 0e3c6e14ff184401a3f30a6198306a17686e6ebe | #!python3
"""
I1. a
Ex1
5
1 3 5
2 1 4
3 2 4
4 1 5
5 2 3
"""
# Read n vertices; line i lists the two neighbours of vertex i (1-based).
n = int(input().strip())
t = [None] * n
for i in range(n):
    x,x1 = [int(i) for i in input().strip().split(' ')]
    x,x1 = x-1, x1-1
    t[i] = [x, x1]
# Walk starting from vertex 0, appending two vertices per step.
# NOTE(review): appears to reconstruct a traversal order of the structure
# described in the header example — confirm against the problem statement.
res = [0]
while len(res) < n:
    a = res[-1]
    b = t[a][0]
    c = t[a][1]
    # Order the pair so that c is NOT a neighbour of b.
    if c not in t[b]:
        b, c = c, b
    res += [b, c]
print(' '.join(str(i+1) for i in res))
|
2,383 | ee4fd4aef7ecdfbc8ff53028fdedc558814f46a7 | #!/usr/bin/env python3
import sql_manager
import Client
from getpass import getpass
from settings import EXIT_CMD
def main_menu():
    """Guest REPL: register, login (enters logged_menu), help, exit."""
    print("""Welcome to our bank service. You are not logged in.
    Please register or login""")
    while True:
        command = input("guest@hackabank$ ")
        if command == "register":
            username = input("Enter your username: ")
            # getpass hides the typed password.
            password = getpass(prompt="Enter your password: ")
            sql_manager.register(username, password)
            # NOTE(review): "Successfull" typo in user-facing text.
            print("Registration Successfull")
        elif command == "login":
            username = input("Enter your username: ")
            password = getpass(prompt="Enter your password: ")
            logged_user = sql_manager.login(username, password)
            if logged_user:
                logged_menu(logged_user)
            else:
                print("Login failed")
                continue
        elif command == "help":
            print("""login - for logging in!
    register - for creating new account!
    exit - for closing program!""")
        elif command == "exit":
            break
        else:
            print("Not a valid command")
            continue
def logged_menu(logged_user):
    """Authenticated REPL for *logged_user*: info, password/message ops."""
    print("Welcome you are logged in as: " + logged_user.get_username())
    while True:
        command = input("{}@hackabank# ".format(logged_user.get_username()))
        if command == "info":
            print("You are: " + logged_user.get_username())
            print("Your id is: " + str(logged_user.get_id()))
            print("Your balance is:" + str(logged_user.get_balance()) + "$")
        elif command == "changepass":
            # NOTE(review): new password is read with input(), not getpass.
            new_pass = input("Enter your new password: ")
            sql_manager.change_pass(new_pass, logged_user)
        elif command == "change-message":
            new_message = input("Enter your new message: ")
            sql_manager.change_message(new_message, logged_user)
        elif command == "show-message":
            print(logged_user.get_message())
        elif command == "help":
            print("info - for showing account info")
            print("changepass - for changing passowrd")
            print("change-message - for changing users message")
            print("show-message - for showing users message")
        elif command in EXIT_CMD:
            break
        else:
            print("Not such a command!")
            continue
|
2,384 | 330df4f194deec521f7db0389f88171d9e2aac40 | """
Author: Eric J. Ma
Purpose: This is a set of utility variables and functions that can be used
across the PIN project.
"""
import numpy as np
from sklearn.preprocessing import StandardScaler
BACKBONE_ATOMS = ["N", "CA", "C", "O"]
AMINO_ACIDS = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"X",
"Y",
"Z",
]
BOND_TYPES = [
"hydrophobic",
"disulfide",
"hbond",
"ionic",
"aromatic",
"aromatic_sulphur",
"cation_pi",
"backbone",
"delaunay",
]
RESI_NAMES = [
"ALA",
"ASX",
"CYS",
"ASP",
"GLU",
"PHE",
"GLY",
"HIS",
"ILE",
"LYS",
"LEU",
"MET",
"ASN",
"PRO",
"GLN",
"ARG",
"SER",
"THR",
"VAL",
"TRP",
"TYR",
"GLX",
"UNK",
]
HYDROPHOBIC_RESIS = [
"ALA",
"VAL",
"LEU",
"ILE",
"MET",
"PHE",
"TRP",
"PRO",
"TYR",
]
DISULFIDE_RESIS = ["CYS"]
DISULFIDE_ATOMS = ["SG"]
IONIC_RESIS = ["ARG", "LYS", "HIS", "ASP", "GLU"]
POS_AA = ["HIS", "LYS", "ARG"]
NEG_AA = ["GLU", "ASP"]
AA_RING_ATOMS = dict()
AA_RING_ATOMS["PHE"] = ["CG", "CD", "CE", "CZ"]
AA_RING_ATOMS["TRP"] = ["CD", "CE", "CH", "CZ"]
AA_RING_ATOMS["HIS"] = ["CG", "CD", "CE", "ND", "NE"]
AA_RING_ATOMS["TYR"] = ["CG", "CD", "CE", "CZ"]
AROMATIC_RESIS = ["PHE", "TRP", "HIS", "TYR"]
CATION_PI_RESIS = ["LYS", "ARG", "PHE", "TYR", "TRP"]
CATION_RESIS = ["LYS", "ARG"]
PI_RESIS = ["PHE", "TYR", "TRP"]
SULPHUR_RESIS = ["MET", "CYS"]
ISOELECTRIC_POINTS = {
"ALA": 6.11,
"ARG": 10.76,
"ASN": 10.76,
"ASP": 2.98,
"CYS": 5.02,
"GLU": 3.08,
"GLN": 5.65,
"GLY": 6.06,
"HIS": 7.64,
"ILE": 6.04,
"LEU": 6.04,
"LYS": 9.74,
"MET": 5.74,
"PHE": 5.91,
"PRO": 6.30,
"SER": 5.68,
"THR": 5.60,
"TRP": 5.88,
"TYR": 5.63,
"VAL": 6.02,
"UNK": 7.00, # unknown so assign neutral
"ASX": 6.87, # the average of D and N
"GLX": 4.35, # the average of E and Q
}
# Standardize the isoelectric points (zero mean, unit variance) so they can
# be used directly as model features; one scalar per residue name.
scaler = StandardScaler()
scaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))
ISOELECTRIC_POINTS_STD = dict()
for k, v in ISOELECTRIC_POINTS.items():
    ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
MOLECULAR_WEIGHTS = {
"ALA": 89.0935,
"ARG": 174.2017,
"ASN": 132.1184,
"ASP": 133.1032,
"CYS": 121.1590,
"GLU": 147.1299,
"GLN": 146.1451,
"GLY": 75.0669,
"HIS": 155.1552,
"ILE": 131.1736,
"LEU": 131.1736,
"LYS": 146.1882,
"MET": 149.2124,
"PHE": 165.1900,
"PRO": 115.1310,
"SER": 105.0930,
"THR": 119.1197,
"TRP": 204.2262,
"TYR": 181.1894,
"VAL": 117.1469,
"UNK": 137.1484, # unknown, therefore assign average of knowns
"ASX": 132.6108, # the average of D and N
"GLX": 146.6375, # the average of E and Q
}
# Standardize molecular weights with the same scaler instance (refit here).
# BUGFIX: MOLECULAR_WEIGHTS_STD was initialised twice; the duplicate
# assignment before the fit has been removed.
scaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))
MOLECULAR_WEIGHTS_STD = dict()
for k, v in MOLECULAR_WEIGHTS.items():
    MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
|
2,385 | 4f3908e12102cfd58737952803c710772e960b0e | animal = 'cat'
def f():
    # Rebind the module-level name instead of creating a local variable.
    global animal
    animal = 'dog'
    print('local_scope:', animal)
    # locals() is empty here: `animal` is global and f defines no locals.
    print('local:', locals())
# Demo: after f() runs, the module-level binding has changed to 'dog'.
f()
print('global_scope:', animal)
print('global:', locals())
|
2,386 | 5df42a024e1edbe5cc977a814efe580db04b8b76 | import struct
def parse(message):
    """Parse a raw 103-byte logger packet into an IGENMessage."""
    return IGENMessage.from_bytes(message)
class IGENMessage(object):
    """Parsed 103-byte status packet from an IGEN solar inverter logger.

    Electrical values are decoded from fixed-point integers (tenths or
    hundredths) into floats; see from_bytes for the exact layout.
    """
    def __init__(self):
        # Logger identification and environment
        self.serial = None            # 14-char ASCII logger serial number
        self.temperature = None       # inverter temperature (deg C)
        # PV inputs: voltage (V) and current (A) per string
        self.pv1 = 0
        self.pv2 = 0
        self.pv3 = 0
        self.pa1 = 0
        self.pa2 = 0
        self.pa3 = 0
        # AC outputs: voltage (V), current (A) and power (W) per phase
        self.ov1 = 0
        self.ov2 = 0
        self.ov3 = 0
        self.oa1 = 0
        self.oa2 = 0
        self.oa3 = 0
        self.oHz = 0                  # grid frequency (Hz)
        self.op1 = 0
        self.op2 = 0
        self.op3 = 0
        # Production counters
        self.energy_today = None      # kWh produced today
        self.energy_overall = None    # kWh produced lifetime
        self.operational_hours = None

    @classmethod
    def from_bytes(cls, data):
        """Decode *data* (exactly 103 bytes) into a new IGENMessage.

        Raises ValueError on a wrong-sized packet.  BUGFIX: previously a
        bare Exception was raised; ValueError is more precise and remains
        backward compatible with callers catching Exception.
        """
        if len(data) != 103:
            raise ValueError('Packet should be exactly 103 bytes')
        self = cls()
        # 17-byte header skipped; serial; then big-endian fixed-point words
        # with pad bytes separating the trailing counters.
        parsed = struct.unpack('!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)
        self.serial = parsed[0].decode('ascii')
        self.temperature = parsed[1] / 10
        self.pv1 = parsed[2] / 10
        self.pv2 = parsed[3] / 10
        self.pv3 = parsed[4] / 10
        self.pa1 = parsed[5] / 10
        self.pa2 = parsed[6] / 10
        self.pa3 = parsed[7] / 10
        self.oa1 = parsed[8] / 10
        self.oa2 = parsed[9] / 10
        self.oa3 = parsed[10] / 10
        self.ov1 = parsed[11] / 10
        self.ov2 = parsed[12] / 10
        self.ov3 = parsed[13] / 10
        self.oHz = parsed[14] / 100
        self.op1 = parsed[15]
        self.op2 = parsed[16]
        self.op3 = parsed[17]
        self.energy_today = parsed[18] / 100
        self.energy_overall = parsed[19] / 10
        self.operational_hours = parsed[20]
        return self

    def outputs(self):
        """Return [(voltage, current, power)] for the three AC phases."""
        return [
            (self.ov1, self.oa1, self.op1),
            (self.ov2, self.oa2, self.op2),
            (self.ov3, self.oa3, self.op3)
        ]

    def inputs(self):
        """Return [(voltage, current)] for the three PV input channels."""
        return [
            (self.pv1, self.pa1),
            (self.pv2, self.pa2),
            (self.pv3, self.pa3)
        ]

    def report(self):
        """Pretty-print the full packet contents to stdout."""
        print("Logger: {}".format(self.serial))
        print("Temperature: {} degrees celcius".format(self.temperature))
        print()
        print("Inputs: ")
        print("  Channel 1: {:6.2f} V {:5.2f} A".format(self.pv1, self.pa1))
        print("  Channel 2: {:6.2f} V {:5.2f} A".format(self.pv2, self.pa2))
        print("  Channel 3: {:6.2f} V {:5.2f} A".format(self.pv3, self.pa3))
        print()
        print("Outputs: ({} Hz)".format(self.oHz))
        print("  L1: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov1, self.oa1, self.op1))
        print("  L2: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov2, self.oa2, self.op2))
        print("  L3: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov3, self.oa3, self.op3))
        print()
        print("Energy today:      {:8.1f} kWh".format(self.energy_today))
        print("Energy overall:    {:8.1f} kWh".format(self.energy_overall))
        print("Operational hours: {}".format(self.operational_hours))

    def __repr__(self):
        total_power = self.op1 + self.op2 + self.op3
        return "<IGENMessage {} watt ({} kWh today)>".format(total_power, self.energy_today)
|
2,387 | 64fb006ea5ff0d101000dd4329b3d957a326ed1a | def test(name,message):
print("用户是:" , name)
print("欢迎消息是:",message)
# Unpack the list into the two positional parameters of test().
my_list = ['孙悟空','欢迎来疯狂软件']
test(*my_list)
print('*****')
# ###########################
def foo(name,*nums):
    """Show how the positional *name* and varargs *nums* are bound."""
    print(f"name参数: {name}")
    print(f"nums参数: {nums}")
my_tuple = (1,2,3)
# Unpacked: 'fkit' binds to name, the tuple elements fill *nums.
foo('fkit',*my_tuple)
print('********')
# Unpacked without name: the first element binds to name, the rest to *nums.
foo(*my_tuple)
print('*******')
# Not unpacked: the whole tuple binds to name, *nums stays empty.
foo(my_tuple)
#############################
def bar(book,price,desc):
    """Print a book title with its price and description."""
    print(f"{book} 这本书的价格是: {price}")
    print(f"描述信息是: {desc}")
print('********')
my_dict = {'price':89,'book':'疯狂python讲义','desc':'这是一本系统全面的python学习图书'}
bar(**my_dict)
print('*******')
# Without ** reverse-collection, the direct call below fails with:
# TypeError: bar() missing 2 required positional arguments: 'price' and 'desc'
bar(my_dict) |
2,388 | 57027cd638a01a1e556bcde99bcbe2a3b2fa0ef8 | # -*- coding:utf-8 -*-
import easygui as eg
import time as tm
import numpy as np
import thread
import os
from urllib2 import urlopen, Request
import json
from datetime import datetime, timedelta
URL_IFENG='http://api.finance.ifeng.com/akmin?scode=%s&type=%s'
NUM_PER_THREAD=100#单线程监控的股票数
SCAN_INTERVAL=10
FILE_PATH=u'.\export'
END_HOUR=24
MAX_DATES=100
MSG_HEAD=u'\n 板块 代码 开盘价 均价 收盘价\n'
KDATA_ONE_DAY={'5':48,'15':16,'30':8,'60':4}
K_MIN_LABELS=['5', '15', '30', '60']
cross_list={}
def cross_monitor(codes,ktype,avn,thread_no,retry=3):
    """Worker thread: poll ifeng K-line data for *codes*, record crossings."""
    global cross_list
    tmp_codes=[]
    for code in codes:# augment each entry to [0] code + [1] board + [2] latest bar time
        tmp_code=list(code)
        tmp_code.append(u'0')
        tmp_codes.append(tmp_code)
    while datetime.now().hour<END_HOUR:
        start=tm.clock()
        for code in tmp_codes:
            for _ in range(retry):
                try:
                    url=URL_IFENG%(code[0],ktype)
                    request=Request(url)
                    lines=urlopen(request,timeout=3).read()
                    js=json.loads(lines)
                    data=js['record'][-avn:]
                    # Only process when a new bar has appeared.
                    if data[-1][0]!=code[2]:
                        print u'发现新数据'
                        code[2]=data[-1][0]
                        mean=0
                        for j in range(avn):
                            mean=mean+float(data[-(j+1)][3])
                        mean=mean/avn
                        price_open=float(data[-2][3])
                        price_close=float(data[-1][3])
                        # Crossing: the latest bar moved from below to above the mean.
                        if price_open<=mean and mean<=price_close:
                            cross_list[code[1]][u'cross_codes'].append([code[0][2:8],price_open,mean,price_close])
                except Exception as e:
                    print code,u'数据处理异常,错误信息',e
                else:
                    break
        finish=tm.clock()
        print u'线程',thread_no,u'数据获取结束,总耗时',finish-start
        tm.sleep(20)
# Pop up a notification window listing newly detected crossings.
def showcross():
    """Show an easygui alert for stocks whose K-line crossed above the mean."""
    global cross_list
    msg=MSG_HEAD
    for board, lis in cross_list.iteritems():
        new_num=len(lis[u'cross_codes'])
        # Only report entries added since the last alert.
        if lis[u'cross_num']<new_num:
            msg=msg+u'============================================\n'
            for code in lis[u'cross_codes'][lis[u'cross_num']:new_num]:
                msg=msg+'['+board+u'] '+code[0]+' '+str(code[1])+' '+str(code[2])+' '+str(code[3])+'\n'
            lis[u'cross_num']=new_num
    if msg!=MSG_HEAD:
        eg.msgbox(msg=msg,title=u'发现K线上穿均线的股票',ok_button=u'知道了')
        # Append the alert to the log file (GBK-encoded).
        try:
            log=open('log.txt','a')
            log.write('\n'+datetime.now().isoformat(' '))
            log.write(msg.encode('gbk'))
        except:
            eg.msgbox(u'写日志失败')
        finally:
            log.close()
    return None
if __name__ == "__main__":
    #code=raw_input(u'code:')
    total_codes=0
    avn=0
    codes=[]
    ktype=eg.choicebox(msg=u'请选择k线周期', choices=K_MIN_LABELS)
    while(avn<=1):
        avn=eg.integerbox(msg=u'请输入均线天数,范围在1-500之间', default=10, upperbound=500)
    try:
        dir_list=os.listdir(FILE_PATH)
    except:
        eg.msgbox(u'查找数据文件出现异常')
        exit()
    for dir_name in dir_list:
        # Check whether the entry is a board sub-directory.
        path_test=os.path.join(FILE_PATH,dir_name)
        if os.path.isdir(path_test):
            cross_list[dir_name]={u'cross_num':0,u'cross_codes':[]}
            try:
                file_list=os.listdir(path_test)
            except:
                eg.msgbox(u'查找数据文件出现异常')
            # Build sz/sh-prefixed codes from exported data file names.
            for file_name in file_list:
                if file_name[0:2]=='SZ':
                    codes.append([u'sz'+file_name[3:9],dir_name])
                    total_codes=total_codes+1
                elif file_name[0:2]=='SH':
                    codes.append([u'sh'+file_name[3:9],dir_name])
                    total_codes=total_codes+1
    if total_codes==0:
        eg.msgbox(u'没有发现数据文件')
        exit()
    try:
        # Shard the code list across worker threads, NUM_PER_THREAD each.
        k=0
        i=0
        while k<total_codes:
            if (k+NUM_PER_THREAD)>=total_codes:
                thread.start_new_thread(cross_monitor,(codes[k:],ktype,avn,i,))
            else:
                thread.start_new_thread(cross_monitor,(codes[k:k+NUM_PER_THREAD],ktype,avn,i,))
            i=i+1
            k=k+NUM_PER_THREAD
    except:
        eg.msgbox(msg=u'创建监控线程失败')
        exit()
    while datetime.now().hour<END_HOUR:# keep polling until END_HOUR
        showcross()
        tm.sleep(SCAN_INTERVAL)
    eg.msgbox(msg=u'闭市了!')
|
2,389 | 67b101df690bbe9629db2cabf0060c0f2aad9722 | """
Type data Dictionary hanya sekedar menghubungkan KEY dan VALUE
KVP = KEY VALUE PAIR
"""
# A dictionary simply maps KEYs to VALUEs (key-value pairs).
kamus = {}
kamus['anak'] = 'son'
kamus['istri'] = 'wife'
kamus['ayah'] = 'father'
print(kamus)
print(kamus['ayah'])
print('\nData ini dikirimkan server gojek, memberikan info driver di sekitar pemakai aplikasi')
data_server_gojek = {
    'tanggal': '2020-10-27',
    'driver_list': [ # driver_list is a list whose elements are dicts with several attributes
        {'nama': 'Eko', 'jarak': 10},
        {'nama': 'Dwi', 'jarak': 100},
        {'nama': 'Tri', 'jarak': 1000}
    ]
}
print(data_server_gojek)
print(f"Driver di sekitar sini {data_server_gojek['driver_list']}")
print(f"Driver #1 {data_server_gojek['driver_list'][0]}")
print(f"Driver #3 {data_server_gojek['driver_list'][2]}")
print('\nCara mengambil data jarak terdekat')
print(f"jarak driver terdekat {data_server_gojek['driver_list'][0]['jarak']} meters")
|
2,390 | 755eeaf86ebf2560e73869084030a3bfc89594f6 | # Author: Omkar Sunkersett
# Purpose: To fetch SPP data and update the database
# Summer Internship at Argonne National Laboratory
import csv, datetime, ftplib, MySQLdb, os, time
class SPP():
    """FTP scraper: caches SPP 'OR' offer files dated in [start_dt, end_dt]."""
    def __init__(self, server, path, start_dt, end_dt, prog_dir):
        # Full local paths of every file downloaded (used by __str__).
        self.files_cached = []
        try:
            self.ftp_handle = ftplib.FTP(server)
            self.ftp_handle.login()
            self.path_name = path
            self.start_dt = datetime.datetime.strptime(start_dt, "%m-%d-%Y")
            self.end_dt = datetime.datetime.strptime(end_dt, "%m-%d-%Y")
            self.prog_dir = prog_dir
        except Exception as e:
            print (str(e))
    def fetch_files(self, pres_wd, dir_wd):
        """Recurse into *dir_wd* under *pres_wd*, downloading matching files."""
        try:
            try:
                # NOOP probes whether the FTP session is still alive;
                # on failure a fresh anonymous session is opened.
                self.ftp_handle.voidcmd("NOOP")
            except Exception as e:
                print (str(e))
                self.ftp_handle = ftplib.FTP("pubftp.spp.org")
                self.ftp_handle.login()
            self.ftp_handle.cwd(pres_wd.replace('\\', '/') + '/' + dir_wd)
            # Entries without a dot are treated as sub-directories.
            dir_lst = [x for x in self.ftp_handle.nlst() if '.' not in x]
            if dir_lst == []:
                # Leaf directory: cache in-range 'OR' files locally.
                files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
                if len(files_lst) > 0:
                    if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
                        os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
                    os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
                    for file_name in files_lst:
                        print (os.getcwd() + '\\' + file_name)
                        self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
                        self.files_cached.append(os.getcwd() + '\\' + file_name)
                    os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd)
                self.ftp_handle.cwd('..')
            else:
                # Mixed directory: cache matching files, then recurse.
                files_lst = [x for x in self.ftp_handle.nlst() if '.' in x and x.split('-')[1] == 'OR' and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") >= self.start_dt and datetime.datetime.strptime(x.split('-')[3][:8], "%Y%m%d") <= self.end_dt]
                if len(files_lst) > 0:
                    if os.path.isdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd) == False:
                        os.makedirs(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
                    os.chdir(self.prog_dir + '\\cache\\spp' + pres_wd + '\\' + dir_wd)
                    for file_name in files_lst:
                        print (os.getcwd() + '\\' + file_name)
                        self.ftp_handle.retrbinary("RETR " + file_name, open(file_name, 'wb').write)
                        self.files_cached.append(os.getcwd() + '\\' + file_name)
                for each_dir in dir_lst:
                    self.fetch_files(self.ftp_handle.pwd().replace('/', '\\'), each_dir)
                self.ftp_handle.cwd('..')
        except Exception as e:
            print (str(e))
    def __str__(self):
        """Close the FTP session, write a cache manifest and return a summary."""
        try:
            self.ftp_handle.quit()
            os.chdir(self.prog_dir + '\\cache\\spp')
            fwrite = open(self.path_name[1:-1].replace('\\', '-') + '.txt', 'w')
            fwrite.write('File(s) cached are as follows:\n')
            for file_name in self.files_cached:
                fwrite.write(file_name + '\n')
            fwrite.close()
            os.chdir(self.prog_dir)
            return ("\nFile(s) cached: " + ', '.join(self.files_cached) + '\n')
        except Exception as e:
            print (str(e))
def etl_file_data(cache_file):
    """Load every CSV listed in the manifest *cache_file* into MySQL.

    For each row: resolve/insert the offer in offer_base, create the
    interval_meta record, then upsert the AS-product value into
    spp_results via the xml_item_map column mapping.
    """
    try:
        fread = open(cache_file, 'r')
        # Manifest lines ending in '.csv' are the files to process.
        flines = [x.rstrip('\n') for x in fread.readlines() if x.endswith('.csv\n')]
        fread.close()
        cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
        cursor = cnx.cursor()
        cursor.execute("SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'SPP'")
        mkt_id = cursor.fetchone()[0]
        i = 1
        for fname in flines:
            print ('Current file: ' + fname + '\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')
            fread = open(fname, 'r')
            frows = csv.reader(fread, delimiter = ',')
            next(frows, None)
            # offer_base rows are cached and re-fetched only after inserts.
            offer_base_rs = []
            ins_perf = True
            for row in frows:
                if len(row) > 0 and row[2].strip() != '' and row[3].strip() != '' and row[4].strip() != '':
                    if ins_perf == True:
                        cursor.execute("SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s", (mkt_id,))
                        offer_base_rs = list(cursor.fetchall())
                        if len(offer_base_rs) > 0:
                            off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
                            if len(off_check) > 0:
                                off_id = off_check[0]
                                ins_perf = False
                            else:
                                cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
                                ins_perf = True
                                cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
                                off_id = cursor.fetchone()[0]
                        else:
                            cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
                            ins_perf = True
                            cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
                            off_id = cursor.fetchone()[0]
                    else:
                        off_check = [x for (x, y, z) in offer_base_rs if (row[2], '0') == (y, z)]
                        if len(off_check) > 0:
                            off_id = off_check[0]
                            ins_perf = False
                        else:
                            cursor.execute("INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)", (row[2], '0', "SPP", mkt_id))
                            ins_perf = True
                            cursor.execute("SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s", (row[2], '0'))
                            off_id = cursor.fetchone()[0]
                    # Market-run id comes from the file-name prefix (da / rtbm).
                    if fname.split('\\')[-1].split('-')[0].lower() == 'da':
                        mrun_id = 'DAM'
                    elif fname.split('\\')[-1].split('-')[0].lower() == 'rtbm':
                        mrun_id = 'RTBM'
                    intv_start = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S") - datetime.timedelta(hours = 1, minutes = 0)).strftime("%Y-%m-%d %H:%M:%S")
                    intv_end = (datetime.datetime.strptime(row[0], "%m/%d/%Y %H:%M:%S")).strftime("%Y-%m-%d %H:%M:%S")
                    intv_dt = intv_start[:10]
                    hr, iv = int(intv_start[11:13]), 0
                    # Synthetic interval key: offer + run + yymmddHHMM.
                    intv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]
                    cursor.execute("SELECT interval_id FROM interval_meta USE INDEX (PRIMARY) WHERE interval_id = %s", (intv_id,))
                    intvid_rs = cursor.fetchone()
                    if intvid_rs == None:
                        cursor.execute("INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))
                    cursor.execute("SELECT interval_id FROM spp_results USE INDEX (IDX_SPP_RESULTS_INTERVAL_ID) WHERE interval_id = %s", (intv_id,))
                    spp_rs = cursor.fetchone()
                    if spp_rs == None:
                        spp_rs = []
                    else:
                        spp_rs = list(spp_rs)
                    # AS-product name (CSV column 3) -> spp_results column.
                    xml_item_map = {'Capability Offer Reg-Down': 'coreg_down', 'Capability Offer Reg-Up': 'coreg_up', 'Mileage Factor Reg-Down': 'mfreg_down', 'Mileage Factor Reg-Up': 'mfreg_up', 'Mileage Offer Reg-Down': 'moreg_down', 'Mileage Offer Reg-Up': 'moreg_up', 'SPIN': 'spin_price', 'SUPP': 'supp_price'}
                    if row[3].strip() in xml_item_map.keys():
                        if len(spp_rs) > 0:
                            qry = "UPDATE spp_results SET " + xml_item_map[row[3].strip()] + " = %s WHERE interval_id = %s"
                            cursor.execute(qry, (float(row[4].strip()), intv_id))
                        else:
                            qry = "INSERT INTO spp_results (interval_id, " + xml_item_map[row[3].strip()] + ") VALUES (%s, %s)"
                            cursor.execute(qry, (intv_id, float(row[4])))
                    else:
                        print (row[3].strip() + " is a new ASProduct for the interval with interval_id: " + intv_id)
            cnx.commit()
            fread.close()
            i += 1
        cursor.close()
        cnx.close()
    except Exception as e:
        print (str(e))
def dbdt_check(mkt_name, start_dt, end_dt):
    """Validate the script's date range against what is already in the database.

    Args:
        mkt_name: Market name (e.g. "SPP"); matched case-insensitively against
            market_meta.market_name.
        start_dt: Script start date as "MM-DD-YYYY".
        end_dt: Script end date as "MM-DD-YYYY".

    Returns:
        True when start_dt is exactly one day after the latest interval_dt
        stored for the market and end_dt lies between start_dt and yesterday;
        False otherwise, including when any database or parsing error occurs
        (the original implicitly returned None on error).
    """
    try:
        print ("\nStarting the database date validation check...\n")
        cnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')
        try:
            cursor = cnx.cursor()
            try:
                cursor.execute("SELECT min(interval_dt) AS oldest_dt, max(interval_dt) AS latest_dt FROM interval_meta USE INDEX (IDX_INTERVAL_META_MARKET_ID) WHERE market_id = (SELECT DISTINCT market_id FROM market_meta USE INDEX (PRIMARY) WHERE lower(market_name) = %s)", (mkt_name.lower(),))
                rs = cursor.fetchone()
            finally:
                # Close cursor/connection even if the query fails (the original leaked
                # both on the exception path).
                cursor.close()
        finally:
            cnx.close()
        print("Database Oldest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[0], "%m-%d-%Y"))
        print("Database Latest Date (MM-DD-YYYY): " + datetime.datetime.strftime(rs[1], "%m-%d-%Y"))
        # Normalize the DB's latest date to a midnight datetime for arithmetic.
        dbdt_end = datetime.datetime.combine(rs[1], datetime.time.min)
        print("Script Start Date (MM-DD-YYYY): " + start_dt)
        # Parse the MM-DD-YYYY inputs directly instead of the original split/rejoin.
        start = datetime.datetime.strptime(start_dt, "%m-%d-%Y")
        print("Script End Date (MM-DD-YYYY): " + end_dt)
        end = datetime.datetime.strptime(end_dt, "%m-%d-%Y")
        # The script must resume exactly one day after the last loaded interval,
        # and may run at most through yesterday (today's data is incomplete).
        expected_start = dbdt_end + datetime.timedelta(days = 1)
        yesterday = datetime.datetime.combine((datetime.datetime.now() - datetime.timedelta(days = 1)).date(), datetime.time.min)
        if start == expected_start and start <= end <= yesterday:
            print ("\nThe database date validation check has completed successfully. The program will now execute...\n")
            return True
        actual_st = datetime.datetime.strftime(expected_start, "%m-%d-%Y")
        actual_ed = datetime.datetime.strftime(yesterday, "%m-%d-%Y")
        print ("\nPlease check the script start and end dates properly. The start date must be set to " + actual_st + " (MM-DD-YYYY) and the end date must be less than or equal to " + actual_ed + " (MM-DD-YYYY) and also not less than the start date.")
        return False
    except Exception as e:
        print (str(e))
        # Explicitly return False so callers always receive a boolean.
        return False
def main():
    """Entry point: run the DB date validation, then the (currently disabled) ETL steps."""
    print ("\n********** Start of the Program **********\n")
    # prog_dir is the main directory under which the CSV files will be stored
    #prog_dir = "C:\\Users\\Omkar Sunkersett\\Downloads\\markets"
    # These respective variables set the start and end dates for fetching data from the server
    #startdatetime = "MM-DD-YYYY"
    #enddatetime = "MM-DD-YYYY"
    # NOTE(review): startdatetime/enddatetime are only defined in the commented-out
    # lines above; unless they exist at module level, this call raises NameError —
    # confirm before running.
    if dbdt_check("SPP", startdatetime, enddatetime):
        # Code for fetching the CSV files from the server for historical offers
        #histoff_or = SPP("pubftp.spp.org", "/Markets/HistoricalOffers/", startdatetime, enddatetime, prog_dir)
        #histoff_or.fetch_files("/Markets/HistoricalOffers", "")
        #rint(histoff_or)
        # Code for loading the historical offer related CSV data into the not-published database for OR only
        # IMPORTANT: Make sure you have the latest backup of the database before uncommenting the below lines
        #print ("\nLoading the new data into the database...\n")
        #etl_file_data(prog_dir + "\\cache\\spp\\Markets\HistoricalOffers.txt")
        print ("\n********** End of the Program **********\n")

# Run the ETL pipeline when the module is executed.
main()
|
2,391 | 5f5e314d2d18deb12a8ae757a117ef8fbb2ddad5 | import os
# Create the output directory; fails loudly (FileExistsError) on re-run, as before.
# NOTE(review): the directory is created relative to the CWD while the files are
# opened under the absolute path "D:/six3/s/作业" — confirm these are meant to match.
os.mkdir("作业")
# FIX: use context managers so the first file handle is actually closed
# (the original only closed the second file).
with open("D:/six3/s/作业/tet.txt", 'w+') as f:
    for i in range(10):
        f.write("hello world\n")
    # Rewind and read back at most 100 characters of what was just written.
    f.seek(0)
    s = f.read(100)
print(s)
# Copy the text, character by character, into a second file.
with open("D:/six3/s/作业/tet2.txt", 'w+') as f2:
    for ch in s:
        f2.write(ch)
2,392 | b34ce3ac87a01b8e80abc3fde1c91638f2896610 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from functools import reduce
def element_wise_op(x, operation):
    """Apply *operation* to every element of array *x* in place.

    Args:
        x: a numpy array (modified in place).
        operation: a callable taking one element and returning its new value.
    """
    for i in np.nditer(x, op_flags=['readwrite']):
        # FIX: the original did `operation[i]` (indexing the callable, a TypeError);
        # the operation must be *called* with the element.
        i[...] = operation(i)
class RecurrentLayer(object):
    """A simple fully-connected recurrent layer trained with BPTT.

    State update: s_t = f(U·x_t + W·s_{t-1}), where f is the supplied activator.
    Inputs and states are column vectors of shape (dim, 1).
    """
    def __init__(self, input_dim, state_dim, activator, learning_rate):
        self.input_dim = input_dim
        self.state_dim = state_dim
        self.activator = activator
        self.learning_rate = learning_rate
        self.time = 0
        # FIX: state_list must be a Python list (the original assigned a bare
        # ndarray, so forward()'s .append() raised AttributeError). Entry 0 is
        # the all-zero initial state at time 0.
        self.state_list = [np.zeros((state_dim, 1))]
        self.W = np.random.uniform(-1e-3, 1e-3, (state_dim, state_dim))
        self.U = np.random.uniform(-1e-3, 1e-3, (state_dim, input_dim))

    def forward(self, input_vec):
        """Advance one time step using input column vector *input_vec*."""
        self.time += 1
        state = (np.dot(self.U, input_vec) + np.dot(self.W, self.state_list[-1]))
        element_wise_op(state, self.activator.forward)
        self.state_list.append(state)

    def bptt(self, sensitivity_array, activator):
        """Back-propagation through time: compute deltas, then the W gradient."""
        self.calcu_delta(sensitivity_array, activator)
        self.calcu_grad()

    def calcu_delta(self, sensitivity_array, activator):
        """Build delta_list; entry k is the error term of the state at time k."""
        self.delta_list = []
        for i in range(self.time):
            # FIX: np.zeros takes the shape as a tuple (the original passed two
            # positional arguments, a TypeError).
            self.delta_list.append(np.zeros((self.state_dim, 1)))
        self.delta_list.append(sensitivity_array)
        for k in range(self.time - 1, 0, -1):
            self.calcu_delta_k(k, activator)

    def calcu_delta_k(self, k, activator):
        """Compute delta at time k from delta at time k+1.

        delta_k = diag(f'(s_{k+1})) · W^T · delta_{k+1}
        NOTE(review): assumes activator.backward maps the stored layer *output*
        to the activation derivative (e.g. tanh: y -> 1 - y^2) — confirm against
        the activator implementation.
        """
        state = self.state_list[k+1].copy()
        # FIX: apply backward to a copy (the original mutated the saved state)
        # and write the result into delta_list[k] using delta_list[k+1]
        # (the original overwrote state_list[k] with a mixed-up expression).
        element_wise_op(state, activator.backward)
        self.delta_list[k] = np.dot(
            np.dot(self.delta_list[k+1].T, self.W),
            np.diag(state[:, 0])).T

    def calcu_grad(self):
        """Accumulate the W gradient over all time steps into self.grad."""
        self.grad_list = []
        for t in range(self.time + 1):
            self.grad_list.append(np.zeros((self.state_dim, self.state_dim)))
        for t in range(self.time, 0, -1):
            self.calcu_grad_t(t)
        # FIX: the original reduced onto self.grad before it was ever defined
        # (NameError); start from a zero matrix instead.
        self.grad = reduce(lambda a, b: a + b, self.grad_list,
                           np.zeros((self.state_dim, self.state_dim)))

    def calcu_grad_t(self, t):
        """Gradient of W contributed by time t: delta_t · s_{t-1}^T."""
        # FIX: the second factor is the previous *state*, not the previous delta.
        self.grad_list[t] = np.dot(self.delta_list[t], self.state_list[t-1].T)

    def bpttupdate(self):
        """Apply one gradient-descent step to W."""
        self.W -= self.grad * self.learning_rate
|
2,393 | e361215c44305f1ecc1cbe9e19345ee08bdd30f5 | skipped = 0
class Node(object):
    """One entry in an adjacency list: a vertex value, its identifier, and a
    link to the next entry in the same chain."""

    def __init__(self, value, indentifier):
        # (parameter name kept exactly as-is — typo included — to preserve the
        # call interface for keyword callers)
        super(Node, self).__init__()
        self.value, self.identifier = value, indentifier
        self.next = None  # next node in the adjacency chain, or None at the tail
class Graph(object):
    """Undirected vertex-weighted graph over 1-based edge lists.

    Used to search for an edge pair whose removal splits the tree into three
    components of (nearly) equal value sum — a balanced-forest style problem.
    Vertices are stored 0-based internally; self.grand_sum is the total of all
    vertex values.
    """
    def __init__(self, values, edges):
        super(Graph, self).__init__()
        self.node_values = values
        self.vertices = len(values)
        self.edges = edges
        # Adjacency list: graph[v] is the head Node of v's neighbor chain.
        self.graph = [None] * self.vertices
        # self.edges.sort()
        self.grand_sum = sum(self.node_values)
    def build_adjacency_list(self):
        """Populate the adjacency list; edges are 1-based pairs, stored 0-based."""
        for edge in self.edges:
            fro = edge[0] - 1
            to = edge[1]- 1
            # Adding the node to the source node
            node = Node(self.node_values[to], to)
            node.next = self.graph[fro]
            self.graph[fro] = node
            # Adding the source node to the destination as
            # it is the undirected graph
            node = Node(self.node_values[fro], fro)
            node.next = self.graph[to]
            self.graph[to] = node
    def print_graph(self):
        """Debug dump: each vertex followed by its neighbor (value, id) pairs."""
        for i in range(self.vertices):
            node = self.graph[i]
            print("Vertex:", i)
            while(node!=None):
                print(node.value, node.identifier)
                node = node.next
        print("<<"*20)
    def get_tree_nodes(self, start_node, nodes, edge, total):
        """DFS-collect the component reachable from start_node without crossing
        the cut vertices edge[0] and edge[2].

        Appends visited vertex ids to *nodes* (also the return value) and
        accumulates their value sum into total[0] (single-element list used as
        an in/out accumulator).
        """
        if(start_node==None):
            return nodes
        while(start_node!=None):
            # Skip the two cut endpoints and anything already collected.
            if(start_node.identifier==edge[0] or start_node.identifier==edge[2] or (start_node.identifier in nodes)):
                print("skipping ", start_node.identifier)
            else:
                print("adding ", start_node.identifier)
                nodes.append(start_node.identifier)
                total[0] += start_node.value
                next_n = self.graph[start_node.identifier]
                self.get_tree_nodes(next_n, nodes, edge, total)
            start_node = start_node.next
        return nodes
    def split_and_compute_tree_sum(self, t1_nodes = [], t2_nodes = [], edge=[], ton = False):
        """Cut *edge* and return (sum of the remaining tree, sum of the subtree
        hanging off edge[1]).

        NOTE(review): the mutable default arguments ([]) persist across calls;
        all current callers pass fresh lists explicitly, but calling with the
        defaults would accumulate state between calls.
        """
        t1_total = 0
        t2_total = 0
        total = [0]
        start_node = self.graph[edge[1]]
        if(start_node.next != None):
            t2_nodes = self.get_tree_nodes(start_node, t2_nodes, edge, total)
        # Leaf case: the subtree is just edge[1] itself.
        if(len(t2_nodes)==0 and edge[1]!=edge[2]):
            t2_nodes.append(edge[1])
            total[0] += self.node_values[edge[1]]
        t2_total = total[0]
        # Only materialize the complement node set on the first-level split.
        if(not ton and t2_total < self.grand_sum/2):
            for i in range(self.vertices):
                if(i not in t2_nodes):
                    t1_nodes.append(i)
        t1_total = self.grand_sum - t2_total
        print("t2_nodes", t2_nodes)
        print("t2_total", t2_total)
        return t1_total, t2_total
    def check(self, tree1_total, tree2_total, tree3_total):
        """If at least two of the three component sums are equal (and the max
        occurs at least twice), return (max - min, True); otherwise (-1, False).
        """
        print("###"*10)
        print("FINAL tree1_total: ", tree1_total)
        print("FINAL tree2_total: ", tree2_total)
        print("FINAL tree3_total: ", tree3_total)
        print("###"*10)
        if (tree1_total == tree2_total) or (tree1_total == tree3_total) or (tree2_total == tree3_total):
            mx = max(tree1_total, tree2_total, tree3_total)
            if([tree1_total, tree2_total, tree3_total].count(mx) >= 2):
                ret = mx - min(tree1_total, tree2_total, tree3_total)
                return ret, True
        return -1, False
    def split_tree_into_two(self):
        """Try every first cut edge, then search the larger half for a second
        cut (via find_third_tree); return the balancing value or -1."""
        ret = -1
        found = False
        global skipped
        for entry in range(self.vertices):
            tree1_nodes = []
            tree2_nodes = []
            tree3_nodes = []
            temp_nodes = []
            n = self.graph[entry]
            while(n!=None):
                edge = [entry, n.identifier, -1]
                # Each undirected edge is visited twice; only process it once.
                if(n.identifier <= entry):
                    n = n.next
                    skipped += 1
                    continue
                print("##MAIN##. SPLIT POINT EDGE: ", edge)
                tree1_nodes = []
                tree2_nodes = []
                tree1_total, tree2_total = self.split_and_compute_tree_sum(tree1_nodes, tree2_nodes, edge)
                print("ORIGINALS: ", tree1_total, tree2_total)
                # Prune: a valid 3-way split needs each half within [1/3, 2/3]
                # of the grand total.
                if(min(tree1_total, tree2_total) < self.grand_sum/3 or (max(tree1_total, tree2_total) > (2*self.grand_sum)/3)):
                    n = n.next
                    continue
                # Search for the second cut inside the heavier half.
                if(tree1_total > tree2_total):
                    ret, found = self.find_third_tree(tree1_total, tree2_total,tree1_nodes, 1, edge[1])
                elif(tree2_total > tree1_total):
                    ret, found = self.find_third_tree(tree1_total, tree2_total,tree2_nodes, 2, edge[0])
                elif (tree1_total == tree2_total):
                    ret = tree1_total
                    found = True
                else:
                    found = True
                if(found):
                    break
                n = n.next
            if(found):
                break
        return ret
    def find_third_tree(self, tree1_total, tree2_total, nodes, t = 1, m=0):
        """Try every edge inside the node set *nodes* as the second cut.

        t selects which half is being re-split (1: tree1, 2: tree2); m is the
        opposite cut vertex to avoid crossing back over the first cut.
        Returns (difference, found) from check().
        """
        ret , found = -1, False
        global skipped
        consumed = []
        for i in range(len(nodes)):
            skip_n = nodes[i]
            consumed.append(skip_n)
            n = self.graph[skip_n]
            while(n!=None):
                # Don't revisit edges whose far end was already used as a source.
                if(n.identifier in consumed):
                    n = n.next
                    skipped += 1
                    continue
                edge = [skip_n, n.identifier, m]
                print("2. SPLIT POINT EDGE: ", edge)
                print("tree1_total",tree1_total)
                tree3_nodes = []
                temp_nodes = []
                _,tree3_total = self.split_and_compute_tree_sum(temp_nodes, tree3_nodes, edge, True)
                if(t==1):
                    ret , found = self.check(tree1_total - tree3_total, tree2_total, tree3_total)
                elif(t==2):
                    ret , found = self.check(tree1_total, tree2_total - tree3_total, tree3_total)
                if(found):
                    break
                n = n.next
            if(found):
                break
        return ret, found
def balancedForest(values, edges):
    """Build the weighted graph, dump it for debugging, and return the minimal
    balancing value for a three-way split (-1 if no split exists)."""
    forest = Graph(values, edges)
    forest.build_adjacency_list()
    forest.print_graph()
    return forest.split_tree_into_two()
import unittest
class BalancedForestTest(unittest.TestCase):
    """Regression tests for balancedForest over small balanced-forest cases.

    Each case supplies vertex values `c`, a 1-based edge list, and the expected
    minimal balancing value (-1 when no balanced split exists).
    """
    def test1(self):
        expected = 10
        c = [1, 1, 1, 18, 10, 11, 5, 6]
        edges = [[1, 2], [1, 4], [2, 3], [1, 8], [8, 7], [7, 6], [5, 7]]
        self.assertEqual(balancedForest(c, edges), expected)
    def test2(self):
        expected = 13
        c = [12, 7, 11, 17, 20, 10]
        edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]
        self.assertEqual(balancedForest(c, edges), expected)
    def test3(self):
        expected = 19
        c = [15, 12, 8, 14, 13]
        edges = [[4,5],[1,2],[1,3],[1,4]]
        self.assertEqual(balancedForest(c, edges), expected)
    def test4(self):
        expected = 2
        c = [1,2,2,1,1]
        edges = [[1,2],[1,3],[3,5],[1,4]]
        self.assertEqual(balancedForest(c, edges), expected)
    def test5(self):
        expected = -1
        c = [1,3,5]
        edges = [[1,3],[1,2]]
        self.assertEqual(balancedForest(c, edges), expected)
    def test6(self):
        expected = -1
        c = [7, 7, 4, 1, 1, 1]
        edges = [(1, 2), (3, 1), (2, 4), (2, 5), (2, 6)]
        self.assertEqual(balancedForest(c, edges), expected)
    def test7(self):
        expected = 0
        c = [1, 3, 4, 4]
        edges = [(1, 2), (1, 3), (1, 4)]
        self.assertEqual(balancedForest(c, edges), expected)
    def test8(self):
        expected = 297
        c = [100, 99, 98, 100, 99, 98]
        edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]
        self.assertEqual(balancedForest(c, edges), expected)
    def test9(self):
        expected = 4
        c = [12, 10, 8, 12, 14, 12]
        edges = [[1, 2], [1, 3], [1, 4], [2, 5], [4, 6]]
        self.assertEqual(balancedForest(c, edges), expected)
    # NOTE(review): this print is part of the class body, so it runs once at
    # class-definition time — `skipped` is still 0 here, before any test runs.
    print("SKIPPED", skipped)
if __name__ == '__main__':
unittest.main() |
2,394 | d85261268d9311862e40a4fb4139158544c654b3 | from pathlib import Path
from typing import Union
from archinst.cmd import run
def clone(url: str, dest: Union[Path, str]):
    """Clone the git repository at *url* into *dest*, creating the directory first.

    Host-key checking is disabled for the clone so that previously unseen hosts
    do not block unattended runs.
    """
    target = Path(dest)
    target.mkdir(parents=True, exist_ok=True)
    ssh_env = {
        "GIT_SSH_COMMAND": "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
    }
    run(["git", "clone", url, str(target)], ssh_env)
|
2,395 | f54d0eeffa140af9c16a1fedb8dcd7d06ced29f2 | import math
import pendulum
from none import *
@on_command('yearprogress')
async def year_progress(session: CommandSession):
    """Bot command handler: reply with the current year-progress message.

    NOTE(review): the name `year_progress` is re-bound by `def year_progress(dt)`
    further down the file; the decorator has already registered this coroutine
    by then, so the command still works — but the shadowing is confusing.
    """
    await session.send(get_year_progress())
def get_year_progress():
    """Build the year-progress reply: percentage line plus a text progress bar.

    Uses pendulum.now() for the current local time; `year_progress` here
    resolves at call time to the `year_progress(dt)` helper defined below
    (which shadows the command handler of the same name).
    """
    dt = pendulum.now()
    percent = year_progress(dt)
    year = dt.year
    return f'你的 {year} 使用进度:{percent}%\n' \
           f'\n\n' \
           f'{make_progress_string(percent)}'
def year_progress(dt):
    """Return the percentage (0-100, floored) of *dt*'s year that has elapsed.

    *dt* must expose is_leap_year() and timetuple() (e.g. a pendulum DateTime).
    """
    days_in_year = 366 if dt.is_leap_year() else 365
    elapsed_days = dt.timetuple().tm_yday
    return math.floor(elapsed_days / days_in_year * 100)
def make_progress_string(percent):
    """Render *percent* (0-100) as a 15-character text progress bar."""
    total_blocks = 15
    filled = percent * total_blocks / 100
    bar = []
    for idx in range(total_blocks):
        bar.append("▓" if idx < filled else "░")
    return ''.join(bar)
|
2,396 | 62018b32bf0c66fa7ec3cc0fcbdc16e28b4ef2d6 | rate=69
# Read a whole-dollar amount from stdin (prompt text kept verbatim) and
# convert it to rupees using the fixed `rate` defined above.
dollar=int(input("enter an dollars to convert:"))
inr=dollar*rate
print('INR :Rs.',inr,'/-') |
2,397 | 149f8b453786ec54668a55ec349ac157d2b93b5d | #Importing the libraries
import numpy as np
import matplotlib.pyplot as plt  # NOTE(review): imported but unused in this script
import pandas as pd

#importing the data
dataset=pd.read_csv('Social_Network_Ads.csv')
X=dataset.iloc[:,0:2].values   # first two columns as features
y=dataset.iloc[:,2].values     # third column as the target label

#spiliting the data into training data and testing data
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y)  # default 75/25 split, unseeded

#feature Scaling to improve the predictions
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)  # reuse the training-set statistics on the test set

#training the logistic regression on the model
from sklearn.linear_model import LogisticRegression
log=LogisticRegression()
log.fit(X_train,y_train)

#predicting the new result
# NOTE(review): this single-sample prediction result is never stored or printed.
log.predict(sc.transform([[45,87000]]))

#predicting the test set results
y_pred=log.predict(X_test)
np.set_printoptions(precision=2)
# Side-by-side view of predictions vs. ground truth (value not stored).
np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)

#confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test,y_pred)

#accuracy score
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_pred) |
2,398 | 0553bd4c7261197a1a80c5551305a16e7bfdc761 | import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
def weights_init(m):
    """In-place initializer for nn.Linear modules: N(0, 1e-3) weights, zero bias.

    Only exact nn.Linear instances are matched (subclasses are not touched,
    mirroring the original exact-type comparison).
    """
    if type(m) is nn.Linear:
        m.weight.data.normal_(0.0, 1e-3)
        m.bias.data.fill_(0.)
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group of *optimizer* to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
#--------------------------------
# Device configuration
#--------------------------------
# Pick the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: %s'%device)

#--------------------------------
# Hyper-parameters
#--------------------------------
input_size = 3                 # input channels (RGB)
num_classes = 10               # CIFAR-10 classes
hidden_size = [128, 512, 512, 512, 512]  # conv output channels per block
num_epochs = 20
batch_size = 200
learning_rate = 2e-3
learning_rate_decay = 0.95     # multiplicative per-epoch decay
reg=0.001                      # L2 weight decay for Adam
num_training= 49000
num_validation =1000
norm_layer = None #norm_layer="BN"
print(hidden_size)
dropout_p = 0 #probability of dropout

#-------------------------------------------------
# Load the CIFAR-10 dataset
#-------------------------------------------------
#################################################################################
# TODO: Q3.a Choose the right data augmentation transforms with the right      #
# hyper-parameters and put them in the data_aug_transforms variable            #
#################################################################################
data_aug_transforms = []
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Augmentations apply to the training set only (see norm_transform below).
data_aug_transforms += [transforms.RandomCrop(32, padding=4),
                        transforms.RandomHorizontalFlip(),
                        transforms.RandomVerticalFlip(),
                        transforms.RandomRotation(2),
                        transforms.RandomGrayscale(),
                        transforms.ColorJitter(brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05),
                        transforms.RandomAffine(0, translate=[0.2,0.2], scale=None, shear=0, resample=False, fillcolor=0),
                        ]
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Training pipeline: augmentations, then tensor conversion and normalization.
norm_transform = transforms.Compose(data_aug_transforms+[transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                                     ])
# Test pipeline: no augmentation, same normalization.
test_transform = transforms.Compose([transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                                     ])
cifar_dataset = torchvision.datasets.CIFAR10(root='datasets/',
                                             train=True,
                                             transform=norm_transform,
                                             download=True)

test_dataset = torchvision.datasets.CIFAR10(root='datasets/',
                                            train=False,
                                            transform=test_transform
                                            )

#-------------------------------------------------
# Prepare the training and validation splits
#-------------------------------------------------
# First 49000 images for training, next 1000 for validation.
mask = list(range(num_training))
train_dataset = torch.utils.data.Subset(cifar_dataset, mask)
mask = list(range(num_training, num_training + num_validation))
val_dataset = torch.utils.data.Subset(cifar_dataset, mask)

#-------------------------------------------------
# Data loader
#-------------------------------------------------
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size,
                                         shuffle=False)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Convolutional neural network (Q1.a and Q2.a)
# Set norm_layer for different networks whether using batch normalization
#-------------------------------------------------
class ConvNet(nn.Module):
    """VGG-style CNN: per hidden layer, Conv3x3 -> Dropout -> (BatchNorm) ->
    ReLU -> MaxPool(2), followed by a single fully connected classifier.

    Args:
        input_size: number of input channels (3 for CIFAR-10).
        hidden_layers: conv output channel counts, one per block; each block
            halves the spatial resolution, so 5 blocks reduce 32x32 to 1x1.
        num_classes: output size of the final linear layer.
        norm_layer: "BN" to insert BatchNorm2d after each conv; None to skip.

    NOTE: the dropout probability is read from the module-level global
    `dropout_p` (defined near the top of the file).
    """
    def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None):
        super(ConvNet, self).__init__()
        #################################################################################
        # TODO: Initialize the modules required to implement the convolutional layer   #
        # described in the exercise.                                                   #
        # For Q1.a make use of conv2d and relu layers from the torch.nn module.        #
        # For Q2.a make use of BatchNorm2d layer from the torch.nn module.             #
        # For Q3.b Use Dropout layer from the torch.nn module.                         #
        #################################################################################
        layers = []
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # First block: input channels -> hidden_layers[0].
        layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3, stride=1, padding=1))
        layers.append(nn.Dropout(dropout_p))
        if norm_layer=="BN":
            layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05, momentum=0.1,
                                         affine=True, track_running_stats=True))
        layers.append(nn.ReLU())
        layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        # Remaining blocks chain consecutive hidden layer sizes.
        for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):
            layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1, padding=1))
            layers.append(nn.Dropout(dropout_p))
            if norm_layer=="BN":
                layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,
                                             affine=True, track_running_stats=True))
            layers.append(nn.ReLU())
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        # stacking convolutional blocks
        self.ConvBlocks = nn.Sequential(*layers)
        self.Dout = hidden_layers[-1]
        # Fully connected layer; assumes the conv stack collapses the spatial
        # dimensions to 1x1 before flattening.
        self.Dense = nn.Linear(hidden_layers[-1], num_classes)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    def forward(self, x):
        #################################################################################
        # TODO: Implement the forward pass computations                                #
        #################################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        out = self.ConvBlocks(x)
        # FIX: flatten by batch size instead of the hard-coded 512; the original
        # `out.view(-1, 512)` silently mis-batched or crashed whenever
        # hidden_layers[-1] != 512.
        out = out.view(out.size(0), -1)
        out = self.Dense(out)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        return out
#-------------------------------------------------
# Calculate the model size (Q1.b)
# if disp is true, print the model parameters, otherwise, only return the number of parameters.
#-------------------------------------------------
def PrintModelSize(model, disp=True):
    """Count the parameters of *model* and optionally print the total.

    Note: every parameter returned by model.parameters() is counted,
    regardless of requires_grad, so frozen parameters are included too.

    Args:
        model: an nn.Module.
        disp: when True, also print the count.

    Returns:
        int: total number of parameter elements.
    """
    #################################################################################
    # TODO: Implement the function to count the number of trainable parameters in  #
    # the input model. This useful to track the capacity of the model you are      #
    # training                                                                     #
    #################################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    model_sz = sum(p.numel() for p in model.parameters())
    if disp:
        print("\nNumber of parameters: ", model_sz)
        print("\n")
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return model_sz
#-------------------------------------------------
# Calculate the model size (Q1.c)
# visualize the convolution filters of the first convolution layer of the input model
#-------------------------------------------------
def VisualizeFilter(model):
    """Render the first conv layer's kernels as one tiled image via matplotlib."""
    #################################################################################
    # TODO: Implement the functiont to visualize the weights in the first conv layer#
    # in the model. Visualize them as a single image of stacked filters.           #
    # You can use matlplotlib.imshow to visualize an image in python               #
    #################################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    # Canvas for an 8x16 grid of 3x3 RGB tiles with 1-pixel gutters.
    # NOTE(review): grid size is hard-coded for 128 first-layer filters
    # (hidden_size[0] == 128); fewer filters would raise IndexError on
    # `kernels[cnt]` below — confirm if hidden_size changes.
    kernel_map = np.zeros((7*4 + 3, 15*4 + 3, 3))
    kernels = list(model.parameters())[0]
    kernels = kernels.to("cpu")
    kernels = kernels.data.numpy()
    # Min-max normalize all weights to [0, 1] so imshow can render them.
    kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())
    cnt = 0
    for i in range(0, 8*4,4):
        for j in range(0, 16*4, 4):
            kernel_map[i:i+3, j:j+3, :] = kernels[cnt]
            cnt = cnt + 1
    plt.figure(figsize=(20, 10))
    plt.imshow(kernel_map)
    plt.show()
    pass
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#======================================================================================
# Q1.a: Implementing convolutional neural net in PyTorch
#======================================================================================
# In this question we will implement a convolutional neural networks using the PyTorch
# library. Please complete the code for the ConvNet class evaluating the model
#--------------------------------------------------------------------------------------
# Instantiate the network and move it to the selected device.
model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)

# Q2.a - Initialize the model with correct batch norm layer
model.apply(weights_init)
# Print the model
print(model)

# Pull a single training batch onto the device, then stop.
# NOTE(review): `images`/`labels` from this loop are not used afterwards —
# presumably a leftover warm-up/debug step; confirm before removing.
for i, (images, labels) in enumerate(train_loader):
    images = images.to(device)
    break

# Print model size
#======================================================================================
# Q1.b: Implementing the function to count the number of trainable parameters in the model
#======================================================================================
PrintModelSize(model)
#======================================================================================
# Q1.a: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters before training
#======================================================================================
#VisualizeFilter(model)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)

# Train the model
lr = learning_rate
total_step = len(train_loader)
loss_train = []      # per-epoch average training loss
loss_val = []        # per-epoch average validation loss
best_accuracy = 0    # best validation accuracy seen so far (for early stopping)
accuracy_val = []    # per-epoch validation accuracy
# Fresh instance to hold the best weights; note it stays on the CPU.
best_model = type(model)(input_size, hidden_size, num_classes, norm_layer=norm_layer) # get a new instance
#best_model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer)
# Main training loop: one pass over the training set per epoch, followed by a
# full validation pass and best-model bookkeeping.
for epoch in range(num_epochs):
    model.train()
    loss_iter = 0
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_iter += loss.item()

        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

    # Average the accumulated loss over all samples seen this epoch.
    loss_train.append(loss_iter/(len(train_loader)*batch_size))

    # Code to update the lr: exponential decay, applied once per epoch.
    lr *= learning_rate_decay
    update_lr(optimizer, lr)

    # Validation pass (no gradients, eval mode for dropout/batch-norm).
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        loss_iter = 0
        for images, labels in val_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            loss = criterion(outputs, labels)
            loss_iter += loss.item()

        loss_val.append(loss_iter/(len(val_loader)*batch_size))

        accuracy = 100 * correct / total
        accuracy_val.append(accuracy)
        print('Validation accuracy is: {} %'.format(accuracy))
        #################################################################################
        # TODO: Q2.b Implement the early stopping mechanism to save the model which has #
        # the model with the best validation accuracy so-far (use best_model).          #
        #################################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Snapshot the weights whenever validation accuracy improves.
        if accuracy > best_accuracy:
            best_model.load_state_dict(model.state_dict())
            best_accuracy=accuracy
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
model.eval()

# Plot training vs. validation loss curves.
plt.figure(2)
plt.plot(loss_train, 'r', label='Train loss')
plt.plot(loss_val, 'g', label='Val loss')
plt.legend()
plt.show()

# Plot per-epoch validation accuracy.
plt.figure(3)
plt.plot(accuracy_val, 'r', label='Val accuracy')
plt.legend()
plt.show()

#################################################################################
# TODO: Q2.b Implement the early stopping mechanism to load the weights from the#
# best model so far and perform testing with this model.                       #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Restore the best-validation-accuracy weights before final testing.
model.load_state_dict(best_model.state_dict())
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

#Compute accuracy on the test set
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        # Evaluation is capped at the first 1000 test images.
        if total == 1000:
            break

    print('Accuracy of the network on the {} test images: {} %'.format(total, 100 * correct / total))

# Q1.c: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters before training
VisualizeFilter(model)

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
|
2,399 | b95eadd60093d5235dc0989205edff54ef611215 |
import sys
sys.path.insert(0, ".") |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.