index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,600 | 923669c6123f515915ca7f88e91baed849f4cb35 | for x in "sathya":
print(x)
#-----------------
l = [1,2,3,4,5,6,7,8,9,10]
for x in l:
print(x)
#-----------------
#Working with Slice Operator
l = [1,2,3,4,5,6,7,8,9,10]
for x in l[3:8]:
print(x)
print("Thanks")
#-------------------
l = [1,2,3,4,5,6,7,8,9,10]
for x in l[:8]:
print(x)
print("Thanks")
#----------------------
l = [1,2,3,4,5,6,7,8,9,10]
for x in l[0:10:2]:
print(x)
print("Thanks")
#----------------
l = [1,2,3,4,5,6,7,8,9,10]
for x in l[-1:-10:-2]:
print(x)
print("Thanks")
|
985,601 | a2cf956800add12f7dde64c8a1fbced9935f98a6 | import sqlite3
import logging
import os
def initialize():
if(os.path.exists('chuj.db')):
return
conn = sqlite3.connect('chuj.db')
c = conn.cursor()
# tworzenie juserow
try:
c.execute('''CREATE TABLE IF NOT EXISTS USERS(
ID INTEGER NOT NULL PRIMARY KEY ASC,
NAME TEXT NOT NULL,
LOGIN TEXT NOT NULL UNIQUE);''')
conn.commit()
c.execute('''INSERT INTO USERS(NAME, LOGIN) VALUES ("Michał", "Pestka");''')
conn.commit()
except Exception as e:
print(str(e))
# tworzenie tabeli piwek
try:
c.execute('''CREATE TABLE IF NOT EXISTS PIWKAHEHE(
TRANS_ID INTEGER NOT NULL PRIMARY KEY ASC,
FROM_ID INTEGER,
TO_ID INTEGER);''')
conn.commit()
c.execute('''INSERT INTO PIWKAHEHE(FROM_ID, TO_ID) VALUES (1, 1);''')
conn.commit()
except Exception as e :
print(str(e))
conn.close()
|
985,602 | 1204bd7b2292617e37cb72fad00ff572d3ae32e5 | # coding:utf-8
import unittest
from airtest.core.api import *
import yaml, logging.config
yaml.warnings({'YAMLLoadWarning': False})
log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../config/log.conf')
logging.config.fileConfig(log_file_path)
logger = logging.getLogger()
with open('../config/devices.yaml', 'r', encoding='gbk') as file:
data = yaml.load(file)
devicesname = data['devicesname']
package = data['package']
class StartEnd(unittest.TestCase):
def setUp(self):
time.sleep(10)
try:
connect_device('Android:///' + devicesname)
start_app(package)
except:
connect_device('Android:///' + devicesname)
start_app(package)
def tearDown(self):
clear_app(package)
stop_app(package)
time.sleep(8)
|
985,603 | 627f11dce62bd2d01cfa287f08240b8249d6a0a0 | import requests
from bs4 import BeautifulSoup
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import random
import time
import schedule
def spider_foods():
res_foods = requests.get('http://www.xiachufang.com/explore/')
bs_foods = BeautifulSoup(res_foods.text, 'html.parser')
list_foods = bs_foods.find_all('div', class_='info pure-u')
list_all = []
for food in list_foods:
tag_a = food.find('a')
name = tag_a.text[17:-13]
URL = 'http://www.xiachufang.com' + tag_a['href']
tag_p = food.find('p', class_='ing ellipsis')
ingredients = tag_p.text[1:-1]
list_all.append([name, URL, ingredients])
return list_all
def send_mail(food_list):
mailhost = 'smtp.qq.com'
qqmail = smtplib.SMTP()
qqmail.connect(mailhost, 25)
account = input('请输入你的邮箱:')
password = input('请输入你的密码:')
qqmail.login(account, password)
receiver = input('请输入收件人的邮箱:')
content = '今天就吃' + random.sample(food_list,1)[0][0] + '吧'
message = MIMEText(content, 'plain', 'utf-8')
subject = input('请输入你的邮件主题:')
message['Subject'] = Header(subject, 'utf-8')
try:
qqmail.sendmail(account, receiver, message.as_string())
print('邮件发送成功')
except Exception:
print('邮件发送失败')
qqmail.quit()
def job():
foods = spider_foods()
send_mail(foods)
def main():
schedule.every().friday.at('12:00').do(job)
while True:
schedule.run_pending()
time.sleep(3600)
if __name__ == '__main__':
main()
|
985,604 | 743a2b86c2b880a1be6da5a9fad7e421485e80af | from django.shortcuts import render
from django.http import HttpResponse
from .models import Klassen, Richtingen, Leraren, Contact
def home(request):
return render(request, 'crud/index.html')
def aanbod(request):
alleRichtingen = Richtingen.objects.all()
return render(request, 'crud/aanbod.html', {'alleRichtingen': alleRichtingen})
def wie(request):
alleLeraren = Leraren.objects.all()
return render(request, 'crud/wie.html', {'alleLeraren': alleLeraren})
def contact(request):
return render(request, 'crud/contact.html') #, {'form': form_class}) #grab form and passing it over into the template
def richtingen(request):
alleRichtingen = Richtingen.objects.all()
return render(request, 'crud/richtingen.html', {'alleRichtingen': alleRichtingen})
def leraren(request):
alleLeraren = Leraren.objects.all()
return render(request, 'crud/leraren.html', {'alleLeraren': alleLeraren})
def klassen(request):
alleKlassen = Klassen.objects.all()
return render(request, 'crud/klassen.html', {'alleKlassen': alleKlassen}) |
985,605 | a32eb141dcc8b1e3f827166350c2cccb4cb16943 | # This module contains adapters and a facade for the different python netcdf libraries.
# $Id: opendap.py 4658 2011-06-13 15:41:23Z boer_g $
# $Date: 2011-06-13 17:41:23 +0200 (ma, 13 jun 2011) $
# $Author: boer_g $
# $Revision: 4658 $
# $HeadURL: https://repos.deltares.nl/repos/OpenEarthTools/trunk/python/io/opendap/opendap.py $
# $Keywords: $
import exceptions
import warnings
['close', 'cmptypes', 'createCompoundType', 'createDimension', 'createGroup', 'createVLType', 'createVariable', 'delncattr', 'dimensions', 'file_format', 'getncattr', 'groups', 'maskanscale', 'ncattrs', 'parent', 'path', 'renameDimension', 'renameVariable', 'set_fill_off', 'set_fill_on', 'setncattr', 'sync', 'variables', 'vltypes']
def pydaptonetCDF4(dataset):
"""make a pydap dataset look and quack like a netcdf4 dataset
>>> import pydap.client
>>> url = 'http://opendap.deltares.nl:8080/opendap/rijkswaterstaat/jarkus/profiles/transect.nc'
>>> ds = pydap.client.open_url(url)
>>> ncds = pydaptonetCDF4(ds)
>>> type(ncds)
<class 'pydap.model.DatasetType'>
You should now be able to access the dataset in a netCDF4 way (with variables)
>>> ncds.variables['x'] is ds['x']['x']
True
"""
import pydap.model
assert isinstance(dataset, pydap.model.DatasetType)
# in pydap the dataset itself is a dict, in netCDF4 it has a variables dict
# let's add the variables as well
dataset.variables = {}
for variable in dataset.keys():
if isinstance(dataset[variable], pydap.model.GridType):
# the pydap client returns grids for arrays with coordinates.
#
dataset.variables[variable] = dataset[variable][variable]
dataset.variables[variable].attributes.update(dataset[variable].attributes)
else:
dataset.variables[variable] = dataset[variable]
for key, value in dataset.attributes['NC_GLOBAL'].items():
if key not in dataset:
# I think the __setitem__ might be overwritten, so we'll do it like this
setattr(dataset, key, value)
else:
warnings.warn('Could not set %s to %s because it already exists as a variable' % (key, value))
return dataset
# TO DO
# deal with fill_values: netCDF deals with them automaticaly
# by inserting into a masked arrray, whereas pydap does not.
# numpy.ma.MaskedArray or does netCDF4 doe that based on presence of fillvalue att
# TO DO
# perhaps create a netCDF4topydap adapter also
def opendap(url):
"""return the dataset looking like a netCDF4 object
>>> url = 'http://opendap.deltares.nl:8080/opendap/rijkswaterstaat/jarkus/profiles/transect.nc'
>>> ds = opendap(url)
>>> ds.variables.has_key('x')
True
>>> ds.title
'Jarkus Data'
"""
try:
import netCDF4
dataset = netCDF4.Dataset(url)
# Either netcdf is not found, or it cannot read url's
except (ImportError, RuntimeError):
import pydap.client
dataset = pydaptonetCDF4(pydap.client.open_url(url))
return dataset
if __name__ == "__main__":
import doctest
doctest.testmod() |
985,606 | 371f65a57b9f22b9bbac0c7bb342a8af331373f7 | #!/usr/bin/python
import sys
import operator
import csv
# dictionary that contains the student id as the key and
# her/his reputation as the value
student = {}
# dictionary that contains the tag as the key and
# its score as value
tags = {}
# set the reader for the TSV data file
reader = csv.reader(sys.stdin, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
# read from the output of mappers
for line in reader:
# if there are only two fields
if len(line) == 2:
# if we're receiving a user
# (check on the first char of the key)
if line[0][0] == 'A':
# get the student id removing the prepending 'A'
student_id = line[0][1:]
# get the reputation
reputation = int(line[1])
# add the student's reputation to the dictionary
student[student_id] = reputation
# if we're receiving a tag
# (check on the first char of the key)
elif line[0][0] == 'B':
# get the tag removing the prepending 'B'
tag = line[0][1:]
# get the student id
student_id = line[1]
# defensive check
# if student_id is not in the dictionary
if not student.has_key(student_id):
# it means the data of user and nodes is not coherent because
# in nodes file are present nodes that have an author_id that
# is not present in the users file. So just skip it.
continue;
# if the dictionary contains the tag
if tags.has_key(tag):
# increment by the reputation of the student this tag
tags[tag] += student[student_id]
# if the dictionary does not contain the tag
else:
# set the reputation of the student as the tag value
tags[tag] = student[student_id]
# sort the tags by their score
sorted_tags = sorted(tags.iteritems(), key=operator.itemgetter(1))
# loop over the last 10 tags (the ones that have highest scores)
for tag in sorted_tags[-10:]:
# and output the tag and its score
print "{0}\t{1}".format(tag[0], tag[1])
|
985,607 | e3ffa641b7f30f4e32dd2a54f1d3a23ff88cd471 | # Author: Daniel Martini Jimenez
# Date: 24-2-2019
from gurobipy import *
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from write_to_text import *
# from path_planning import *
# ============================================================================
#
# Optimization Model
#
# ============================================================================
'Drone dynamics data'
g=9.81 # m/s^2
D_sides=32 # [-]
V_max=3.6 # m/s
V_max *= math.cos(math.pi/D_sides)
U_max=0.5*g # m/s^2
U_max *= math.cos(math.pi/D_sides)
delta_t=1 # sec
t_max=60 # sec
t_steps=int(round((t_max+delta_t)/delta_t,1)) # [-]
gamma=10*math.pi/180 # deg
alpha=10*math.pi/180 # deg
waypoints =np.array([[8.913, 9.27, 9.27, 5.76, 2.675, 2.787, 6.4045, 6.4045, 2.91, 5.91, 8, 8.913], # x axis
[14.642, 10.51, 4.057, 2.749, 6.083, 12.259, 12.5825, 11.8575, 9.0,7.2685, 7.2685+1, 14.642], #y-axis
[0.41, 2.245, 2.899, 2.49, 2.236, 2.36, 1.098, 1.098, 1.625, 2.1565, 2.1565 - 0.5, 0.41]]) # z-axis
velocity_cts=np.array([[0, math.inf, 0, math.inf, 0, 0, 0, 0, math.inf, math.inf, math.inf, 0],
[0, math.inf, math.inf, 0, math.inf, math.inf, math.inf, math.inf, math.inf, 0, math.inf, 0],
[ 0, math.inf, 0, 0, 0, 0, 0, 0, math.inf, 0, math.inf, 0]])
crimefile = open('timeswp.txt', 'r')
t_wps = []
for line in crimefile:
line=line[:-1]
t_wps.append(float(line))
print(t_wps)
n_waypoints=waypoints.shape[1]
# ----------------------------------------------------------------------------
# Define Variables to be used.
# ----------------------------------------------------------------------------
m = Model('Path_planning')
'X-coordinate'
px = {}
vx = {}
ux = {}
'Y-coordinate'
py = {}
vy = {}
uy = {}
'Z-coordinate'
pz = {}
vz = {}
uz = {}
'Acceleration'
U={}
'Velocity'
V={}
'Commodity variables for discretization'
theta={}
b={}
# ----------------------------------------------------------------------------
# Create Objective FUction
# ----------------------------------------------------------------------------)
for d in range(D_sides):
theta[d]=2*math.pi*(d+1)/D_sides
for n in range(t_steps):
U[n]= m.addVar(obj=1,
vtype=GRB.CONTINUOUS,lb=0,ub=U_max,
name="U_%s" % (n))
V[n] = m.addVar(obj=0.001,
vtype=GRB.CONTINUOUS, lb=0,ub=V_max,
name="V_%s" % (n))
ux[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="ux_%s" % (n))
uy[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="uy_%s" % (n))
uz[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="uz_%s" % (n))
vx[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="Vx_%s" % (n))
vy[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="Vy_%s" % (n))
vz[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="Vz_%s" % (n))
px[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="Px_%s" % (n))
py[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="Py_%s" % (n))
pz[n] = m.addVar(obj=0,
vtype=GRB.CONTINUOUS,lb=-GRB.INFINITY,
name="Pz_%s" % (n))
n_gates=waypoints.shape[1]
m.update()
# Optimize
m.setObjective(m.getObjective(), GRB.MINIMIZE) # The objective is to maximize revenue
# ----------------------------------------------------------------------------
# Create Constraints
# ----------------------------------------------------------------------------
'Accelerations Constraint'
for n in range(t_steps):
for d in range(D_sides):
m.addConstr(
math.cos(theta[d])*math.sin(-alpha)*ux[n]+math.sin(theta[d])*math.sin(-alpha)*uy[n]+math.sin(-alpha)*uz[n],
GRB.LESS_EQUAL, U[n], name='U_cts1_%s_%s' % (n,d))
m.addConstr(
math.cos(theta[d]) * math.sin(alpha)*uz[n] + math.sin(theta[d]) * math.sin(alpha) * ux[n] + math.sin(alpha) * uz[n],
GRB.LESS_EQUAL, U[n], name='U_cts2_%s_%s' % (n, d))
m.addConstr(
math.cos(theta[d])*uy[n]+math.sin(theta[d])*uz[n],
GRB.LESS_EQUAL, U[n], name='U_cts3_%s_%s' % (n,d))
'Max Velocity Constraint'
for n in range(t_steps):
for d in range(D_sides):
m.addConstr(
math.cos(theta[d])*math.sin(gamma)*vx[n]+math.sin(theta[d])*math.sin(gamma)*vy[n]+math.sin(-gamma)*vz[n],
GRB.LESS_EQUAL,V[n], name='V_cts1_%s_%s' % (n,d))
m.addConstr(
math.cos(theta[d]) * math.sin(gamma) * vz[n] + math.sin(theta[d]) * math.sin(gamma) * vx[n] + math.sin(gamma) * vz[n],
GRB.LESS_EQUAL, V[n], name='V_cts2_%s_%s' % (n, d))
m.addConstr(
math.cos(theta[d])*vy[n]+math.sin(theta[d])*vz[n],
GRB.LESS_EQUAL,V[n], name='V_cts3_%s_%s' % (n,d))
'Integration scheme Constraint'
for n in range(1,t_steps):
'Position'
m.addConstr(
px[n-1]+vx[n-1]*delta_t+1/2*delta_t**2*ux[n-1],
GRB.EQUAL,px[n], name='Px_cts_%s' % (n))
m.addConstr(
py[n-1]+vy[n-1]*delta_t+1/2*delta_t**2*uy[n-1],
GRB.EQUAL,py[n], name='Py_cts_%s' % (n))
m.addConstr(
pz[n-1]+vz[n-1]*delta_t+1/2*delta_t**2*uz[n-1],
GRB.EQUAL,pz[n], name='Pz_cts_%s' % (n))
'Velocity'
m.addConstr(
vx[n-1]+delta_t*ux[n-1],
GRB.EQUAL,vx[n], name='Vx_cts_%s' % (n))
m.addConstr(
vy[n-1]+delta_t*uy[n-1],
GRB.EQUAL,vy[n], name='Vy_cts_%s' % (n))
m.addConstr(
vz[n-1]+delta_t*uz[n-1],
GRB.EQUAL,vz[n], name='Vz_cts_%s' % (n))
'Waypoint Constraint'
for i in range(n_gates):
n=int(t_wps[i]/delta_t)
print(n)
m.addConstr(
px[n]-waypoints[0,i],
GRB.EQUAL, 0,name='Wpx_%s_%s' % (i,n))
m.addConstr(
py[n] - waypoints[1, i],
GRB.EQUAL, 0, name='Wpoy_%s_%s' % (i,n))
m.addConstr(
pz[n] - waypoints[2, i],
GRB.EQUAL, 0, name='Wpz_%s_%s' % (i,n))
'Initial acceleration Constraint'
for n in [0,t_steps-1]:
m.addConstr(
ux[n],
GRB.EQUAL, 0,name='ux_%s'%(n))
m.addConstr(
uy[n],
GRB.EQUAL, 0,name='uy_%s'%(n))
m.addConstr(
uz[n],
GRB.EQUAL, 0,name='uz_%s'%(n))
'Velocity constraint at waypoint'
for i in range(n_gates):
j=int(t_wps[i]/delta_t)
if velocity_cts[0,i]<math.inf:
m.addConstr(vx[j],
GRB.EQUAL, velocity_cts[0,i],name='vx_%s'%(i))
if velocity_cts[1,i]<math.inf:
m.addConstr(vy[j],
GRB.EQUAL, velocity_cts[1,i],name='vy_%s'%(i))
if velocity_cts[2,i]<math.inf:
m.addConstr(vz[j],
GRB.EQUAL, velocity_cts[2,i],name='vz_%s'%(i))
# ---------------------------------------------------
# -------------------------
# Optimize the Problem
# ----------------------------------------------------------------------------
# Collects the values of the variables "A_%_%_%_%_%_%" and "P_%_%_%_%_%_%" in
# a multi-dimensional array called results_A and results_P respectively.
# Note that every aircraft type has the same amoUt of instances (i.e. the total
# number of aircraft).
m.update()
m.write('path_planning.lp')
'Solve using one of the selected optimization softwares.'
# Set time constraint for optimization (5minutes)
# m.setParam('TimeLimit', 5 * 60)
# m.setParam('MIPgap', 0.009)
m.optimize()
m.write("Path_trajectory.sol")
status = m.status
print(status)
if status == GRB.Status.UNBOUNDED:
print('The model cannot be solved because it is Unbounded')
elif status == GRB.Status.OPTIMAL:
f_objective= m.objVal
pos_x = []
pos_y = []
pos_z = []
U_list=[]
ux_list=[]
for n in range(t_steps):
pos_x.append(px[n].X)
pos_y.append(py[n].X)
pos_z.append(pz[n].X)
U_list.append(U[n].X)
ux_list.append(math.sqrt(ux[n].X**2+uy[n].X**2+uz[n].X**2))
print(ux[n].X,uy[n].X,uz[n].X)
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot(pos_x, pos_y, pos_z,label='dt=0.05')
# ax.scatter(waypoints[0,:], waypoints[1,:], waypoints[2,:], zdir='z', s=80, c='red', depthshade=True,label='Waypoints')
# plt.legend()
# title_str='Trajectory achieved in '+str(t_max)+' seconds'
# plt.title(title_str)
# plt.show()
# plt.plot((np.array(U_list)-np.array(ux_list))/np.array(ux_list),label='Numerical')
# plt.plot(np.array(U_list),label='Numerical')
# plt.plot(ux_list,label='Analyitical')
# plt.show()
file_name="new_path.txt"
write_text(px, py, pz,t_max,file_name)
print('Optmization time is ', m.Runtime)
elif status != GRB.Status.INF_OR_UBD and status != GRB.Status.INFEASIBLE:
print('Optimization was stopped with status %d' % status)
exit(0) |
985,608 | 2d9352d7af85bc095fed7c845839db0f054be816 | """
Generic time series generator for two columns of data
"""
import matplotlib.dates as md
import matplotlib.pyplot as plt
import matplotlib.mlab as ml
import numpy as np
#import datetime as dt
import dateutil.parser as dp
#import cPickle
#import scikits.timeseries as ts
PATH = 'data.csv'
#Common Options
SKIP_HEADER = 1
PLOT_TITLE = 'Structure 46'
X_AXIS_TITLE = 'Date'
Y_AXIS_TITLE = 'Flow, cfs'
HEADER_NAMES = ['Date','Data']
def openFile(path):
#columns = {'names':('date','d10'),
# 'formats':(object,np.float,np.float,np.float,np.float,np.float)}
#data = cPickle.load(open(OUTDIR+"data.p"))
def str2date(x):
return dp.parse(x)
print '...opening file'
converter = {0:str2date}
#dtype = columns,
data = np.genfromtxt(path,converters = converter, delimiter=',',skip_header=SKIP_HEADER)
#cPickle.dump(data, open(OUTDIR+"data.p","w"))
data.dtype.names = HEADER_NAMES
return data
def plotSingleTimeseries(data):
"""
plots observed data and modeled data time series
"""
print '...creating plot'
fig = plt.figure(figsize=(11,8.5))
ax = fig.add_subplot(111)
for header in HEADER_NAMES[1:]:
ax.plot(data[HEADER_NAMES[0]],data[header],label=header)
#i, h = ax.get_legend_handles_labels()
fig.autofmt_xdate()
ax.set_title(PLOT_TITLE)
ax.set_xlabel(X_AXIS_TITLE)
ax.set_ylabel(Y_AXIS_TITLE)
ax.grid(True)
ax.xaxis.set_major_formatter(md.DateFormatter('%m-%d-%Y %H:%M'))
#print i,h
ax.legend()
plt.show()
return i,h
if __name__=='__main__':
data = openFile(PATH+FILE)
i, h = plotSingleTimeseries(data)
|
985,609 | 9971f85c3f86c228a662af71378976f98ee1a425 | import pygraphviz as pgv
def plot_tree(nodes, edges, labels, name: str = "tree"):
g = pgv.AGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
g.layout(prog="dot")
for i in nodes:
n = g.get_node(i)
n.attr["label"] = labels[i]
name = name + '.png'
g.draw('images/' + name)
def write_file(tree):
f = open("gen_code.py", "a")
f.write("# GP generated code\n" + str(tree) + "\n")
|
985,610 | 919cac514a460faa587ab0f6debde0a15a60f0b8 | import pytest
import bson
from datetime import date
from data_service.model.case import Case
from data_service.model.case_reference import CaseReference
from data_service.model.document_update import DocumentUpdate
from data_service.model.geojson import Feature, Point
from data_service.util.errors import ValidationError
def test_instantiating_case_from_empty_json_is_error():
with pytest.raises(ValidationError):
case = Case.from_json("{}")
def test_case_from_minimal_json_is_valid():
with open("./tests/data/case.minimal.json", "r") as minimal_file:
case = Case.from_json(minimal_file.read())
assert case is not None
def test_case_with_geojson_is_valid():
with open("./tests/data/case.with_location.json", "r") as file:
case = Case.from_json(file.read())
assert case is not None
assert case.location is not None
assert type(case.location) == Feature
def test_csv_header():
header_line = Case.csv_header()
header_fields = header_line.split(",")
assert "caseStatus" in header_fields
assert "location.latitude" in header_fields
def test_csv_row_with_no_id():
identifier = "abcd12903478565647382910"
oid = bson.ObjectId(identifier)
ref = CaseReference()
ref.sourceId = oid
case = Case()
case.confirmationDate = date(2022, 6, 13)
case.caseReference = ref
case.caseStatus = "probable"
case.pathogenStatus = "emerging"
csv = case.to_csv()
csv_fields = csv.split(",")
assert "probable" in csv_fields
assert "2022-06-13" in csv_fields
def test_csv_row_with_id():
id1 = "abcd01903478565647382910"
id2 = "abcd12903478565647382910"
oid2 = bson.ObjectId(id2)
ref = CaseReference()
ref.sourceId = oid2
case = Case()
case._id = id1
case.confirmationDate = date(2022, 6, 13)
case.caseReference = ref
case.caseStatus = "probable"
case.pathogenStatus = "unknown"
csv = case.to_csv()
csv = case.to_csv()
csv_fields = csv.split(",")
assert "probable" in csv_fields
assert "2022-06-13" in csv_fields
def test_apply_update_to_case():
with open("./tests/data/case.minimal.json", "r") as minimal_file:
case = Case.from_json(minimal_file.read())
update = DocumentUpdate.from_dict({"confirmationDate": date(2022, 3, 7)})
updated_case = case.updated_document(update)
# original case should be unchanged
assert case.confirmationDate == date(2021, 12, 31)
# new case should be updated
assert updated_case.confirmationDate == date(2022, 3, 7)
def test_apply_update_that_unsets_value():
with open("./tests/data/case.minimal.json", "r") as minimal_file:
case = Case.from_json(minimal_file.read())
update = DocumentUpdate.from_dict({"confirmationDate": None})
case.apply_update(update)
assert case.confirmationDate is None
def test_cannot_put_wrong_type_in_list():
with open("./tests/data/case.minimal.json", "r") as minimal_file:
case = Case.from_json(minimal_file.read())
case.gender = ["man", True]
with pytest.raises(ValidationError):
case.validate()
def test_list_elements_must_come_from_acceptable_values():
with open("./tests/data/case.minimal.json", "r") as minimal_file:
case = Case.from_json(minimal_file.read())
case.gender = ["woman", "dalek"]
with pytest.raises(ValidationError):
case.validate()
|
985,611 | b36c1a7a86cf763f8b7782ae99f708ca8d35d44d | # Python Crash Course: A Hands-On, Project-Based Introduction To Programming
#
# Name: Mark Lester Apuya
# Date: 05/23/2021
#
# Chapter 4: Working With Lists
#
# Exercise 4.4 One Million:
# Make a list of the numbers from one to one million, and then use a for loop
# to print the numbers. (If the output is taking too long, stop it by pressing
# ctrl-C or by closing the output window.)
# list() function allows you to convert the set of numbers into a list
# Crashes VS code
numbers = list(range(1, 1000001))
print(numbers) |
985,612 | 75ccadc134017d5eb045f5d39da5bf4fa28555f1 | import requests
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.uix.listview import ListItemButton
from kivy.uix.screenmanager import ScreenManager, Screen, SlideTransition
from kivy.properties import StringProperty
import patient
class Edit(BoxLayout):
# Connects the value in the TextInput widget to these
# fields
email_text_input = ObjectProperty()
def edit(self):
pass
class EditApp(App):
user_name_log_text_input = ObjectProperty()
password_log_text_input = ObjectProperty()
student_list = ObjectProperty()
def build(self):
manager = ScreenManager()
manager.add_widget(Edit(name='studentdb'))
manager.add_widget(Patient(name='patient'))
return manager
|
985,613 | 809c93ae3debb5eaed8ef51872898eeafafb4d32 | from model import db, DietPreference, ExcludedIngredient
def set_food_preferences(session):
"""Setting saved diet and ingredient exclusions for registered users"""
if 'user_id' in session:
diet, health = get_diet_preferences(session['user_id'])
exclusion_list = get_ingred_exclusions(session['user_id'])
else:
diet = None
health = None
exclusion_list = None
return diet, health, exclusion_list
def update_diet_preference(user_id, preferences):
"""Add diet preference of user to table"""
# delete previous entries
DietPreference.query.filter_by(user_id=user_id).delete()
# adding each diet preference to session
for diet_id in preferences:
if diet_id != None:
new_preference_entry = DietPreference(user_id = user_id,
diet_id = diet_id)
db.session.add(new_preference_entry)
# committing changes
db.session.commit()
def get_diet_preferences(user_id):
"""Get diet preferences from db"""
preferences = DietPreference.query.filter_by(user_id=user_id).all()
health = None
diet = None
for preference in preferences:
if preference.diet_type.edamam_class == 'Health':
health = preference.diet_type.diet_name
else:
diet = preference.diet_type.diet_name
return diet, health
def update_ingredient_exclusions(user_id, updates):
"""Add ingredients to exclude to database"""
# deleting all preferences to start
# TO DO: delete single ingredient functionality on user page
ExcludedIngredient.query.filter_by(user_id=user_id).delete()
# adding each ingredient to session
exclusion_list = updates.split(',')
for exclusion in exclusion_list:
if exclusion != '':
exclusion.strip()
new_exclusion_entry = ExcludedIngredient(user_id = user_id,
ingred_name = exclusion)
db.session.add(new_exclusion_entry)
# committing changes
db.session.commit()
return None
def get_ingred_exclusions(user_id):
"""Get diet preferences from db"""
exclusions = ExcludedIngredient.query.filter_by(user_id=user_id).all()
if exclusions:
exclusion_list = []
for exclusion in exclusions:
exclusion_list.append(exclusion.ingred_name)
return exclusion_list
else:
return None |
985,614 | 21a2e1625964284814a6bc9f5ceb6f497de3866a | from sklearn_django.settings import BASE_DIR
class Params:
BAD_STATUSES = ('true', 1, '1')
ALGORITHMS = ['adaboost', 'gausnb', 'decisiontree', 'gradientboost', 'logregression', 'linear_sgd','xgboost'
,'lightgbm', 'kneighbors']
COMPARE_RESULT_URL = '/media/exp/compare-results.csv'
COMPARE_RESULT_PATH = '{}/media/exp/{}' . format(BASE_DIR, 'compare-results.csv')
|
985,615 | 6e0b0615a940491d8597c3039d4cfb510c4aa416 | from .ecr import ECRImages # noqa: F401 (unused)
from .images_base import Images # noqa: F401 (unused)
from .local import LocalImages # noqa: F401 (unused)
|
985,616 | e8ee7651d8f7cb0fce60b206be693fafd85426f6 | import os
import os.path
import json
import collections
import configparser
import glob
import re
from delphi.dof import DOFFile
from ello.sdk.git import get_sorted_tags
class ProjectMetadata:
def __init__(self, _filename=None):
self._metadata = {}
self._filename = _filename or 'package.json'
self._type = os.path.splitext(self._filename)[1]
self._previous_version = None
self._load_metadata()
def __repr__(self):
return '<{} "{}">'.format(type(self).__name__, self._filename)
def get_current_project_name(self):
dof_files = glob.glob('*.dof')
if dof_files:
return os.path.splitext(dof_files[0])[0]
return ''
def _load_metadata(self):
# If metadata is json but the metadata file doesn't exists, try to open dof metadata
if (self._type == '.json') and (not os.path.isfile(self._filename)):
self._filename = self.get_current_project_name() + '.dof'
self._type = os.path.splitext(self._filename)[1]
# No metadata found, let's get some info from the current folder.
if not os.path.isfile(self._filename):
self._metadata['name'] = os.path.basename(os.getcwd())
self._metadata['project_name'] = self._metadata['name']
return
if self._type == '.json':
self._load_json_metadata()
elif self._type == '.dof':
self._load_dof_metadata()
def _load_json_metadata(self):
with open(self._filename, 'r') as f:
self._metadata = json.load(
f,
object_pairs_hook=collections.OrderedDict
)
def _load_dof_metadata(self):
dof = configparser.ConfigParser()
dof.read(self._filename)
major = dof.get('Version Info', 'MajorVer')
minor = dof.get('Version Info', 'MinorVer')
release = dof.get('Version Info', 'Release')
self._metadata['name'] = os.path.splitext(self._filename)[0]
self._metadata['version'] = '{}.{}.{}'.format(major, minor, release)
def update_version(self, version):
""" Updates package.json version info and Project.dof version info """
self._metadata['version'] = version
if self._type == '.json':
with open(self._filename, 'w') as f:
f.write(json.dumps(self._metadata, indent=2))
dof_filename = os.path.join(self.path, self.name + '.dof')
if os.path.isfile(dof_filename):
dof_file = DOFFile(dof_filename)
dof_file.update_version(version)
@property
def project_name(self):
if self._metadata.has_key("project_name"):
return self._metadata["project_name"]
else:
return self.name + ".dpr"
@property
def resource_file(self):
if self._metadata.has_key("resource_file"):
return self._metadata["resource_file"]
else:
return self.name + ".rc"
@property
def resources(self):
return self._metadata['resources']
@property
def conditionals(self):
return self._metadata['conditionals']
@property
def tag_prefix(self):
return self._metadata.get('tag-prefix', None)
@property
def makefile(self):
return os.path.join(self.path, 'Makefile')
def __getattr__(self, name):
return self._metadata.get(name, None)
def dependencies(self):
for dep in self._metadata['dependencies'].keys():
hash = self._metadata['dependencies'][dep]
repo = self._metadata['repos'].get(dep, '')
yield dep, repo, hash
@property
def dependent_projects(self):
"""Retorna uma lista com os projetos dependentes do projeto atual"""
projects = self._metadata.get('dependent_projects', [])
projects = map(lambda p: os.path.join(self.path, p) if not os.path.isabs(p) else p, projects)
return list(projects)
@property
def path(self):
"""Caminho completo do projeto atual"""
return os.path.dirname(os.path.abspath(self._filename))
@property
def previous_version(self):
""" Obtém a última versão 'taggeada' do projeto.
Leva em consideração se a versão no projeto utiliza
prefixo (ex: nome_projeto/1.2.3.4) ou não (ex: 1.2.3.4).
"""
if self._previous_version:
return self._previous_version
tags = get_sorted_tags()
if self.tag_prefix:
tags = filter(lambda t: t.startswith(self.tag_prefix), tags)
else:
tags = filter(lambda t: '/' not in t, tags)
return list(tags)[-1]
def texto_ultima_revisao(self):
"""Retorna o texto da última modificação do changelog"""
changelog_header = re.compile('\d{2}/\d{2}/\d{4} - ')
lines = []
with open(os.path.join(self.path, 'CHANGELOG.txt')) as changelog:
lines.append(changelog.readline())
for line in changelog:
match = changelog_header.search(line)
if match:
break
lines.append(line)
return ''.join(lines)
if __name__ == "__main__":
metadata = ProjectMetadata()
print(metadata.version)
|
985,617 | b78967f9969c2bf1d7989d2d1c1ad10e597dac28 | import random
import numpy as np
import math as m
import k_helper as kh
x1 = np.array([3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10])
x2 = np.array([5, 5, 6, 4, 5, 6, 7, 5, 6, 7, 2, 2, 3, 4, 2, 3, 4, 2, 3, 4])
data = np.column_stack((x1, x2))
k = 3
def initialize_k_medoids(k, Data):
medoids = np.empty((k, np.size(Data, 1)))
DAta = Data
for n in range(k):
i = random.randrange(np.size(DAta, 1))
for m in range(np.size(Data, 1)):
medoids[n, m] = DAta[i, m]
DAta = np.delete(DAta, i, 0)
return medoids
def calculate_medoids(k, Data, medoids):
cluster_assignments = cluster(k, Data, medoids)
means = np.zeros(shape=(k, Data.shape[1]), dtype=object)
numbers = np.zeros(shape=k)
for i in range(Data.shape[0]):
for j in range(k):
for l in range(Data.shape[1]):
if j == int(cluster_assignments[i]):
means[j, l] = means[j, l] + Data[i, l]
numbers[j] = numbers[j] + 1
for i in range(k):
for j in range(Data.shape[1]):
means[i, j] = (means[i, j] / numbers[i])
return means
def cluster(k, Data, medoids):
cluster_assignments = np.empty(np.size(Data, 0))
for i in range(np.size(Data, 0)):
dist = np.empty(k)
for j in range(k):
x1 = Data[i, :]
x2 = medoids[j]
dist[j] = kh.euclid_distance(x1, x2)
cluster_assignments[i] = dist.argmin()
return cluster_assignments
def find_greatest_dist(cluster_assignments, Data, medoids):
combined_array = np.empty(shape=(Data.shape[0], 2), dtype=object)
greatest_dist = np.zeros(shape=(k, 2), dtype=float)
dist = np.empty(shape=Data.shape[0])
for i in range(len(combined_array)):
combined_array[i] = [cluster_assignments[i], Data[i, :]]
x1 = Data[i, :]
currentMedoid = int(combined_array[i, 0])
x2 = medoids[currentMedoid]
dist[i] = kh.euclid_distance(x1, x2)
if greatest_dist[currentMedoid, 0] <= dist[i]:
greatest_dist[currentMedoid] = [float(dist[i]), int(i)]
# greatest_dist[:, 0] is the distance and greatest_dist[:, 1] is the id where it came from
return greatest_dist
def dist(a, b, ax=1):
return np.linalg.norm(a - b, axis=ax)
def cluster_cost(Data, k, medoids, cluster_assignments):
price = np.zeros(k)
for i in range(0, k):
x = Data[cluster_assignments == i, :]
for j in range(0, np.size(x, 0)):
cost = kh.euclid_distance(x[j, :], medoids[i, :])
price[i] += cost
return price
def improve(k, medoids, Data):
Data = np.array(Data)
cluster_assignments = cluster(k, Data, medoids)
best_price = 10 ** 23
price = cluster_cost(Data, k, medoids, cluster_assignments)
old_assignments = np.zeros(shape=cluster_assignments.shape)
while np.sum(price) < best_price and old_assignments.all() != cluster_assignments.all():
old_assigments = cluster_assignments
print('old: ', old_assigments, 'current: ', cluster_assignments)
print('test if: ', old_assigments.all() != cluster_assignments.all())
# Find the greatest distance to calc new medoids
greatest_dist = find_greatest_dist(cluster_assignments, Data, medoids)
# Create a new data without the furthest distance points
new_data = np.empty(shape=(Data.shape[0] - k, Data.shape[1]))
new_data = np.delete(Data, greatest_dist[:, 1], 0)
for i in range(k):
current_data = np.zeros(shape=new_data.shape)
for j in range(new_data.shape[0]):
if cluster_assignments[j] == i:
current_data[j] = new_data[j]
else:
current_data[j] = 0
current_data = np.array([x for x in current_data if x.all() != 0.])
if current_data.all() != 0:
for l in range(len(current_data)):
if not (current_data[l, :] == medoids[i, :]).all():
temp = medoids[i, :]
medoids[i, :] = current_data[l, :]
current_data[l, :] = temp
cluster_assignments = cluster(k, Data, medoids)
best_price_prev = best_price
best_price = np.sum(price)
price = cluster_cost(Data, k, medoids, cluster_assignments)
if np.sum(price) > best_price:
temp = medoids[i, :]
medoids[i, :] = current_data[l, :]
current_data[l, :] = temp
price = best_price
best_price = best_price_prev
print('sum price: ', np.sum(price), 'best price: ', best_price)
print('assigments =', cluster_assignments)
return cluster_assignments
# medoids = initialize_k_medoids(k, data)
# print('data: ', improve(k, medoids, data))
# print(cluster_assignments)
# print('shapes', data.shape, cluster_assignments.shape)
# print('medoids', medoids)
# print('Data: ', data)
|
985,618 | 0f01bddccdb787ae7a2eb96b40e3fd2609f34593 | #!/usr/bin/python3
def uppercase(str):
for x in str:
flag = 0
if ord(x) >= 97 and ord(x) <= 122:
flag = 32
print('{:c}'.format(ord(x) - flag), end="")
print()
|
985,619 | 390657fdfc870eae8cdb5415369d390103214c51 | import time
from textwrap import dedent
time.sleep(5)
print(
dedent("""{
"text": "Золушка — советский чёрно-белый художественный фильм-сказка, снятый на киностудии Ленфильм режиссёрами Надеждой Кошеверовой и Михаилом Шапиро по сценарию Евгения Шварца. Золушка — советский чёрно-белый художественный фильм-сказка, снятый на киностудии Ленфильм режиссёрами Надеждой Кошеверовой и Михаилом Шапиро по сценарию Евгения Шварца. Золушка — советский чёрно-белый художественный фильм-сказка, снятый на киностудии Ленфильм режиссёрами Надеждой Кошеверовой и Михаилом Шапиро по сценарию Евгения Шварца. Золушка — советский чёрно-белый художественный фильм-сказка, снятый на киностудии Ленфильм режиссёрами Надеждой Кошеверовой и Михаилом Шапиро по сценарию Евгения Шварца. Золушка — советский чёрно-белый художественный фильм-сказка, снятый на киностудии Ленфильм режиссёрами Надеждой Кошеверовой и Михаилом Шапиро по сценарию Евгения Шварца. Золушка — советский чёрно-белый художественный фильм-сказка, снятый на киностудии Ленфильм режиссёрами Надеждой Кошеверовой и Михаилом Шапиро по сценарию Евгения Шварца. Золушка — советский чёрно-белый художественный фильм-сказка, снятый на киностудии Ленфильм режиссёрами Надеждой Кошеверовой и Михаилом Шапиро по сценарию Евгения Шварца. ",
"cards": [
{
"assignment": {
"text": "Создать требования для новой автоматической системы",
"start": 3,
"end": 12
},
"responsible": {
"text": "Круглов Николай Сергеевич",
"start": 14,
"end": 22
},
"date": {
"text": "12 февраля 2021",
"start": 27,
"end": 35
}
},
{
"assignment": {
"text": "Создать условия для интеграции процессов системы",
"start": 3,
"end": 12
},
"responsible": {
"text": "Иванов Василий Сергеевич",
"start": 14,
"end": 22
},
"date": {
"text": "21.03.2021",
"start": 27,
"end": 35
}
},
{
"assignment": {
"text": "Создать требования для новой автоматической системы",
"start": 3,
"end": 12
},
"responsible": {
"text": "Круглов Николай Сергеевич",
"start": 14,
"end": 22
},
"date": {
"text": "12 февраля 2021",
"start": 27,
"end": 35
}
},
{
"assignment": {
"text": "Создать условия для интеграции процессов системы",
"start": 3,
"end": 12
},
"responsible": {
"text": "Иванов Василий Сергеевич",
"start": 14,
"end": 22
},
"date": {
"text": "21.03.2021",
"start": 27,
"end": 35
}
}
]
}""")
)
|
985,620 | 5ed1198a5d600f3c1e41d236287fabf2a1075106 | from bs4 import BeautifulSoup
import requests
import time
import re
from tokens import url
def get_info():
r = requests.get("https://www.warmane.com/information")
data = r.text
soup = BeautifulSoup(data, "lxml")
outland = soup.find_all("div", {"class": "stats"})[1].find_all("div")
players = str(outland[0])[5:-6]
uptime = str(outland[1])[5:-6]
return players, uptime
while True:
players, uptime = get_info()
min = int(re.search("(\d+)", uptime).group(0))
if min < 5:
requests.get(url + "Server reset!\n" + players)
time.sleep(600)
time.sleep(60)
|
985,621 | e640aba17aa44a770c528cda1f7a786644897e72 | from django.db import models
from django.urls import reverse
# Create your models here.
class User(models.Model):
# author = models.ForeignKey('auth.User')
user_id = models.CharField(max_length=50, unique=True)
password = models.CharField(max_length=200)
class asset(models.Model):
user_id = models.ForeignKey(User, on_delete=models.CASCADE, max_length=50)
asset = models.CharField(max_length=50)
code = models.CharField(max_length=50, null=False)
quantity = models.IntegerField()
# values = {'asset' : asset, 'stock_code':stock_code, 'quantity':quantity,
# 'cur_price': cur_price, 'first_price':first_price, 'revenue' : revenue, 'asset_sum':asset_sum} |
985,622 | d94ada86ad7f2e449268f3c60d8c70f52595d07e | # Copyright 2018 Oinam Romesh Meitei. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy,time
from math import exp,pi,pow,sqrt
from molepy.pymints.os_vrr import cVRR
from molepy.ccmints.os_hrr import ccHRR
from molepy.lib.mole_param import hfparam
def twofunc_(A,norma,Aexps,Acoefs,I,J,K,
B,normb,Bexps,Bcoefs,
C,normc,Cexps,Ccoefs,lc,mc,nc,
D,normd,Dexps,Dcoefs,ld,md,nd):
" Part of Horizontal RR "
" obtain (a0|cd) "
tmp_ = {}
for i in range(0,nd+2):
for j in range(lc+ld,lc-1,-1):
for k in range(mc+md,mc-1,-1):
for l in range(nc+nd-i,nc-1,-1):
if i==0:
tmp_[j,k,l,0,0,0] = (
cVRR(A,norma,Aexps,Acoefs,(I,J,K),
B,normb,Bexps,Bcoefs,
C,normc,Cexps,Ccoefs,(j,k,l),
D,normd,Dexps,Dcoefs))
else:
tmp_[j,k,l,0,0,i] = (tmp_[j,k,l+1,0,0,i-1]+\
(C[2]-D[2])*tmp_[j,k,l,0,0,i-1])
for i in range(1,md+1):
for j in range(lc+ld,lc-1,-1):
for k in range(mc+md-i,mc-1,-1):
tmp_[j,k,nc,0,i,nd] = (tmp_[j,k+1,nc,0,i-1,nd]+\
(C[1]-D[1])*tmp_[j,k,nc,0,i-1,nd])
for i in range(1,ld+1):
for j in range(lc+ld-i,lc-1,-1):
tmp_[j,mc,nc,i,md,nd] = (tmp_[j+1,mc,nc,i-1,md,nd]+\
(C[0]-D[0])*tmp_[j,mc,nc,i-1,md,nd])
return tmp_[lc,mc,nc,ld,md,nd]
def cHRR(a,b,c,d):
if hfparam.ccmints:
return ccHRR(a.center[0],a.center[1],a.center[2],a.norms,
a.cartam[0],a.cartam[1],a.cartam[2],a.exps,a.coefs,
b.center[0],b.center[1],b.center[2],b.norms,
b.cartam[0],b.cartam[1],b.cartam[2],b.exps,b.coefs,
c.center[0],c.center[1],c.center[2],c.norms,
c.cartam[0],c.cartam[1],c.cartam[2],c.exps,c.coefs,
d.center[0],d.center[1],d.center[2],d.norms,
d.cartam[0],d.cartam[1],d.cartam[2],d.exps,d.coefs)
else:
return cHRR_py(a,b,c,d)
def cHRR_py(bas1,bas2,bas3,bas4):
" Obara Saika Recurrence "
" comput (ab|cd) through horizontal RR "
" using (a0|c0) from vertical RR "
A = bas1.center
B = bas2.center
C = bas3.center
D = bas4.center
norma = bas1.norms
normb = bas2.norms
normc = bas3.norms
normd = bas4.norms
Aexps = bas1.exps
Bexps = bas2.exps
Cexps = bas3.exps
Dexps = bas4.exps
Acoefs = bas1.coefs
Bcoefs = bas2.coefs
Ccoefs = bas3.coefs
Dcoefs = bas4.coefs
(la,ma,na) = bas1.cartam
(lb,mb,nb) = bas2.cartam
(lc,mc,nc) = bas3.cartam
(ld,md,nd) = bas4.cartam
hrr_ = {}
for i in range(la,la+lb+1):
for j in range(ma,ma+mb+1):
for k in range(na,na+nb+1):
hrr_[i,j,k,0,0,0] = (
twofunc_(A,norma,Aexps,Acoefs,i,j,k,
B,normb,Bexps,Bcoefs,
C,normc,Cexps,Ccoefs,lc,mc,nc,
D,normd,Dexps,Dcoefs,ld,md,nd,))
for i in xrange(1,nb+1):
for j in xrange(la,la+lb+1):
for k in xrange(ma,ma+mb+1):
for l in xrange(na,na+nb+1-i):
hrr_[j,k,l,0,0,i] = (
hrr_[j,k,l+1,0,0,i-1]+(A[2]-B[2])*hrr_[j,k,l ,0,0,i-1])
for i in xrange(1,mb+1):
for j in xrange(la,la+lb+1):
for k in xrange(ma,ma+mb+1-i):
hrr_[j,k,na,0,i,nb] = (
hrr_[j,k+1,na,0,i-1,nb]+(A[1]-B[1])*hrr_[j,k ,na,0,i-1,nb])
for i in xrange(1,lb+1):
for j in xrange(la,la+lb+1-i):
hrr_[j,ma,na,i,mb,nb] = (
hrr_[j+1,ma,na,i-1,mb,nb]+(A[0]-B[0])*hrr_[j ,ma,na,i-1,mb,nb])
return hrr_[la,ma,na,lb,mb,nb]
|
985,623 | d4e5651e0d4911684e06d13bc41b0f039bb1e9d4 | import sys
import time
import datetime
import logging
import cPickle as pickle
import os
import numpy as np
import torch
# import cv2
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from helpers.datagenerator import DataGenerator, FakeDataGenerator
from generator import GeneratorEncDec, GeneratorVan
from discriminator import Discriminator
from helpers.utils import llprint
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
# use_cuda = False
if use_cuda:
gpu = 0
def cov(x):
mean_x = torch.mean(x, 0)
xm = x.sub(mean_x.expand_as(x))
c = xm.mm(xm.t())
c = c / (x.size(1) - 1)
return c, mean_x
def _assert_no_grad(variable):
assert not variable.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these variables as volatile or not requiring gradients"
class batchNLLLoss(nn.Module):
def __init__(self):
super(batchNLLLoss, self).__init__()
def forward(self, synt, target, claim_length=20):
"""
Returns the NLL Loss for predicting target sequence.
Inputs: inp, target
- inp: batch_size x seq_len
- target: batch_size x seq_len
inp should be target with <s> (start letter) prepended
"""
loss_fn = nn.NLLLoss()
loss = 0
synt = synt.permute(1,0,2)
# _, indices = target.max(2)
target = target.permute(1,0)
for i in range(claim_length):
# print synt[i]#.topk(1)[1]
# print target[1]
loss += loss_fn(synt[i], target[i])
# print loss
return loss
class batchNLLLossV2(nn.Module):
def __init__(self):
super(batchNLLLossV2, self).__init__()
def forward(self, synt, target, claim_length=20):
"""
Returns the NLL Loss for predicting target sequence.
Inputs: inp, target
- inp: batch_size x seq_len
- target: batch_size x seq_len
inp should be target with <s> (start letter) prepended
"""
loss_fn = nn.NLLLoss()
loss = 0
for i in range(synt.shape[0]):
for j in range(claim_length):
loss += loss_fn(synt[i][j].unsqueeze(0), target[i][j])
return loss
class JSDLoss(nn.Module):
def __init__(self):
super(JSDLoss,self).__init__()
def forward(self, f_real, f_synt):
assert f_real.size()[1] == f_synt.size()[1]
f_num_features = f_real.size()[1]
batch_size = f_real.size()[0]
identity = autograd.Variable(torch.eye(f_num_features)*0.1)
if use_cuda:
identity = identity.cuda(gpu)
cov_real, mean_real = cov(f_real)
cov_fake, mean_fake = cov(f_synt)
f_real_mean = torch.mean(f_real, 0, keepdim=True)
f_synt_mean = torch.mean(f_synt, 0, keepdim=True)
dev_f_real = f_real - f_real_mean.expand(batch_size,f_num_features) # batch_size x num_feat
dev_f_synt = f_synt - f_synt_mean.expand(batch_size,f_num_features) # batch_size x num_feat
f_real_xx = torch.mm(torch.t(dev_f_real), dev_f_real) # num_feat x batch_size * batch_size x num_feat = num_feat x num_feat
f_synt_xx = torch.mm(torch.t(dev_f_synt), dev_f_synt) # num_feat x batch_size * batch_size x num_feat = num_feat x num_feat
cov_mat_f_real = f_real_xx / (batch_size) - torch.mm(f_real_mean, torch.t(f_real_mean)) + identity # num_feat x num_feat
cov_mat_f_synt = f_synt_xx / (batch_size) - torch.mm(f_synt_mean, torch.t(f_synt_mean)) + identity # num_feat x num_feat
# assert mean_real == f_real_mean.squeeze()
# assert mean_fake == f_synt_mean.squeeze()
assert cov_real == cov_mat_f_real
assert cov_fake == cov_mat_f_synt
cov_mat_f_real_inv = torch.inverse(cov_mat_f_real)
cov_mat_f_synt_inv = torch.inverse(cov_mat_f_synt)
temp1 = torch.trace(torch.add(torch.mm(cov_mat_f_synt_inv, torch.t(cov_mat_f_real)), torch.mm(cov_mat_f_real_inv, torch.t(cov_mat_f_synt))))
temp1 = temp1.view(1,1)
temp2 = torch.mm(torch.mm((f_synt_mean - f_real_mean), (cov_mat_f_synt_inv + cov_mat_f_real_inv)), torch.t(f_synt_mean - f_real_mean))
loss_g = temp1 + temp2
return loss_g
# class JSDLoss(nn.Module):
# def __init__(self):
# super(JSDLoss,self).__init__()
#
# def forward(self, f_real, f_synt):
# f_synt_target = torch.distributions.Normal.log_prob(autograd.Variable(f_synt.data))
# f_real_target = torch.distributions.Normal.log_prob(autograd.Variable(f_real.data))
#
# f_synt = torch.distributions.Normal.log_prob(f_synt)
# f_real = torch.distributions.Normal.log_prob(f_real)
#
# loss_g = (nn.KLDivLoss()(f_synt, f_synt_target) + nn.KLDivLoss()(f_synt, f_real_target) + nn.KLDivLoss()(f_real, f_real_target) + nn.KLDivLoss()(f_real, f_synt_target)) / 2
# # sqrt_loss_g = torch.sqrt(loss_g)
# print(loss_g)
#
# return loss_g
class MMDCovLoss(nn.Module):
def __init__(self):
super(MMDCovLoss,self).__init__()
def forward(self, batch_size, f_real, f_synt):
"""
input: f_real , f_synt
those are the extracted features of real claims and synthetic claims generated by the generator.
size: batch_size x feature_dim
output: loss_g
"""
assert f_real.size()[1] == f_synt.size()[1]
f_num_features = f_real.size()[1]
identity = autograd.Variable(torch.eye(f_num_features)*0.1, requires_grad=False)
if use_cuda:
identity = identity.cuda(gpu)
f_real_mean = torch.mean(f_real, 0, keepdim=True) #1 * num_features
f_synt_mean = torch.mean(f_synt, 0, keepdim=True) #1 * num_features
dev_f_real = f_real - f_real_mean.expand(batch_size,f_num_features) #batch_size * num_features
dev_f_synt = f_synt - f_synt_mean.expand(batch_size,f_num_features) #batch_size * num_features
f_real_xx = torch.mm(torch.t(dev_f_real), dev_f_real) #num_features * num_features
f_synt_xx = torch.mm(torch.t(dev_f_synt), dev_f_synt) #num_features * num_features
cov_mat_f_real = (f_real_xx / batch_size) - torch.mm(f_real_mean, torch.t(f_real_mean)) + identity #num_features * num_features
cov_mat_f_synt = (f_synt_xx / batch_size) - torch.mm(f_synt_mean, torch.t(f_synt_mean)) + identity #num_features * num_features
kxx, kxy, kyy = 0, 0, 0
cov_sum = (cov_mat_f_fake + cov_mat_f_real)/2
cov_sum_inv = torch.inverse(cov_sum)
dividend = 1
dist_x, dist_y = f_synt/dividend, f_real/dividend
cov_inv_mat = cov_sum_inv
x_sq = torch.sum(torch.mm(dist_x, cov_inv_mat) * dist_x, dim=1)
y_sq = torch.sum(torch.mm(dist_y, cov_inv_mat) * dist_y, dim=1)
tempxx = -2*torch.mm(torch.mm(dist_x, cov_inv_mat), torch.t(dist_x)) + x_sq + torch.t(x_sq) # (xi -xj)**2
tempxy = -2*torch.mm(torch.mm(dist_x, cov_inv_mat), torch.t(dist_y)) + x_sq + torch.t(y_sq) # (xi -yj)**2
tempyy = -2*torch.mm(torch.mm(dist_y, cov_inv_mat), torch.t(dist_y)) + y_sq + torch.t(y_sq) # (yi -yj)**2
for sigma in options['sigma_range']:
kxx += torch.mean(torch.exp(-tempxx/2/(sigma**2)))
kxy += torch.mean(torch.exp(-tempxy/2/(sigma**2)))
kyy += torch.mean(torch.exp(-tempyy/2/(sigma**2)))
loss_g = torch.sqrt(kxx + kyy - 2*kxy)
return loss_g
class MMDLDLoss(nn.Module):
def __init__(self):
super(MMDLDLoss,self).__init__()
def forward(self, batch_size, f_real, f_synt):
"""
input: f_real , f_synt
those are the extracted features of real claims and synthetic claims generated by the generator.
size: batch_size x feature_dim
output: loss_g
"""
assert f_real.size()[1] == f_synt.size()[1]
f_num_features = f_real.size()[1]
identity = autograd.Variable(torch.eye(f_num_features)*0.1, requires_grad=False)
if use_cuda:
identity = identity.cuda(gpu)
kxx, kxy, kyy = 0, 0, 0
dividend = 32
dist_x, dist_y = f_synt/dividend, f_real/dividend
x_sq = torch.sum(dist_x**2, dim=1, keepdim=True)
y_sq = torch.sum(dist_y**2, dim=1, keepdim=True)
tempxx = -2*torch.mm(dist_x, torch.t(dist_x)) + x_sq + torch.t(x_sq) # (xi -xj)**2
tempxy = -2*torch.mm(dist_x, torch.t(dist_y)) + x_sq + torch.t(y_sq) # (xi -yj)**2
tempyy = -2*torch.mm(dist_y, torch.t(dist_y)) + y_sq + torch.t(y_sq) # (yi -yj)**2
for sigma in [20]:
kxx += torch.sum(torch.exp(-tempxx/2/(sigma)))
kxy += torch.sum(torch.exp(-tempxy/2/(sigma)))
kyy += torch.sum(torch.exp(-tempyy/2/(sigma)))
loss_g = torch.sqrt(kxx + kyy - 2*kxy)
return loss_g
|
985,624 | bd499996b62d5f9ccc4ab6d0c1b873d776018050 | from django.db import models
# reverse allows us to reference an object bty its URL template name
from django.urls import reverse
class Post(models.Model):
title = models.CharField(max_length=200)
author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
body = models.TextField()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail', args=[str(self.id)])
|
985,625 | 39fb9e9e7ab5f9b4a0ecfc00fb89014b88cbc263 | import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import psycopg2
import json
import pandas as pd
import time
app = dash.Dash(__name__)
#app.css.config.serve_locally=False
#app.css.append_css(
# {'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'})
conn = psycopg2.connect(host='ec2-18-232-24-132.compute-1.amazonaws.com',database='earthquake', user='postgres', password='********')
cur = conn.cursor()
location = pd.read_csv("data_file.csv")
location=location.astype(str)
app.layout = html.Div([
html.Div([
html.Div([
dcc.Graph(id='graph', style={'margin-top': '20'})], className="six columns"),
html.Div([
dcc.Graph(
id='bar-graph'
)
], className='twelve columns'
),
dcc.Interval(
id='interval-component',
interval=5*1000, # in milliseconds
n_intervals=0)
], className="row")
], className="ten columns offset-by-one")
@app.callback(Output('graph', 'figure'), [Input('interval-component', 'n_intervals')])
def update_map(n):
"""
Args n: int
:rtype: dict
"""
try:
latest_reading = "select * from ereadings limit 90;"
df_map = pd.read_sql(latest_reading, conn)
map_data = df_map.merge(location, how='left', left_on=["device_id", "country_code"], right_on=["device_id","country_code"])
clrred = 'rgb(222,0,0)'
clrgrn = 'rgb(0,222,0)'
def SetColor(gal):
if gal >= .17:
return clrred
else:
return clrgrn
layout = {
'autosize': True,
'height': 500,
'font': dict(color="#191A1A"),
'titlefont': dict(color="#191A1A", size='18'),
'margin': {
'l': 35,
'r': 35,
'b': 35,
't': 45
},
'hovermode': "closest",
'plot_bgcolor': '#fffcfc',
'paper_bgcolor': '#fffcfc',
'showlegend': False,
'legend': dict(font=dict(size=10), orientation='h', x=0, y=1),
'name': map_data['country_code'],
'title': 'earthquake activity for the last 3 seconds',
'mapbox': {
'accesstoken':'*********************************',
'center': {
'lon':-98.49,
'lat':18.29
},
'zoom': 5,
'style': "dark"
}
}
return {
"data": [{
"type": "scattermapbox",
"lat": list(location['latitude']),
"lon": list(location['longitude']),
"hoverinfo": "text",
"hovertext": [["sensor_id: {} <br>country_code: {} <br>gal: {} <br>x: {} <br>y: {}".format(i, j, k, l, m)]
for i, j, k, l, m in zip(location['device_id'],location['country_code'].tolist(),map_data['gal'].tolist(),map_data['avg_x'].tolist(), map_data['avg_y'].tolist())],
"mode": "markers",
"marker": {
"size": 10,
"opacity": 1,
"color": list(map(SetColor, map_data['gal']))
}
}],
"layout": layout
}
except Exception as e:
print("Error: Couldn't update map")
print(e)
if __name__ == '__main__':
app.run_server(debug=False)
|
985,626 | a2fe3dafaeddfdda38e2a1deac011b6d0af0db55 | # -*- coding: utf-8 -*-
#
# brunel-delta-nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This version uses NEST's Connect functions.
import nest
import time
from numpy import exp
import pandas as pd
import numpy as np
def sim_brunel_delta(dt=0.1,
simtime=1000.0,
delay=1.5,
g=5.0,
eta=2.0,
epsilon=0.1,
order=2500,
J=0.1,
V_reset=0.0,
input_stop=np.inf,
N_rec=50,
num_threads=1,
print_report=True):
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
startbuild = time.time()
NE = 4*order
NI = 1*order
N_neurons = NE+NI
CE = int(epsilon*NE) # number of excitatory synapses per neuron
CI = int(epsilon*NI) # number of inhibitory synapses per neuron
C_tot = int(CI+CE) # total number of synapses per neuron
# Initialize the parameters of the integrate and fire neuron
tauMem = 20.0
theta = 20.0
J_ex = J
J_in = -g*J_ex
nu_th = theta/(J*CE*tauMem)
nu_ex = eta*nu_th
p_rate = 1000.0*nu_ex*CE
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"local_num_threads": num_threads})
if print_report:
print("Building network")
neuron_params= {"C_m": 1.0,
"tau_m": tauMem,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": V_reset,
"V_m": 0.0,
"V_th": theta}
nest.SetDefaults("iaf_psc_delta", neuron_params)
nodes_ex=nest.Create("iaf_psc_delta",NE)
nodes_in=nest.Create("iaf_psc_delta",NI)
nest.SetDefaults("poisson_generator",{"rate": p_rate, 'stop': input_stop})
noise=nest.Create("poisson_generator")
espikes=nest.Create("spike_detector")
ispikes=nest.Create("spike_detector")
nest.SetStatus(espikes,[{"label": "brunel-py-ex",
"withtime": True,
"withgid": True,
"to_file": False}])
nest.SetStatus(ispikes,[{"label": "brunel-py-in",
"withtime": True,
"withgid": True,
"to_file": False}])
if print_report:
print("Connecting devices")
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
nest.Connect(noise,nodes_ex, syn_spec="excitatory")
nest.Connect(noise,nodes_in, syn_spec="excitatory")
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
if print_report:
print("Connecting network")
if print_report:
print("Excitatory connections")
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex+nodes_in, conn_params_ex, "excitatory")
if print_report:
print("Inhibitory connections")
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, "inhibitory")
endbuild=time.time()
if print_report:
print("Simulating")
nest.Simulate(simtime)
endsimulate= time.time()
if print_report:
events_ex = nest.GetStatus(espikes,"n_events")[0]
rate_ex = events_ex/simtime*1000.0/N_rec
events_in = nest.GetStatus(ispikes,"n_events")[0]
rate_in = events_in/simtime*1000.0/N_rec
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
nest.GetDefaults("inhibitory")["num_connections"]
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
exc_spikes = nest.GetStatus(espikes, 'events')[0]
inh_spikes = nest.GetStatus(ispikes, 'events')[0]
return pd.DataFrame(exc_spikes), pd.DataFrame(inh_spikes)
def build_brunel_delta_plastic(dt=0.1,
delay=1.5,
g=5.0,
eta=2.0,
epsilon=0.1,
order=2500,
J=0.1,
alpha=2.02,
lambd=0.01,
Wmax=3.,
V_reset=0.0,
input_stop=np.inf,
N_rec=50,
num_threads=1,
print_report=True):
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
startbuild = time.time()
NE = 4*order
NI = 1*order
N_neurons = NE+NI
CE = int(epsilon*NE) # number of excitatory synapses per neuron
CI = int(epsilon*NI) # number of inhibitory synapses per neuron
C_tot = int(CI+CE) # total number of synapses per neuron
# Initialize the parameters of the integrate and fire neuron
tauMem = 20.0
theta = 20.0
J_ex = J
J_in = -g*J_ex
nu_th = theta/(J*CE*tauMem)
nu_ex = eta*nu_th
p_rate = 1000.0*nu_ex*CE
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"local_num_threads": num_threads})
if print_report:
print("Building network")
neuron_params= {"C_m": 1.0,
"tau_m": tauMem,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": V_reset,
"V_m": 0.0,
"V_th": theta}
nest.SetDefaults("iaf_psc_delta", neuron_params)
nodes_ex=nest.Create("iaf_psc_delta",NE)
nodes_in=nest.Create("iaf_psc_delta",NI)
nest.SetStatus(nodes_ex+nodes_in, 'V_m', np.random.uniform(low=-20., high=20., size=(NE+NI,)))
nest.SetDefaults("poisson_generator",{"rate": p_rate, 'stop': input_stop})
noise=nest.Create("poisson_generator")
espikes=nest.Create("spike_detector")
ispikes=nest.Create("spike_detector")
nest.SetStatus(espikes,[{"label": "brunel-py-ex",
"withtime": True,
"withgid": True,
"to_file": False}])
nest.SetStatus(ispikes,[{"label": "brunel-py-in",
"withtime": True,
"withgid": True,
"to_file": False}])
if print_report:
print("Connecting devices")
nest.CopyModel("stdp_synapse","excitatory_plastic",{'alpha': alpha, 'lambda': lambd, 'Wmax': Wmax,
"delay":delay})
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
nest.Connect(noise,nodes_ex, syn_spec="excitatory")
nest.Connect(noise,nodes_in, syn_spec="excitatory")
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
if print_report:
print("Connecting network")
if print_report:
print("Excitatory connections")
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex, conn_params_ex, {'model': 'excitatory_plastic',
'weight': {'distribution': 'uniform',
'low': 0.5 * J_ex,
'high': 1.5 * J_ex}})
nest.Connect(nodes_ex, nodes_in, conn_params_ex, {'model': 'excitatory',
'weight': {'distribution': 'uniform',
'low': 0.5 * J_ex,
'high': 1.5 * J_ex}})
if print_report:
print("Inhibitory connections")
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, "inhibitory")
endbuild=time.time()
if print_report:
num_synapses = sum(nest.GetDefaults(syn_model)["num_connections"]
for syn_model in ('excitatory', 'excitatory_plastic',
'inhibitory'))
build_time = endbuild-startbuild
print("Brunel network (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Building time : %.2f s" % build_time)
return espikes, ispikes, nodes_ex, nodes_in
def sim_brunel_delta_plastic(simtime,
ex_spike_det, in_spike_det,
nodes_ex_wgt=None, nodes_in_wgt=None,
print_report=True):
nodes_ex_wgt = list(nodes_ex_wgt) if nodes_ex_wgt is not None else []
nodes_in_wgt = list(nodes_in_wgt) if nodes_in_wgt is not None else []
startsimulate=time.time()
if print_report:
print("Simulating")
nest.Simulate(simtime)
endsimulate= time.time()
if print_report:
sim_time = endsimulate-startsimulate
print("Brunel network simulation (Python)")
print("Simulation time : %.2f s" % sim_time)
exc_spikes = nest.GetStatus(ex_spike_det, 'events')[0]
inh_spikes = nest.GetStatus(in_spike_det, 'events')[0]
w_pl = nest.GetStatus(nest.GetConnections(source=nodes_ex_wgt+nodes_in_wgt, synapse_model='excitatory_plastic'), 'weight')
return pd.DataFrame(exc_spikes), pd.DataFrame(inh_spikes), np.array(w_pl)
|
985,627 | ce4ad01c755094edd053b653fc2e3baf0106cf4d | import numpy as np
import scipy as sc
import os, re
import matplotlib.pyplot as plt
from prettyprint import pp
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score, classification_report
#from sklearn.model_selection import train_test_split
from datetime import datetime as dt
from ipy_table import *
from string import punctuation, digits
data_path = '/Users/alexandre/Projects/TextClassificationBayes/tok_vnexpress/'
stopwords_path = '/Users/alexandre/Projects/TextClassificationBayes/vietnamese-stopwords-dash.txt'
#Classes are the folder names
class_names = os.listdir(data_path)
folders = [data_path + folder + '/' for folder in os.listdir(data_path) if folder != ".DS_Store"]
if '.DS_Store' in class_names: del class_names[0]
#list the files of each class
files = {}
for folder, name in zip(folders, class_names):
files[name] = [folder + f for f in os.listdir(folder)]
train_test_ratio = 0.7
def train_test_split(ratio, classes, files):
"""
this method will split the input list of files to train and test sets.
*Note: currently this method uses the simplest way an array can be split in two parts.
Parameters
----------
ratio: float
ratio of total documents in each class assigned to the training set
classes: list
list of label classes
files: dictionary
a dictionary with list of files for each class
Returns
-------
train_dic: dictionary
a dictionary with lists of documents in the training set for each class
test_dict: dictionary
a dictionary with lists of documents in the testing set for each class
"""
train_dict = {}
test_dict = {}
for cl in classes:
train_cnt = int(ratio * len(files[cl]))
train_dict[cl] = files[cl][:train_cnt]
test_dict[cl] = files[cl][train_cnt:]
return train_dict, test_dict
train_path, test_path = train_test_split(train_test_ratio, class_names, files)
#train_path, test_path, class_train, class_test = train_test_split(files, class_names, test_size=0.3, random_state=42)
stop_word = []
def loadStopWord(path):
from string import punctuation, digits
stop_word = []
try:
f = open(path)
lines = [line.rstrip('\n') for line in open(path)]
lines = [line.replace('_', '') for line in lines]
finally:
f.close()
return lines
stop_word = loadStopWord(stopwords_path)
def cleanupText(path):
"""
this method will read in a text file and try to cleanup its text.
Parameters
----------
path: path to the document file
Returns
-------
text_cleaned: cleaned up raw text in the input file
"""
text_cleaned = ''
try:
f = open(path)
raw = f.read().lower()
text = raw
text_cleaned = text.translate(None, punctuation + digits)
# print "\n Word count before:" + str(len(text_translated.split())) + "\n"
# for stop in stop_word:
# text_translated = text_translated.replace(stop,'')
# print "\n Word count after:" + str(len(text_translated.split())) + "\n"
text_cleaned = ' '.join([word for word in text_cleaned.split(' ') if (word and len(word) > 1)])
finally:
f.close()
return text_cleaned
#print(stop_word)
train_arr = []
test_arr = []
train_lbl = []
test_lbl = []
for cl in class_names:
for path in train_path[cl]:
train_arr.append(cleanupText(path))
train_lbl.append(cl)
for path in test_path[cl]:
test_arr.append(cleanupText(path))
test_lbl.append(cl)
print len(train_arr)
print len(test_arr)
#
vectorizer = CountVectorizer()
vectorizer.fit(train_arr)
train_mat = vectorizer.transform(train_arr)
print train_mat.shape
#print train_mat
test_mat = vectorizer.transform(test_arr)
print test_mat.shape
tfidf = TfidfTransformer()
tfidf.fit(train_mat)
train_tfmat = tfidf.transform(train_mat)
print train_tfmat.shape
#print train_tfmat
test_tfmat = tfidf.transform(test_mat)
print test_tfmat.shape
def testClassifier(x_train, y_train, x_test, y_test, clf):
"""
this method will first train the classifier on the training data
and will then test the trained classifier on test data.
Finally it will report some metrics on the classifier performance.
Parameters
----------
x_train: np.ndarray
train data matrix
y_train: list
train data label
x_test: np.ndarray
test data matrix
y_test: list
test data label
clf: sklearn classifier object implementing fit() and predict() methods
Returns
-------
metrics: list
[training time, testing time, recall and precision for every class, macro-averaged F1 score]
"""
#metrics = []
start = dt.now()
clf.fit(x_train, y_train)
end = dt.now()
print 'training time: ', (end - start)
# add training time to metrics
#metrics.append(end-start)
start = dt.now()
yhat = clf.predict(x_test)
end = dt.now()
print 'testing time: ', (end - start)
# add testing time to metrics
#metrics.append(end-start)
print 'classification report: '
# print classification_report(y_test, yhat)
pp(classification_report(y_test, yhat))
print 'f1 score'
print f1_score(y_test, yhat, average='macro')
print 'accuracy score'
accuracy = accuracy_score(y_test, yhat)
print accuracy
#metrics.append(accuracy)
#precision = precision_score(y_test, yhat, average=None)
#recall = recall_score(y_test, yhat, average=None)
# add precision and recall values to metrics
#for p, r in zip(precision, recall):
# metrics.append(p)
# metrics.append(r)
#add macro-averaged F1 score to metrics
#metrics.append(f1_score(y_test, yhat, average='macro'))
print 'confusion matrix:'
print confusion_matrix(y_test, yhat)
# plot the confusion matrix
plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')
plt.show()
return accuracy
metrics_dict = {}
#'name', 'metrics'
#bnb = BernoulliNB()
#bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, bnb)
#metrics_dict.append({'name':'BernoulliNB', 'metrics':bnb_me})
#
#
#gnb = GaussianNB()
#gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, gnb)
#metrics_dict.append({'name':'GaussianNB', 'metrics':gnb_me})
alpha=[10,5]
for a in alpha:
print "alpha= =" + str(a)
mnb = MultinomialNB(a)
mnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, mnb)
metrics_dict.update({a:mnb_me})
#mnb = MultinomialNB(alpha=0.01)
#mnb_me = testClassifier(train_mat.toarray(), train_lbl, test_mat.toarray(), test_lbl, mnb)
#mnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, mnb)
#metrics_dict.append({'name':'MultinomialNB', 'metrics':mnb_me})
|
985,628 | 780afdffab76d7c1f4d5ab7994c9b754e18281d2 | from datetime import datetime, timedelta
from config import BaseApiConfig
from src.api.pool.services.location_services import LocationFormService
from src.api.pool.services.meeting_services.meeting_time_services.meeting_location_service import MeetingLocationService
from src.message_queue.tasks.email_tasks.base_email_task import BaseEmailTask
from src.message_queue.tasks.email_tasks.email_factory.excel_writer.schedule_writer import ScheduleWriter
class AttachXLSTask(BaseEmailTask):
__xls = []
__location_ids = []
__NOW = datetime.now() + timedelta(hours=BaseApiConfig.TIME_DIFFERENCE)
__location_names = []
def _addition_init(self, *args, **kwargs):
self.__location_ids = kwargs.get('location_ids', [])
self.__from_date = kwargs.get('from_date', self.__NOW + timedelta(days=-7))
self.__to_date = kwargs.get('to_date', self.__NOW + timedelta(days=7))
# celery modify date time to string ---> need to secure data type
if isinstance(self.__from_date, str):
self.__from_date = datetime.strptime(self.__from_date, '%Y-%m-%dT%H:%M:%S')
if isinstance(self.__to_date, str):
self.__to_date = datetime.strptime(self.__to_date, '%Y-%m-%dT%H:%M:%S')
self.__f_date = datetime.strftime(self.__from_date, '%d-%m-%Y')
self.__t_date = datetime.strftime(self.__to_date, '%d-%m-%Y')
def _create_text_body(self) -> str:
txt_body = 'Tổng hợp lịch họp: {f_date} đến {t_date} tại'.format(
f_date=self.__f_date, t_date=self.__t_date
)
index = 1
for location_name in self.__location_names:
txt_body += ' {}'.format(location_name)
if self.__location_names[0]:
txt_body += ','
else:
txt_body += '.'
index += 1
return txt_body
def _create_email_subject(self) -> str:
return 'vMeeting: {f_date} đến {t_date}'.format(
f_date=self.__f_date, t_date=self.__t_date
)
def _create_attachment_paths(self) -> list:
self.__create_xsl()
return [
*self.__xls
]
def __create_xsl(self):
meeting_service = self._prepared_service(base_service=MeetingLocationService())
location_service = self._prepared_service(base_service=LocationFormService())
if isinstance(meeting_service, MeetingLocationService):
writer = ScheduleWriter()
for location_id in self.__location_ids:
location = location_service.get_model(_id=location_id)
self.__location_names.append(location.name)
meetings = meeting_service.get_location_meetings(location_id=location_id,
from_date=self.__from_date,
to_date=self.__to_date)
writer.init_location(**{
'location_name': location.name,
'from': self.__from_date,
'to': self.__to_date,
'meetings': meetings
})
self.__xls.append({
'path': writer.export(export_name='vMeeting_' + self.__f_date + ':' + self.__t_date)})
|
985,629 | c9f785d02481dfdec6b7f62d5bde3d2b986fd811 | # imports
print("""
|******************|
| Desafio085 |
|******************|
""")
print("7 Valores Numéricos")
print()
# Variáveis
nums = [[], []]
for c in range(0, 7):
num = int(input(f"Digite o {c+1}º número inteiro: "))
if num % 2 == 0:
# Par
nums[0].append(num)
else:
# Impar
nums[1].append(num)
# Ordena
nums[0].sort()
nums[1].sort()
print('-='*20)
print(f'Os valores pares digitados foram: {nums[0]}')
print(f'Os valores ímpares digitados foram: {nums[1]}')
|
985,630 | 3daa44b373b61c996966af491da768641f90fb3e | def nth_fibonacci(n):
arr = [1,1]
for i in range(2,n+1):
newfib = arr[i-1] + arr[i-2]
arr.append(newfib)
return arr[n-1]
|
985,631 | 53129993d24f8bd994955f924aa9ec0e4c9656cf | # bz2_file_write.py
import bz2
import io
import os
data = 'Il contenuto del file esempio va qui.\n'
with bz2.BZ2File('esempio.bz2', 'wb') as output:
with io.TextIOWrapper(output, encoding='utf-8') as enc:
enc.write(data)
os.system('file esempio.bz2')
|
985,632 | ef98786c7a10a9a405df253fd300e9f73a41d64c | from django.views.generic.base import View
from product_management.forms import CategoryForm, ProductForm
from product_management.models import Category, Product
from django.shortcuts import render, redirect, get_object_or_404
class HomeView(View):
def get(self,request):
return render(request,"index.html")
class CategoryView(View):
def get(self,request):
categoryForm = CategoryForm()
return render(request, 'category.html', {'categoryForm': categoryForm})
def post(self,request):
categoryForm = CategoryForm(request.POST)
if categoryForm.is_valid():
categoryForm.save()
return redirect('category_list')
return render(request, 'category.html', {'categoryForm': categoryForm})
class DisplayCategoryView(View):
def get(self,request):
if request.GET.get('visible'):
visible_filter = request.GET.get('visible')
if 'False' == visible_filter:
categorys = Category.objects.filter(Visible=False)
else:
categorys = Category.objects.filter(Visible=True)
else:
categorys = Category.objects.all()
# categorys = Category.objects.all()
return render(request, 'list_category.html', {'categorys': categorys})
class UpdateCategoryView(View):
def get(self,request,id_category):
category = get_object_or_404(Category, id=id_category)
categoryForm = CategoryForm(request.POST or None, instance=category)
return render(request, "update_category.html", {'categoryForm': categoryForm})
def post(self,request,id_category):
category = get_object_or_404(Category, id=id_category)
categoryForm = CategoryForm(request.POST or None, instance=category)
if categoryForm.is_valid():
categoryForm.save()
return redirect('category_list')
return render(request, "update_category.html", {'categoryForm': categoryForm})
class DeleteCategoryView(View):
def get(self,request,id_category):
category = Category.objects.get(id=id_category)
category.delete()
return redirect('category_list')
class CreateProductView(View):
def get(self,request):
productForm = ProductForm()
return render(request, 'create_product.html', {'productForm': productForm})
def post(self,request):
productForm = ProductForm(request.POST)
if productForm.is_valid():
productForm.save()
return redirect('product_list')
return render(request, 'create_product.html', {'productForm': productForm})
class DisplayProductView(View):
def get(self,request):
categorys = Category.objects.all()
if request.GET.get('submit'):
title_filter = request.GET.get('title')
created_filter = request.GET.get('created')
category_filter = request.GET.get('categorys')
# if title_filter and created_filter and category_filter:
# products = Product.objects.filter(title=title_filter,Created=created_filter,category=category_filter)
# elif title_filter or created_filter and category_filter:
# products = Product.objects.filter(Created=created_filter,category=category_filter)
# elif title_filter or created_filter and category_filter:
# products = Product.objects.filter(category=category_filter,Created=created_filter)
if created_filter and title_filter:
products = Product.objects.filter(title=title_filter,Created=created_filter,category__Name=category_filter)
elif title_filter:
products = Product.objects.filter(title=title_filter,category__Name=category_filter)
elif created_filter:
products = Product.objects.filter(Created=created_filter,category__Name=category_filter)
elif category_filter:
products = Product.objects.filter(category__Name=category_filter)
else:
products = Product.objects.all()
elif request.GET.get('ascending'):
products = Product.objects.all().order_by('title')
elif request.GET.get('descending'):
products = Product.objects.all().order_by('-title')
else:
products = Product.objects.all()
return render(request,"list_product.html",{'products':products,'categorys':categorys})
class UpdateProductView(View):
def get(self,request,id_product):
product = get_object_or_404(Product, id=id_product)
productForm = ProductForm(request.POST or None, instance=product)
return render(request, "update_product.html", {'productForm': productForm})
def post(self,request,id_product):
product = get_object_or_404(Product, id=id_product)
productForm = ProductForm(request.POST or None, instance=product)
if productForm.is_valid():
productForm.save()
return redirect('product_list')
return render(request, "update_product.html", {'categoryForm': productForm})
class DeleteProductView(View):
def get(self,request,id_product):
product = Product.objects.get(id=id_product)
product.delete()
return redirect('product_list')
|
985,633 | f549171088f0e49afecd075a20c328834a71fc0b | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-04 04:52
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('support', '0014_auto_20170803_1831'),
]
operations = [
migrations.RenameField(
model_name='subscription',
old_name='date_subscription_begin',
new_name='date_begin',
),
]
|
985,634 | a3614bb6ef91b00c89aff65d5906caf3241a3c06 | from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
@app.get("/user/{user_id}")
async def userDataFetch(user_id):
return {"user": user_id, "msg": "FETCHED SUCCESS FULLY", "code": 200}
@app.get("/admin/{user_id}")
async def AdminDataFetch(user_id):
return {"admin": user_id, "msg": "FETCHED SUCCESS FULLY", "code": 200}
@app.post("/admin/{user_id}")
async def AdminDataFetch(user_id):
return {"admin": user_id, "msg": "Posted SUCCESS FULLY", "code": 200}
@app.post("/admin/{user_id}/posts")
async def AdminDataPost(user_id: str):
return {"admin": user_id, "msg": "POSTED SUCCESS FULLY", "code": 200}
@app.get("/admin/{user_id}/posts")
async def AdminDataPost(user_id: str):
return {"admin": user_id, "msg": "GET SUCCESS FULLY", "code": 200}
class UserInputModel(BaseModel):
name: str
description: str
price: float
tax: float
@app.post("/user/{user_id}")
async def userDataPost(user_id, inputParam: UserInputModel):
print(inputParam)
return {"user": user_id, "msg": "FETCHED SUCCESS FULLY", "code": 200, "data": inputParam} |
985,635 | d67da19b48bc81dfa931c06a620a1e0e997c4aa2 | # 8.2从同一个目录导入蓝图对象
from info import db
from . import api
from flask import session, render_template, current_app, jsonify, request, g
from info.models import User, News, Category
from info.utls.response_code import RET
from info.utls.commons import login_required
# 8.3使用蓝图对象,项目首页展示
@api.route('/')
@login_required
def index():
"""首页用户信息的展示
1.尝试从redis中获取用户id
2.如果user_id存在,查询mysql,把用户信息传给模板
"""
# 因为user的用户信息已经存储在g对象中,想要拿到usr信息,
# 所以在把g.user赋值给user
user = g.user
# user_id = session.get('user_id')
# # 如果数据查询失败,返回None,避免date没有被定义
# date= None # 如果数据查询失败,返回None,避免date没有被定义
# user = None
#
#
#
# if user_id:
# try:
# # 通过id查用户信息
# user = User.query.get('user_id')
# except Exception as e:
# current_app.logger.error(e)
# return jsonify(error=RET.DBERR,errmsg='查询数据失败')
# 17.新闻点击排行,默认按照新闻的点击次数倒叙排序
try:
news_list =News.query.filter().order_by(News.clicks.desc()).limit(6)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg='查询数据异常')
# 判断查询结果是否存在
if not news_list:
return jsonify(errno= RET.NODATA,errmsg='无新闻数据')
# 定义容器列表用来存储遍历后的数据
news_dict_list = []
# 遍历查询结果,把查询到的对象转成可读字符串
for news in news_list:
news_dict_list.append(news.to_dict())
# 返回数据
# 首页显示分类信息,查询数据使用try,然后返回查询一次数据,
# 然后一定要判段查询结果,数据不存在,返回错误信息
try:
categories= Category.query,all()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBEER,errmsg='查询分类异常')
# 判断查询结果
if not categories:
return jsonify(errno= RET.NODATA,errmsg='无分类数据')
# 定义容器存储查询结果
category_list = []
# 遍历查询结果
for category in categories:
category_list.append(category.to_dict())
# data作为一个字典传参,冒号左边类似三元式
data={
#设置用户信息的键 user的方法:是拿到user所有的键值对
'user_Info':user.to_dict()if user else None,
'news_dict_list':news_dict_list,
'category_list':category_list
}
# 返回渲染数据,将template改为模板格式
return render_template('news/index.html',data= data)
# 项目 logo图标展示
@api.route('/favicon.ico')
def favicon():
# 使用current_app调用发送静态文件的方法,把项目logo文件发给浏览器
return current_app.send_static_file('news/favicon.ico')
# 首页新闻列表,这部分为局部刷新,所以通过ajax局部请求,所以定义相应的接口
@api.route('/news_list')
def get_news_list():
"""
首页新闻列表数据展示
1.获取参数,cid新闻分类id、page总页数、per_page每页的条目数
2.检查参数,把参数转成int类型
3、判断分类id 是否大于1
4.根据新闻分类id查询数据库,按照新闻的发布时间排序,对排序结果进行分页
5.获取分页后,的新闻数据:总页数,当前页数,数据
6.返回结果
重要提示:新闻列表的展示,可能会有参数传入,也可能没有,所以需要有默认值,而且请求方式
是get,
:return:
"""
# 获取参数,请求方式是get,使用查询字符串获取参数,参数可有可无,s
# 所以1、1、10是默认数据,不传参数默认就是1、1、10
cid = request.args.get('cid','1')
page = request.args.get('page','1')
per_page = request.args.get('per_page','10')
# 检查参数,需要不参数转成int,转变数据类型,也要使用try
try:
cid,page,per_page = int(cid),int(page),int(per_page)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.PARAMERR,errmsg='参数格式错误')
# 定义过滤条件
filters = []
# 判断分类id是否是最新,就是是否大于1
if cid > 1:
filters.append(News.category_id == cid)
# =根据分类查询数据库
try:
#对过滤查询执行排序,堆排序结果进行分页
# *filters的作用就是传参,如果cid大于1,里面添加的就是具体参数
# 如果不大于1就是空值,就是显示最新的,所有的新闻列表按时间排序
# paginate 是返回分页后的一个对象,而不是具体数据
paginate = News.query.filter(*filters).order_by(News.create_time.desc()).paginate(page,per_page,False)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg= '查询新闻列表数据异常')
# 获取分页后的新闻数据,通过items、pages‘page,
news_list = paginate.items # 获取新闻数据,依旧是个对象
total_page = paginate.pages # 获取总页数
current_page = paginate.page #@ 获取当前页数
# 定义列表 ,用来存储 遍历news_list对象后的数据
news_dict_list = []
# 遍历分页后的新闻数据
# 怎样确定是否要遍历对象,看最后拿到的是否是具体的数据,而不是一个包含多条数据的对象
# 首先news_list 是一个包含多条新闻的对象,所以遍历它,
# 拿到每一条新闻的对象,然后通过to_dict拿到每一条的具体数据,
# 添加给news_dict_list 的空列表,
# 然后news_dict_list就是一个包含多个新闻对象的具体数据的列表
# 然后把多条数据 放到一个字典中返回
for news in news_list:
news_dict_list.append(news.to_dict())
# 定义data 字典
data= {
'news_dict_list':news_dict_list,
'totalpage': total_page,
'current_page':current_page
}
# 返回结果,把data返回给前端
return jsonify(errno= RET.OK,errmsg= 'ok',data=data)
# 新闻详情页面展示
@api.route('/<int:news_id>')
@login_required
def get_news_detail(news_id):
"""
新闻详情页面
1.尝试获取用户信息
2.根据news_Id查询mysql
3.判断查询数据结果
4.进入某一个新闻详情,需要修改该新闻的点击次数clicks + 1
5.构造字典直接,返回结果
:return:
"""
# 尝试获取用户信息,可能有,可能是空
user = g.user
# 根据news_id查询数据库
try:
# news = News.query.filter().first()
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg= '数据库查询异常')
# 判断结果
if not news:
return jsonify(errno=RET.NODATA,errmsg='该条新闻不存在')
# 让新闻 的点击次数加1
news.clicks += 1
# 提交数据
try:
db.session.add(news)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg= '保存数据失败')
# 构造字典
data={
'user': user.to_dict() if user else None,
'news_detail': news.to_dict()
}
# 返回数据到模板
return render_template('news/index.html',data=data)
# 实现取消和收藏
@api.route('/news_collect',methods=['POST'])
@ login_required
def news_collect():
"""
# 收藏和取消收藏
1.因为收藏必须是在用户登录的情况下,所以先尝试获取用户的信息
2.如果我未登录,直接返回
3.获取参数,news_id 和 action
4.检查参数
5.把news_id 转成int ,因为接受的json是字符串,所以要转成int,方便查询
6.判断action参数必须为collect和 cancel_collect
收藏和取消
7根据news_id 查询收藏列表中是否存在该新闻,存在返回
8.判断查询结果
9.判断用户选择的是收藏还是取消
10.返回结果
"""
# 尝试获取用户信息
user = g.user
# 判断用户是否存
if not user:
return jsonify(errno=RET.SESSIONERR,errmsg='用户未登录')
# 获取参数
news_id = request.json.get('news_id')
action = request.json.get('action')
# 检查完整性
if not all([news_id,action]):
return jsonify(errno=RET.PARAMERR,errmsg='参数不完整')
# 检查news_id格式 int
try:
news_id = int(news_id)
except Exception as e:
current_app.logger.erron(e)
return jsonify(errno=RET.PARAMERR,errmsg='参数格式错误')
# 检查action 是否是collect 或者 cancel_collect
if action not in ['collect','cancel_collect']
return jsonify(errno=RET.PARAMERR,errmsg='参数错误')
# 查询数据库根据news_id
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DATAERR,errmsg='查询数据库异常')
# 判断结果是否存在
if not news:
return jsonify(errno=RET.NODATA,errmsg='该新闻不存在')
# 判断用户选择的是收藏还是取消收藏
if action == 'collect':
# 判断用户之前未收藏过该新闻,则添加
if news not in user.collection_news:
user.collection_news.append(news)
else:
return user.collection_news.remove(news)
# 提交数据到数据库中
try:
db.session.add(user)
db.session.commit() # commit会提交所有对user数据的操作
except Exception as e :
current_app.logger.error(e)
# 提交数据不成功,记得要回滚到原来的数据
db.session.rollback()
return jsonify(errno=RET.DATAERR,errmsg='保存数据异常')
return jsonify(errno=RET.OK,errmsg='收藏成功') |
985,636 | 9ac910bf73575e1e4a8d51c263c7947c71be4b2e | from datetime import date, datetime, timedelta
from coinapi_service import coin_api_get_exchange_rates
import json
from os import path
'''
date1 = date.today() # date du jour
#date2 = date1 + timedelta(10) # additionner / soustraire des jours
date3 = date(2021, 1, 30)
diff = date1-date3
# print(diff.days)
date3_str = date3.strftime("%d/%m/%Y") # Y (Years) m (months) d (days)
date4 = datetime.strptime("2021-02-01", "%Y-%m-%d").date()
date4 += timedelta(1)
print(date4)
'''
date_end = date.today()
date_end_str = date_end.strftime("%Y-%m-%d")
date_start = date_end - timedelta(10)
date_start_str = date_start.strftime("%Y-%m-%d")
assets = "BTC/EUR"
data_filename = assets.replace("/", "_") + ".json"
def load_json_data_from_file(filename):
f = open(filename, "r")
json_data = f.read()
f.close()
return json_data
def save_json_data_to_file(filename, json_data):
f = open(filename, "w")
f.write(json_data)
f.close()
def get_json_rates(rates_data):
rates_json = []
for r in rates_data:
rates_json.append({"date": r["time_period_start"][:10], "value": r["rate_close"] })
return json.dumps(rates_json)
# date_start / date_end : date objects
# max_days : int
# start : 1/1/2021
# End : 27/5/2021
# -> [[1/1/2021, 10/04/2021], [11/04/2021, 27/5/2021]]
def get_dates_intervals(date_start, date_end, max_days):
diff = date_end-date_start
diff_days = diff.days
dates_intervals = []
interval_begin_date = date_start
while diff_days > 0:
nb_days_to_add = max_days-1
if diff_days < max_days-1:
nb_days_to_add = diff_days
interval_end_date = interval_begin_date + timedelta(nb_days_to_add)
dates_intervals.append([interval_begin_date, interval_end_date])
diff_days -= nb_days_to_add+1
interval_begin_date = interval_end_date + timedelta(1)
return dates_intervals
# config : date debut / date fin / assets
# 1 - lire fichier json (si il existe)
# date_debut / date_fin
# - fichier existe
# - saved_data_date_start / saved_data_date_end
# - load_json_data_from_file(...) -> json
rates = []
if path.exists(data_filename):
# le fichier json existe
json_rates = load_json_data_from_file(data_filename)
rates = json.loads(json_rates)
if len(rates) > 0:
saved_data_date_start_str = rates[0]["date"]
saved_data_date_end_str = rates[-1]["date"]
print("saved_data_date_start_str", saved_data_date_start_str)
print("saved_data_date_end_str", saved_data_date_end_str)
# Fichier json existe -> [ date_start/saved_data_date_start ] ... [date_end/saved_data_date_end]
# 3 - faire les appels à l'api (avant / après) <--- passer la limite des 100 jours
# 4 - Consolider les données (mettre à jour le fichier json)
else:
# Fichier json n'existe pas ou pas de data -> [date_start / date_end]
# 3 - faire les appels à l'api (avant / après) <--- passer la limite des 100 jours
# 4 - Consolider les données (mettre à jour le fichier json)
pass
print(get_dates_intervals(date(2021, 1, 1), date(2021, 5, 27), 100))
rates = coin_api_get_exchange_rates(assets, "2021-01-01", "2021-04-11")
print()
'''print("Date start", date_start_str)
print("Date end", date_end_str)
rates = coin_api_get_exchange_rates(assets, date_start_str, date_end_str)
if rates:
json = get_json_rates(rates)
save_json_data_to_file(data_filename, json)
print(json)
print(assets + ", nombre de cours:", len(rates))
for r in rates:
print(r["time_period_start"][:10], ":", r["rate_close"])
''' |
985,637 | 2268ccd366003d64a1af5d62e19344aea3a05b34 | from django.conf.urls import url
from .views import *
urlpatterns = [
url(r'^customers/$', customer_list),
url(r'^customers/', customer_detail),
url(r'^customers/age/', customer_list_age),
] |
985,638 | 0f6afec1a29283dfe311b8ce55c6dbd6d80be438 | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import pika
import sys
import config
CHUNK_SIZE=6
class MqMessage():
def __init__(self):
user = config.rabbitmq['user']
passwd = config.rabbitmq['passwd']
host = config.rabbitmq['host']
cred = pika.PlainCredentials(user, passwd)
params = pika.ConnectionParameters(host, credentials=cred)
self.connection = pika.BlockingConnection(params)
self.channel = self.connection.channel()
def getChannel(self):
return self.channel
def close(self):
self.connection.close()
def getConnection(self):
return self.connection
def send(self, msg, exchange=None, queueName=None):
if exchange is None:
exchange = config.rabbitmq['exchange_name']
if queueName is None:
queueName = config.rabbitmq['queue_name_callback']
print "exchange: {}".format(exchange)
print "queueName: {}".format(queueName)
print "message: {}".format(msg)
try:
self.channel.basic_publish(
exchange=exchange,
routing_key=queueName,
properties=pika.BasicProperties(
content_type='text/plain',
content_encoding='UTF-8'
),
body=msg)
except pika.exceptions.ConnectionClosed():
print("msg send failed")
except:
print("Unexpected error:", sys.exc_info()[0])
raise
def getQueue(self):
return self.queue
def setConsume(self, queueName=None):
def callback(ch, method, properties, body):
# body print to log
logBody = (body[:75] + '..') if len(body) > 75 else body
print(" [x] [{prop}]\tReceived: {body}".format(prop=properties,
body=logBody))
# chunk message to small pieces if big enough
import render
msgBody = render.Body(body, CHUNK_SIZE)
msgBody.genPmidList()
# big message
if msgBody.getPmidSize() >= CHUNK_SIZE:
for chunkMsg in msgBody.chunkPmids():
self.queue.put(chunkMsg)
# small message
else:
self.queue.put(body)
print "Queue Size: {}".format(self.queue.qsize())
if queueName == None:
queueName = config.rabbitmq['queue_name']
self.channel.basic_consume(
callback,
queue=queueName,
no_ack=True)
def start_consuming(self, queue=None):
if queue is None:
print "---mq.queue set as new queue---"
import multiprocessing
self.queue = multiprocessing.Queue()
else:
print "---mq.queue set as waiter.queue---"
self.queue = queue
self.channel.start_consuming()
def main():
exchange = config.rabbitmq['exchange_name']
queueName = config.rabbitmq['queue_name']
mqm = MqMessage()
jsonString = "[1,2,3,4,5,6,7,8,9,0]"
mqm.send(jsonString, exchange, queueName)
if __name__ == "__main__":
main()
|
985,639 | c38d57ad646cfb08b2c44adb22bc97e05a2f4636 | import os
import csv
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
def main():
"""Init tables and seed books.csv"""
db.execute("DROP TABLE IF EXISTS books_tmp;")
db.execute("DROP TABLE IF EXISTS books;")
db.execute("DROP TABLE IF EXISTS authors;")
db.execute("DROP TABLE IF EXISTS users;")
db.execute("DROP TABLE IF EXISTS reviews;")
db.execute("CREATE EXTENSION IF NOT EXISTS pgcrypto;")
db.execute("""
CREATE TEMP TABLE books_tmp (
id serial PRIMARY KEY,
isbn text NOT NULL UNIQUE CHECK (char_length(isbn) = 10),
title text NOT NULL,
author text NOT NULL,
year_pub int NOT NULL
);""")
db.execute("""
CREATE TABLE authors (
id serial PRIMARY KEY,
name text NOT NULL
);
""")
db.execute("""
CREATE TABLE books (
id serial PRIMARY KEY,
isbn text NOT NULL UNIQUE CHECK (char_length(isbn) = 10),
title text NOT NULL,
author_id int REFERENCES authors(id),
year_pub int NOT NULL
);
""")
db.execute("""
CREATE TABLE users (
id uuid NOT NULL DEFAULT gen_random_uuid() PRIMARY KEY,
email text NOT NULL UNIQUE,
pwhash text NOT NULL
);
""")
db.execute("""
CREATE TABLE reviews (
id serial PRIMARY KEY,
user_id uuid REFERENCES users(id),
book_id int REFERENCES books(id),
rating int NOT NULL CHECK (rating >= 1 AND rating <= 5),
review text NOT NULL
);
""")
users = [
{ "email": "thib@example.com", "password": "password123" },
{ "email": "pom@example.com", "password": "meuhmeuh" }
]
for user in users:
db.execute("""
INSERT INTO users (email, pwhash)
VALUES (:email, crypt(:password, gen_salt('bf', 8)));
""", { "email": user.get("email"), "password": user.get("password") })
with open("books.csv", "r") as f:
reader = csv.reader(f)
# skip header
next(reader)
for isbn, title, author, year in reader:
db.execute("""
INSERT INTO books_tmp (isbn, title, author, year_pub)
VALUES (:isbn, :title, :author, :year_pub);
""",
{"isbn": isbn, "title": title, "author": author, "year_pub": year})
db.execute("INSERT INTO authors (name) SELECT DISTINCT author FROM books_tmp;")
db.execute("""
INSERT INTO books (isbn, title, author_id, year_pub)
SELECT b.isbn, b.title, a.id author_id, b.year_pub
FROM books_tmp b JOIN authors a ON b.author = a.name
ORDER BY b.id;
""")
db.commit()
if __name__ == "__main__":
main()
|
985,640 | 6b91fd5752813205a9527ad8230b797ca2168328 | from odoo import api, fields, models, tools, _
from datetime import datetime, timedelta
from odoo.exceptions import UserError
import pytz
from odoo import SUPERUSER_ID
class PosMakePayment(models.TransientModel):
_inherit = 'pos.make.payment'
@api.multi
def type_cash(self):
cash = self.session_id.config_id.journal_ids.filtered(lambda journal: journal.type == 'cash')
if cash:
self.journal_id = cash[0]
if not cash:
raise UserError('Please define Cash journal')
return {
"type": "ir.actions.do_nothing",
}
@api.multi
def type_card(self):
card = self.session_id.config_id.journal_ids.filtered(
lambda journal: journal.type == 'bank' and not journal.is_pay_later)
if card:
self.journal_id = card[0]
if not card:
raise UserError('Please define Card journal')
return {
"type": "ir.actions.do_nothing",
}
|
985,641 | 6c2fb4d570326b7b7b2f75ef508d48096db4c9f6 | def delete(x,y,board):
"""
Function used to delete nums from board and replace them with 0
"""
board[x][y] = 0
def turn(board):
temp = [
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0]
]
for i in range(len(board)):
for j in range(len(board)):
temp[i][j] = board[j][i]
return temp
def plot(x,y,board, val):
if(board[x][y] == 0):
board[x][y] = val |
985,642 | 9c638051584415bd480bda0d388ba13e2a775d01 | import numpy as np
from knodle.evaluation.majority import majority_sklearn_report
def test_majority_vote_no_match():
z = np.zeros((2, 4))
t = np.zeros((4, 2))
z[0, 0] = 1
z[1, 1] = 1
t[0, 0] = 1
t[1, 0] = 1
t[2, 0] = 1
t[3, 0] = 1
y = np.array([1, 1])
report = majority_sklearn_report(z, t, y)
assert report["accuracy"] == 0
def test_majority_vote_correctness():
z = np.zeros((2, 4))
t = np.zeros((4, 2))
z[0, 0] = 1
z[1, 1] = 1
t[0, 0] = 1
t[1, 1] = 1
t[2, 0] = 1
t[3, 0] = 1
y = np.array([1, 1])
report = majority_sklearn_report(z, t, y)
assert report["accuracy"] == 0.5
y = np.array([0, 1])
report = majority_sklearn_report(z, t, y)
assert report["accuracy"] == 1
def test_majority_vote_random_guess():
z = np.zeros((4, 4))
t = np.zeros((4, 2))
z[0, 0] = 1
z[1, 1] = 1
z[2, 0] = 1
t[0, 0] = 1
t[1, 1] = 1
t[2, 0] = 1
t[3, 0] = 1
# no info about sample 4, thus random value is chosen
# y_majority = np.array([0, 1, 0, random])
y = np.array([0, 1, 1, 1])
report = majority_sklearn_report(z, t, y)
assert 0.5 <= report["accuracy"] <= 0.75
|
985,643 | 2d33e2d0673aec1c6f385371e5e57779831a3603 | import numpy as np
from skimage.measure import profile_line
from skimage.measure.profile import _line_profile_coordinates as lpcoords
from skimage import io
from skimage import draw
import trace as tr
from matplotlib import pyplot as plt, cm
im = io.imread('zebrafish-spinal-cord.png')
img = np.dstack([np.zeros_like(im), im, np.zeros_like(im)]).astype(float) / im.max()
top_mode, top_width = tr.estimate_mode_width(im[0])
bottom_mode, bottom_width = tr.estimate_mode_width(im[-1])
overlay = np.ones_like(img)
rr, cc = draw.line(0, top_mode, im.shape[0] - 1, bottom_mode)
rect = lpcoords((0, top_mode), (im.shape[1] - 1, bottom_mode),
(top_width + bottom_width) / 2)
rcorners = np.rint(rect[0][[0, 0, -1, -1], [0, -1, -1, 0]]).astype(int)
ccorners = np.rint(rect[1][[0, 0, -1, -1], [0, -1, -1, 0]]).astype(int)
rrect, crect = draw.polygon(rcorners, ccorners, overlay.shape)
overlay[rrect, crect] = 0.5
overlay[rr, cc] = 0.0
fig, axes = plt.subplots(1, 3, figsize=(8, 3))
axes[0].imshow(img, interpolation='nearest')
axes[0].set_xticks([])
axes[0].set_yticks([])
axes[0].set_title('original image')
axes[1].imshow(1 - overlay, interpolation='nearest', cmap=cm.gray)
prof = profile_line(im, (0, top_mode), (im.shape[1] - 1, bottom_mode),
linewidth=(top_width + bottom_width) / 2, mode='reflect')
axes[1].set_xticks([])
axes[1].set_yticks([])
axes[1].set_title('measured overlay')
axes[2].plot(prof, lw=2, c='k')
axes[2].set_ylabel('mean intensity')
axes[2].set_xlabel('distance in pixels')
axes[2].set_ylim(0, 2000)
axes[2].set_xlim(0, 1100)
axes[2].set_yticks(np.arange(200, 2000, 400))
axes[2].set_xticks(np.arange(200, 1100, 400))
axes[2].set_title('intensity profile')
plt.tight_layout()
plt.savefig('fig2.png', bbox_inches='tight', dpi=600)
|
985,644 | 4a5fb33aca30d67ac5a9ec01f3318035bb5ca5ea | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
import math
from models_binary import OPS, constrain
PRIMITIVES_TRIPLE = ['0_plus_multiply', '0_plus_max', '0_plus_min', '0_plus_concat',
'0_multiply_plus', '0_multiply_max', '0_multiply_min', '0_multiply_concat',
'0_max_plus', '0_max_multiply', '0_max_min', '0_max_concat',
'0_min_plus', '0_min_multiply', '0_min_max', '0_min_concat',
'1_plus_multiply', '1_plus_max', '1_plus_min', '1_plus_concat',
'1_multiply_plus', '1_multiply_max', '1_multiply_min', '1_multiply_concat',
'1_max_plus', '1_max_multiply', '1_max_min', '1_max_concat',
'1_min_plus', '1_min_multiply', '1_min_max', '1_min_concat',
'2_plus_multiply', '2_plus_max', '2_plus_min', '2_plus_concat',
'2_multiply_plus', '2_multiply_max', '2_multiply_min', '2_multiply_concat',
'2_max_plus', '2_max_multiply', '2_max_min', '2_max_concat',
'2_min_plus', '2_min_multiply', '2_min_max', '2_min_concat',
'plus_plus', 'multiply_multiply', 'max_max', 'min_min', 'concat_concat',
]
PRIMITIVES_NAS = [0, 2, 4, 8, 16]
def ops_triple(triple, p, q, r):
if triple == 'plus_plus':
return OPS['plus'](OPS['plus'](p, q), r)
elif triple == 'multiply_multiply':
return OPS['plus'](OPS['plus'](p, q), r)
elif triple == 'max_max':
return OPS['max'](OPS['max'](p, q), r)
elif triple == 'min_min':
return OPS['min'](OPS['min'](p, q), r)
elif triple == 'concat_concat':
return OPS['concat'](OPS['concat'](p, q), r)
else:
ops = triple.split('_')
if ops[0] == '0':
return OPS[ops[2]](OPS[ops[1]](p, q), r)
elif ops[0] == '1':
return OPS[ops[2]](OPS[ops[1]](p, r), q)
elif ops[0] == '2':
return OPS[ops[2]](OPS[ops[1]](q, r), p)
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def MixedTriple(embedding_p, embedding_q, embedding_r, weights, FC):
return torch.sum(torch.stack([w * fc(ops_triple(primitive, embedding_p, embedding_q, embedding_r)) \
for w,primitive,fc in zip(weights,PRIMITIVES_TRIPLE,FC)]), 0)
class Virtue_Triple(nn.Module):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
super(Virtue_Triple, self).__init__()
self.num_ps = num_ps
self.num_qs = num_qs
self.num_rs = num_rs
self.embedding_dim = embedding_dim
self.reg = reg
self._PsEmbedding = nn.Embedding(num_ps, embedding_dim)
self._QsEmbedding = nn.Embedding(num_qs, embedding_dim)
self._RsEmbedding = nn.Embedding(num_rs, embedding_dim)
def compute_loss(self, inferences, labels, regs):
labels = torch.reshape(labels, [-1,1])
loss = F.mse_loss(inferences, labels)
return loss + regs
class NCF_Triple(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
super(NCF_Triple, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
self._FC = nn.Linear(embedding_dim, 1, bias=False)
self._W = nn.Linear(3*embedding_dim, embedding_dim)
def forward(self, ps, qs, rs):
constrain(next(self._FC.parameters()))
constrain(next(self._W.parameters()))
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
gmf_out = ps_embedding * qs_embedding * rs_embedding
mlp_out = self._W(torch.cat([ps_embedding, qs_embedding, rs_embedding], dim=-1))
inferences = self._FC(F.relu(gmf_out + mlp_out))
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + torch.norm(rs_embedding))
return inferences, regs
class DeepWide_Triple(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
super(DeepWide_Triple, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
self._FC = nn.Linear(3*embedding_dim, 1, bias=False)
def forward(self, ps, qs, rs):
constrain(next(self._FC.parameters()))
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
inferences = self._FC(torch.cat([ps_embedding, qs_embedding, rs_embedding], dim=-1))
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + torch.norm(rs_embedding))
return inferences, regs
class CP(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
super(CP, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
self._FC = nn.Linear(embedding_dim, 1, bias=False)
def forward(self, ps, qs, rs):
constrain(next(self._FC.parameters()))
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
inferences = self._FC(ps_embedding * qs_embedding * rs_embedding)
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + torch.norm(rs_embedding))
return inferences, regs
class TuckER(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
super(TuckER, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
w = torch.empty(embedding_dim, embedding_dim, embedding_dim)
nn.init.xavier_uniform_(w)
self._W = torch.nn.Parameter(torch.tensor(w, dtype=torch.float, device='cuda', requires_grad=True))
def forward(self, ps, qs, rs):
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
W_after_p = torch.mm(ps_embedding, self._W.view(ps_embedding.size(1), -1))
W_after_p = W_after_p.view(-1, rs_embedding.size(1), qs_embedding.size(1))
W_after_r = torch.bmm(rs_embedding.view(-1,1,rs_embedding.size(1)), W_after_p)
W_after_q = torch.bmm(W_after_r, qs_embedding.view(-1,qs_embedding.size(1),1))
inferences = W_after_q.view(-1,1)
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + torch.norm(rs_embedding))
return inferences, regs
class NAS_Triple(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, arch, reg):
super(NAS_Triple, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
self._FC = []
for i in range(len(arch)):
if i == 0:
self._FC.append(nn.Linear(3*embedding_dim, int(arch[i])))
else:
self._FC.append(nn.Linear(int(arch[i-1]), int(arch[i])))
self._FC.append(nn.ReLU())
if len(self._FC) == 0:
self._FC.append(nn.Linear(3*embedding_dim, 1, bias=False))
else:
self._FC.append(nn.Linear(arch[-1], 1, bias=False))
self._FC = nn.Sequential(*self._FC)
def forward(self, ps, qs, rs):
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
inferences = self._FC(torch.cat([ps_embedding, qs_embedding, rs_embedding], dim=-1))
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + torch.norm(rs_embedding))
return inferences, regs
class AutoNeural_Triple(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
super(AutoNeural_Triple, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
self._FC = nn.Sequential(
nn.Linear(3*embedding_dim, 3*embedding_dim),
nn.Sigmoid(),
nn.Linear(3*embedding_dim, 1))
def forward(self, ps, qs, rs):
for p in self._FC.parameters():
if len(p.size()) == 1: continue
constrain(p)
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
inferences = self._FC(torch.cat([ps_embedding,qs_embedding,rs_embedding], dim=-1))
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + torch.norm(rs_embedding))
return inferences, regs
def embedding_parameters(self):
return list(self._PsEmbedding.parameters()) + list(self._QsEmbedding.parameters()) + \
list(self._RsEmbedding.parameters())
def mlp_parameters(self):
return self._FC.parameters()
class Network_Triple(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, arch, reg):
super(Network_Triple, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
self.arch = arch
self.mlp_p = arch['mlp']['p']
self.mlp_q = arch['mlp']['q']
self.mlp_r = arch['mlp']['r']
if arch['triple'] == 'concat_concat':
self._FC = nn.Linear(3*embedding_dim, 1, bias=False)
elif 'concat' in arch['triple']:
self._FC = nn.Linear(2*embedding_dim, 1, bias=False)
else:
self._FC = nn.Linear(embedding_dim, 1, bias=False)
def parameters(self):
return list(self._PsEmbedding.parameters()) + list(self._QsEmbedding.parameters()) + \
list(self._RsEmbedding.parameters()) + list(self._FC.parameters())
def forward(self, ps, qs, rs):
constrain(next(self._FC.parameters()))
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
ps_embedding_trans = self.mlp_p(ps_embedding.view(-1,1)).view(ps_embedding.size())
qs_embedding_trans = self.mlp_q(qs_embedding.view(-1,1)).view(qs_embedding.size())
rs_embedding_trans = self.mlp_r(rs_embedding.view(-1,1)).view(rs_embedding.size())
inferences = self._FC(ops_triple(self.arch['triple'], ps_embedding_trans,
qs_embedding_trans, rs_embedding_trans))
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + \
torch.norm(rs_embedding))
return inferences, regs
class Network_Search_Triple(Virtue_Triple):
def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
super(Network_Search_Triple, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
self._FC = nn.ModuleList()
for primitive in PRIMITIVES_TRIPLE:
if primitive == 'concat_concat':
self._FC.append(nn.Linear(3*embedding_dim, 1, bias=False))
elif 'concat' in primitive:
self._FC.append(nn.Linear(2*embedding_dim, 1, bias=False))
else:
self._FC.append(nn.Linear(embedding_dim, 1, bias=False))
self._initialize_alphas()
def _initialize_alphas(self):
self.mlp_p = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1)).cuda()
self.mlp_q = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1)).cuda()
self.mlp_r = nn.Sequential(
nn.Linear(1, 8),
nn.Tanh(),
nn.Linear(8, 1)).cuda()
self._arch_parameters = {}
self._arch_parameters['mlp'] = {}
self._arch_parameters['mlp']['p'] = self.mlp_p
self._arch_parameters['mlp']['q'] = self.mlp_q
self._arch_parameters['mlp']['r'] = self.mlp_r
self._arch_parameters['triple'] = Variable(torch.ones(len(PRIMITIVES_TRIPLE),
dtype=torch.float, device='cuda') / 2, requires_grad=True)
self._arch_parameters['triple'].data.add_(
torch.randn_like(self._arch_parameters['triple'])*1e-3)
def arch_parameters(self):
return list(self._arch_parameters['mlp']['p'].parameters()) + \
list(self._arch_parameters['mlp']['q'].parameters()) + \
list(self._arch_parameters['mlp']['r'].parameters()) + \
[self._arch_parameters['triple']]
def new(self):
model_new = Network_Search_Triple(self.num_ps, self.num_qs, self.num_rs, self.embedding_dim, self.reg).cuda()
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data = y.data.clone()
try:
x.grad = y.grad.clone()
except:
pass
return model_new
def clip(self):
m = nn.Hardtanh(0, 1)
self._arch_parameters['triple'].data = m(self._arch_parameters['triple'])
def binarize(self):
self._cache = self._arch_parameters['triple'].clone()
max_index = self._arch_parameters['triple'].argmax().item()
for i in range(self._arch_parameters['triple'].size(0)):
if i == max_index:
self._arch_parameters['triple'].data[i] = 1.0
else:
self._arch_parameters['triple'].data[i] = 0.0
def recover(self):
self._arch_parameters['triple'].data = self._cache
del self._cache
def forward(self, ps, qs, rs):
for i in range(len(PRIMITIVES_TRIPLE)):
constrain(next(self._FC[i].parameters()))
ps_embedding = self._PsEmbedding(ps)
qs_embedding = self._QsEmbedding(qs)
rs_embedding = self._RsEmbedding(rs)
ps_embedding_trans = self._arch_parameters['mlp']['p'](ps_embedding.view(-1,1)).view(ps_embedding.size())
qs_embedding_trans = self._arch_parameters['mlp']['q'](qs_embedding.view(-1,1)).view(qs_embedding.size())
rs_embedding_trans = self._arch_parameters['mlp']['r'](rs_embedding.view(-1,1)).view(rs_embedding.size())
# the weight is already binarized
assert self._arch_parameters['triple'].sum() == 1.
inferences = MixedTriple(ps_embedding_trans, qs_embedding_trans, rs_embedding_trans,
self._arch_parameters['triple'], self._FC)
regs = self.reg * (torch.norm(ps_embedding) + torch.norm(qs_embedding) + torch.norm(rs_embedding))
return inferences, regs
def genotype(self):
genotype = PRIMITIVES_TRIPLE[self._arch_parameters['triple'].argmax().cpu().numpy()]
genotype_p = F.softmax(self._arch_parameters['triple'], dim=-1)
return genotype, genotype_p.cpu().detach()
def step(self, p_train, q_train, r_train, labels_train, p_valid, q_valid,
r_valid, labels_valid, lr, arch_optimizer, unrolled):
self.zero_grad()
arch_optimizer.zero_grad()
# binarize before forward propagation
self.binarize()
if unrolled:
loss = self._backward_step_unrolled(p_train, q_train, r_train,
labels_train, p_valid, q_valid, r_valid, labels_valid, lr)
else:
loss = self._backward_step(p_valid, q_valid, r_valid, labels_valid)
# restore weight before updating
self.recover()
arch_optimizer.step()
return loss
def _backward_step(self, p_valid, q_valid, r_valid, labels_valid):
inferences, regs = self(p_valid, q_valid, r_valid)
loss = self.compute_loss(inferences, labels_valid, regs)
loss.backward()
return loss
def _backward_step_unrolled(self, p_train, q_train, r_train, labels_train,
p_valid, q_valid, r_valid, labels_valid, lr):
unrolled_model = self._compute_unrolled_model(
p_train, q_train, r_train, labels_train, lr)
unrolled_inference, unrolled_regs = unrolled_model(p_valid, q_valid, r_valid)
unrolled_loss = unrolled_model.compute_loss(unrolled_inference, labels_valid, unrolled_regs)
unrolled_loss.backward()
dalpha = [v.grad for v in unrolled_model.arch_parameters()]
vector = [v.grad for v in unrolled_model.parameters()]
implicit_grads = self._hessian_vector_product(vector, p_train, q_train, r_train, labels_train)
for g, ig in zip(dalpha,implicit_grads):
g.sub_(lr, ig)
for v, g in zip(self.arch_parameters(), dalpha):
v.grad = g.clone()
return unrolled_loss
def _compute_unrolled_model(self, p_train, q_train, r_train, labels_train, lr):
inferences, regs = self(p_train, q_train, r_train)
loss = self.compute_loss(inferences, labels_train, regs)
theta = _concat(self.parameters())
dtheta = _concat(torch.autograd.grad(loss, self.parameters())) + \
self.reg * theta
unrolled_model = self._construct_model_from_theta(
theta.sub(lr, dtheta))
return unrolled_model
def _construct_model_from_theta(self, theta):
model_new = self.new()
model_dict = self.state_dict()
params, offset = {}, 0
for k,v in self.named_parameters():
v_length = np.prod(v.size())
params[k] = theta[offset: offset+v_length].view(v.size())
offset += v_length
assert offset == len(theta)
model_dict.update(params)
model_new.load_state_dict(model_dict)
return model_new.cuda()
def _hessian_vector_product(self, vector, p_train, q_train, r_train, labels_train, r=1e-2):
R = r / _concat(vector).norm()
for p,v in zip(self.parameters(), vector):
p.data.add_(R, v)
inferences, regs = self(p_train, q_train, r_train)
loss = self.compute_loss(inferences, labels_train, regs)
grads_p = torch.autograd.grad(loss, self.arch_parameters())
for p,v in zip(self.parameters(), vector):
p.data.sub_(2*R, v)
inferences, regs = self(p_train, q_train, r_train)
loss = self.compute_loss(inferences, labels_train, regs)
grads_n = torch.autograd.grad(loss, self.arch_parameters())
for p,v in zip(self.parameters(), vector):
p.data.add_(R, v)
return [(x-y).div_(2*R) for x,y in zip(grads_p,grads_n)]
|
985,645 | 255a279a11aa5f61f84e959858e7001cae3114d4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: source/encoders/attention.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from source.utils.misc import sequence_mask
class Attention(nn.Module):
"""
Attention
"""
def __init__(self,
query_size,
memory_size=None,
hidden_size=None,
mode="mlp",
return_attn_only=False,
project=False):
super(Attention, self).__init__()
assert (mode in ["dot", "general", "mlp"]), (
"Unsupported attention mode: {mode}"
)
self.query_size = query_size
self.memory_size = memory_size or query_size
self.hidden_size = hidden_size or query_size
self.mode = mode
self.return_attn_only = return_attn_only
self.project = project
if mode == "general":
self.linear_query = nn.Linear(
self.query_size, self.memory_size, bias=False)
elif mode == "mlp":
self.linear_query = nn.Linear(
self.query_size, self.hidden_size, bias=True)
self.linear_memory = nn.Linear(
self.memory_size, self.hidden_size, bias=False)
self.tanh = nn.Tanh()
self.v = nn.Linear(self.hidden_size, 1, bias=False)
self.softmax = nn.Softmax(dim=-1)
if self.project:
self.linear_project = nn.Sequential(
nn.Linear(in_features=self.hidden_size + self.memory_size,
out_features=self.hidden_size),
nn.Tanh())
def __repr__(self):
main_string = "Attention({}, {}".format(self.query_size, self.memory_size)
if self.mode == "mlp":
main_string += ", {}".format(self.hidden_size)
main_string += ", mode='{}'".format(self.mode)
if self.project:
main_string += ", project=True"
main_string += ")"
return main_string
def forward(self, query, memory, mask=None):
"""
query: Tensor(batch_size, query_length, query_size)
memory: Tensor(batch_size, memory_length, memory_size)
mask: Tensor(batch_size, memory_length)
"""
if self.mode == "dot":
assert query.size(-1) == memory.size(-1)
# (batch_size, query_length, memory_length)
attn = torch.bmm(query, memory.transpose(1, 2))
elif self.mode == "general":
assert self.memory_size == memory.size(-1)
# (batch_size, query_length, memory_size)
key = self.linear_query(query)
# (batch_size, query_length, memory_length)
attn = torch.bmm(key, memory.transpose(1, 2))
else:
# (batch_size, query_length, memory_length, hidden_size)
hidden = self.linear_query(query).unsqueeze(
2) + self.linear_memory(memory).unsqueeze(1)
key = self.tanh(hidden)
# (batch_size, query_length, memory_length)
attn = self.v(key).squeeze(-1)
if mask is not None:
# (batch_size, query_length, memory_length)
mask = mask.unsqueeze(1).repeat(1, query.size(1), 1)
attn.masked_fill_(mask, -float("inf"))
# (batch_size, query_length, memory_length)
weights = self.softmax(attn)
if self.return_attn_only:
return weights
# (batch_size, query_length, memory_size)
weighted_memory = torch.bmm(weights, memory)
if self.project:
project_output = self.linear_project(
torch.cat([weighted_memory, query], dim=-1))
return project_output, weights
else:
return weighted_memory, weights
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1e9)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
# Transpose for attention dot product: b x n x lq x dv
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q, attn = self.attention(q, k, v, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid) # position-wise
self.w_2 = nn.Linear(d_hid, d_in) # position-wise
self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
|
985,646 | 35fef18599254891ac8cbe3cc62e8e15e90d3926 | def is_prime(num):
i=1
factors=0
while i<num:
if num%i==0:
factors=factors+1
i=i+1
if factors==1:
return True
if factors>=1:
return False |
985,647 | 5e0b54757f05feb78d4142f5e51b929c441d20bc | from selenium import webdriver
import time
class DouyuSpider(object):
def __init__(self):
self.option = webdriver.ChromeOptions()
self.option.binary_location = r'D:\谷歌浏览器\Google\Chrome\Application\chrome.exe'
self.driver = webdriver.Chrome(chrome_options=self.option)
self.start_url = "https://www.douyu.com/directory/all"
def get_content_list(self):
li_list = self.driver.find_elements_by_xpath("//ul[@class='layout-Cover-list']/li")
content_list = []
for li in li_list:
item ={}
item["room_img"] = li.find_element_by_xpath(".//img[@class='DyImg-content is-normal']").get_attribute("src")
item["room_fenlei"] = li.find_element_by_xpath(".//h3[@class='DyListCover-intro']").get_attribute("title")
item["room_title"] = li.find_element_by_xpath(".//h3[@class='DyListCover-intro']").get_attribute("title")
item['anchor_name'] = li.find_element_by_xpath(".//h2[@class='DyListCover-user']").text
item["room_hot"] = li.find_element_by_xpath(".//span[@class='DyListCover-hot']").text
next_url = self.driver.find_elements_by_xpath(".//span[@class='dy-Pagination-item-custom']")
next_url = next_url[0] if len(next_url)>0 else None
print(item)
content_list.append(item)
return content_list, next_url
def save_content_list(self,content_list):
with open('斗鱼房间信息/douyu.txt','w', encoding='urf-8') as f:
f.write(content_list)
print('保存成功')
def run(self):
# 1.star_url
# 2.发送请求
self.driver.get(self.start_url)
# 3.提取数据,提取下一页元素
content_list, next_url = self.get_content_list()
# 4.保存数据
self.save_content_list(content_list)
# 5.点击下一页元素,进入循环
while next_url is not None:
next_url.click()
content_list, next_url = self.get_content_list()
self.save_content_list()
if __name__ == '__main__':
douyu = DouyuSpider()
douyu.run()
|
985,648 | aec0726b90feffea01e6f7a1292415bde88c4b3f | """
Morgan Christensen
Simple calculator project to get used to tkinter
"""
from tkinter import *
# number storage
numbers = []
# functions
def add():
numbers.append(e.get())
global math
math = "addition"
# print(numbers)
e.delete(0, END)
def subtract():
numbers.append(e.get())
global math
math = "subtraction"
# print(numbers)
e.delete(0, END)
def multiply():
numbers.append(e.get())
global math
math = "multiplication"
# print(numbers)
e.delete(0, END)
def divide():
numbers.append(e.get())
global math
math = "division"
# print(numbers)
e.delete(0, END)
def entry(number):
cur_num = e.get()
e.delete(0, END)
e.insert(0, str(cur_num) + str(number))
def equal():
if math == "addition":
numbers.append(e.get())
# print(numbers)
e.delete(0, END)
answer = int(numbers[0]) + int(numbers[1])
e.insert(0, str(answer))
numbers.clear()
elif math == "subtraction":
numbers.append(e.get())
# print(numbers)
e.delete(0, END)
answer = int(numbers[0]) - int(numbers[1])
e.insert(0, str(answer))
numbers.clear()
elif math == "multiplication":
numbers.append(e.get())
# print(numbers)
e.delete(0, END)
answer = int(numbers[0]) * int(numbers[1])
e.insert(0, str(answer))
numbers.clear()
elif math == "division":
numbers.append(e.get())
# print(numbers)
e.delete(0, END)
answer = int(numbers[0]) / int(numbers[1])
e.insert(0, str(answer))
numbers.clear()
def clear():
numbers.clear()
e.delete(0, END)
calc = Tk()
calc.title("Simple Calculator")
# Entry used as label for the numbers selected
e = Entry(calc, width=50, borderwidth=5)
e.grid(row=0, column=0, columnspan=3, padx=10, pady=10)
# Number Buttons
b1 = Button(calc, text="1", padx=40, pady=20, command=lambda: entry(1))
b2 = Button(calc, text="2", padx=40, pady=20, command=lambda: entry(2))
b3 = Button(calc, text="3", padx=40, pady=20, command=lambda: entry(3))
b4 = Button(calc, text="4", padx=40, pady=20, command=lambda: entry(4))
b5 = Button(calc, text="5", padx=40, pady=20, command=lambda: entry(5))
b6 = Button(calc, text="6", padx=40, pady=20, command=lambda: entry(6))
b7 = Button(calc, text="7", padx=40, pady=20, command=lambda: entry(7))
b8 = Button(calc, text="8", padx=40, pady=20, command=lambda: entry(8))
b9 = Button(calc, text="9", padx=40, pady=20, command=lambda: entry(9))
b0 = Button(calc, text="0", padx=40, pady=20, command=lambda: entry(0))
# Number buttons grid
b1.grid(row=3, column=0)
b2.grid(row=3, column=1)
b3.grid(row=3, column=2)
b4.grid(row=2, column=0)
b5.grid(row=2, column=1)
b6.grid(row=2, column=2)
b7.grid(row=1, column=0)
b8.grid(row=1, column=1)
b9.grid(row=1, column=2)
b0.grid(row=4, column=0)
# Function Buttons
b_add = Button(calc, text="+", padx=39, pady=20, command=lambda: add())
b_subtract = Button(calc, text="-", padx=39, pady=20, command=lambda: subtract())
b_multiply = Button(calc, text="x", padx=39, pady=20, command=lambda: multiply())
b_divide = Button(calc, text="/", padx=39, pady=20, command=lambda: divide())
b_equal = Button(calc, text="=", padx=39, pady=20, command=lambda: equal())
b_clear = Button(calc, text="Clear", padx=135, pady=20, command=lambda: clear())
# Function button grid
b_add.grid(row=4, column=1)
b_subtract.grid(row=1, column=3)
b_multiply.grid(row=2, column=3)
b_divide.grid(row=3, column=3)
b_equal.grid(row=4, column=2)
b_clear.grid(row=5, columnspan=3, column=0)
calc.mainloop()
|
985,649 | 98cbdcca3c3706a8ebf59d02b1e2afe1f170201b | # Generated by Django 3.0.8 on 2020-07-24 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todolist', '0002_todo_duesness'),
]
operations = [
migrations.RenameField(
model_name='todo',
old_name='duesness',
new_name='dueness',
),
migrations.AddField(
model_name='todo',
name='progress',
field=models.TextField(default='notStarted'),
),
]
|
985,650 | 416e075527b4b00af2606006a521ea0017f3f919 | import asyncio
import pytest
from PLATER.services.util.overlay import Overlay
@pytest.fixture()
def graph_interface_apoc_supported():
class MockGI:
def supports_apoc(self):
return True
async def run_apoc_cover(self, idlist):
return [{
'result': [
{
'source_id': 'NODE:0',
'target_id': 'NODE:2',
'edge': {
'type': 'biolink:related_to',
'id': 'SUPPORT_EDGE_KG_ID_1'
}
}, {
'source_id': 'NODE:00',
'target_id': 'NODE:22',
'edge': {
'type': 'biolink:related_to',
'id': 'SUPPORT_EDGE_KG_ID_2'
}
}, { # Edge relating two nodes from different answers
# we should expect this NOT to be in response.
'source_id': 'NODE:0',
'target_id': 'NODE:22',
'edge': {
'type': 'biolink:related_to',
'id': 'SUPPORT_EDGE_KG_ID_3'
}
}
]
}]
return MockGI()
@pytest.fixture()
def graph_interface_apoc_unsupported():
class MockGI:
def supports_apoc(self):
return True
return MockGI()
@pytest.fixture()
def reasoner_json():
return {
# Although this is not particularly useful in testing...
'query_graph': {
'nodes': [
{'id': 'n0', 'type': 'type'},
{'id': 'n1', 'type': 'type'},
{'id': 'n2', 'type': 'type'}
],
'edges': [
{'id': 'e0', 'source_id': 'n0', 'target_id': 'n1'},
{'id': 'e1', 'source_id': 'n1', 'target_id': 'n2'},
]
},
# Knowledge_graph Here also we don't really care about what was in
# kg
'knowledge_graph':
{
'nodes': [],
'edges': []
},
'results': [
{
'node_bindings': [
{'qg_id': 'n0', 'kg_id': 'NODE:0'},
{'qg_id': 'n1', 'kg_id': 'NODE:1'},
{'qg_id': 'n2', 'kg_id': 'NODE:2'},
],
'edge_bindings': [
{'qg_id': 'e0', 'kg_id': 'EDGE:0'},
{'qg_id': 'e1', 'kg_id': 'EDGE:1'},
{'qg_id': 'e2', 'kg_id': 'EDGE:2'},
]
},
{
'node_bindings': [
{'qg_id': 'n0', 'kg_id': 'NODE:00'},
{'qg_id': 'n1', 'kg_id': 'NODE:11'},
{'qg_id': 'n2', 'kg_id': 'NODE:22'},
],
'edge_bindings': [
{'qg_id': 'e0', 'kg_id': 'EDGE:00'},
{'qg_id': 'e1', 'kg_id': 'EDGE:11'},
{'qg_id': 'e2', 'kg_id': 'EDGE:22'},
]
},
{
'node_bindings': [
{'qg_id': 'n0', 'kg_id': 'NODE:000'},
{'qg_id': 'n1', 'kg_id': 'NODE:111'},
{'qg_id': 'n2', 'kg_id': 'NODE:222'},
],
'edge_bindings': [
{'qg_id': 'e0', 'kg_id': 'EDGE:000'},
{'qg_id': 'e1', 'kg_id': 'EDGE:111'},
{'qg_id': 'e2', 'kg_id': 'EDGE:222'},
]
,
}
]
}
def test_overlay_adds_support_bindings(graph_interface_apoc_supported, reasoner_json):
ov = Overlay(graph_interface=graph_interface_apoc_supported)
event_loop = asyncio.get_event_loop()
response = event_loop.run_until_complete(ov.overlay_support_edges(reasoner_json))
edges = response['knowledge_graph']['edges']
edge_ids = list(map(lambda edge: edge['id'], edges))
assert len(edge_ids) == 2
assert 'SUPPORT_EDGE_KG_ID_1' in edge_ids
assert 'SUPPORT_EDGE_KG_ID_2' in edge_ids
checked = False
for answer in response['results']:
all_node_ids = list(map(lambda x: x['kg_id'], answer['node_bindings']))
all_edge_kg_ids = list(map(lambda x: x['kg_id'], answer['edge_bindings']))
if ('NODE:0' in all_node_ids and 'NODE:2' in all_node_ids) \
or ('NODE:00' in all_node_ids and 'NODE:22' in all_node_ids):
assert 'SUPPORT_EDGE_KG_ID_1' in all_edge_kg_ids or 'SUPPORT_EDGE_KG_ID_2' in all_edge_kg_ids
checked = True
assert checked
|
985,651 | 669ae5e6eea2ba099ac0903a3534bb75e71e339a | import time
s=0
m=0
while s<=60:
print m, 'Minutes', s, 'Seconds'
time.sleep(1) #program stops for 1 second
s+=1
if s == 60:
m+=1
s=0
elif m == 60:
m=0
s=0 |
985,652 | 9df01959d226cc1eb6ccdf0e16006be37274ec9c | """
Jesse@FDU-VTS-MIA
created by 2019/11/26
"""
class Solution:
def longestPalindrome(self, s: str) -> str:
ans = self.expand_center(s)
return ans
def dp(self, s):
"""
TLE
"""
n = len(s)
if not n: return ""
P = [[False for j in range(n)] for i in range(n)]
start = 0
maxL = 1
for i in range(n):
P[i][i] = True
if i < n - 1 and s[i] == s[i + 1]:
P[i][i + 1] = True
start = i
maxL = 2
for l in range(3, n + 1, 1):
for i in range(n - l + 1):
P[i][i + l - 1] = (P[i + 1][i + l - 2] and s[i] == s[i + l - 1])
if P[i][i + l - 1] and l > maxL:
maxL = l
start = i
return s[start:start + maxL]
def expand_center(self, s):
def expand_func(s, l, r):
left = l
right = r
while left >= 0 and right < len(s) and s[left] == s[right]:
left -= 1
right += 1
return right - left - 1
n = len(s)
if not n: return ""
end = start = 0
for i in range(n):
len1 = expand_func(s, i, i) # odd
len2 = expand_func(s, i, i+1) # even
len_ = max(len1, len2)
if len_ > end - start:
start = i - (len_-1)//2
end = i + len_//2
return s[start: end+1]
|
985,653 | 58a879dd27bc12ccf2d073c43092e691568f9c21 | N = int(input('Quantos alunos? '))
students = {}
for i in range(1, N+1):
name = input(f'Nome do aluno {i}: ')
notas = []
for j in range(1, 5):
nota = float(input(f'{j}ª Nota do aluno {name}: '))
notas.append(nota)
students[name] = notas
for name, notas in students.items():
average = sum(notas) / len(notas)
result = 'aprovado' if average >= 7.0 else 'reprovado'
print(f'O aluno {name} foi {result} com média {average:.1f}')
|
985,654 | eef3bd17f539f9e98be515c5175279aa4a223e38 | from flask import Flask
import os
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
import cv2
from class_CNN import NeuralNetwork
from class_PlateDetection import PlateDetector
# Load pretrained model
########### INIT ###########
# Initialize the plate detector
plateDetector = PlateDetector(type_of_plate='RECT_PLATE',
minPlateArea=4500,
maxPlateArea=30000)
# Initialize the Neural Network
myNetwork = NeuralNetwork(modelFile="model/binary_128_0.50_ver3.pb",
labelFile="model/binary_128_0.50_labels_ver2.txt")
UPLOAD_FOLDER = os.path.dirname(os.path.realpath(__file__)) + '/uploads'
print(UPLOAD_FOLDER)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def hello_world():
return 'Plate recognition service'
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/detect', methods=['POST'])
def upload_file():
# check if the post request has the file part
if 'file' not in request.files:
return ''
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
return ''
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
img = cv2.imread(os.path.join(app.config['UPLOAD_FOLDER'], filename))
possible_plates = plateDetector.find_possible_plates(img)
if possible_plates is not None:
for i, p in enumerate(possible_plates):
chars_on_plate = plateDetector.char_on_plate[i]
recognized_plate, _ = myNetwork.label_image_list(chars_on_plate, imageSizeOuput=128)
return recognized_plate
return ''
if __name__ == "__main__":
app.run(host='0.0.0.0') |
985,655 | 16385e255196882b33d297809f3086fa4516ccb2 | import asyncio
import aiohttp
import sys
import json
import random
def generate_cmd(cmd, id_, timestamp):
return {"cmd": cmd,
"id": id_,
"expire_time": timestamp}
async def test_client(loop, account_number, node_id):
basic_auth = aiohttp.BasicAuth(account_number, "imapassord")
session = aiohttp.ClientSession(auth=basic_auth)
ws = await session.ws_connect('http://localhost:8080/receptor-controller')
async def periodic_writer():
await asyncio.sleep(2)
while True:
print("writing")
await ws.send_str(f"ROUTE:{node_id}:timestamp")
delay_msecs = random.randrange(100, 1000) / 1000
await asyncio.sleep(delay_msecs)
loop.create_task(periodic_writer())
while True:
print("here")
msg = await ws.receive()
print("there")
#print("type(msg):", type(msg))
#print("dir(msg):", dir(msg))
if msg.type == aiohttp.WSMsgType.text:
if msg.data[:2] == "HI":
print("Gotta HI...")
print("Sending HI...")
await ws.send_str(json.dumps(generate_cmd("HI", node_id, "timestamp")))
#await ws.send_str("ROUTE:node-x:timestamp")
if msg.data == 'close':
print("CLOSE!")
await ws.close()
break
else:
print("recv:", msg.data)
# await ws.send_str(msg.data + '/answer')
elif msg.type == aiohttp.WSMsgType.closed:
print("WSMsgType.closed")
break
elif msg.type == aiohttp.WSMsgType.error:
print("WSMsgType.error")
break
if __name__ == "__main__":
loop = asyncio.new_event_loop()
coros = [test_client(loop, "%02d"%i, "node_%02d"%i) for i in range(int(sys.argv[1]), int(sys.argv[2]))]
loop.run_until_complete(asyncio.wait(coros))
#task = loop.create_task(test_client(loop, sys.argv[1], sys.argv[2]))
#loop.run_until_complete(coros)
|
985,656 | 9a2773c39713bb3b7568c05e8b1bcecd66bcb960 | from ..consts import NOTHING # noqa
from .config import Config, ParameterError # noqa
from .parameter_types import String, Integer # noqa
from .spec import Spec # noqa
|
985,657 | d0d2f36f03c2b06f76e3e4f0e3835487a194cd11 | """
Basic Views for the Inventory System
"""
from django.shortcuts import render
#front page
def inventory_greeter(request):
template = 'inventory/inventory_greeter.html'
context = {}
return render(request, template, context)
|
985,658 | 394378bd14739d374c7b1e70699fe78aabb47164 | #!/usr/bin/env python3
"""
Module for processing the Human Phenotype Ontology.
"""
__author__ = 'Orion Buske (buske@cs.toronto.edu)'
import os
import sys
import re
import logging
logger = logging.getLogger(__name__)
class HPError(Exception):
pass
class HPObsoleteError(HPError):
pass
def get_descendants(root, acc=None):
"""Add to acc all descendants of root"""
if acc is None:
acc = set()
acc.add(root)
for child in root.children:
get_descendants(child, acc)
return acc
def get_ancestors(root, acc=None):
if acc is None:
acc = set()
acc.add(root)
for parent in root.parents:
get_ancestors(parent, acc)
return acc
class HPNode(object):
"""HPO graph node
Attributes:
id
name
parents (HP objects, filled in by HPO)
children (HP objects, filled in by HPO)
alts (HP terms)
"""
def __init__(self, lines):
self.parents = set()
self.children = set()
self.alts = set()
self._parent_hps = set()
for line in lines:
line = line.strip()
field, value = line.split(': ', 1)
if field == 'is_obsolete':
raise HPObsoleteError()
elif field == 'id':
assert value.startswith('HP:') and value[-1].isdigit(), value
self.id = value
elif field == 'name':
if '{' in value:
value = value[:value.index('{')].strip()
self.name = value
elif field == 'alt_id':
assert value.startswith('HP:') and value[-1].isdigit(), value
self.alts.add(value)
elif field == 'is_a':
hp = value.split('!')[0].strip()
assert hp.startswith('HP:') and hp[-1].isdigit(), value
self._parent_hps.add(hp)
try:
assert self.id
assert self.name
if self.id != 'HP:0000001':
assert self._parent_hps
except:
logger.error("Error parsing TERM:\n{}".format('\n'.join(lines)))
raise
def __str__(self):
return str(self.id)
def __repr__(self):
return str(self)
def link(self, hps):
"""Link to objects for parents and children, given lookup dict"""
for hp in self._parent_hps:
parent = hps[hp]
self.parents.add(parent)
parent.children.add(self)
def is_root(self):
return len(self._parent_hps) == 0
def ancestors(self):
return get_ancestors(self)
def _iter_hp_terms(reader):
term_lines = None
for line in reader:
line = line.strip()
if not line: continue
if line == '[Term]':
if term_lines:
yield term_lines
term_lines = []
elif line.startswith('[') and line.endswith(']'):
if term_lines:
yield term_lines
# Entering a non-term block, do not parse
term_lines = None
else:
if term_lines is not None:
term_lines.append(line)
if term_lines:
yield term_lines
class HPO(object):
"""HPO graph
Attributes:
version
hps: {hp -> HP}
root: HP
"""
def __init__(self, filename, new_root=None):
"""Load the HPO ontology specified in the OBO file 'filename'.
If new_root is specified (an HPO term ID), the HPO will be truncated to
only include that node and descendants.
"""
self.hps = {}
self.root = None
logger.info("Parsing graph...")
roots = []
with open(filename, encoding='utf-8') as ifp:
version_str = ifp.readline().strip()
assert version_str.startswith('format-version')
self.version = version_str.split(': ')[1]
logger.info("HPO version: {}".format(self.version))
for lines in _iter_hp_terms(ifp):
try:
hp = HPNode(lines)
except HPError:
continue
except:
logger.error('Error parsing term: {}'.format(lines))
raise
if hp.is_root():
roots.append(hp)
# Relate all alt HP ids to object
self.hps[hp.id] = hp
for hpid in hp.alts:
assert hpid not in self.hps
self.hps[hpid] = hp
# Connect network of HP objects
nodes = set(self.hps.values())
for node in nodes:
node.link(self.hps)
logger.info("Found {:d} HP nodes ({:d} terms) in graph".format(len(nodes), len(self.hps)))
logger.debug("Here are 5:")
for i, k in zip(list(range(5)), nodes):
logger.debug(" {:d}: {}".format(i, k))
if len(roots) == 1:
self.root = roots[0]
else:
logger.warning("Warning: found {:d} root nodes, leaving root as None".format(len(roots)))
self.root = None
if new_root:
assert new_root in self.hps
self.filter_to_descendants(new_root)
def filter_to_descendants(self, root_hp):
root = self.hps[root_hp]
safe_nodes = get_descendants(root)
logger.info("Filtering to the {:d} nodes descendant of {} ({})...".format(len(safe_nodes), root_hp, root.name))
hps = {}
safe_hps = set()
for node in safe_nodes:
safe_hps.add(node.id)
safe_hps.update(node.alts)
if node.id in hps:
logger.warning('Found duplicate of HP term:' + node.id)
hps[node.id] = node
for alt_hp in node.alts:
if alt_hp in hps:
logger.warning('Found duplicate of HP term:' + alt_hp)
else:
hps[alt_hp] = node
# Reset all connections in network
for node in safe_nodes:
node.parents.clear()
node.children.clear()
node._parent_hps.intersection_update(safe_hps)
# Re-link
for node in safe_nodes:
node.link(self.hps)
# Replace attributes
self.root = root
self.hps = hps
def __getitem__(self, key):
return self.hps[key]
def __iter__(self):
return iter(set(self.hps.values()))
def __len__(self):
return len(set(self.hps.values()))
def descendant_terms(self, root_hp):
root = self.hps[root_hp]
descendants = get_descendants(root)
terms = set()
for node in descendants:
terms.add(node.id)
terms.update(node.alts)
return terms
def script(hpo_filename):
hpo = HPO(hpo_filename)
# hpo.filter_to_descendants('HP:0000118')
for line in sys.stdin:
hpoid = line.strip()
if hpoid:
print(hpoid, hpo[hpoid].name)
def parse_args(args):
from argparse import ArgumentParser
description = __doc__.strip()
parser = ArgumentParser(description=description)
parser.add_argument('hpo_filename', metavar='hp.obo')
return parser.parse_args()
def main(args=sys.argv[1:]):
args = parse_args(args)
script(**vars(args))
if __name__ == '__main__':
sys.exit(main())
|
985,659 | 215f0e526142298bff10da363b3b7c0eff11c282 | from __future__ import division
import pandas as pd
import numpy as np
from preprocess_data import update_with_cc_means
# from generate_features import cat_to_binary
from sklearn import preprocessing, cross_validation, svm, metrics, tree, decomposition, svm
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier, OrthogonalMatchingPursuit, RandomizedLogisticRegression
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import ParameterGrid
from sklearn.metrics import *
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import precision_recall_curve, recall_score, auc, f1_score
import random
import pylab as pl
import matplotlib.pyplot as plt
from scipy import optimize
import time
import csv
def define_clfs_params():
clfs = {'RF': RandomForestClassifier(n_estimators=50, n_jobs=-1),
'ET': ExtraTreesClassifier(n_estimators=10, n_jobs=-1, criterion='entropy'),
'AB': AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200),
'LR': LogisticRegression(penalty='l1', C=1e5),
'SVM': svm.SVC(kernel='linear', probability=True, random_state=0),
'GB': GradientBoostingClassifier(learning_rate=0.05, subsample=0.5, max_depth=6, n_estimators=10),
'NB': GaussianNB(),
'DT': DecisionTreeClassifier(),
'SGD': SGDClassifier(loss="hinge", penalty="l2"),
'KNN': KNeighborsClassifier(n_neighbors=3)
}
grid = {
'RF':{'n_estimators': [1,10,100,1000,10000], 'max_depth': [1,5,10,20,50,100], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10]},
'LR': { 'penalty': ['l1','l2'], 'C': [0.00001,0.0001,0.001,0.01]}, #,0.1,1,10
'SGD': { 'loss': ['hinge','log','perceptron'], 'penalty': ['l2','l1','elasticnet']},
'ET': { 'n_estimators': [1,10,100,1000,10000], 'criterion' : ['gini', 'entropy'] ,'max_depth': [1,5,10,20,50,100], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10]},
'AB': { 'algorithm': ['SAMME', 'SAMME.R'], 'n_estimators': [1,10,100,1000,10000]},
'GB': {'n_estimators': [1,10,100,1000,10000], 'learning_rate' : [0.001,0.01,0.05,0.1,0.5],'subsample' : [0.1,0.5,1.0], 'max_depth': [1,3,5,10,20,50,100]},
'NB' : {},
'DT': {'criterion': ['gini', 'entropy'], 'max_depth': [1,5,10], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10]}, #,20,50,100
'SVM' :{'C' :[0.00001,0.0001,0.001,0.01,0.1,1,10],'kernel':['linear']},
'KNN' :{'n_neighbors': [1,5,10,25,50,100],'weights': ['uniform','distance'],'algorithm': ['auto','ball_tree','kd_tree']}
}
return clfs, grid
def magic_loop(models_to_run, clfs, params, X, y, k=0.5):
'''
'''
rows = [['Models', 'Parameters', 'Split', 'AUROC', 'Accuracy at '+str(k), 'Recall at '+str(k), 'F1 at '+str(k), 'precision at ' + str(k)]]
tracker=0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
for index,clf in enumerate([clfs[x] for x in models_to_run]):
parameter_values = params[models_to_run[index]]
for p in ParameterGrid(parameter_values):
clf.set_params(**p)
y_pred_probs = clf.fit(X_train, y_train).predict_proba(X_test)[:,1]
print(models_to_run[index] + ' ' + str(tracker) + '/1213')
tracker += 1
print(p)
# print(len(y_test))
# print(len(y_train))
# print(y_train == y_test[1:])
threshold = np.sort(y_pred_probs)[::-1][int(k*len(y_pred_probs))]
y_pred = np.asarray([1 if i >= threshold else 0 for i in y_pred_probs])
score_list = []
score_list.append(metrics.accuracy_score(y_test, y_pred))
score_list.append(metrics.recall_score(y_test, y_pred))
score_list.append(metrics.f1_score(y_test, y_pred))
score_list.append(metrics.precision_score(y_test, y_pred))
print(score_list)
# print(type(score_list))
rows.append(score_list)
return rows
# print(type(y_pred_probs))
# print(type(y_test))
# print(y_pred_probs)
# print(y_test)
# for i in range(1, 20):
# try:
# print('real: {}; prob: {}'.format(y_test[i], y_pred_probs[i]))
# except:
# continue
# def magic_loop(models_to_run, clfs, params, X, y, Ks=[0.1, 0.5, 0.9]):
# '''
# X and y need to be formatted
# '''
# tracker = 0
# model_list=[['Models', 'Parameters', 'Split', 'AUROC']]
# for k in Ks:
# model_list[0] += ['Accuracy at '+str(k), 'Recall at '+str(k), 'F1 at '+str(k), 'precision at ' + str(k)]
# print(model_list)
# for n in range(1, 2):
# # print("split: {}".format(n))
# for index,clf in enumerate([clfs[x] for x in models_to_run]):
# # print(models_to_run[index])
# try:
# # d = {}
# # print("parameters {}".format(p))
# clf.set_params(**p)
# # clf.fit(X_train, y_train)
# y_pred_probs = clf.fit(X_train, y_train).predict_proba(X_test)[:,1]
# # y_pred_probs = clf.predict_proba(X_test)[:,1]
# row=[models_to_run[index], p, n, roc_auc_score(y_test, y_pred_probs)]
# for k in Ks: #by default Ks has one value
# # print(y_pred_probs)
# # print()
# # for i in y_test:
# # print(i)
# # print(y_test)
# row += score_list #evaluate_models_at_k(y_pred, X_test, y_test, k)
# # plot_precision_recall_n(y_test, y_pred_probs, clf)
# # model_list = [['Models', 'Parameters', 'Split', 'Accuracy at '+str(k), 'Recall at '+str(k), 'AUROC', 'F1 at '+str(k), 'precision at ' + str(k)]]
# # model_list.append( d['accuracy at '+str(k)], d['recall'], d['AUROC'], d['F1'], d['precision at ' + str(k)]])
# model_list.append(row)
# # print(pd.DataFrame(model_list))
# tracker += 1
# print(models_to_run[index] + ' ' + str(tracker) + '/1213')
# print(p)
# except IndexError as e:
# print('Error:',e)
# continue
# return model_list
def get_summary():
'''
Takes output from magic loop and returns a summary of the highest values
'''
pass
def read_data(filename, response):
'''
Reads in from transformed csv file and generates X and Y arrays
'''
df = pd.read_csv(filename)
###############
df = df.dropna()
###############
Y = df[response]
df = df.drop(response, 1)
df = df.drop(df.columns[[0]], axis=1)
return df, Y
def main(data_filename, response, output_filename, summary_filename):
models_to_run = models_to_run = ['LR', 'NB', 'DT', 'RF']
clfs, params = define_clfs_params()
X, y = read_data(data_filename, response)
rows = magic_loop(models_to_run, clfs, params, X, y)
# print(rows)
for i in rows:
print(i)
print(type(i))
if __name__ == "__main__":
main('cs-training.csv', 'SeriousDlqin2yrs', 'loop_full.csv', 'loop_summary.csv') |
985,660 | 7ada3469551de8b8ec5895dc7d01907afc571862 | from .range import *
from .expr import *
from .decl import *
__all__ = ['Location', 'Range'] + expr.__all__ + decl.__all__
|
985,661 | 8b22655edded8cc4d8afdade99c629471344494e | import unittest
import time
from src.store import Store
class TestStore(unittest.TestCase):
def test_cache_get(self):
store = Store(retry=0, expiry=20)
store.cache_set('bonnie', 'clyde')
value = store.cache_get('bonnie')
self.assertEqual(value, b'clyde')
def test_timeout_default_expiry(self):
store = Store(retry=0, expiry=1)
store.cache_set('beauty', 'beast')
time.sleep(2)
value = store.cache_get('beauty')
self.assertEqual(value, None)
def test_timeout_override_expiry(self):
store = Store(retry=0, expiry=20)
store.cache_set('lelik', 'bolik', 1)
time.sleep(2)
value = store.cache_get('lelik')
self.assertEqual(value, None)
def test_get(self):
store = Store(retry=0, expiry=20)
store.set('sid', 'nancy')
value = store.get('sid')
self.assertEqual(value, 'nancy')
def test_get_retry(self):
store = Store(host='non_existent', retry=1, expiry=20)
store.cache_set('sid', 'nancy')
store.cache_get('sid')
self.assertEqual(store.action_count, 2)
def test_set_retry(self):
store = Store(host='non_existent', retry=1, expiry=20)
store.cache_set('sid', 'nancy')
self.assertEqual(store.action_count, 2)
|
985,662 | c3d5e2640fd1017a9cf520f26e666cc8cd71ad6d | """
BFS 是常见的广度优先搜索算法(Breadth-First-Search),是一种利用队列实现的搜索算法。
先探索其实点周的node,像四周扩散
"""
# init the graphs data
graphs = {
'A': ['B', 'C'],
'B': ['A', 'C', 'D'],
'C': ['A', 'B', 'D', 'E'],
'D': ['B', 'C', 'E', 'F'],
'E': ['C', 'D'],
'F': ['D']
}
def bfs(graph, s):
"""
implement the bfs
:param graph: graph data
:param s: start node
:return:
"""
# we use queue in bfs
queue = [s]
visited = [s]
while len(queue) > 0:
vertex = queue.pop(0)
nodes = graph[vertex]
for n in nodes:
if n not in visited:
queue.append(n)
visited.append(n)
return visited
def bfs_target(graph, s, e):
"""
use bfs find the shortest path
:param graph: graph data
:param s: start node
:param e: end node
:return: the shortest path from s to e
"""
queue = [s]
visited = [s]
find = False
while len(queue) > 0:
vertex = queue.pop(0)
nodes = graph[vertex]
for n in nodes:
if n not in visited:
queue.append(n)
visited.append(n)
if n == e:
find = True
break
if find:
break
return visited
if __name__ == '__main__':
print(bfs(graphs, 'A'))
print(bfs_target(graphs, 'A', 'D'))
|
985,663 | 4f5117c3c14b348a1e4828a3f007f86394ae8434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Load all import
import tweepy
import sys
from unidecode import unidecode
from ConfigParser import SafeConfigParser
# Set configuration
reload(sys)
sys.setdefaultencoding('utf8')
class Twitter:
def __init__(self, config):
# Load configuration from config.ini
parser = SafeConfigParser()
parser.read(config)
# Constants
CONSUMER_KEY = parser.get('twitter', 'CONSUMER_KEY')
CONSUMER_SECRET = parser.get('twitter', 'CONSUMER_SECRET')
ACCESS_TOKEN = parser.get('twitter', 'ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = parser.get('twitter', 'ACCESS_TOKEN_SECRET')
# Identification
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
self.api = tweepy.API(auth)
def get_tweets_from(self, user, n_tweets):
# Retrieve last n_tweets of the user
# Usefull data: tweet.{text/retweeted/favorited}
tweets = api.user_timeline(id = user, count = n_tweets)
|
985,664 | d1d7f4587159832b8a31d94f67b0deaf88a44016 |
#! /usr/bin/python3
''' file to infer the critical values of delta likelihood to infer
aneuploidy '''
import pandas as pd
import re
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
from sklearn.preprocessing import OneHotEncoder
#import the data
Data=pd.read_csv('../Results/Interpret_delta_new',sep='\t',names=["Ploidies","Inferred_Ploidy","SNPs","Mean_read_depth","H_0","H_1","Delta","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20"],index_col=False)
# Create new column with normalised data and other features wanted
Data['Normalised_delta']=(Data['Delta']/Data['SNPs'])
Ploids=list(Data['Ploidies'])
base=[]
no_base=[]
aneu=[]
no_aneu=[]
is_aneu=[]
NSAMS=[]
for ploid in Ploids:
ploid=re.split(',|x',ploid)
base.append(int(ploid[0]))
no_base.append(int(ploid[1]))
aneu.append(int(ploid[2]))
no_aneu.append(int(ploid[3]))
if int(ploid[0])==int(ploid[2]):
is_aneu.append(0)
else:
is_aneu.append(1)
NSAMS.append(int(ploid[1])+int(ploid[3]))
Data['Base_ploidy']=base
Data['Base_ploidy_number']=no_base
Data['Aneuploidy_level']=aneu
Data['Number_of_aneuploidy']=no_aneu
Data['NSAMS']=NSAMS
Data['Aneuploidy']=is_aneu
#Seperate Data into X: the independent variables and y the dependent variables
X_All=Data[['Inferred_Ploidy','Mean_read_depth','Normalised_delta','NSAMS','H_0','H_1','SNPs']].values
X=Data[['Inferred_Ploidy','Mean_read_depth','Normalised_delta','NSAMS']].values
y=Data.iloc[:,-1].values
# Seperate data into a training set and set to test on
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25, random_state=1)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_X= StandardScaler()
X_train=sc_X.fit_transform(X_train)
X_test=sc_X.transform(X_test)
#Making ANN
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier=Sequential()
classifier.add(Dense(activation='relu',input_dim=4,units=5, kernel_initializer='uniform'))
classifier.add(Dense(activation='relu',units=5, kernel_initializer='uniform'))
#classifier.add(Dense(activation='relu',units=5, kernel_initializer='uniform'))
#classifier.add(Dense(activation='relu',units=4, kernel_initializer='uniform'))
classifier.add(Dense(activation='sigmoid',units=1,kernel_initializer='uniform'))
classifier.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
classifier.fit(x=X_train,y=y_train,batch_size=100,epochs=200)
#Predict the test set results
y_pred=classifier.predict(X_test)
y_pred_class=(y_pred>0.5)
#Evaluating the performance of the logistic regression using confusion matrix
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred_class)
print(cm)
accuracy=(cm[0][0]+cm[1][1])/sum(sum(cm))
print(accuracy)
#can get predictiosn using classifier.predict([[data]])
|
985,665 | 2cb17f61e707c431d2083b0598fece61c0f7b7d2 | #coding=utf-8
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import os
import cv2
import numpy as np
import datetime
from yolov3 import yolov3_body
from yolov3 import yolov3_loss as Loss
from datasets import Dataset
import config
flags = tf.app.flags
FLAGS = flags.FLAGS
# for model
flags.DEFINE_integer('batch_size', config._batch_size, 'The number of images in each batch during training.')
flags.DEFINE_integer('classes', config._num_classes, 'The classes number.')
flags.DEFINE_integer('max_boxes_num', config._max_boxes, 'The max number of boxes in one image.')
flags.DEFINE_integer('max_steps', config._max_steps, 'The max training steps.')
flags.DEFINE_integer('print_steps', config._print_steps, 'Used for print training information.')
flags.DEFINE_integer('saved_steps', config._saved_steps, 'Used for saving model.')
# for checkpoints and summary
flags.DEFINE_string('restore_ckpt_path', config._coco_tf_weights, 'Path to save training checkpoint.')
flags.DEFINE_string('saved_ckpt_path', config._saved_weights, 'Path to save training checkpoint.')
flags.DEFINE_string('saved_summary_train_path', config._saved_summary_train_path, 'Path to save training summary.')
flags.DEFINE_string('saved_summary_val_path', config._saved_summary_val_path, 'Path to save test summary.')
# for leaning rate
flags.DEFINE_float('initial_lr', config._initial_lr, 'The initial learning rate.')
flags.DEFINE_float('end_lr', config._end_lr, 'The end learning rate.')
flags.DEFINE_integer('decay_steps', config._decay_steps, 'Used for poly learning rate.')
flags.DEFINE_float('weight_decay', config._weight_decay, 'The weight decay value for l2 regularization.')
flags.DEFINE_float('power', config._power, 'Used for poly learning rate.')
def letterbox_resize(img, new_width, new_height, interp=0):
'''
Letterbox resize. keep the original aspect ratio in the resized image.
'''
ori_height, ori_width = img.shape[:2]
resize_ratio = min(new_width / ori_width, new_height / ori_height)
resize_w = int(resize_ratio * ori_width)
resize_h = int(resize_ratio * ori_height)
img = cv2.resize(img, (resize_w, resize_h), interpolation=interp)
image_padded = np.full((new_height, new_width, 3), 128, np.uint8)
dw = int((new_width - resize_w) / 2)
dh = int((new_height - resize_h) / 2)
image_padded[dh: resize_h + dh, dw: resize_w + dw, :] = img
return image_padded
with tf.name_scope('input'):
x = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, None, None, 3], name='x_input')
y_true_1 = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, 13, 13, 3, 5 + FLAGS.classes], name='y_true_13')
y_true_2 = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, 26, 26, 3, 5 + FLAGS.classes], name='y_true_26')
y_true_3 = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, 52, 52, 3, 5 + FLAGS.classes], name='y_true_52')
with tf.variable_scope('yolov3'):
yolo_outputs = yolov3_body(x, num_classes=FLAGS.classes, is_training=True)
variables = tf.contrib.framework.get_variables_to_restore(include=['yolov3'])
with tf.name_scope('loss'):
loss = Loss(yolo_outputs=yolo_outputs, y_true=[y_true_1, y_true_2, y_true_3], num_classes=FLAGS.classes)
tf.summary.scalar('loss', loss)
with tf.name_scope('learning_rate'):
global_step = tf.Variable(0, trainable=False)
lr = tf.train.polynomial_decay(
learning_rate=FLAGS.initial_lr,
global_step=global_step,
decay_steps=FLAGS.decay_steps,
end_learning_rate=FLAGS.end_lr,
power=FLAGS.power,
cycle=False,
name='lr',
)
tf.summary.scalar('learning_rate', lr)
with tf.name_scope('opt'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss, global_step=global_step)
#train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
voc_train = Dataset('train')
voc_val = Dataset('val')
merged = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
exclude_vars = ['conv_59', 'conv_67', 'conv_75']
variables_to_resotre = [v for v in variables if v.name.split('/')[1] not in exclude_vars]
saver_to_restore = tf.train.Saver(variables_to_resotre)
saver_to_save = tf.train.Saver()
# if os.path.exists(saved_ckpt_path):
ckpt = tf.train.get_checkpoint_state(FLAGS.restore_ckpt_path)
if ckpt and ckpt.model_checkpoint_path:
saver_to_restore.restore(sess, ckpt.model_checkpoint_path)
print("Model restored...")
train_summary_writer = tf.summary.FileWriter(FLAGS.saved_summary_train_path, sess.graph)
val_summary_writer = tf.summary.FileWriter(FLAGS.saved_summary_val_path, sess.graph)
epoches = 1
for i in range(FLAGS.max_steps + 1):
batch_image, batch_y_true_1, batch_y_true_2, batch_y_true_3 = voc_train.__next__(batch_size=FLAGS.batch_size, max_boxes_num=FLAGS.max_boxes_num)
feed_dict={
x: batch_image,
y_true_1: batch_y_true_1,
y_true_2: batch_y_true_2,
y_true_3: batch_y_true_3,
}
val_batch_image, val_batch_y_true_1, val_batch_y_true_2, val_batch_y_true_3 = voc_val.__next__(batch_size=FLAGS.batch_size,
max_boxes_num=FLAGS.max_boxes_num)
val_feed_dict = {
x: val_batch_image,
y_true_1: val_batch_y_true_1,
y_true_2: val_batch_y_true_2,
y_true_3: val_batch_y_true_3,
}
if i % FLAGS.print_steps == 0:
train_loss = sess.run(loss, feed_dict=feed_dict)
val_loss = sess.run(loss, feed_dict=val_feed_dict)
print(datetime.datetime.now().strftime("%Y.%m.%d-%H:%M:%S"), " | Step: %d, | train_loss: %f, | val_loss: %f "%(i, train_loss, val_loss))
if i * FLAGS.batch_size > voc_train.num_samples * epoches:
train_loss = sess.run(loss, feed_dict=feed_dict)
val_loss = sess.run(loss, feed_dict=val_feed_dict)
lr_val = sess.run(lr)
print("Epoch %d finished, "%epoches , datetime.datetime.now().strftime("%Y.%m.%d-%H:%M:%S"), " | Step: %d, | Lr: %f, | train_loss: %f, | val_loss: %f "%(i, lr_val, train_loss, val_loss))
epoches += 1
sess.run(train_op, feed_dict=feed_dict)
if i % FLAGS.print_steps == 0:
train_summary = sess.run(merged, feed_dict=feed_dict)
train_summary_writer.add_summary(train_summary, i)
val_summary = sess.run(merged, feed_dict=val_feed_dict)
val_summary_writer.add_summary(val_summary, i)
if not os.path.exists('checkpoints'):
os.mkdir('checkpoints')
if i % FLAGS.saved_steps == 0:
saver_to_save.save(sess, os.path.join(FLAGS.saved_ckpt_path, 'yolov3.model'), global_step=i) |
985,666 | d8cc15ea3486f39c8b211f427ccf635363383822 | import base64
import contextlib
import hashlib
import sqlite3
import time
def sha1(val):
s = hashlib.sha1()
if isinstance(val, str):
val = val.encode()
s.update(val)
return s.hexdigest()
def create_connection(db_path):
conn = sqlite3.connect(db_path)
conn.create_function('sha1', 1, sha1)
return conn
@contextlib.contextmanager
def connect(db_path):
with create_connection(db_path) as conn:
yield conn
class DBLogic:
def __init__(self, db_connection):
self.db_connection = db_connection
self.initialize_db()
def initialize_db(self):
self.db_connection.executescript("""
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY,
username TEXT,
password TEXT,
full_name TEXT
);
""")
self.db_connection.executescript("""
INSERT OR IGNORE INTO users VALUES (1, 'boss', sha1('Dancing in the dark'), 'Bruce Summersteen');
INSERT OR IGNORE INTO users VALUES (2, 'edward', sha1('666'), 'Edward Hailden');
INSERT OR IGNORE INTO users VALUES (3, 'alice', sha1('Into the flood again.'), 'Alice InRopes');
INSERT OR IGNORE INTO users VALUES (4, 'bob', sha1('Is this love'), 'Bob Marmite');
INSERT OR IGNORE INTO users VALUES (5, 'system', '', 'Grape Galili');
INSERT OR IGNORE INTO users VALUES (6, 'test', sha1('1234'), 'Testy McTestFace');
INSERT OR IGNORE INTO users VALUES (7, 'admin', sha1('ReallyStr0nkP@ssw0rd'), 'System Administrator');
""")
def select_scalar(self, *args, **kwargs):
"""Utility to return a scalar value from a query."""
row = self.db_connection.execute(*args, **kwargs).fetchone()
return None if row is None else row[0]
def login(self, username, password):
match = self.select_scalar(
'SELECT * FROM users WHERE username = ? AND password = sha1(?)',
(username, password,)
)
if match:
return True, base64.b64encode(username.encode()).decode()
else:
return False, ''
def admin_login(self, password):
match = None
had_error = False
bad_token_str = ''
##DOC## Important - this must be a single line to show the query
##DOC## in the stacktrace!
##DOC## To perform an SQLI, do `') OR 1=1 ; --`
match = self.select_scalar('SELECT * FROM users where username = "admin" AND password = sha1("%s")' % (password,))
return True if match else False
def validate_login(self, cookie):
if not cookie:
return False
try:
# b64decode returns bytes, another decode to get a string
login = base64.b64decode(cookie).decode()
except:
return False
if self.select_scalar(
'SELECT * FROM users WHERE username = ?',
(login,)
):
return login
else:
return None
def get_user_name(self, username):
return self.select_scalar(
'SELECT full_name FROM users WHERE username = ?',
(username,)
)
def get_user_id(self, username):
return self.select_scalar(
'SELECT user_id FROM users WHERE username = ?',
(username,)
)
|
985,667 | ee86ba8a151cb308daf7ef5f2f3cabdef9a121d8 | from django.shortcuts import render, get_object_or_404, reverse, redirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from .forms import VendorForm, DeliverTownForm, DeliverPostalForm
from .models import Vendor, VendorDeliveryTown, VendorDeliveryPostal
from order.models import OrderLineItem, Process
# Create your views here.
@login_required
def view_vendor_profiles(request):
vendor_profiles = Vendor.objects.filter(user=request.user)
username = request.user.username
return render(request, "vendor/vendor_profiles.html", {
"vendor_profiles": vendor_profiles,
"username": username
})
@login_required
def create_vendor(request):
vendor_form = VendorForm()
if request.method == 'POST':
create_form = VendorForm(request.POST)
if create_form.is_valid():
form = create_form.save(commit=False)
form.user = request.user
form.save()
messages.success(request, "New vendor added." )
return redirect(reverse(view_vendor_profiles))
else:
messages.warning(request, "Contact number must be 8 digits numbers.")
return render(request, "vendor/vendor_form.html", {
'form': vendor_form
})
else:
return render(request, "vendor/vendor_form.html", {
'form': vendor_form
})
@login_required
def edit_vendor_profile(request, vendor_profile_id):
profile = get_object_or_404(Vendor, pk=vendor_profile_id)
vendor_name = profile.name
validation_status = profile.license_check
profile_form = VendorForm(instance=profile)
if request.method == "POST":
create_form = VendorForm(request.POST, instance=profile)
if create_form.is_valid():
create_form.save()
Vendor.objects.filter(id=vendor_profile_id).update(license_check=False)
messages.success(request, f" '{profile}'' successfully updated. Vendor Profile is pending validation.")
return redirect(reverse(view_vendor_profiles))
else:
messages.warning(request, "Contact number must be 8 digits numbers.")
return redirect(reverse(edit_vendor_profile, kwargs={'vendor_profile_id': vendor_profile_id}))
else:
return render(request, "vendor/vendor_profile_detail.html", {
"form": profile_form,
"vendor_profile_id": vendor_profile_id,
"validation_status": validation_status,
"vendor_name": vendor_name
})
def delete_vendor_profile(request, vendor_profile_id):
profile = get_object_or_404(Vendor, pk=vendor_profile_id)
vendor_name = profile.name
if request.method == "POST":
vendor_profile = get_object_or_404(Vendor, pk=vendor_profile_id)
vendor_profile.delete()
messages.success(request, f" {vendor_profile} removed.")
return redirect(reverse(view_vendor_profiles))
else:
return render(request, "vendor/vendor_delete_profile.html", {
"vendor_name" : vendor_name
})
def create_delivery_area(request, vendor_profile_id):
if request.method == "POST":
vendor_object = get_object_or_404(Vendor, pk=vendor_profile_id)
try:
##### adding to manytomany field into vendor_object #####
town = request.POST["town"].upper()
existing_town_number = VendorDeliveryTown.objects.filter(town=town).count() # check if town already exist in VendorDeliveryTown table(0 means not exist yet)
if existing_town_number == 0:
f = VendorDeliveryTown.objects.create(town=town) # create a new town object in VendorDeliveryTown table
vendor_object.vendordeliverytown.add(f) # add the town object into Vendor table's town manytomany field
else:
vendor_filter_by_town = Vendor.objects.filter(id=vendor_profile_id).filter(vendordeliverytown__town=town)
if vendor_filter_by_town:
messages.warning(request, "Town already exists.")
else:
existing_town_object = get_object_or_404(VendorDeliveryTown, town=town)
vendor_object.vendordeliverytown.add(existing_town_object)
except:
postal = request.POST["postal"]
existing_postal = VendorDeliveryPostal.objects.filter(postal_code=postal).count()
if existing_postal == 0:
f = VendorDeliveryPostal.objects.create(postal_code=postal)
vendor_object.vendordeliverypostal.add(f)
else:
vendor_filter_by_postal = Vendor.objects.filter(id=vendor_profile_id).filter(vendordeliverypostal__postal_code=postal)
if vendor_filter_by_postal:
messages.warning(request, "Postal code already exists.")
else:
existing_postal_object = get_object_or_404(VendorDeliveryPostal, postal_code=postal)
vendor_object.vendordeliverypostal.add(existing_postal_object)
return redirect(reverse(create_delivery_area, kwargs={'vendor_profile_id': vendor_profile_id}))
else:
vendor_object = get_object_or_404(Vendor, pk=vendor_profile_id)
towns = VendorDeliveryTown.objects.filter(vendor=vendor_object)
postals = VendorDeliveryPostal.objects.filter(vendor=vendor_object)
vendor_name = vendor_object.name
return render(request, "vendor/vendor_delivery_area.html", {
"towns": towns,
"postals": postals,
"vendor_profile_id": vendor_profile_id,
"vendor_name": vendor_name
})
def remove_town_vendor(request, vendor_profile_id, town_vendor_id):
town_object = get_object_or_404(VendorDeliveryTown, pk=town_vendor_id)
vendor_object = get_object_or_404(Vendor, pk=vendor_profile_id)
vendor_object.vendordeliverytown.remove(town_object)
return redirect(reverse(create_delivery_area, kwargs={'vendor_profile_id': vendor_profile_id}))
def remove_postal_vendor(request, vendor_profile_id, postal_vendor_id):
postal_object = get_object_or_404(VendorDeliveryPostal, pk=postal_vendor_id)
vendor_object = get_object_or_404(Vendor, pk=vendor_profile_id)
vendor_object.vendordeliverypostal.remove(postal_object)
return redirect(reverse(create_delivery_area, kwargs={'vendor_profile_id': vendor_profile_id}))
def view_vendor_orders(request, vendor_profile_id):
username = request.user.username
vendor = Vendor.objects.get(id=vendor_profile_id)
if request.method == "POST":
orderlineitem_id = request.POST["special"]
process = get_object_or_404(Process, title="delivered")
OrderLineItem.objects.filter(id=orderlineitem_id).update(process=process)
orders_outstanding = OrderLineItem.objects.all().filter(food__vendor__id=vendor_profile_id).filter(process__title="undelivered")
orders_completed = OrderLineItem.objects.all().filter(food__vendor__id=vendor_profile_id).filter(process__title="delivered")
if "search" in request.GET:
query = request.GET["search"]
if not query:
messages.error(request, "You have not enter a search term.")
return redirect(reverse(view_vendor_orders, kwargs={"vendor_profile_id": vendor_profile_id}))
if query.isnumeric():
queries = Q(datetime__year=query)|Q(datetime__day=query)|Q(datetime__month=query)
orders_outstanding = OrderLineItem.objects.all().filter(food__vendor__id=vendor_profile_id).filter(process__title="undelivered")
orders_completed = OrderLineItem.objects.all().filter(queries).filter(food__vendor__id=vendor_profile_id).filter(process__title="delivered")
else:
queries = Q(food__title__icontains=query)|Q(buyer__user__username__icontains=query)
orders_outstanding = OrderLineItem.objects.all().filter(food__vendor__id=vendor_profile_id).filter(process__title="undelivered")
orders_completed = OrderLineItem.objects.all().filter(queries).filter(food__vendor__id=vendor_profile_id).filter(process__title="delivered")
else:
orders_outstanding = OrderLineItem.objects.all().filter(food__vendor__id=vendor_profile_id).filter(process__title="undelivered")
orders_completed = OrderLineItem.objects.all().filter(food__vendor__id=vendor_profile_id).filter(process__title="delivered")
return render(request, "vendor/vendor_orders.html", {
"orders_outstanding": orders_outstanding,
"orders_completed": orders_completed,
"vendor": vendor,
"username": username,
"vendor_profile_id": vendor_profile_id
})
|
985,668 | 37b5d5164c5eb1c9f9acb1bb58b85d3a2fe14c8b | ##### import packages
#base
import os
from collections import defaultdict
import numpy as np
import scipy.stats
from matplotlib import pyplot as plt
import random
import pyreadr
import numpy as np
import pandas as pd
import seaborn as sns
# %matplotlib inline
# %autosave 30
#pyro contingency
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.autoguide import AutoDelta
from pyro.optim import Adam
from pyro.infer import SVI, TraceEnum_ELBO, config_enumerate, infer_discrete, Predictive
from pyro.ops.indexing import Vindex
from pyro.infer import MCMC, NUTS
import torch
from torch.distributions import constraints
pyro.enable_validation(True)
# import umap
# import plotly
# import plotly.graph_objs as go
#misc
import pickle
import torch.nn.functional as F
import itertools
import time
import arviz as az
# import homebrew modules
import tomtom_models as tm
import tomtom_util as tu
########## import data
# import data and previously fitted parameters
# import pickled data
with open('tomtom_data_preprocessed.pkl','rb') as f:
[tself_norm_all_3d, tself_norm_noauto_3d, tself_raw_all_3d, tself_raw_noauto_3d,
ttarg_norm_all_3d, ttarg_norm_noauto_3d, ttarg_raw_all_3d, ttarg_raw_noauto_3d,
tavg_norm_all_3d, tavg_norm_noauto_3d, tavg_raw_all_3d, tavg_raw_noauto_3d] = pickle.load(f)
# import pickled parameters from varying-k analysis
with open('tomtom_fitted_models.pkl','rb') as f:
[seeds_self_norm_all_grp,maps_self_norm_all_grp,logprobs_self_norm_all_grp,mem_self_norm_all_grp,
seeds_self_norm_all_dim,maps_self_norm_all_dim,logprobs_self_norm_all_dim,
seeds_self_raw_noauto_grp,maps_self_raw_noauto_grp,logprobs_self_raw_noauto_grp,mem_self_raw_noauto_grp,
seeds_self_raw_noauto_dim,maps_self_raw_noauto_dim,logprobs_self_raw_noauto_dim] = pickle.load(f)
# load previously generated data
with open('model_recovery_gen_dat.pkl','rb') as f:
[gendat_self_norm_all_grp,gendat_self_norm_all_dim,
gendat_self_raw_noauto_grp,gendat_self_raw_noauto_dim] = pickle.load(f)
# norm all grp
# Fitting a priori K GROUP model on normed all data
tm.mtype = 'group'
tm.target = 'self' # 'self','targ','avg'
tm.dtype = 'norm' # 'norm','raw'
tm.auto = 'all' # 'noauto','all'
tm.stickbreak = False
tm.optim = pyro.optim.Adam({'lr': 0.0005, 'betas': [0.8, 0.99]})
tm.elbo = TraceEnum_ELBO(max_plate_nesting=1)
# initializing storage
modrec_seeds_self_norm_all_grp = []
modrec_maps_self_norm_all_grp = []
modrec_logprobs_self_norm_all_grp = []
# each element in the outermost list is all gendat for a singel k-MAP
tm.K = 1
for kmap in gendat_self_norm_all_grp:
print(tm.K)
# each element in the second layer is a tensor nrep*nsample*datadim
stor1_seeds_self_norm_all_grp = []
stor1_maps_self_norm_all_grp = []
stor1_logprobs_self_norm_all_grp = []
for tens in kmap:
# iterate through the first dimension of the tensor, fitting model for each layer
stor2_seeds_self_norm_all_grp = []
stor2_maps_self_norm_all_grp = []
stor2_logprobs_self_norm_all_grp = []
for i in np.arange(tens.shape[0]):
pyro.clear_param_store()
seed, mmap, mem, lp = tm.tomtom_svi(tens[i])
stor2_seeds_self_norm_all_grp.append(seed)
stor2_maps_self_norm_all_grp.append(mmap)
stor2_logprobs_self_norm_all_grp.append(lp)
stor1_seeds_self_norm_all_grp.append(stor2_seeds_self_norm_all_grp)
stor1_maps_self_norm_all_grp.append(stor2_maps_self_norm_all_grp)
stor1_logprobs_self_norm_all_grp.append(stor2_logprobs_self_norm_all_grp)
modrec_seeds_self_norm_all_grp.append(stor1_seeds_self_norm_all_grp)
modrec_maps_self_norm_all_grp.append(stor1_maps_self_norm_all_grp)
modrec_logprobs_self_norm_all_grp.append(stor1_logprobs_self_norm_all_grp)
tm.K += 1
with open('refit_mod_self_norm_all_grp','wb') as f:
pickle.dump([modrec_maps_self_norm_all_grp,modrec_seeds_self_norm_all_grp,modrec_logprobs_self_norm_all_grp],f)
|
985,669 | af9a19874c5d4ddbc410a4901b3a7b1f8c6c929d | # ################################################### #
import math
import random
import sys
import time
import PIL.Image
from modules import colorutils
from PIL import Image, ImageDraw
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
def drawRects():
global config
changeColor(config.colorSwitch)
# For single square dividing, rows = cols all the time
# But for double square, start with rows, divide columns
# then divide rows, then repeat
# if (rows * lineWidth) < config.screenHeight : rows = int(config.screenHeight/lineWidth)
rHeight = round(config.screenHeight / config.rows)
squaresToDraw = round(rHeight / 2)
# if(rows*lineWidth < config.screenHeight)
additionalRows = 0
rowDiff = config.screenHeight - config.rows * rHeight
if rowDiff > config.lineWidth + 1:
additionalRows = rowDiff
for row in range(0, config.rows + additionalRows):
yOffset = round(row * rHeight)
for col in range(0, config.cols):
rWidth = round(config.screenWidth / config.cols)
xOffset = round(col * rWidth)
config.colorSwitchMode = round(random.uniform(1, 4))
for n in range(0, squaresToDraw, config.lineWidth):
# --------------------------------------------------------------#
# Alternate Bands of Color, keep to one scheme per set of squares
changeColor(config.colorSwitch, config.colorSwitchMode)
xStart = n + xOffset
xEnd = xOffset + rWidth - n - 1
yStart = n + yOffset
yEnd = yOffset + rHeight - n - 1
# config.draw.rectangle((xStart, yStart, xEnd, yEnd), outline=(r,g,b))
for l in range(0, config.lineWidth):
config.draw.rectangle(
(xStart + l, yStart + l, xEnd - l, yEnd - l),
outline=(config.r, config.g, config.b),
)
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
def changeColor(rnd=False, choice=3):
global config
# rnd = True
if rnd == False and config.rows < 2:
val = round(255 * config.brightness)
if config.r == val:
config.r = 0
config.g = val
config.b = 0
# Add variant that we pulse red/blue not just red/greeen
# red/blue makes for pink afterimage so more about excitement
# than red/green making yellow after image, which feels like it's
# more about food ...
if random.random() > 0.5:
config.b = 0
config.g = val
else:
config.r = val
config.g = 0
config.b = 0
else:
choice = round(random.uniform(1, 8))
# choice = 3
if choice == 1:
clr = config.colorutil.getRandomColorWheel(config.brightness)
if choice == 2:
clr = config.colorutil.getRandomRGB(config.brightness)
if choice >= 3:
clr = config.colorutil.randomColor(config.brightness)
if config.grayMode:
clr = config.colorutil.randomGray(config.brightness)
# clr = config.colorutil.getRandomColorWheel(config.brightness)
config.r = clr[0]
config.g = clr[1]
config.b = clr[2]
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
def main(run=True):
global config, workConfig
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
# make script to reduce from one square to 2 to 4 to 8 to 16...
# Like a frenetic Albers excercise that is more like a sign
# advertising itself
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
config.x = config.y = 0
config.r = 255
config.g = config.b = 0
config.pulseSpeed = 0.1
config.colorSwitch = True
config.countLimit = 10
config.rHeight = 0
config.rWidth = 0
config.rows = 1
config.cols = 1
config.lineWidth = 1
# rows, cols
config.divisionOfSquares = [1, 1, 2, 2, 4, 4, 8, 8, 16, 16, 32, 32]
config.colorutil = colorutils
config.grayMode = False
config.count = 0
config.rWidth = config.screenWidth
config.rHeight = config.screenHeight
# reseting render image size
config.renderImage = Image.new("RGBA", (config.actualScreenWidth, 32))
config.image = Image.new("RGBA", (config.screenWidth, config.screenHeight))
config.canvasImage = Image.new("RGBA", (config.screenWidth, config.screenHeight))
config.draw = ImageDraw.Draw(config.image)
config.id = config.image.im.id
config.lineWidth = config.lineWidth = int(workConfig.get("squares", "lineWidth"))
config.pulseSpeed = float(workConfig.get("squares", "pulseSpeed"))
config.pasteDelay = float(workConfig.get("squares", "pasteDelay"))
config.mode = workConfig.get("squares", "mode")
config.countLimit = int(workConfig.get("squares", "countLimit"))
try:
config.forceHoldDivision = int(workConfig.get("squares", "forceHoldDivision"))
config.divisionPosition = config.forceHoldDivision
except Exception as e:
config.forceHoldDivision = -1
config.divisionPosition = 0
print(e)
if run:
runWork()
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
def iterate():
global config
drawRects()
# If colorSwitch is set to False then random colors are generated
# Or the fixed 2-color pattern is used
# if it's set to True, then a palette or gray is used
if random.random() > 0.2:
config.colorSwitch = True
if random.random() > 0.9:
config.colorSwitch = False
if random.random() > 0.995:
config.grayMode = True
if random.random() > 0.92:
config.grayMode = False
config.count += 1
if config.count >= config.countLimit:
if config.forceHoldDivision != -1:
config.divisionPosition += 0
else:
config.divisionPosition += 1
if config.divisionPosition >= len(config.divisionOfSquares) - 1:
reset()
config.divisionOfSquares = list(reversed(config.divisionOfSquares))
if config.divisionOfSquares[0] == 1:
config.divisionPosition = 2
config.countLimit = 1
# if(int(config.screenHeight /divisionOfSquares[divisionPosition])) <= lineWidth : reset()
config.cols = config.divisionOfSquares[config.divisionPosition + 1]
config.rows = config.divisionOfSquares[config.divisionPosition]
config.count = 0
config.countLimit = round(config.countLimit * (2 / config.rows)) + round(
random.uniform(2, 10)
)
if random.random() > 0.8:
config.colorSwitch = False
## Paste an alpha of the next image, wait a few ms
## then past a more opaque one again
## softens the transitions just enough
mask1 = config.image.point(lambda i: min(i * 1, 50))
config.canvasImage.paste(config.image, (0, 0), mask1)
config.render(config.canvasImage, 0, 0, config.image)
time.sleep(config.pasteDelay)
mask2 = config.image.point(lambda i: min(i * 25, 100))
config.canvasImage.paste(config.image, (0, 0), mask2)
config.render(config.canvasImage, 0, 0, config.image)
time.sleep(config.pasteDelay)
mask3 = config.image.point(lambda i: min(i * 25, 255))
config.canvasImage.paste(config.image, (0, 0), mask3)
config.render(config.canvasImage, 0, 0, config.image)
# config.render(config.canvasImage, 0, 0, config.image)
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
def runWork():
global config
print(bcolors.OKGREEN + "** " + bcolors.BOLD)
print("RUNNING squares.py")
print(bcolors.ENDC)
while True:
iterate()
time.sleep(config.pulseSpeed)
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
def reset():
global config
config.divisionPosition = 0
config.countLimit = config.countLimit
config.lineWidth = int(random.uniform(1, 9))
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
def callBack():
global config
# animator()
"""""" """""" """""" """""" """""" """""" """""" """""" """""" """""" ""
|
985,670 | b7e411ed82bfe115f38b406c5abaefbf6b17c5c0 | # feat/test2 - a
# feat/test
country = "United States of America"
# slicing
# substring. get string begin at index 5.
print(country[5:])
# get first 5 chars
print(country[:5])
# defined starting point:ending point/index up to but not including
print(country[5:10])
|
985,671 | 5c5c8d0788bd2f90b8ce79e93e3c2d351a0831e2 | from arduino.arduinoparallel import ArduinoParallel
class PortMock:
def __init__(self, port='COM5', baudrate=115200, parity='N', bytesize=8, stopbits=1, timeout=1):
self.port = port
self.baudrate = baudrate
self.parity = parity
self.bytesize = bytesize
self.stopbits = stopbits
self.timeout = timeout
def close(self):
return True
def write(self, command):
print(f'PortMock write: {command.strip()}')
return len(command)
def read_all(self):
answer = 'answer'
print(f'PortMock read_all: {answer}')
return answer
@property
def in_waiting(self):
return 10
@property
def is_open(self):
return True
class ArduinoParallelMock(ArduinoParallel):
def __init__(self, *args, **kwargs):
self._port = PortMock()
self._name = 'Parallel mock'
self._delay = 0
def query(self, question: str):
self.write(question)
# while not self._port.in_waiting:
# pass
return self.read_all()
|
985,672 | 81d014313ea6cb9dfc2971651f179ab881c69596 | # implemented by Fengyu
from flask import Flask, request, Response, make_response, session
import requests
import json
import argparse
import logging
import sys
import logging
from flask_cors import CORS
# email
from flask_mail import Mail
# database import
from tinydb import TinyDB, where, Query
db = TinyDB('users.json')
electiondb = TinyDB('election.json')
votedb = TinyDB('vote.json')
from flask_mail import Message
app = Flask(__name__)
log = logging.getLogger('werkzeug')
log.disabled = True
CORS(app)
# def sendConfirmation(user):
# msg = Message("Hello from Fengyu",
# sender="caifengyutruman@gmail.com",
# recipients=[user['email']])
# with app.app_context():
# mail.send(msg)
@app.route('/users/authenticate', methods=['POST'])
def authenticate():
if request.method == 'POST':
context = json.loads(request.data)
# find username first
incomingUser = context['username']
incomingPwd = context['password']
if not db.contains(where('username') == incomingUser):
return Response(status=404)
else:
# check the correctness of password
dbUser = db.search(where('username') == incomingUser)[0]
if dbUser['password'] == incomingPwd:
# print('found')
res = dict()
res['id'] = dbUser['id']
res['username'] = dbUser['username']
res['firstName'] = dbUser['firstName']
res['lastName'] = dbUser['lastName']
res['email'] = dbUser['email']
res['token'] = 'random'
res = json.dumps(res)
return Response(res, status=200)
else:
return Response(status=404)
return Response(status=404)
@app.route('/createElection', methods=['POST'])
def createPoll():
if request.method == 'POST':
poll = json.loads(request.data)
pollName = poll['name']
if electiondb.contains(where('name') == pollName):
return Response(status=404)
else:
electiondb.insert(poll)
return Response('success', status=200)
return Response(status=404)
@app.route('/getElection', methods=['GET'])
def getElection():
if request.method == 'GET':
elections = electiondb.all()
res = json.dumps(elections)
return Response(res, mimetype='application/json')
return Response(status=404)
@app.route('/vote', methods=['POST'])
def vote():
if request.method == 'POST':
vote = json.loads(request.data)
votedb.insert(vote)
election = electiondb.search(where('name') == vote['election'])[0]
res = election['publickey']
res['g'] = str(res['g'])
res['p'] = str(res['p'])
res['q'] = str(res['q'])
res['y'] = str(res['y'])
res = json.dumps(res)
print(res)
return Response(res, status=200)
return Response(status=404)
@app.route('/getVoted', methods=['POST'])
def getVoted():
if request.method == 'POST':
myID = json.loads(request.data)
myID = myID['voter']
# print(myID)
alreadyVotes = votedb.search(where('voter')==myID)
# print(alreadyVotes)
res = json.dumps(alreadyVotes)
return Response(res, status=200)
return Response(status=404)
@app.route('/users', methods=['GET'])
def getUsers():
if request.method == 'GET':
# return the data of user
# user id first name/last name
# print('Get all users')
allUsers = db.all()
res = json.dumps(allUsers)
return Response(res, status=200)
return Response(status=404)
@app.route('/users/register', methods=['POST'])
def register():
if request.method == 'POST':
# check the duplicate
# if ok, put it into the database
# else return failure
registerInfo = json.loads(request.data)
if db.contains(where('username') == registerInfo['username']):
return Response(status=400)
else:
registerInfo['id'] = len(db)
db.insert(registerInfo)
# sendConfirmation(registerInfo)
return Response(status=200)
return Response(status=404)
@app.route('/publickey', methods=['POST'])
def getPublicKey():
if request.method == 'POST':
pkcontainer = json.loads(request.data)
pkcontainer = pkcontainer['pkcontainer']
print(pkcontainer)
name = pkcontainer['name']
pk = pkcontainer['publickey']
theone = electiondb.search(where('name') == name)[0]
theone['publickey'] = pk
electiondb.upsert(theone, where('name') == name)
return Response(status=200)
return Response(status=404)
@app.route('/users/<id>', methods=['POST'])
def getUserInfo(id):
if request.method == 'POST':
# get information for one user
print(id)
return Response(status=200)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Config')
parser.add_argument("-i", "--ip", type=str, help="Please give the ip", default="127.0.0.1")
parser.add_argument("-p", "--port", type=int, help="Please give the running port", default=4000)
args = parser.parse_args()
app.logger.setLevel(logging.INFO)
app.run(debug=True, host=args.ip, port=args.port) |
985,673 | b69f8670a05f381cbc441ef88615c0fd10ad007b | from sqlalchemy import Column, ForeignKey, Integer, String, Boolean
from sqlalchemy.orm import relationship
from database.db_entity_collection import BaseModel
class MonstersWeapons(BaseModel):
__tablename__ = 'monsters_weapons'
id_monsters_weapons = Column(Integer, primary_key=True, unique =True)
id_monsters = Column(Integer, ForeignKey('monsters.id_monsters'))
id_weapons = Column(Integer, ForeignKey('weapons.id_weapons')) |
985,674 | 096489f172bb80442f6cddaec4a5d82c4fd6aa81 | from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PyQt4 import QtCore
from PyQt4 import QtGui
from PyQt4.QtGui import *
from email import Encoders
from email.MIMEBase import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import os
import smtplib
import sys
import datetime
from configparser import ConfigParser
import MySQLdb
import os
class SQL:
db = None
PATH = "dbfiles"
def read_db_config(self,filename='db.ini', section='mysql'):
try:
parser = ConfigParser()
parser.read(filename)
# get section, default to mysql
params = {}
if parser.has_section(section):
items = parser.items(section)
for item in items:
params[item[0]] = item[1]
else:
raise Exception('{0} not found in the {1} file'.format(section, filename))
return params
except Exception as e:
print e
return 0
def init(self):
self.connect()
param = self.read_db_config()
try:
sql = 'CREATE DATABASE '+param['database']
cur = self.db.cursor().execute(sql)
print cur
except Exception as e:
print "Error in creating db ["+str(e)+"]"
try:
os.mkdir(self.PATH)
except OSError as e:
print e
for i in os.listdir(self.PATH):
if (i.find(".sql")!=-1):
f = open(self.PATH+"\\"+i)
sql = ""
for k in f.readlines():
sql+=k
print sql
try:
cur = self.db.cursor().execute(sql)
print cur
except Exception as e:
print "Error in creating table ["+str(e)+"]"
self.close()
return 0
def dropTables(self):
self.connect()
try:
sql = "DROP TABLE items"
cur = self.db.cursor().execute(sql)
print cur
except Exception as e:
print "Error in DROP table ["+str(e)+"]"
try:
sql = "DROP TABLE itemType"
cur = self.db.cursor().execute(sql)
print cur
except Exception as e:
print "Error in DROP table ["+str(e)+"]"
self.close()
return 0
def connect(self):
param = self.read_db_config()
self.db = MySQLdb.connect(host=param['host'], # your host, usually localhost
user=param['user'], # your username
passwd=param['password'], # your password
db=param['database']) # name of the data base
#for row in cur.fetchall():
# print row[0]
return 0
def doBackup(self):
mass = self.doSQL("SELECT * FROM tab")
def doSQL(self,sqlText):
try:
self.connect()
cur = self.db.cursor()
cur.execute(sqlText)
self.close()
return cur
except Error as e:
print e
self.close()
return 0
def insert(self):
sql = "INSERT INTO `users` (`email`, `password`) VALUES (%s, %s)"
cursor.execute(sql, ('webmaster@python.org', 'very-secret'))
return 0
def close(self):
self.db.close()
return 0
class Generator(QtGui.QWidget):
def __init__(self):
super(Generator, self).__init__()
self.initUI()
def initUI(self):
# param = self.read_config()
self.codeS_title = QtGui.QLabel('Type')
self.title = QtGui.QLabel('Country')
self.author = QtGui.QLabel('ID')
self.review = QtGui.QLabel('Count')
self.price = QtGui.QLabel('Price')
self.email = QtGui.QLabel('Email')
self.contryEdit = QtGui.QLineEdit()
self.contryEdit.setText(str("fsdfs"))
self.idEdit = QtGui.QLineEdit()
self.idEdit.setText(str("param['id']"))
self.log = QtGui.QTextEdit()
self.countEdit = QtGui.QLineEdit()
self.countEdit.setText(str("param['count']"))
self.priceEdit = QtGui.QLineEdit()
self.priceEdit.setText(str("param['price']"))
self.emailEdit = QtGui.QLineEdit()
self.emailEdit.setText(str("param['email']"))
self.image = QtGui.QLabel("window")
self.image.setGeometry(10, 10, 200, 400)
self.image.setPixmap(QtGui.QPixmap("barcode_scanner_icon.png"))
self.button = QtGui.QPushButton()
self.button.setText("GENERATE")
#self.button.clicked.connect(self.handleButton)
self.toHtml = QtGui.QPushButton()
self.toHtml.setText("SEND")
#self.toHtml.clicked.connect(self.toHTML)
self.clear = QtGui.QPushButton()
self.clear.setText("HELP")
#self.clear.clicked.connect(self.HELPMe)
self.watermark = QtGui.QPushButton()
self.watermark.setText("Watermark")
#self.watermark.clicked.connect(self.addWaterMark)
self.progress = QtGui.QProgressBar();
self.progress.setMaximum(0)
self.progress.setMinimum(0)
self.progress.setValue(0)
self.listWidget = QComboBox()
#self.listWidget.addItems(CODE_TYPES)
self.grid = QtGui.QGridLayout(self)
self.grid.setSpacing(20)
self.listWidget = QComboBox()
#self.listWidget.addItems(CODE_TYPES)
self.grid.addWidget(self.image, 1, 1)
self.grid.addWidget(self.codeS_title, 2, 0)
self.grid.addWidget(self.listWidget, 2, 1)
self.grid.addWidget(self.title, 3, 0)
self.grid.addWidget(self.contryEdit, 3, 1)
self.grid.addWidget(self.author, 4, 0)
self.grid.addWidget(self.idEdit, 4, 1)
self.grid.addWidget(self.review, 5, 0)
self.grid.addWidget(self.countEdit, 5, 1)
self.grid.addWidget(self.price, 6, 0)
self.grid.addWidget(self.priceEdit, 6, 1)
self.grid.addWidget(self.email, 7, 0)
self.grid.addWidget(self.emailEdit, 7, 1)
self.grid.addWidget(self.button, 8, 1)
self.grid.addWidget(self.progress, 9, 1)
self.grid.addWidget(self.log, 10, 1, 5, 1)
self.grid.addWidget(self.toHtml, 11, 0)
self.grid.addWidget(self.watermark, 12, 0)
self.grid.addWidget(self.clear, 13, 0)
self.setLayout(self.grid)
self.setGeometry(300, 300, 450, 100)
self.setWindowTitle('BARCODE GENERATOR')
self.show()
def main():
app = QtGui.QApplication(sys.argv)
ex = Generator()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
#a = SQL()
#a.init()
#a.doSQL("SELECT * FROM tab") |
985,675 | e33a894aa5e8c3b69fecbd2885ae92abc9e879c0 |
class Config(object):
'''
保存网络的配置信息
'''
data_path = 'data/'
num_workers = 4
image_size = 96 #图片尺寸
batch_size = 128
max_epoch = 200
lrG = 2e-4 #生成器的学习率
lrD = 2e-4 #鉴别器的学习率
gpu = True
nz = 100 #噪声维度
ngf = 64 #生成器feature map数
ndf = 64 #判别器feature map数
save_path = 'imgs/'
vis = True
env = 'GAN'
plot_time = 20 #间隔20,visdom画图一次
d_every = 1 #每一个batch训练一次判别器
g_every = 5 #每5个训练一次生成器
decay_every = 5 #每10个epoch保存一次模型
netd_path = None
netg_path = None
# 只测试不训练
gen_img = 'result.png'
# 从512张生成的图片中保存最好的64张
gen_num = 64
gen_search_num = 512
gen_mean = 0 # 噪声的均值
gen_std = 1 # 噪声的方差
def parse(self,kwargs):
'''
根据字典跟新参数
:param kwargs:
:return:
'''
for k,v in kwargs.items():
if not hasattr(self,k):
warnings.warn('warning: opt has no attribute %s' %k)
setattr(self,k,v)
print('user config:')
for k,v in self.__class__.__dict__.items():
if not k.startswith('_'):
print(k,getattr(self,k))
Config.parse = parse
opt = Config()
|
985,676 | c3b875b92e082d8c29cd17641023c983934d553a | #!/usr/bin/python
# -*- coding: utf-8
from setuptools import setup, find_packages
pkg_vars = {}
setup(
name='ddns',
author='Selçuk Karakayalı',
author_email='skarakayali@gmail.com',
url='https://github.com/karakays/ddns',
description='Dynamically update DNS records of the enclosing environment',
install_requires=['requests>=2.21.0'],
license='MIT',
packages=find_packages(),
python_requires='>=3.6',
keywords=['dns', 'dynamic dns', 'ip address'],
long_description=open('README.rst').read(),
entry_points={
'console_scripts': [ 'ddns = ddns.__main__:main' ]
},
classifiers=[ "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License"
]
)
|
985,677 | a8c24623fbd346260abac82cf665d641977d8067 | from django.contrib.auth.models import User
from django.db import models
class Job(models.Model):
title = models.CharField(max_length=255)
short_description = models.TextField()
long_description = models.TextField(blank=True, null=True)
created_by = models.ForeignKey(User, related_name='jobs', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
changed_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Application(models.Model):
job = models.ForeignKey(Job, related_name='applications', on_delete=models.CASCADE)
content = models.TextField()
experience = models.TextField()
created_by = models.ForeignKey(User, related_name='applications', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True) |
985,678 | 935f90addbb5aaebcf769011b6f51b104b6e02da | from peewee import *
db = SqliteDatabase('sqlite3.db')
class Question(Model):
question_text = CharField()
#pub_date = DateTimeField("date published")
class Meta:
database = db
def __str__(self):
return self.question_text
class Choice(Model):
question = ForeignKeyField(Question, backref='choices')
choice_text = CharField()
votes = IntegerField(default=0)
class Meta:
database = db
def __str__(self):
return self.choice_text
def create_tables():
with db:
db.create_tables([Question, Choice]) |
985,679 | 3703565199a347c0c75dc917829649c090d9ad0d | from django.shortcuts import render,HttpResponse,redirect
from .models import Blog,Comment
from django.contrib import messages
from django.views import View
# Create your views here.
class home(View):
def get(self,request):
posts=Blog.objects.all()
content={'allposts':posts}
return render(request,'blog/index.html',content)
class blogger(View):
def get(self,request,slug):
post=Blog.objects.filter(Blog_slug=slug).first()
allcomments=Comment.objects.filter(post=post,parent=None)
allreplies=Comment.objects.filter(post=post).exclude(parent=None)
repdict={}
for reply in allreplies:
if reply.parent.c_id not in repdict.keys():
repdict[reply.parent.c_id]=[reply]
else:
repdict[reply.parent.c_id].append(reply)
content={'fullpost':post,'comments':allcomments,'allreply':repdict}
# print(f'content is : {content}')
# print(f'repliy dic is : {repdict}')
# for key,value in repdict:
# print(f'all reply is : {key},{value}')
return render(request,'blog/blogger.html',content)
class postcomment(View):
def post(self,request):
content=request.POST.get('comment')
post_id=request.POST.get('postid')
comment_id=request.POST.get('commentid')
# print(content,post_id)
User=request.user
post=Blog.objects.get(Bid=post_id)
if comment_id =="":
comment=Comment.objects.create(c_text=content,post=post,user=User)
messages.success(request,"Comment added successfully")
else:
parentsno=Comment.objects.get(c_id=comment_id)
comment=Comment.objects.create(c_text=content,post=post,user=User,parent=parentsno)
messages.success(request,"Reply added successfully")
comment.save()
return redirect(f"blogger/{post.Blog_slug}") |
985,680 | b0d57a2ba0422497cead0b2ed2a527b2d9811b3a | from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import Http404
from serializers import RentSerializer
from permissions import IsOwnerReadOnly
from rest_framework import viewsets
from models import Equipment
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework import status
class RentList(APIView):
def get(self, request, format=None):
equips = Equipment.objects.all()
serializer = RentSerializer(equips, many=True)
return Response(serializer.data)
def post(self, request, format=None):
serializer = RentSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class RentDetail(APIView):
def get_object(self, pk):
try:
return Equipment.objects.get(pk=pk)
except Equipment.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
equipment = self.get_object(pk)
serializer = RentSerializer(equipment)
return Response(serializer.data)
def put(self, request, pk, format=None):
equipment = self.get_object(pk)
serializer = RentSerializer(equipment, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk, format=None):
equipment = self.get_object(pk)
equipment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
985,681 | 1cefe6e48c85783a43ebd4a8170831e6e24c3ed9 | # https://www.youtube.com/watch?v=BuIsI-YHzj8
# http://deeplearning.net/tutorial/gettingstarted.html
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
# GPU acceleration
# theano.config.device = "gpu"
"""
>>> np.linspace(-1, 1, 11)
array([-1. , -0.8, -0.6, -0.4, -0.2, 0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
trainX = np.asarray(np.linspace(-1, 1, 101))
"""
>>> np.random.randn(3)
array([ 0.02066501, 0.76675396, 0.62194306])
"""
trainY = 2 * trainX + np.random.randn(trainX.size)
print "trainX is ", trainX
print "trainY is ", trainY
# Initialize symbolic variables
X = T.scalar("X")
Y = T.scalar("Y")
# Define symbolic model
def model(X, w):
return X * w
# Initialize model parameter W
# hybrid variable -> need data associated with them
W = theano.shared(np.asarray([0.], dtype=theano.config.floatX), "W")
print "W =", W.get_value()
y_predicted = model(X, W)
print "y_predicted = X*W =", y_predicted
##
# plot the training set
# "bo" means blue circles representing each point
# "r" means read line
# x axis from -1.5 to +1.5
# y_predicted axis from -6 to +6
# ---
plt.plot(trainX, trainY, "bo", trainX, model(trainX, W.eval()), "r")
plt.axis([-1.5, 1.5, -6, 6])
plt.show()
##
# Define symbolic loss
cost = T.mean(T.sqr(y_predicted - Y)) # cost = average of sqrt(prediction - target)
print "cost function = mean(sqrt(y_predicted-Y)) =", cost
# Determine partial derivative of cost w.r.t. parameter W
gradient = T.grad(cost=cost, wrt=W)
print "gradient = partial derivative of cost w.r.t. W =", gradient
# Define how to update parameter W based on gradient
learning_rate = 0.01
updates = [[W, W - gradient * learning_rate]]
print "update = [[W, W-gradient*0.01]] =", updates
# Define theano function that compiles symbolic expressions
train = theano.function(
inputs=[X, Y],
outputs=cost,
updates=updates,
allow_input_downcast=True,
mode="DebugMode"
)
# Iterate through data 100 times, updating parameter W after each iteration
for i in range(10):
for x, y_target in zip(trainX, trainY):
# print "train(x,y) with x=", x, " and y=", y_target
train(x, y_target)
print "iteration", i, ": W =", W.get_value()
##
# plot the trained set
# "bo" means blue circles representing each point
# "r" means read line
# x axis from -1.5 to +1.5
# y_predicted axis from -6 to +6
# ---
plt.plot(trainX, trainY, "bo", trainX, model(trainX, W.eval()), "r")
##
plt.axis([-1.5, 1.5, -6, 6])
plt.show()
print "W =", W.get_value() # something around 2
|
985,682 | 9cb110c267e4f1c9c221cd48a2e6eba998e76053 | # SConscript for lib subdirectory
# Import envrionment
Import('env')
# Now get list of .cpp files
src_files = Glob('*.cpp')
objs = env.Object(src_files)
yaml_objs = SConscript('yaml/SConscript')
bson_objs = SConscript('bson/SConscript')
if env['tests'] == True:
gtest_libs = SConscript('gtest/SConscript')
else:
gtest_libs = []
# specific list of objects for dstbuild
dst_objs = env.Object(Split("superstl.cpp statelist.cpp"))
ret_objs = [objs + bson_objs + yaml_objs, dst_objs]
# Add gtest library
ret_objs[0] += gtest_libs
Return('ret_objs')
|
985,683 | 1b0a1ebd9fc8c558e0d79588b97e0e1ac72220e2 | def getchr(plus,txt,it=97):
if(txt.isupper()):it=65
tmp = ord(txt) - it # 0-25 input
tmp = (tmp+plus)%26 # 0-25 ans
return chr(tmp + it)
plus = int(input())
txt = input()
print(getchr(plus,txt)) |
985,684 | 9f6db5d95956f526bb743b5c9b64aefc918fcca3 | from django import forms
from django.forms import ModelForm,Select
from .models import Messages,UserInfo,DoctorMessageMapping
import drchronoAPI
TIME_CHOICES=[]
for h in xrange(24):
for m in ['00','30']:
t='{:02d}:{}:00'.format(h,m)
TIME_CHOICES.append((t,t[:-3]))
class UserInfoForm(ModelForm):
msg_subject=forms.CharField(max_length=200)
msg_text=forms.CharField(max_length=1000,widget=forms.Textarea())
#def __init__(self,*args,**kwargs):
#self.user=kwargs.pop('user')
#super(UserInfoForm,self).__init__(*args,**kwargs)
#self.fields['message'].choices=drchronoAPI.get_user_message_name(self.user)
class Meta:
model=UserInfo
fields=['is_active','send_time']
widgets={'send_time':forms.Select(choices=TIME_CHOICES)}
def is_valid(self):
valid=super(UserInfoForm,self).is_valid()
if not valid:
data=self.cleaned_data
if data.get('is_active')==False:
valid=True
return valid |
985,685 | a3928a534a678cd8c8304ed6de5bf0abc04e4c5e | import math
import numpy.linalg as LN
import numpy as np
ep = lambda p,b: p*b
is_close = lambda old, new, e: LN.norm(old-new) <= e
def meanshift_segmentation(im, features, bandwidth):
cluster_means = []
points = range(features.shape[0])
np.random.shuffle(points)
points = set(points)
while len(points) > 0:
points, cluster = meanshift(features, points, bandwidth)
meanshift_merge(cluster_means, cluster, bandwidth)
im_cluster = np.zeros(features.shape[0], dtype=np.int)
fvecs = np.asarray(cluster_means)
for f_idx in xrange(features.shape[0]):
differ = fvecs-features[f_idx]
diff_norm = LN.norm(differ, axis=1)
closest_cluster = diff_norm.argmin()
im_cluster[f_idx] = closest_cluster
# print "No. of clusters:", len(cluster_means), "at bandwidth", bandwidth
im_cluster_assign = np.reshape(im_cluster, im.shape[0:-1])
return im_cluster_assign
def meanshift(features, pts, bandwidth):
pt = pts.pop()
mean_vec = features[pt]
do_move = True
cluster = set([pt])
while do_move and len(pts)>0:
in_pts = np.asarray(list(pts))
fvecs = features[in_pts]
differ = fvecs-mean_vec
diff_norm = LN.norm(differ, axis=1)
diff_is_close = np.where(diff_norm<=bandwidth)
is_close_pts = in_pts[diff_is_close]
if is_close_pts.shape[0] > 0:
cluster = set(is_close_pts)
cl_in_pts = np.asarray(list(cluster))
is_close_feat = features[cl_in_pts]
is_close_feat_mean = np.mean(is_close_feat, axis=0)
do_move = not is_close(mean_vec, is_close_feat_mean, ep(.01, bandwidth))
mean_vec = is_close_feat_mean
pts -= set(is_close_pts)
else:
break
return pts, mean_vec
def meanshift_merge(cluster_means, new_cluster, bandwidth):
found = False
for c in cluster_means:
if is_close(new_cluster, c, ep(.5, bandwidth)):
found = True
break
if not found:
cluster_means.append(new_cluster)
def kmeans_segmentation(im, features, num_clusters):
f_indx = lambda h,w: (h*im.shape[1])+w
mean = features[np.random.choice(features.shape[0], num_clusters)]
ep = 0.05
mean_change = np.inf
im_cluster = None
while mean_change > ep:
new_mean, im_cluster = centroid_mean(mean, features, im.shape)
mean_change = np.linalg.norm(mean-new_mean)
mean = new_mean
im_assign = np.reshape(im_cluster, im.shape[0:-1])
return im_assign
def centroid_mean(mean, features, im_shape):
rsums = np.zeros(mean.shape)
rcounts = np.zeros(mean.shape[0], dtype=np.int)
im_cluster = np.zeros(features.shape[0], dtype=np.int)
for f_idx in range(features.shape[0]):
closest_cluster = None
dist = np.inf
for i in range(mean.shape[0]):
d = np.linalg.norm(features[f_idx]-mean[i,:])
if d < dist:
closest_cluster = i
dist = d
rsums[closest_cluster] = rsums[closest_cluster] + features[f_idx]
rcounts[closest_cluster] = rcounts[closest_cluster] + 1
im_cluster[f_idx] = closest_cluster
new_mean = rsums.transpose()/rcounts
new_mean = new_mean.transpose()
return new_mean, im_cluster
|
985,686 | a7a526182e964cb87951911494c13aed1703fc70 | #!/usr/bin/env python3
#__*__coding: utf8__*__
import requests
url = 'http://www.goubanjia.com'
header = {
'User-Agent':'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Referer': 'http://www.baidu.com',
}
r = requests.get(url, headers=header)
#print(r.text)
print(r.json()) |
985,687 | f909fde49b9dd5a51693232a4eefef30c79f6aaf | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pupil', '0004_auto_20160214_1454'),
]
operations = [
migrations.DeleteModel(name="User_info"),
migrations.DeleteModel(name="User_data"),
]
|
985,688 | 214d4fac7534d4c2b3ca09f8c7123c2ffd7d53e9 | #Tp Relations
class Ville:
"""Ville"""
def __init__(self,nomVille):
self._nomVille = nomVille
self._batiments = []
@property
def nomVille(self):
return self._nomVille
@nomVille.setter
def nomVille(self,v):
self._nomVille = v
#Methodes de gestion de la liste des bâtiments
def get_batiments(self):
return self._batiments
def add_batiments(self,b):
if b not in self._batiments:
self._batiments.append(b)
def remove_batiments(self,b):
if b in self._batiments:
self._batiments.remove(b)
#methode demandée
def liste_villes(self):
return len(self._batiments)
class Batiment:
"""Batiments"""
def __init__(self,nomBat):
self._nomBat = nomBat
self._employes = []
@property
def nomBat(self):
return self._nomBat
@nomBat.setter
def nomBat(self,b):
self._nomBat = b
#Methodes de gestion des employes
def get_employes(self):
return self._employes
def add_employes(self,e):
if e not in self._employes:
self._employes.append(e)
def remove_employes(self,e):
if e in self._employes:
self._employes.remove(e)
class Employe:
"""Employes"""
def __init__(self,nom,prenom):
self._nom = nom
self._prenom = prenom
@property
def nom(self):
return self._nom
@nom.setter
def nomBat(self,n):
self._nom = n
@property
def prenom(self):
return self._prenom
@prenom.setter
def prenom(self,p):
self._prenom = p
class Entreprise:
"""Entreprises"""
def __init__(self,nomEntr):
self._nomEntr = nomEntr
self._batiments =[]
self._villes =[]
@property
def nomEntr(self):
return self._nomEntr
@nomEntr.setter
def nomEntr(self,e):
self._nomEntr = e
#Methodes de gestion de la liste des bâtiments
def get_batiments(self):
return self._batiments
def add_batiments(self,b):
if b not in self._batiments:
self._batiments.append(b)
def remove_batiments(self,b):
if b in self._batiments:
self._batiments.remove(b)
#Methodes de gestion de la liste des villes
def get_villes(self):
return self._villes
def add_villes(self,v):
if v not in self._villes:
self._villes.append(v)
def remove_villes(self,v):
if v in self._villes:
self._villes.remove(v)
|
985,689 | 0a3b0d5a842aa0bef2388bf309adb6369513fff7 | import pandas as pd
import plotly.express as px
df = pd.read_csv("Size of TV,_Average time spent watching TV in a week (hours).csv")
fig = px.scatter(df, x="Size of TV", y="\tAverage time spent watching TV in a week (hours)")
fig.show() |
985,690 | 1f2dce411ccdff82d8d431a1881c314f8dbfad1a | ################################################################
## Simple Calculator App (Jython) ##
## Author: Nick Belli ##
## Date: 04/18/2018 ##
## Uncomment popup boxes for testing and training purposes ##
## Docs are delimited with "## ", commented code is "#" ##
################################################################
def printMenu():
userSelection = input("Please select an operation: \n" +
"1: Add \n" +
"2: Subtract \n" +
"3: Multiply \n" +
"4: Divide \n" +
"0: Exit " )
return userSelection
def addTwoNumbers(myNum1, myNum2):
return int(myNum1) + int(myNum2)
def subtractTwoNumbers(myNum1, myNum2):
return int(myNum1) - int(myNum2)
def multiplyTwoNumbers(myNum1, myNum2):
return int(myNum1) * int(myNum2)
def divideTwoNumbers(myNum1, myNum2):
return int(myNum1) / int(myNum2)
def returnMessage():
return 'My result is: ' + str(myResult)
## Welcome users to the application ##
popup("Welcome to the Calculator!")
## Begin the program by setting the sentinel value userSelection ##
## userSelection has to be instantiated to something in order to begin the loop ##
userSelection = ''
while userSelection != "0":
## Prompt user for variables and instantiate the result to null ##
myNum1 = input("Please enter your first number: ")
myNum2 = input("Please enter your second number: ")
myResult = ''
## Popup boxes to unit test acceptance of input ##
#popup('My first number is: ' + myNum1)
#popup('My second number is: ' + myNum2)
## Show the variables being stored as strings ##
## Strings concatenate expressions instead of using arithmetic expressions ##
#myResult = myNum1 + myNum2
## Show the conversion to integer for use of arithmetic expressions ##
#myResult = int(myNum1) + int(myNum2)
## Popup a menu and ask the user to select an operation ##
userSelection = printMenu()
## Unit test to show that a userSelection exists ##
#popup(userSelection)
if userSelection == "1":
myResult = addTwoNumbers(myNum1, myNum2)
popup(returnMessage())
elif userSelection == "2":
myResult = subtractTwoNumbers(myNum1, myNum2)
popup('My result is: ' + str(myResult))
elif userSelection == "3":
myResult = multiplyTwoNumbers(myNum1, myNum2)
popup('My result is: ' + str(myResult))
elif userSelection == "4":
if int(myNum2) == 0:
popup("You can't divide by Zero!")
else:
myResult = divideTwoNumbers(myNum1, myNum2)
popup('My result is: ' + str(myResult))
elif userSelection == "0":
popup("Thank you for using the simple calculator")
else:
## Catch everything that is not a provided option and loop again ##
popup("Please select a valid option")
|
985,691 | 4182f473f8f52f2cdd61a4757d6c08da5a998d92 | from client import CLIENT
import argparse
from threading import Thread
import datetime
from concurrent.futures import ThreadPoolExecutor
def argv():
# Create parser
arg_parser = argparse.ArgumentParser()
# Add arguments
arg_parser.add_argument('-t', '--test', help='Specify which test to run')
arg_parser.add_argument('-p', '--port', help='Specify port to connect to')
# Return parsed arguments
return arg_parser.parse_args()
def test1(port):
client1= CLIENT(b'E11000000000DA7A0000000501020304050B1E00000000',port)
client1.send()
def test2(port):
for test in range (0,50):
print('test ', test)
client1= CLIENT(b'E11000000000DA7A0000000501020304050B1E00000000',port)
client1.send()
def test4(port):
msg=b'E11000000000DA7A000003E8'
for i in range(0,500):
msg+=b'01'
msg+=b'0B1E00000000'
print('data length: ',len(msg))
client1= CLIENT(msg,port)
client1.send()
def main():
args = argv()
if args.test == 'test1':
#test1 : test simple client connecting and sending binary string
test1(args.port)
if args.test == 'test2':
#test2 : test a big number of connections
test2(args.port)
if args.test == 'test3':
#test3 10 simultanious connections
executors_list = []
# 5 simultanious connections are accepted and the rest are handled as
# empty slot in the queue are available.
with ThreadPoolExecutor(max_workers=5) as executor:
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
executors_list.append(executor.submit(test1, args.port))
if args.test == 'test4':
#test2 : test sending big data
test4(args.port)
if __name__ == "__main__":
main()
|
985,692 | d2eff1a2eb951446d26aee40a608fc38c123da20 | from bar import foo
def f():
foo.boring_function()
foo.interesting_function()
|
985,693 | 9dd14ca34a57355095322387974dad65c127b738 | from math import pi, sqrt
class Triangulo:
def __init__(self, opcion: str):
self.opcion = opcion.lower() #Convierte a mminusculas.
def Program(self):
try:
if self.opcion == "perimetro":
perimetro = self.__HallarPerimetro(float(input("Lado 1: ")), float(input("Lado 2: ")), float(input("Lado 3: ")))
print(f"El resultado del {self.opcion} es: {perimetro}")
elif self.opcion == "area":
area = self.__HallarArea(float(input("Escribe la base: ")), float(input("Escribe la altura: ")))
print(f"El resultado del {self.opcion} es: {area}")
elif self.opcion == "hipotenusa":
hipotenusa = self.__HallarHipotenusa(int(input("Cateto (b): ")), int(input("Cateto (c): ")))
print(f"El resultado de la {self.opcion} es: {hipotenusa}")
elif self.opcion == "salir":
print(f"El programa se cerrará.")
else:
print(f"No escribió una opción correcta.")
except SyntaxError:
print(SyntaxError)
def __HallarPerimetro(self, lado1, lado2, lado3):
resultadoPerimetro = lado1 + lado2 + lado3
return resultadoPerimetro
def __HallarArea(self, base, altura):
resultadoArea = (base * altura) / 2
return resultadoArea
def __HallarHipotenusa(self, cat1, cat2):
hipotenusa = cat1**2 + cat2**2
resultadoHipotenusa = sqrt(hipotenusa)
return resultadoHipotenusa
triangulo = Triangulo(input("Escribe una opción (1. Perimetro, 2. Area, 3. Hipotenusa, 4. Salir): "))
triangulo.Program() |
985,694 | 69f9ae86070822b356cad95b0eaa1aef8d1e8593 | #!/usr/bin/env python
from setuptools import setup, find_packages
try:
from pyqt_distutils.build_ui import build_ui
cmdclass = {'build_ui': build_ui}
except ImportError:
cmdclass = {}
setup(
name='foo',
version='0.1',
packages=find_packages(),
license='MIT',
author='Colin Duquesnoy',
author_email='colin.duquesnoy@gmail.com',
description='Example of use of pyqt',
cmdclass=cmdclass,
) |
985,695 | efe6f428be1505287aa4a1139c6cf39be325aa26 | # https://www.geeksforgeeks.org/finding-all-subsets-of-a-given-set-in-java/
def subsets_of_set_bits(input_set):
list_set = list(input_set)
num_elements = 2**len(list_set)
num_bits = len("{0:b}".format(num_elements-1))
all_subsets = []
for i in range(0, num_elements):
binary = "{0:b}".format(i).zfill(num_bits)
subset = []
for idx, c in enumerate(binary):
if c == '1':
subset.append(list_set[idx])
all_subsets.append(subset)
return all_subsets
# https://www.youtube.com/watch?v=RnlHPR0lyOE
def subsets_of_set_backtracking(input_set):
# Backtracking
def generate_bitstrings(i, B, bitstrings, num_elements):
if i == num_elements:
bitstrings.append(B.copy())
else:
B[i] = 0
generate_bitstrings(i+1, B, bitstrings, num_elements)
B[i] = 1
generate_bitstrings(i+1, B, bitstrings, num_elements)
list_set = list(input_set)
num_elements = len(list_set)
bitstrings = []
all_subsets = []
B = [0]*num_elements
generate_bitstrings(0, B, bitstrings, num_elements)
for bitstring in bitstrings:
temp = []
for idx, num in enumerate(bitstring):
if num == 1:
temp.append(list_set[idx])
all_subsets.append(temp)
return all_subsets
input = set([1, 2, 3, 4])
print(subsets_of_set_backtracking(input)) |
985,696 | 7219de952e40f642169621728b86111abfa7c7a4 |
from soz_analizi.shekilci import *
from soz_analizi.shekilciler import *
from operator import itemgetter
#from soz_analizi.luget.rama_doldur import efsane
dey_isim=['is.']
dey_feil=['f.']
dey_evez=['ev.']
dey_sif=['sif.']
dey_say=['say.']
dey_zerf = ['z.','zer.']
hallanan=['Isim']
mensub=['Isim']
import os
cala={'isim':'Isim','feil':'Feil','sifet':'Sifet','say':'Say','evezlik':'Evezlik','zerf':'Zerf',
'Isim':'Isim','Feil':'Feil','Sifet':'Sifet','Say':'Say','Evezlik':'Evezlik','Zerf':'Zerf'}
class sz:
def __init__(self,kopya):
if kopya.__class__.__name__=='sz':
self.kok=kopya.kok
self.ozu=kopya.ozu
self.hecalar=kopya.hecalar
self.saitler=kopya.saitler
self.nitq=kopya.nitq
self.shekilciler=kopya.shekilciler+[]
self.sonra=[]
else:
if kopya[-3:] in ('maq', 'mək'):
self.kok=kopya[:-3]
else:
self.kok=kopya
self.ozu=self.kok
self.shekilciler=[]
self.hecalar=[]
self.saitler=[]
self=sz.istisnalar(self)
def nitq(self,n):
self.nitq=cala[n]
self=self.istisnalar()
def yaz(self,shekilci):
if len(self.shekilciler)>0:
if self.shekilciler[len(self.shekilciler)-1] in sait and shekilci[-1:] in sait:#burda sehf var diesen
self.shekilciler[len(self.shekilciler)-1]=self.shekilciler[len(self.shekilciler)-1][:-1]
self.shekilciler[len(self.shekilciler)-1].secilmis=self.shekilciler[len(self.shekilciler)-1].secilmis[:-1]
self.ozu=shekilci.de(self)
self.shekilciler.append(shekilci)
return self
def hecaya_bol(self):
self.hecalar=[]
self.saitler=[]
if(self.ozu==''):
self.hecalar=[]
return
heca=''
for i in range(0,len(self.ozu)):
if self.ozu[i] in sait:
self.saitler.append(i)
if(len(self.saitler)<1):
return self.ozu
self.hecalar.append(self.ozu[:self.saitler[0]+1])
for i in range(1,len(self.saitler)):
self.hecalar.append(self.ozu[self.saitler[i-1]+1:self.saitler[i]+1] )
if self.saitler[i]-self.saitler[i-1]>2:
self.hecalar[i-1]+=self.ozu[self.saitler[i-1]+1]
self.hecalar[i]=self.hecalar[i][1:]
self.hecalar[len(self.hecalar)-1]+=self.ozu[self.saitler[len(self.saitler)-1]+1:]
cvc=''
for he in self.hecalar:
cvc+=he+'-'
return cvc[:-1]
def istisnalar(self):
if self.kok=='camaat' or self.kok=='əhali':
self.sonra=[i_men,i_hal,i_sex]
return self
if(self.nitq=='Isim'):
self.sonra=n_isim
return self
if(self.nitq=='Feil'):
self.sonra=n_feil
return self
if(self.nitq=='Sifet'):
self.sonra=n_sifet
return self
if(self.nitq=='Say'):
self.sonra=n_say
return self
if(self.nitq=='Evezlik'):
self.sonra=n_evezlik
return self
if(self.nitq=='Zerf'):
self.sonra=n_zerf
return self
return self
def yarat(self):
cvb=[]
#fazilando
#ilk shekilcini elave edir
for qrup in self.sonra:
for she in qrup:
y_soz=sz(self)
cvb.append(y_soz.yaz(she))
for sooz in cvb:
#qru son wekilciye elave olunacag shekilci keteqoryasidi
#meselem eger sonuncu sekilci mensubietdisa onda qru hal,sexs olur
for qru in sooz.shekilciler[-1].sonra:
#she qru qrupunda olan shekilcilerdi
#cem ucun lar/ler
#filtirleri bura qoyirsan,
#y_soz.nitq,she.adi, ve y_soz.shekilcilerden istifade edeceksen cox gumanki
for she in qru:
y_soz=sz(sooz)
#filtr buraligdi if(flanshey): continue
#qinama inet yoxdu bilmirem bawmi neynen qatim comment yaziram
cvb.append(y_soz.yaz(she))
return cvb
def adliq(self):
if self.nitq not in hallanan:
return False
for s in self.shekilciler:
if s.adi in (hal2,hal3,hal4,hal5,hal6):
return False
return True
def yiyelik(self):
if self.nitq not in hallanan:
return False
for s in self.shekilciler:
if s.adi in (hal3,hal4,hal5,hal6):
return False
return True
def yonluk(self):
if self.nitq not in hallanan:
return False
for s in self.shekilciler:
if s.adi in (hal3):
return True
return False
def tesirlik(self):
if self.nitq not in hallanan:
return False
for s in self.shekilciler:
if s.adi in (hal4):
return True
return False
def yerlik(self):
if self.nitq not in hallanan:
return False
for s in self.shekilciler:
if s.adi in (hal5):
return True
return False
def cixisliq(self):
if self.nitq not in hallanan:
return False
for s in self.shekilciler:
if s.adi in (hal6):
return True
return False
def mesubiyyeti(self,req):
if self.nitq not in mensub:
return False
if req==0:
for s in self.shekilciler:
if s.adi in (mens1,mens2,mens3,mens4,mens5):
return True
if req==1:
for s in self.shekilciler:
if s.adi in (mens1):
return True
if req==2:
for s in self.shekilciler:
if s.adi in (mens2):
return True
if req==3:
for s in self.shekilciler:
if s.adi in (mens3):
return True
if req==4:
for s in self.shekilciler:
if s.adi in (mens4):
return True
if req==5:
for s in self.shekilciler:
if s.adi in (mens5):
return True
return False
#########################################################
class lug:
dic={}
def add(self,ad):
if len(lug.dic.keys())<5:
# pazz=os.path.join(os.getcwd(),'home')
# pazz=os.path.join(pazz,'c')
# pazz=os.path.join(pazz,'cu79127')
# pazz=os.path.join(pazz,'Dilciaz')
# pazz=os.path.join(pazz,'public_html')
# pazz=os.path.join(pazz,'Dilci')
# pazz=os.path.join(pazz,'backend')
pazz=os.path.join(os.getcwd(),'soz_analizi')
# pazz=os.path.join(pazz,'soz_analizi')
pazz=os.path.join(pazz,'luget')
pazz=os.path.join(pazz,'luget.txt')
file = open(pazz,'r',encoding='utf-8')
sozder=file.readlines()
for k in sozder:
zaz=k.split('\t')[0]
if zaz[:3] in lug.dic.keys():
lug.dic[zaz[:3]].append(k)
else:
lug.dic[zaz[:3]]=[k]
return self.de(ad)
if ad in lug.dic.keys():
return lug.dic[ad]
else:
lug.dic[ad]=[]
return []
def de(self,ad):
if ad in lug.dic.keys():
#print(len(lug.dic.keys()))
return lug.dic[ad]
return self.add(ad)
#########################################################
def soz_kokudu(kok,soz):
if(kok==soz):
return True
if(kok[-3:]=="MƏK" or kok[-3:]=="MAQ"):
kok=kok[:-3]
if(len(kok)>len(soz)):
return False
if(len(kok)==len(soz)and kok!=soz):
return False
i=0
err=0
for her in kok:
if(i>=len(kok)):
break
if (her!=soz[i]):
if (her=='i'):
i+=1
else:
err+=1
if (err==2):
return False
i+=1
return True
def firran(xam,soz,cvb):
mumkundu=[]
if(xam.ozu==soz.ozu):
sj=''
for sheki in xam.shekilciler:
sj+=sheki.adi+","
#print("-soz tapildi shekilciler: "+sj)
cvb.append(xam)
return cvb
if(xam.nitq=='Isim'):
soz=sz(soz)
secilmisNitq=n_isim
elif(xam.nitq=='Feil'):
soz=sz(soz)
secilmisNitq=n_feil
elif(xam.nitq=='Sifet'):
soz=sz(soz)
secilmisNitq=n_sifet
elif(xam.nitq=='Say'):
soz=sz(soz)
secilmisNitq=n_say
elif(xam.nitq=='Evezlik'):
soz=sz(soz)
secilmisNitq=n_evezlik
elif(xam.nitq=='Zerf'):
soz=sz(soz)
secilmisNitq=n_zerf
else:
secilmisNitq=[]
print("alinmadi")
return
if(len(xam.shekilciler)==0):
for qrup in secilmisNitq:
for she in qrup:
y_soz=sz(xam)
y_soz.yaz(she)
if(soz_kokudu(y_soz.ozu,soz.ozu)):
#print('alinir diesen : '+y_soz.ozu+"->"+soz.ozu)
cvb=[]+firran(y_soz,soz,cvb)
else:
for qru in xam.shekilciler[-1].sonra:
for she in qru:
y_soz=sz(xam)
y_soz.yaz(she)
if(soz_kokudu(y_soz.ozu,soz.ozu)):
#print('alinir diesen : '+y_soz.ozu+"->"+soz.ozu)
cvb=[]+firran(y_soz,soz,cvb)
return cvb
def asaqi(soz):
for k in range(0,len(soz)):
if soz[k]=='I':
soz=soz[0:k]+'ı'+soz[k+1:]
if soz[k]=='İ':
soz=soz[0:k]+'i'+soz[k+1:]
if soz[k]=='Ə':
soz=soz[0:k]+'ə'+soz[k+1:]
soz=soz.lower()
return (soz)
def yuxari(soz):
for k in range(0,len(soz)):
if soz[k]=='ı':
soz=soz[0:k]+'I'+soz[k+1:]
if soz[k]=='i':
soz=soz[0:k]+'İ'+soz[k+1:]
if soz[k]=='ə':
soz=soz[0:k]+'Ə'+soz[k+1:]
soz=soz.upper()
return (soz)
def duzelt(soz):
cvb=''
soz=asaqi(soz)
for i in range(0,len(soz)):
if soz[i] in ('0','1','2','3','4','5','6','7','8','9','q','ü','e','r','t','y','u','i','o','p','ö','ğ','a','s','d','f','g','h','j','k','l','ı','ə','z','x','c','v','b','n','m','ç','ş'):
cvb=cvb+soz[i]
return cvb
lll=''
def cehd_ele(ad,soz):
suret=0
'''
'''
cvb=[]
#try:
if(suret==0):
koko=lug()
sozder=koko.de(ad)
for s in sozder:
k=s.split('\t')
if(soz_kokudu(k[0],soz) and len(k[1])>1):
if(k[1][:-2] in dey_feil):
if(k[0][-2:] not in ['MA','MƏ']):
cvb.append([k[0],k[1][:-1].split(';')[:-1]])#cvb.append([k[0],k[1][:-1].split(';')]) kohne versiya
else:
cvb.append([k[0],k[1][:-1].split(';')[:-1]])
else:
global lll
if(lll==''):
lll=efsane()
sozder=lll.de_yaver(ad)
for s in sozder:
k=s.split('\t')
#print(k)
if(soz_kokudu(k[0],soz)):
cvb.append([k[0],k[1][:-1].split(';')])
return cvb
#except:
# return []
def sirala(sira):
for i in range(0,len(sira)):
for j in range(0,len(sira)-i-1):
if(len(sira[j][0])<len(sira[j+1][0])):
temp=sira[j]
sira[j]=sira[j+1]
sira[j+1]=temp
return sira
def nitqi_tap(soz):
soz=yuxari(soz)
cvb=[]
for h in soz:
if(h in ('0','1','2','3','4','5','6','7','8','9')):
return []
if(len(soz)>1):
a=cehd_ele(soz[:2],soz)
cvb+=a
if(len(soz)>2):
a=cehd_ele(soz[:3],soz)
cvb+=a
if(soz[2]!='M'):
a=cehd_ele(soz[:2]+'M',soz)
cvb+=a
a=cehd_ele(soz[:1],soz)
cvb+=a
cvb=sirala(cvb)
return cvb[:]
def deqiq_olsun(soz):
for h in soz:
if(h in ('0','1','2','3','4','5','6','7','8','9')):
return []
cvb=[]
mumkun=nitqi_tap(soz)
for cut in mumkun:
#print(cut)
s=''
for n in cut[1]:
s=sz(asaqi(cut[0]))
if(n in dey_isim):
s.nitq('Isim')
kk=sz(soz)
kk.nitq('Isim')
cvb=firran(s,kk,cvb)
elif(n in dey_feil):
s.nitq('Feil')
kk=sz(soz)
kk.nitq('Feil')
cvb=firran(s,kk,cvb)
elif(n in dey_sif):
s.nitq('Sifet')
kk=sz(soz)
kk.nitq('Sifet')
cvb=firran(s,kk,cvb)
elif(n in dey_say):
s.nitq('Say')
kk=sz(soz)
kk.nitq('Say')
cvb=firran(s,kk,cvb)
elif(n in dey_evez):
s.nitq('Evezlik')
kk=sz(soz)
kk.nitq('Evezlik')
cvb=firran(s,kk,cvb)
elif(n in dey_zerf):
s.nitq('Zerf')
kk=sz(soz)
kk.nitq('Zerf')
cvb=firran(s,kk,cvb)
#print(296)
#for ca in cvb:
# print(ca.kok)
# print(ca.nitq)
else:
#bu hisse muveqqetidir yeni nitq hisseleri elave olana kimi
s.nitq('Isim')
kk=sz(soz)
kk.nitq('Isim')
cvb=firran(s,kk,cvb)
#bura kimi sil ve continue leri yigisdir
return cvb
|
985,697 | f1dafc72224edd41f3360c73e23e53a89958e3dc | import socket, sys, threading
import json
import signal
import time
import os
def exit_gracefully(signum, frame):
# restore the original signal handler as otherwise evil things will happen
# in raw_input when CTRL+C is pressed, and our signal handler is not re-entrant
signal.signal(signal.SIGINT, original_sigint)
try:
if input("\nReally quit? (y/n)> ").lower().startswith('y'):
print("yes")
server.exit()
sys.exit(0)
except KeyboardInterrupt:
print("Ok ok, quitting")
sys.exit(1)
# restore the exit gracefully handler here
signal.signal(signal.SIGINT, exit_gracefully)
def run_program():
while True:
time.sleep(1)
print("a")
if __name__ == '__main__':
sys.tracebacklimit = 0
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, exit_gracefully)
print("In my new app")
os.system("start cmd /c py router_obj.py")
while True:
msg = input("Create server(s)/client(c):")
if msg == 's':
os.system("start cmd /c py server_obj.py")
elif msg == 'c':
os.system("start cmd /c py client_obj.py")
elif msg == 'q':
break
else:
print("invalid input")
print("Exiting program")
|
985,698 | d12b98cd70346b50fb5d10a8026e54a60d638c9d | import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
import pickle
from sklearn.decomposition import PCA
from mlc.utility.FeatureUtil import FeatureVector
from sklearn.preprocessing import LabelEncoder
from csv import DictReader
"""
original
"""
class FeatureVectorData(FeatureVector):
def __init__(self):
self.name = "feature_vector"
self.list = None
def convertFileToFeature(self):
print ("train reading")
df = pd.read_csv("../data/train.csv")
encoder = LabelEncoder()
labels = df.target
labels = encoder.fit_transform(labels).astype(np.int32)
df = df.drop('target',1)
df = df.drop('ID',1)
# Junk cols - Some feature engineering needed here
df = df.ix[:, 520:660].fillna(-1)
train = df.values.copy()
print ("test reading")
test_df = pd.read_csv("../data/test.csv")
test_df = test_df.drop('ID',1)
# Junk cols - Some feature engineering needed here
test_df = test_df.ix[:, 520:660].fillna(-1)
test = test_df.values.copy()
return np.array(train).astype(np.float32),np.array(labels).astype(np.float32),np.array(test).astype(np.float32)
class FeatureVectorDataNew(FeatureVector):
def __init__(self):
self.name = "feature_vector_new"
self.list = None
def convertFileToFeature(self):
train = pd.read_csv("../data/train.csv")
mixCol = [8,9,10,11,12,18,19,20,21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 38, 39, 40, 41, 42, 43, 44, 45,
73, 74, 98, 99, 100, 106, 107, 108, 156, 157, 158, 159, 166, 167, 168, 169, 176, 177, 178, 179, 180,
181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 202, 205, 206, 207,
208, 209, 210, 211, 212, 213, 214, 215, 216, 218, 219, 220, 221, 222, 223, 224, 225, 240, 371, 372, 373, 374,
375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436,
437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457,
458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
479, 480, 481, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509,
510, 511, 512, 513, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 840]
#Columns with logical datatype
alphaCol = [283, 305, 325, 352, 353, 354, 1934]
#Columns with Places as entries
placeCol = [200, 274, 342]
#Columns with timestamps
dtCol = [75, 204, 217]
selectColumns = []
rmCol = mixCol+alphaCol+placeCol+dtCol
for i in range(1,1935):
if i not in rmCol:
selectColumns.append(i)
cols = [str(n).zfill(4) for n in selectColumns]
strColName = ['VAR_' + strNum for strNum in cols]
# Use only required columns
nrows = 500
train = pd.read_csv("../data/train.csv", skiprows=[107], usecols=strColName)
label = pd.read_csv("../data/train.csv", skiprows=[107], usecols=['target'])
class FeatureVectorHashTrick(FeatureVector):
def __init__(self):
self.name = "hash_trick"
self.list = None
def convertFileToFeature(self):
alpha = .005 # learning rate
beta = 1
L1 = 0. # L1 regularization, larger value means more regularized
L2 = 0. # L2 regularization, larger value means more regularized
# C, feature/hash trick
D = 2 ** 24 # number of weights to use
interaction = False # whether to enable poly2 feature interactions
train_path='data/train.csv'
test_path='data/test.csv'
df = pd.read_csv("data/train.csv")
encoder = LabelEncoder()
labels = df.target
labels = encoder.fit_transform(labels).astype(np.int32)
def data(path, D):
''' GENERATOR: Apply hash-trick to the original csv row
and for simplicity, we one-hot-encode everything
INPUT:
path: path to training or testing file
D: the max index that we can hash to
YIELDS:
ID: id of the instance, mainly useless
x: a list of hashed and one-hot-encoded 'indices'
we only need the index since all values are either 0 or 1
y: y = 1 if we have a click, else we have y = 0
'''
x_array = []
time = 0
count_time = 0
for t, row in enumerate(DictReader(open(path), delimiter=',')):
time += 1
try:
ID=row['ID']
del row['ID']
except:
pass
# process clicks
y = 0.
target='target'#'IsClick'
if target in row:
if row[target] == '1':
y = 1.
del row[target]
x = []
for key in row:
value = row[key]
# one-hot encode everything with hash trick
index = abs(hash(key + '_' + value)) % D
x.append(index)
count_time += 1
x_array.append(x)
print time,count_time
return np.array(x_array)
train = data(train_path,D)
test = data(test_path,D)
return np.array(train).astype(np.float32),np.array(labels).astype(np.float32),np.array(test).astype(np.float32)
class FeatureVectorHashTrick(FeatureVector):
def __init__(self):
self.name = "str_label"
self.list = None
def convertFileToFeature(self):
print ("train reading")
df = pd.read_csv("../data/train.csv")
encoder = LabelEncoder()
labels = df.target
labels = encoder.fit_transform(labels).astype(np.int32)
df = df.drop('target',1)
df = df.drop('ID',1)
# Junk cols - Some feature engineering needed here
df = df.fillna(-1)
train = df.values.copy()
print ("test reading")
test_df = pd.read_csv("../data/test.csv")
test_df = test_df.drop('ID',1)
# Junk cols - Some feature engineering needed here
test_df = test_df.fillna(-1)
test = test_df.values.copy()
return np.array(train),np.array(labels).astype(np.float32),np.array(test)
if __name__ == '__main__':
feature_vector = FeatureVectorHashTrick()
train, labels, test = feature_vector.getVector(std=False,one_of_k=False,label_base=True,pca=False,MinMaxScaler=False)
filename = feature_vector.getFeatureName()
pickle.dump((train, labels, test), open("feature_vector/" + filename, "w"))
|
985,699 | a2888df4b521f2ff3497154a3b6cc2e2f70c586e | def anagram(s, t):
freq = dict()
for letter in s:
if letter in freq:
freq[letter] += 1
else:
freq[letter] = 1
for letter in t:
if letter in freq:
freq[letter] -= 1
if freq[letter] < 0:
return False
else:
return False
for value in freq.values():
if value != 0:
return False
return True
s = "anagram"
t = "nagaram"
print(anagram(s,t))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.