index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,500 | 26eb1553391bfe90209b54213935cb6e79445598 | # -*- coding: utf-8 -*-
from odoo import http
# class Warehouse(http.Controller):
# @http.route('/warehouse/warehouse/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/warehouse/warehouse/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('warehouse.listing', {
# 'root': '/warehouse/warehouse',
# 'objects': http.request.env['warehouse.warehouse'].search([]),
# })
# @http.route('/warehouse/warehouse/objects/<model("warehouse.warehouse"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('warehouse.object', {
# 'object': obj
# }) |
988,501 | aefe32e5606bd8b0a2da3eeb3285c41067b7e801 | # There are a total of n courses you have to take, labeled from 0 to n-1.
# Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
# Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
# There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
# Example:
# Input: 2, [[1,0]]
# Output: [0,1]
# Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
# course 0. So the correct course order is [0,1] .
from collections import defaultdict, deque
class Solution:
    """Course Schedule II (LeetCode 210): topological order via Kahn's algorithm."""

    class Graph:
        """Minimal adjacency-list graph backed by a defaultdict."""

        def __init__(self, vertices):
            # `vertices` is kept for interface compatibility but unused:
            # defaultdict grows on demand, so no pre-sizing is needed.
            self.graph = defaultdict(list)

        def addEdge(self, u, v):
            """Record the directed edge u -> v."""
            self.graph[u].append(v)

    def initGraph(self, prerequisites):
        """Build prerequisite -> dependent adjacency lists."""
        g = self.Graph(0)
        for dest, src in prerequisites:
            # A pair [dest, src] means: to take `dest`, first take `src`.
            g.addEdge(src, dest)
        return g.graph

    # Annotations are quoted: typing.List is never imported in this file, so
    # the original unquoted annotations raised NameError at class-definition
    # time; quoting defers evaluation and keeps the signature intact.
    def findOrder(self, numCourses: int, prerequisites: 'List[List[int]]') -> 'List[int]':
        """Return one valid course ordering, or [] if a cycle makes it impossible.

        Args:
            numCourses: courses are labeled 0 .. numCourses-1.
            prerequisites: pairs [dest, src] meaning src must precede dest.

        Returns:
            A list with all numCourses labels in dependency order, or [].
        """
        g = self.initGraph(prerequisites)
        # In-degree is tracked only for courses that have prerequisites.
        indegree = {}
        for dest, src in prerequisites:
            indegree[dest] = indegree.get(dest, 0) + 1
        # Seed the queue with every course that has no prerequisites.
        indegree_queue = deque([k for k in range(numCourses) if k not in indegree])
        courses = []
        while indegree_queue:
            vertex = indegree_queue.popleft()
            courses.append(vertex)
            if vertex in g:
                for neighbor in g[vertex]:
                    # Taking `vertex` satisfies one prerequisite of `neighbor`.
                    indegree[neighbor] -= 1
                    if indegree[neighbor] == 0:
                        indegree_queue.append(neighbor)
        # Fewer than numCourses scheduled => a cycle exists.
        return courses if len(courses) == numCourses else []
988,502 | cb8cfb1824fcc2d831f280397ca73f6aeabdf4a1 | import turtle
# Fix: the original rebound the name `turtle` to the Turtle instance,
# shadowing the imported module; use a distinct name for the pen.
pen = turtle.Turtle()
for step in range(500):  # repeat 500 times
    # Growing stroke length + 91° turn draws a slowly rotating square spiral.
    pen.forward(step)
    pen.left(91)
|
988,503 | 2ee01e17fc812c4735ca7576e1ea3d6ea0016d62 | #!/usr/bin/python
import os
import sys
from monitor.wrapper import plc
from monitor.database.info.model import *
# NOTE(review): this is a Python 2 script (print statements, cmp(),
# truncating integer division); it will not run under Python 3 unported.
api = plc.cacheapi
api.AuthCheck()
# for each site, find total number of assigned slivers
# find median, high, low, average
site_list = []
for site in api.GetSites({'peer_id': None}):
    # Count the slivers of this site: one sliver per node each slice runs on.
    sl = api.GetSlices(site['slice_ids'])
    sliver_cnt = 0
    for slice in sl:
        sliver_cnt += len(slice['node_ids'])
    val = (site['login_base'], sliver_cnt, site['max_slices'])
    site_list.append(val)
    #print val
# Sort by sliver count, descending (Python 2 cmp-style comparator).
site_list.sort(lambda x,y: cmp(y[1], x[1]))
totals = 0
use_count = 0
print "loginbase,status,sliver_count,max_slices"
for i in site_list:
    if i[1] != 0:
        # Only sites that actually host slivers get a CSV row.
        h = HistorySiteRecord.get_by(loginbase=i[0])
        print "%10s,%s,%s,%s" % (i[0],h.status, i[1], i[2])
        use_count += 1
        totals += i[1]
# Truncating integer division under Python 2.
site_avg = totals/len(site_list)
#print "high: %s %s" % site_list[0]
#print "low: %s %s" % site_list[-1]
#print "median: %s %s" % site_list[len(site_list)/2]
#print "used median: %s %s" % site_list[use_count/2]
#print "all avg: %s" % site_avg
#print "used avg: %s" % (totals/use_count)
#print "totals: %s" % totals
#print "use_count: %s" % use_count
#print "site_count: %s" % len(site_list)
|
988,504 | fc28a2b93c60c6a08ee1fe2592d3137336bdc04d | # -*- coding: utf-8 -*-
"""
Some stuff to query and populate the database
"""
# python imports
# import decimal
# Scientific imports
import numpy as np
import pandas as pd
from decimal import Decimal
from decimal import getcontext
# Astronomical imports
from astroquery.simbad import Simbad
from astropy.coordinates import SkyCoord
from astropy.table import Column
from astropy import units as u
# peewee imports
from peewee import IntegrityError
# Stellar imports
from .Stellar import Star, Gravity, Magnitude, Name, Temperature, Abundance
# Setting the precision of the decimal objects
getcontext().prec = 3
# Star is the main table of the database. All other tables are directly linked to it. Knowing the full content of the DB requires listing the content of Star.
Database = {'Temperature': ['starid', 'temperature'],
'Gravity': ['starid', 'gravity'],
'Name': ['starid', 'alternatename'],
'Abundance': ['starid', 'Carbon', 'Oxygen', 'Titanium'],
'Magnitude': ['starid', 'umag', 'bmag', 'vmag', 'rmag', 'imag', 'jmag', 'hmag', 'kmag'],
'Star': ['idstar', 'name', 'rightascension', 'declination', 'pmra', 'pmdec']}
# The default parameters for a simbad query do not contain enough fields for our needs.
# Missing are magnitudes, proper motions, metallicities
# Let's add them now.
Filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K']
Motions = ['pmra', 'pmdec']
fields = Simbad.get_votable_fields()
Present = False
for f in fields:
if 'flux' in f:
Present = True
if not Present:
for Filter in Filters:
Simbad.add_votable_fields('flux('+Filter+')')
fields = Simbad.get_votable_fields()
Present = False
for f in fields:
if 'pm'in f:
Present = True
if not Present:
for Motion in Motions:
Simbad.add_votable_fields(Motion)
def query(star):
    """Query Simbad for *star* and return a one-row astropy Table.

    The row carries the configured votable fields plus an extra ``ALTNAME``
    column containing every alternate identifier, comma-joined.
    """
    n = Simbad.query_objectids(star)
    d = Simbad.query_object(star)
    # query_object returns bytes instead of strings, thus converting.
    d[0]['MAIN_ID'] = d[0]['MAIN_ID'].decode()
    # Simbad returns bytecode. Changing it to strings.
    n.convert_bytestring_to_unicode()
    # Transforming the Astropy Name Table into a string, so we can insert it in a DataFrame cell directly.
    t = ', '.join([i for i in n['ID']])
    # Adding a column with the alternate names.
    d['ALTNAME'] = Column([t], dtype=object)
    return d
def correctcoordinates(dataframe):
    """Convert RA/DEC columns from sexagesimal strings to decimal values.

    RA is rewritten as decimal hours, DEC as decimal degrees, in place;
    the same DataFrame is returned for convenience.
    """
    for i in range(dataframe.shape[0]):
        ra = dataframe['RA'][i]
        dec = dataframe['DEC'][i]
        # Simbad returns sexagesimal strings; SkyCoord parses them.
        sc = SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
        dataframe.loc[i:i, ('RA')] = sc.ra.hour
        dataframe.loc[i:i, ('DEC')] = sc.dec.deg
    return dataframe
def correctname(star):
    """Return *star* renamed to match the Simbad naming scheme.

    BS/CS survey identifiers are known to Simbad under a 'BPS ' prefix;
    every other name is returned unchanged.
    """
    if star.startswith(('BS', 'CS')):
        return 'BPS ' + star
    return star
def onlinedata(star):
    """
    Query the Simbad database and return the star data as a pandas DataFrame.

    Accepts a single star name or a list of names; coordinates are converted
    to decimal form before returning.
    """
    data = None
    # Normalize the input so a lone name is handled like a one-element list.
    if not isinstance(star, list):
        star = [star]
    for s in star:
        # Stacking the results one after each in a numpy array.
        s = correctname(s)
        print(('Star : {0}'.format(s)))
        d = query(s)
        if data is None:
            data = np.array(d)
        else:
            data = np.hstack((data, d))
    df = pd.DataFrame(data)
    df = correctcoordinates(df)
    return df
def decimalformat(value):
    """Return *value* as a Decimal computed at 8 significant digits.

    The division by Decimal('1.000000') normalizes the representation.

    Fix: the original did ``getcontext().prec = 8``, permanently mutating
    the process-wide decimal context (this module sets prec=3 at import
    time); a local context keeps the change scoped to this call.
    """
    from decimal import localcontext
    with localcontext() as ctx:
        ctx.prec = 8
        return Decimal(str(value)) / Decimal('1.000000')
def listofstars():
    """Return the names of all stars present in the Star table."""
    return [record.name for record in Star.select()]
def addstar(starname):
    """
    Add a record in the database.
    """
    # EAFP: rely on the UNIQUE constraint on Star.name rather than querying
    # first; peewee raises IntegrityError on a duplicate insert.
    try:
        Star.create(name=starname)
    except IntegrityError:
        print(('Star {0} already in database. Record not created, but can be updated.'.format(starname)))
def addinfostar(star, field, value):
    """Insert *value* for *field* into whichever table owns that column.

    The owning model is looked up in the ``Database`` schema map; the new
    row is linked to the star via its primary key.

    Args:
        star: star name (must already exist in the Star table).
        field: column name listed in ``Database``.
        value: value to store.

    Returns:
        The number of rows saved by peewee's ``save()``.

    Raises:
        ValueError: if *field* belongs to no known table (the original
        crashed with an unbound-name NameError in that case).
    """
    star_id = idstar(star)  # renamed from `id` to avoid shadowing the builtin
    model = None
    for table, attributes in Database.items():
        if field in attributes:
            model = table
    if model is None:
        raise ValueError('Unknown field {0!r}: not present in any table'.format(field))
    toinsert = {'starid': star_id,
                field: value}
    print(('Inserting {0} into {1}:{2} for star {3}'.format(value, model, field, star)))
    # Resolve the model class by name from the module namespace instead of
    # eval(); the classes are imported from .Stellar at the top of the file.
    toi = globals()[model](**toinsert)
    return toi.save()
def idstar(star):
    # Example of a linked query through the starid foreign key:
    # query = Gravity.select().where(Gravity.gravity != 0)
    # for g in query:
    # ...:     print (g.gravity, g.starid.name, g.starid.pmra)
    # In this case, since the table Gravity is linked to the table Star via starid, we get the name of the star directly using starid.name
    """
    Return the primary-key id of the star named *star*.
    """
    # Star._meta.columns lists all columns of the Table.
    # Gravity._meta.database.get_tables() lists all tables of the database.
    id = Star.get(Star.name == star).get_id()
    return id
def getdata(star):
    """Return the stored data for one star name or a list of star names.

    A list input yields a dict keyed by name; a single string yields that
    star's data dict; anything else yields None.
    """
    if isinstance(star, list):
        return {name: extractdata(idstar(name)) for name in star}
    if isinstance(star, str):
        return extractdata(idstar(star))
    return None
def extractdata(id):
    """Collect every stored attribute for star *id* across all tables.

    Returns a dict mapping attribute name -> list of values (a list, since
    child tables may hold several rows per star).
    """
    temp = {}
    # Getting the parameters from the tables.
    for key in list(Database.keys()):
        val = {}
        for attr in Database[key]:
            params = []
            # Key columns are not data; skip them.
            if attr == 'starid' or attr == 'idstar':
                continue
            if key == 'Star':
                query = Star.select().where(Star.idstar == id)
            else:
                # Child tables are reached through their join to Star.
                query = eval(key).select().join(Star).where(Star.idstar == id)
            for field in query:
                if attr != 'starid':
                    params.append(getattr(field, attr))
            val.update({attr: params})
        temp.update(val)
    return temp
|
988,505 | 64897eb349b4a09fba528c02ec369a95dc062fed | import pandas as pd
import numpy as np
import random
import math
from numpy import linalg as LA
from sklearn.decomposition import FastICA
import matplotlib.pyplot as plt
from knn import Knn
class IrisICA:
    """Iris dataset wrapper: ICA dimensionality reduction + train/test split."""

    def __init__(self, file_name):
        # Map the class labels to integers 0/1/2 and keep each row as a
        # plain list of floats: [feature..., label].
        df = pd.read_csv(file_name)
        df['class'] = df['class'].apply(lambda x: 0 if x == 'Iris-setosa' \
            else (1 if x == 'Iris-versicolor' else 2))
        self.irisdata = df.astype(float).values.tolist()
        self.train_data = []
        self.test_data = []
        self.number_of_features = len(self.irisdata[0]) - 1

    def plotIrisData(self, title):
        """Scatter-plot every feature pair in a grid, one colour per class."""
        plt.figure(title)
        plot_data = {}
        # Group rows (features only) by their class label.
        for data in self.irisdata:
            if data[-1] not in plot_data:
                plot_data.update({data[-1]: [data[:-1]]})
            else:
                plot_data[data[-1]].append(data[:-1])
        # print (plot_data)
        plot_data_T = []
        # Transpose each class block so plot_data_T[c][f] is feature f of class c.
        for key in plot_data:
            plot_data_T.append(list(map(list, zip(*plot_data[key]))))
        n_feature = self.number_of_features
        for i in range(n_feature):
            for j in range(n_feature):
                plt.subplot(n_feature, n_feature, (i*n_feature)+j+1)
                if (i != j):
                    plt.plot(plot_data_T[0][i], plot_data_T[0][j], 'ro', \
                        plot_data_T[1][i], plot_data_T[1][j], 'bo', \
                        plot_data_T[2][i], plot_data_T[2][j], 'go')
                else:
                    # Diagonal cells (feature vs itself) stay empty.
                    plt.plot()

    def applyIcaFromFullIris(self, number_components=4):
        """Run FastICA on the features and replace them with the components in place."""
        ica_input = []
        for data in self.irisdata:
            ica_input.append(data[:-1])
        # print (np.array(ica_input))
        ica = FastICA(n_components=number_components, whiten=False)
        ica_out = ica.fit_transform(ica_input)
        # replace original data
        for data_iris, ica_data in zip(self.irisdata, ica_out):
            data_iris[:-1] = ica_data
        # print (np.array(self.irisdata))

    def getSortedComponentEnergy(self):
        """Return component indices sorted by total energy, descending."""
        energies = []
        num_conponent = len(self.irisdata[0])-1
        # Accumulate the squared value of every component over all samples.
        for data in self.irisdata:
            for i in range(num_conponent):
                if len(energies) < num_conponent:
                    energies.append(math.pow(data[i], 2))
                else:
                    energies[i] += math.pow(data[i], 2)
        sorted_engergy_index = sorted(range(len(energies)), \
            key=lambda x:energies[x], reverse=True)
        # print (sorted_engergy_index)
        # print (energies)
        return sorted_engergy_index

    def getTrainTestSet(self, components_index, train_size=0.7):
        """Shuffle, keep only the given components, and split into train/test."""
        random.shuffle(self.irisdata)
        num_train = int(len(self.irisdata) * train_size)
        for i in range(len(self.irisdata)):
            data_point = []
            for index in components_index:
                data_point.append(self.irisdata[i][index])
            data_point.append(self.irisdata[i][-1])
            # NOTE(review): `<=` puts num_train + 1 samples into the train set.
            if (i <= num_train):
                self.train_data.append(data_point)
            else:
                self.test_data.append(data_point)
        return self.train_data, self.test_data
def icaKnnTest():
    """Single visual run: plot before/after ICA and print the kNN accuracy."""
    iris_data = IrisICA('iris_data_set/iris.data')
    iris_data.plotIrisData('iris data before ica')
    iris_data.applyIcaFromFullIris(number_components=4)
    energy_of_components = iris_data.getSortedComponentEnergy()
    # Keep only the two most energetic components for classification.
    train_data, test_data = iris_data.getTrainTestSet(energy_of_components[:2], train_size=0.7)
    iris_data.plotIrisData('iris data after ica')
    knn = Knn()
    print (knn.kNearestNeighbors(train_data, test_data))
    plt.show()
def icaKnnLoop(loop=10):
    """Average the ICA + kNN accuracy over *loop* independent shuffled runs."""
    total = 0
    for round_no in range(1, loop + 1):
        dataset = IrisICA('iris_data_set/iris.data')
        dataset.applyIcaFromFullIris(number_components=4)
        ranked = dataset.getSortedComponentEnergy()
        # Classify using the two highest-energy components only.
        train_set, test_set = dataset.getTrainTestSet(ranked[:2], train_size=0.7)
        score = Knn().kNearestNeighbors(train_set, test_set)
        total += score
        print ('round ', round_no, ' accuracy: ', score)
    return total / loop
# Entry point: report the mean accuracy over 10 shuffled runs.
if __name__ == "__main__":
    np.set_printoptions(precision=3)
    print ('Average accuracy: ', icaKnnLoop(loop=10))
|
988,506 | 077eb5b6f7104b157b804c682679a5974a58d717 | # Generated by Django 2.0.5 on 2018-06-22 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename uploadfile.wordcount -> word_count and add most_common_word."""

    dependencies = [
        ('pdfanalyzer', '0016_auto_20180622_1925'),
    ]

    operations = [
        migrations.RenameField(
            model_name='uploadfile',
            old_name='wordcount',
            new_name='word_count',
        ),
        migrations.AddField(
            model_name='uploadfile',
            name='most_common_word',
            # '0000' placeholder default so existing rows satisfy NOT NULL.
            field=models.CharField(default='0000', max_length=100),
        ),
    ]
|
988,507 | d4246ad3f8de6a09798d025eade7629c034bce9f | """
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
* The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details
provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
* Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
* This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from sqlalchemy import text
from time import strftime
from app.api.v1.models.owner import Owner
from app.api.v1.models.devices import Devices
from app.api.v1.models.imeis import Imei
from app.api.v1.models.pairings import Pairing
from app.api.v1.models.pairing_codes import Pairing_Codes
def create_view(db):
    """Create or replace the flat ``test_view`` joining owner/device/imei/paircode.

    Best-effort test-fixture helper: failures are rolled back and ignored so
    suite setup can proceed; the session is always closed.
    """
    try:
        query = text("""CREATE OR REPLACE VIEW public.test_view AS SELECT owner.contact,
            imei.imei,
            devices.brand,
            devices.model,
            devices.serial_no,
            devices.mac,
            pairing_codes.pair_code,
            pairing_codes.is_active
            FROM owner
            JOIN devices ON devices.owner_id = owner.id
            JOIN imei ON imei.device_id = devices.id
            JOIN pairing_codes ON pairing_codes.device_id = devices.id;""")
        db.engine.execute(query)
    except Exception as e:
        # Deliberate swallow: view creation is optional for the tests;
        # inspect `e` when debugging setup problems.
        db.session.rollback()
    finally:
        db.session.close()
def complete_db_insertion(session, db, t_owner_id, t_contact, t_device_id, t_model, t_brand, t_serial, t_rat,
                          t_paircode, t_imei_id, t_imei, t_mac=None):
    """Seed one owner with one device, one pairing code and one IMEI.

    Test fixture: all rows are added to *session* and committed through
    *db* in one transaction.
    """
    owner_add = Owner(id=t_owner_id, contact=t_contact)
    session.add(owner_add)
    device_add = Devices(id=t_device_id, model=t_model, brand=t_brand, serial_no=t_serial, rat=t_rat,
                         owner_id=t_owner_id, mac=t_mac)
    session.add(device_add)
    paircode_add = Pairing_Codes(pair_code=t_paircode, is_active=True, device_id=t_device_id)
    session.add(paircode_add)
    imei_add = Imei(id=t_imei_id, imei=t_imei, device_id=t_device_id)
    session.add(imei_add)
    db.session.commit()
def first_pair_db_insertion(session, db, t_pair_id, t_msisdn, t_mno, t_imei_id):
    """Insert a confirmed primary pairing row for the given IMEI (test fixture).

    primary_id=0 marks the row itself as the primary pair.
    """
    primary_add = Pairing(id=t_pair_id,
                          primary_id=0,
                          msisdn=t_msisdn,
                          is_primary=True,
                          creation_date=strftime("%Y-%m-%d %H:%M:%S"),
                          operator_name=t_mno,
                          add_pair_status=True,
                          imei_id=t_imei_id)
    session.add(primary_add)
    db.session.commit()
def add_pair_db_insertion(session, db, t_sec_id, t_primary_id, t_sec_msisdn, t_imei_id):
    """Insert a secondary, not-yet-confirmed pairing row (test fixture)."""
    sec_add = Pairing(id=t_sec_id,  # adding secondary pair incase one or more secondary pairs already exists
                      primary_id=t_primary_id,
                      msisdn=t_sec_msisdn,
                      is_primary=False,
                      creation_date=strftime("%Y-%m-%d %H:%M:%S"),
                      add_pair_status=False,
                      imei_id=t_imei_id)
    session.add(sec_add)
    db.session.commit()
def add_pair_confrm_db_insertion(session, db, t_sec_no, t_primary_id, t_mno):
    """Confirm the pending secondary pairing for *t_sec_no*, if one exists.

    Finds the open secondary pair (end_date NULL, add_pair_status False)
    under *t_primary_id*, marks it confirmed with the operator name, and
    commits; silently does nothing when no matching row exists.
    """
    # NOTE: `== False` / `== None` are intentional — SQLAlchemy translates
    # them into SQL comparisons; `is` would not work here.
    chks = Pairing.query.filter(db.and_(Pairing.msisdn == '{}'.format(t_sec_no),
                                        Pairing.is_primary == False,
                                        Pairing.primary_id == '{}'.format(t_primary_id),
                                        Pairing.end_date == None,
                                        Pairing.add_pair_status == False)).first()
    if chks:
        chks.add_pair_status = True
        chks.operator_name = t_mno
        chks.updated_at = '{}'.format(strftime("%Y-%m-%d %H:%M:%S"))
        db.session.commit()
def athty_input_payload(sn, model, brand, serial_no, rat, imei, mac=None, cond=0):
    """Build a device-registration payload, optionally omitting one field.

    Args:
        sn, model, brand, serial_no, rat, imei, mac: payload values.
        cond: 0 returns the complete payload; 1-7 drop exactly one key
              (contact_no, model, brand, serial_no, rat, imei, mac
              respectively) to exercise the API's validation paths.

    Returns:
        dict: the request payload.

    Raises:
        ValueError: if *cond* is outside 0..7 (the original crashed with an
        UnboundLocalError in that case).
    """
    # Key order matches the original hand-written payloads.
    data = {
        "contact_no": sn,
        "model": model,
        "brand": brand,
        "serial_no": serial_no,
        "mac": mac,
        "rat": rat,
        "imei": imei
    }
    omit = {
        0: None,           # all parameters present
        1: "contact_no",   # contact_no is missing
        2: "model",        # model is missing
        3: "brand",        # brand is missing
        4: "serial_no",    # serial_no is missing
        5: "rat",          # rat is missing
        6: "imei",         # imei is missing
        7: "mac",          # mac is missing
    }
    if cond not in omit:
        raise ValueError('cond must be in 0..7, got {0!r}'.format(cond))
    if omit[cond] is not None:
        del data[omit[cond]]
    return data
def athty_search_db_insertion(session, db, t_owner_id, t_contact, t_device_id, t_model, t_brand, t_serial,
                              t_rat, t_paircode, t_imei_id, t_imei, t_mac=None):
    """Seed one owner/device with a pairing code and SEVERAL IMEIs.

    Unlike complete_db_insertion, *t_imei* is an iterable; IMEI row ids are
    assigned sequentially starting at *t_imei_id*.
    """
    owner_add = Owner(id=t_owner_id, contact=t_contact)
    session.add(owner_add)
    device_add = Devices(id=t_device_id, model=t_model, brand=t_brand, serial_no=t_serial,
                         rat=t_rat, owner_id=t_owner_id, mac=t_mac)
    session.add(device_add)
    imei_id = t_imei_id
    for val in t_imei:
        imei_add = Imei(id=imei_id, imei=val, device_id=t_device_id)
        session.add(imei_add)
        imei_id += 1
    paircode_add = Pairing_Codes(pair_code=t_paircode, is_active=True, device_id=t_device_id)
    session.add(paircode_add)
    db.session.commit()
def athty_search_payload(start, limit, t_imei, t_mac, t_serial, t_contact, cond=0):
    """Build a device-search request for the given pagination window.

    cond 0-9 select which filters appear in the query string (see the
    ``included`` table below); cond 10 returns the JSON-body variant.

    Returns:
        str for cond 0-9 (URL with query string), dict for cond 10.

    Raises:
        ValueError: if *cond* is outside 0..10 (the original crashed with
        an UnboundLocalError in that case).
    """
    url = 'api/v1/device-search'
    if cond == 10:
        # JSON-body variant of the same search.
        return {
            "start": start,
            "limit": limit,
            "search_a": {
                "MAC": t_mac,
                "CONTACT": t_contact,
                "Serial_No": t_serial,
                "IMEI": t_imei
            }
        }
    # Query-string filters each cond includes, in emission order.
    included = {
        0: ('contact', 'imei', 'mac', 'serial_no'),  # everything
        1: ('imei', 'mac', 'serial_no'),             # contact is missing
        2: ('contact', 'mac', 'serial_no'),          # imei is missing
        3: ('contact', 'imei', 'serial_no'),         # mac is missing
        4: ('contact', 'imei', 'mac'),               # serial_no is missing
        5: ('mac',),                                 # searching through MAC only
        6: ('contact',),                             # searching through CONTACT only
        7: ('serial_no',),                           # searching through Serial_No only
        8: ('imei',),                                # searching through IMEI only
        9: (),                                       # pagination only
    }
    if cond not in included:
        raise ValueError('cond must be in 0..10, got {0!r}'.format(cond))
    values = {'contact': t_contact, 'imei': t_imei, 'mac': t_mac, 'serial_no': t_serial}
    data = '{api}?start={st}&limit={lt}'.format(api=url, st=start, lt=limit)
    for name in included[cond]:
        data += '&{0}={1}'.format(name, values[name])
    return data
def mno_imsi_upload(sn, mno, imsi, cond=0):
    """Build an MNO IMSI-upload payload, optionally omitting one key.

    Args:
        sn: MSISDN. mno: operator name. imsi: IMSI value.
        cond: 0 = complete payload; 1/2/3 drop msisdn/operator/imsi
              respectively (to exercise validation).

    Returns:
        dict: the request payload.

    Raises:
        ValueError: if *cond* is outside 0..3 (the original crashed with an
        UnboundLocalError in that case).
    """
    data = {
        "msisdn": sn,
        "operator": mno,
        "imsi": imsi
    }
    omit = {0: None, 1: "msisdn", 2: "operator", 3: "imsi"}
    if cond not in omit:
        raise ValueError('cond must be in 0..3, got {0!r}'.format(cond))
    if omit[cond] is not None:
        del data[omit[cond]]
    return data
|
988,508 | ccb142967088bdc85a1d1a99bb0c5a076954d290 |
# Copyright (C) 2015-2018 by Yuri Victorovich. All rights reserved.
# This code is licensed under BSD license.
## This is the module that recomputes IP and UDP packet checksums
import socket
import struct
import array
def is_big_endian():
    """Return True when the native byte order is big-endian.

    Fix: the original compared the bytes from struct.pack() against a str
    literal, which is always False on Python 3, so the big-endian checksum
    variant below could never be selected.
    """
    return struct.pack("H", 1) == b"\x00\x01"
# Checksums are specified in rfc#768
# The ones-complement sum runs over native-endian 16-bit words, so the final
# fold differs between big- and little-endian hosts; the right variant is
# chosen once at import time.
if is_big_endian():
    def checksum(pkt):
        """RFC 768/1071 Internet checksum of *pkt* (bytearray), big-endian host."""
        adj=False
        if len(pkt) % 2 == 1:
            # Odd length: temporarily pad with a zero byte. This mutates the
            # caller's bytearray; the padding is stripped again below.
            pkt += bytearray([0])
            adj=True
        s = sum(array.array("H", pkt))
        # Fold the carries back into 16 bits, then take the ones complement.
        s = (s >> 16) + (s & 0xffff)
        s += s >> 16
        s = ~s
        if adj:
            pkt[len(pkt)-1:] = b''
        return s & 0xffff
else:
    def checksum(pkt):
        """RFC 768/1071 Internet checksum of *pkt* (bytearray), little-endian host."""
        adj=False
        if len(pkt) % 2 == 1:
            # Odd length: temporarily pad with a zero byte (restored below).
            pkt += bytearray([0])
            adj=True
        s = sum(array.array("H", pkt))
        s = (s >> 16) + (s & 0xffff)
        s += s >> 16
        s = ~s
        if adj:
            pkt[len(pkt)-1:] = b''
        # Swap the two bytes so the result is in network byte order.
        return (((s>>8)&0xff)|s<<8) & 0xffff
def packet_new_udp_headers_for_cksum(pkt):
    """Return the UDP pseudo-header plus payload used for checksum computation.

    Layout per RFC 768: source IP, destination IP, zero byte, protocol
    number, UDP length — followed by the UDP header and data (pkt[20:]).
    """
    src_ip = pkt[12:16]
    dst_ip = pkt[16:20]
    udp_len = len(pkt) - 20
    pseudo = struct.pack("!4s4sBBH", src_ip, dst_ip, 0, socket.IPPROTO_UDP, udp_len)
    return bytearray(pseudo) + pkt[20:]
def checksum_calc_udp_packet(pkt):
    """Recompute and store the UDP checksum of *pkt* (full IP packet) in place.

    The checksum field (bytes 26-27 of the packet) is zeroed first, as
    required before computing, then overwritten with the checksum of the
    pseudo-header packet converted to network byte order.
    """
    pkt[26:28] = bytearray(struct.pack("H", 0))
    pkt[26:28] = bytearray(struct.pack("H", socket.htons(checksum(packet_new_udp_headers_for_cksum(pkt)))))
|
988,509 | cc500477173a9a869a16f18031c47600ecc9bc6d | # -*- coding: utf-8 -*-
"""
--------------------------------------------------------------------------
glmnetCoef computes coefficients from a "glmnet" object.
--------------------------------------------------------------------------
DESCRIPTION:
This function extracts coefficients at certain lambdas if they are
in the lambda sequence of a "glmnet" object or make predictions
if they are not in that sequence.
USAGE:
glmnetCoef(object, s, exact)
Fewer input arguments (more often) are allowed in the call, but must
come in the order listed above. To set default values on the way, use
scipy.empty([0]).
For example, ncoef = glmnetCoef(fit,scipy.empty([0]),False).
INPUT ARGUMENTS:
obj Fitted "glmnet" model object.
s Value(s) of the penalty parameter lambda at which computation
is required. Default is the entire sequence used to create
the model.
exact If exact = False (default), then the function uses
linear interpolation to make predictions for values of s
that do not coincide with those used in the fitting
algorithm. Note that exact = True is not implemented.
OUTPUT ARGUMENTS:
result A (nvars+1) x length(s) scipy 2D array with each column being the
coefficients at an s. Note that the first row are the
intercepts (0 if no intercept in the original model).
LICENSE: GPL-2
AUTHORS:
Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
Fortran code was written by Jerome Friedman
R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hasite
The original MATLAB wrapper was written by Hui Jiang,
and is updated and maintained by Junyang Qian.
This Python wrapper (adapted from the Matlab and R wrappers)
is written by Balakumar B.J., bbalasub@stanford.edu
Department of Statistics, Stanford University, Stanford, California, USA.
REFERENCES:
Friedman, J., Hastie, T. and Tibshirani, R. (2008) Regularization Paths for Generalized Linear Models via Coordinate Descent,
http://www.jstatsoft.org/v33/i01/
Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010
Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011) Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent,
http://www.jstatsoft.org/v39/i05/
Journal of Statistical Software, Vol. 39(5) 1-13
Tibshirani, Robert., Bien, J., Friedman, J.,Hastie, T.,Simon, N.,Taylor, J. and Tibshirani, Ryan. (2010) Strong Rules for Discarding Predictors in Lasso-type Problems,
http://www-stat.stanford.edu/~tibs/ftp/strong.pdf
Stanford Statistics Technical Report
SEE ALSO:
glmnet, glmnetPrint, glmnetPredict, and cvglmnet.
EXAMPLES:
x = scipy.random.rand(100,20);
y = scipy.random.rand(100,1);
fit = glmnet(x = x.copy(),y = y.copy());
ncoef = glmnetCoef(fit,scipy.array([0.01, 0.001]));
"""
import scipy
from .glmnetPredict import glmnetPredict
def glmnetCoef(obj, s = None, exact = False):
    """Extract coefficients from a fitted glmnet object at penalty values *s*.

    See the module docstring for full usage details.

    Args:
        obj: fitted glmnet model dict.
        s: lambda value(s); defaults to the full fitted sequence.
        exact: must be False; exact re-fitting is not implemented.

    Returns:
        (nvars+1) x len(s) array; the first row holds the intercepts.
    """
    if s is None:
        s = obj['lambdau']
    if exact and len(s) > 0:
        raise NotImplementedError('exact = True not implemented in glmnetCoef')
    # NOTE(review): scipy.empty relies on the legacy numpy re-export, which
    # newer SciPy releases have removed — confirm the pinned SciPy version
    # or switch to numpy.empty.
    result = glmnetPredict(obj, scipy.empty([0]), s, 'coefficients')
    return(result)
|
988,510 | 2a51fdf19b99c29d36d3768b319f8834ebf3547e | ##################################################### Import system libraries ######################################################
import matplotlib as mpl
mpl.rcdefaults()
mpl.rcParams.update(mpl.rc_params_from_file('meine-matplotlibrc'))
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import (
nominal_values as noms,
std_devs as stds,
)
################################################ Finish importing system libraries #################################################
################################################ Adding subfolder to system's path #################################################
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"python_custom_scripts")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
############################################# Finish adding subfolder to system's path #############################################
##################################################### Import custom libraries ######################################################
from curve_fit import ucurve_fit
from table import (
make_table,
make_full_table,
make_composed_table,
make_SI,
write,
)
from regression import (
reg_linear,
reg_quadratic,
reg_cubic
)
from error_calculation import(
MeanError
)
################################################ Finish importing custom libraries #################################################
################################ FREQUENTLY USED CODE ################################
#
########## IMPORT ##########
# t, U, U_err = np.genfromtxt('data.txt', unpack=True)
# t *= 1e-3
########## ERRORS ##########
# R_unc = ufloat(R[0],R[2])
# U = 1e3 * unp.uarray(U, U_err)
# Rx_mean = np.mean(Rx) # Mittelwert und syst. Fehler
# Rx_mean_err = MeanError(noms(Rx)) # Fehler des Mittelwertes
#
## Relative Fehler zum späteren Vergleich in der Diskussion
# RelFehler_G = (G_mess - G_lit) / G_lit
# RelFehler_B = (B_mess - B_lit) / B_lit
# write('build/RelFehler_G.tex', make_SI(RelFehler_G*100, r'\percent', figures=1))
# write('build/RelFehler_B.tex', make_SI(RelFehler_B*100, r'\percent', figures=1))
########## CURVE FIT ##########
# def f(t, a, b, c, d):
# return a * np.sin(b * t + c) + d
#
# params = ucurve_fit(f, t, U, p0=[1, 1e3, 0, 0]) # p0 bezeichnet die Startwerte der zu fittenden Parameter
# params = ucurve_fit(reg_linear, x, y) # linearer Fit
# params = ucurve_fit(reg_quadratic, x, y) # quadratischer Fit
# params = ucurve_fit(reg_cubic, x, y) # kubischer Fit
# a, b = params
# write('build/parameter_a.tex', make_SI(a * 1e-3, r'\kilo\volt', figures=1)) # type in Anz. signifikanter Stellen
# write('build/parameter_b.tex', make_SI(b * 1e-3, r'\kilo\hertz', figures=2)) # type in Anz. signifikanter Stellen
########## PLOTTING ##########
# plt.clf # clear actual plot before generating a new one
#
## automatically choosing limits with existing array T1
# t_plot = np.linspace(np.amin(T1), np.amax(T1), 100)
# plt.xlim(t_plot[0]-1/np.size(T1)*(t_plot[-1]-t_plot[0]), t_plot[-1]+1/np.size(T1)*(t_plot[-1]-t_plot[0]))
#
## hard coded limits
# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3
#
## standard plotting
# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')
# plt.plot(t * 1e3, U * 1e3, 'rx', label='Messdaten')
## plt.errorbar(B * 1e3, noms(y) * 1e5, fmt='rx', yerr=stds(y) * 1e5, label='Messdaten') # mit Fehlerbalken
## plt.xscale('log') # logarithmische x-Achse
# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
# plt.ylabel(r'$U \:/\: \si{\kilo\volt}$')
# plt.legend(loc='best')
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/aufgabenteil_a_plot.pdf')
########## WRITING TABLES ##########
### IF THERE IS ONLY ONE COLUMN IN A TABLE (workaround):
## a=np.array([Wert_d[0]])
## b=np.array([Rx_mean])
## c=np.array([Rx_mean_err])
## d=np.array([Lx_mean*1e3])
## e=np.array([Lx_mean_err*1e3])
#
# write('build/Tabelle_b.tex', make_table([a,b,c,d,e],[0, 1, 0, 1, 1])) # Jeder fehlerbehaftete Wert bekommt zwei Spalten
# write('build/Tabelle_b_texformat.tex', make_full_table(
# 'Messdaten Kapazitätsmessbrücke.',
# 'table:A2',
# 'build/Tabelle_b.tex',
# [1,2,3,4,5], # Hier aufpassen: diese Zahlen bezeichnen diejenigen resultierenden Spaltennummern,
# # die Multicolumns sein sollen
# ['Wert',
# r'$C_2 \:/\: \si{\nano\farad}$',
# r'$R_2 \:/\: \si{\ohm}$',
# r'$R_3 / R_4$', '$R_x \:/\: \si{\ohm}$',
# r'$C_x \:/\: \si{\nano\farad}$']))
#
## Aufsplitten von Tabellen, falls sie zu lang sind
# t1, t2 = np.array_split(t * 1e3, 2)
# U1, U2 = np.array_split(U * 1e-3, 2)
# write('build/loesung-table.tex', make_table([t1, U1, t2, U2], [3, None, 3, None])) # type in Nachkommastellen
#
## Verschmelzen von Tabellen (nur Rohdaten, Anzahl der Zeilen muss gleich sein)
# write('build/Tabelle_b_composed.tex', make_composed_table(['build/Tabelle_b_teil1.tex','build/Tabelle_b_teil2.tex']))
########## ARRAY FUNCTIONS ##########
# np.arange(2,10) # Erzeugt aufwärts zählendes Array von 2 bis 10
# np.zeros(15) # Erzeugt Array mit 15 Nullen
# np.ones(15) # Erzeugt Array mit 15 Einsen
#
# np.amin(array) # Liefert den kleinsten Wert innerhalb eines Arrays
# np.argmin(array) # Gibt mir den Index des Minimums eines Arrays zurück
# np.amax(array) # Liefert den größten Wert innerhalb eines Arrays
# np.argmax(array) # Gibt mir den Index des Maximums eines Arrays zurück
#
# a1,a2 = np.array_split(array, 2) # Array in zwei Hälften teilen
# np.size(array) # Anzahl der Elemente eines Arrays ermitteln
########## ARRAY INDEXING ##########
# y[n - 1::n] # liefert aus einem Array jeden n-ten Wert als Array
########## DIFFERENT STUFF ##########
# R = const.physical_constants["molar gas constant"] # Array of value, unit, error
############## Preliminary work ###################
# Physical constants (SI units).
h = 6.62607004*10**(-34)       # Planck constant [J s]
c = 299792458                  # speed of light [m/s]
d = 201.4*10**(-12)            # crystal lattice-plane spacing [m] — presumably LiF; TODO confirm
# NOTE(review): this rebinds `const`, shadowing the `scipy.constants`
# import at the top of the file.
const = (h*c)/(2*d)            # Bragg prefactor h*c/(2d) [J]
e = 1.6021766208*10**(-19)     # elementary charge [C]
R = 13.60569*e                 # Rydberg energy [J]
Rd = 10973731                  # Rydberg constant [1/m]
alpha = 7.2973525664*10**(-3)  # fine-structure constant
def Energie(Theta):
"""
Args:
Winkel: Theta [degree]
Returns:
Energie: E [eV]
"""
Theta = Theta/360 * 2*np.pi
E = const/np.sin(Theta)/e
return E
def Sigma(z, E, n, j):
    """Screening constant from a Moseley-type transition energy.

    Args:
        z: atomic number
        E: edge/transition energy [J]
        n: destination shell (principal quantum number)
        j: origin shell (principal quantum number)
    Returns:
        Screening constant sigma = z - sqrt(E / (R * (1/n^2 - 1/j^2))).
    """
    denominator = R*(1/(n**2) - (1/j**2))
    return z - np.sqrt(E / denominator)
def Sigma_L(Z, E):
    """Screening constant for an L edge from the fine-structure splitting.

    Args:
        Z: atomic number
        E: energy difference of the two L edges [J]
    Returns:
        Screening constant sigma_L (Sommerfeld fine-structure formula,
        expanded to first relativistic order).
    """
    bracket = 4/alpha * np.sqrt(E/R) - 5*E/R
    relativistic = 1 + 19/32*alpha**2 * E/R
    return Z - np.sqrt(bracket) * np.sqrt(relativistic)
# Tube: maximum photon energy and the corresponding minimum wavelength
# lambda_min = h*c / E_max (Duane-Hunt limit).
E_max_roehre = 35000 # maximum photon energy in eV (35 keV) -- presumably U = 35 kV, TODO confirm
write('build/E_max_t.tex', make_SI(E_max_roehre*10**(-3), r'\kilo\electronvolt', figures=1))
lambda_min_t = c*h/(E_max_roehre*e)  # minimum wavelength [m]
write('build/lambda_min_t.tex', make_SI(lambda_min_t*10**12, r'\pico\metre', figures=1))
#############1#####################
# Measurement 1: raw spectrum, marker at theta = 14 degrees.
theta = np.genfromtxt('messdaten/mess_1_winkel.txt', unpack=True)
I = np.genfromtxt('messdaten/mess_1_rate.txt', unpack=True)
theta = theta/2  # goniometer records 2*theta; convert to crystal angle
plt.clf() # clear actual plot before generating a new one
t_plot = np.linspace(np.amin(theta)-0.1, np.amax(theta)+0.1 , 100)
plt.xlim(np.amin(theta)-0.1, np.amax(theta)+0.1)
plt.ylim(np.amin(I)-10, 270)
plt.axvline(14, color='g', linestyle='--')
# FIX: removed the stray trailing '$' from the label -- an unbalanced '$'
# makes matplotlib try (and fail) to parse the text as mathtext.
plt.plot(theta, I, 'r.', label=r'Anzahl gemessener Impulse')
plt.xlabel(r'$\Theta \:/\: \si{\degree}$')
plt.ylabel(r'$I \:/\: \text{Impulse}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_1.pdf')
#############2##############
# Measurement 2: copper spectrum with the two K lines marked.
theta = np.genfromtxt('messdaten/mess_2_winkel.txt', unpack=True)
I = np.genfromtxt('messdaten/mess_2_rate.txt', unpack=True)
theta = theta/2  # goniometer records 2*theta
plt.clf() # clear actual plot before generating a new one
t_plot = np.linspace(np.amin(theta)-0.1, np.amax(theta)+0.1 , 100)
plt.xlim(np.amin(theta)-0.1, np.amax(theta)+0.1)
# Line positions read off the plot [degree]; beta sits at the smaller angle.
k_kante_b = 19.875
k_kante_a = 22.2
plt.axvline(k_kante_a, color='b', linestyle='--')
plt.axvline(k_kante_b, color='g', linestyle='--')
# FIX: removed the stray trailing '$' from the label (unbalanced mathtext).
plt.plot(theta, I, 'r.', label=r'Anzahl gemessener Impulse')
plt.xlabel(r'$\Theta \:/\: \si{\degree}$')
plt.ylabel(r'$I \:/\: \text{Impulse}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_2.pdf')
# Copper: screening constants sigma_1..3 from the measured K-line energies.
z = 29  # atomic number of copper
E_k_kante_a = Energie(k_kante_a)*e  # K_alpha energy [J]
E_k_kante_b = Energie(k_kante_b)*e  # K_beta energy [J]
sigma_1 = z - np.sqrt(E_k_kante_b/R) # NOTE(review): sigma_1 is derived from the beta line here -- ordering looks odd, verify
#sigma_2 = z - 2*np.sqrt((R*(z-sigma_1)**2-E_k_kante_a)/R) #still dont know....
sigma_2 = z - np.sqrt( 4*(z-sigma_1)**2 - 4* E_k_kante_a/R )
sigma_3 = z - np.sqrt( 9*(z-sigma_1)**2 - 9* E_k_kante_b/R )
sigma_3 = 27.387159059532273 # HACK: hard-coded override -- the expression above was numerically problematic; verify
#sigma_3 = z - np.sqrt( 9*(z-sigma_1)**2 - 9* Energie(k_kante_b)/13.60569 )
# My own approaches to sigma -- I believe this is correct (the old lab report does strange things).
sigma_2_nach_jay = z-np.sqrt( E_k_kante_b / ( R*(1 - (1/9)) ) )
write('build/sigma_2_nach_jay.tex', make_SI(sigma_2_nach_jay, r' ', figures=2))
sigma_1_nach_jay = z-np.sqrt( E_k_kante_a / ( R*(1 - (1/4)) ) )
write('build/sigma_1_nach_jay.tex', make_SI(sigma_1_nach_jay, r' ', figures=2))
write('build/Theta_k_kante_a.tex', make_SI(k_kante_a, r'\degree', figures=1))
write('build/Theta_k_kante_b.tex', make_SI(k_kante_b, r'\degree', figures=1))
write('build/E_k_kante_a_cu.tex', make_SI(E_k_kante_a*10**(-3)/e, r'\kilo\electronvolt', figures=2))
write('build/E_k_kante_b_cu.tex', make_SI(E_k_kante_b*10**(-3)/e, r'\kilo\electronvolt', figures=2))
write('build/sigma_1_cu.tex', make_SI(sigma_1, r' ', figures=2))
write('build/sigma_2_cu.tex', make_SI(sigma_2, r' ', figures=2))
write('build/sigma_3_cu.tex', make_SI(sigma_3, r' ', figures=2))
# Literature values [J] and relative deviations [%].
E_k_kante_a_lit = 8.04699993*10**3*e
E_k_kante_b_lit = 8.90400028*10**3*e
E_k_lit = 8.97900009*10**3*e # the actual K-edge value
sigma_1_lit = z - np.sqrt(E_k_lit/R) # NOTE(review): same unexplained ordering as above -- verify
sigma_2_lit = z - np.sqrt( 4*(z-sigma_1_lit)**2 - 4* E_k_kante_a_lit/R )
sigma_3_lit = z - np.sqrt( 9*(z-sigma_1_lit)**2 - 9* E_k_kante_b_lit/R )
write('build/E_k_kante_a_lit_cu.tex', make_SI(E_k_kante_a_lit/e*10**(-3), r'\kilo\electronvolt', figures=2))
write('build/E_k_kante_b_lit_cu.tex', make_SI(E_k_kante_b_lit/e*10**(-3), r'\kilo\electronvolt', figures=2))
write('build/sigma_1_lit_cu.tex', make_SI(sigma_1_lit, r' ', figures=2))
write('build/sigma_2_lit_cu.tex', make_SI(sigma_2_lit, r' ', figures=2))
write('build/sigma_3_lit_cu.tex', make_SI(sigma_3_lit, r' ', figures=2))
E_k_kante_a_rel = abs(E_k_kante_a_lit - E_k_kante_a)/E_k_kante_a_lit * 100
E_k_kante_b_rel = abs(E_k_kante_b_lit - E_k_kante_b)/E_k_kante_b_lit * 100
sigma_1_rel = abs(sigma_1 - sigma_1_lit)/sigma_1_lit * 100
sigma_2_rel = abs(sigma_2 - sigma_2_lit)/sigma_2_lit * 100
sigma_3_rel = abs(sigma_3 - sigma_3_lit)/sigma_3_lit * 100
write('build/E_k_kante_a_rel_cu.tex', make_SI(E_k_kante_a_rel, r'\percent', figures=1))
write('build/E_k_kante_b_rel_cu.tex', make_SI(E_k_kante_b_rel, r'\percent', figures=1))
write('build/sigma_1_rel_cu.tex', make_SI(sigma_1_rel, r'\percent', figures=1))
write('build/sigma_2_rel_cu.tex', make_SI(sigma_2_rel, r'\percent', figures=1))
write('build/sigma_3_rel_cu.tex', make_SI(sigma_3_rel, r'\percent', figures=1))
#############Auflösungsvermögen#############
# Resolving power: assume linear flanks between the peak feet and
# estimate an angular full width at half maximum, then convert to energy.
theta_1_start = 19.4  # left foot of peak 1 [degree]
theta_1_ende = 20.4   # right foot of peak 1 [degree]
theta_1_peak = 19.9   # peak position [degree]
delta_theta_1_l = theta_1_peak - theta_1_start
delta_theta_1_r = theta_1_ende - theta_1_peak
theta_1_halb_l = theta_1_start + 0.5*delta_theta_1_l  # half-maximum angle on the left flank
theta_1_halb_r = theta_1_peak + 0.5*delta_theta_1_r   # half-maximum angle on the right flank
theta_halbwert = theta_1_halb_r - theta_1_halb_l      # angular FWHM [degree]
energie_halbwert = Energie(theta_1_halb_l) - Energie(theta_1_halb_r)  # energy FWHM [eV]
write('build/delta_Energie_1.tex', make_SI(energie_halbwert*10**(-3), r'\kilo\electronvolt', figures=2))
write('build/delta_theta_1.tex', make_SI(theta_halbwert, r'\degree', figures=2))
# Same procedure for the second peak.
theta_2_start = 21.6
theta_2_ende = 22.8
theta_2_peak = 22.2
delta_theta_2_l = theta_2_peak - theta_2_start
delta_theta_2_r = theta_2_ende - theta_2_peak
theta_2_halb_l = theta_2_start + 0.5*delta_theta_2_l
theta_2_halb_r = theta_2_peak + 0.5*delta_theta_2_r
theta_halbwert_2 = theta_2_halb_r - theta_2_halb_l
energie_halbwert_2 = Energie(theta_2_halb_l) - Energie(theta_2_halb_r)
write('build/delta_Energie_2.tex', make_SI(energie_halbwert_2*10**(-3), r'\kilo\electronvolt', figures=2))
write('build/delta_theta_2.tex', make_SI(theta_halbwert_2, r'\degree', figures=2))
#############3################
# Measurement 3: continuum spectrum; theta_min gives the Duane-Hunt limit.
theta = np.genfromtxt('messdaten/mess_3_winkel.txt', unpack=True)
I = np.genfromtxt('messdaten/mess_3_rate.txt', unpack=True)
theta = theta/2  # goniometer records 2*theta
plt.clf() # clear actual plot before generating a new one
t_plot = np.linspace(np.amin(theta)-0.1, np.amax(theta)+0.1 , 100)
plt.xlim(np.amin(theta)-0.1, np.amax(theta)+0.1)
theta_min = 4.7  # smallest angle with signal [degree]
plt.axvline(theta_min, color='b', linestyle='--')
# FIX: removed the stray trailing '$' from the label (unbalanced mathtext).
plt.plot(theta, I, 'r.', label=r'Anzahl gemessener Impulse')
plt.xlabel(r'$\Theta \:/\: \si{\degree}$')
plt.ylabel(r'$I \:/\: \text{Impulse}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_3.pdf')
E_max = Energie(theta_min)        # maximum photon energy [eV]
lambda_min = c*h/(E_max*e)        # minimum wavelength [m]
write('build/Theta_min.tex', make_SI(theta_min, r'\degree', figures=1))
write('build/E_max.tex', make_SI(E_max*10**(-3), r'\kilo\electronvolt', figures=1))
write('build/lambda_min.tex', make_SI(lambda_min*10**12, r'\pico\metre', figures=1))
E_max_lit = 35*10**3  # expected maximum from the tube voltage [eV]
E_max_rel = abs(E_max - E_max_lit)/E_max_lit * 100
write('build/E_max_rel.tex', make_SI(E_max_rel, r'\percent', figures=1))
################Germanium################
# Germanium absorber: locate the K edge, then derive the screening constant.
theta = np.genfromtxt('messdaten/mess_ge_winkel.txt', unpack=True)
I = np.genfromtxt('messdaten/mess_ge_rate.txt', unpack=True)
theta = theta/2  # goniometer records 2*theta
plt.clf() # clear actual plot before generating a new one
t_plot = np.linspace(np.amin(theta)-0.1, np.amax(theta)+0.1 , 100)
plt.xlim(np.amin(theta)-0.1, np.amax(theta)+0.1)
plt.ylim(np.amin(I), 50)
plt.axvline(16.1, color='k', linestyle='--')  # flank of the edge [degree]
plt.axvline(15.4, color='k', linestyle='--')  # flank of the edge [degree]
kante = (16.1-15.4)/2 + 15.4  # edge position: midpoint between the flanks
plt.axvline(kante, color='b', linestyle='--')
plt.plot(theta, I, 'r.', label=r'Anzahl gemessener Impulse')
plt.xlabel(r'$\Theta \:/\: \si{\degree}$')
plt.ylabel(r'$I \:/\: \text{Impulse}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_ge.pdf')
E_ge = Energie(kante)  # K-edge energy [eV]
#sigma_ge = Sigma(32,E_ge*e,1,3 )
sigma_ge = 32 - np.sqrt(E_ge*e/R)  # screening constant for Z = 32
write('build/E_ge.tex', make_SI(E_ge*10**(-3), r'\kilo\electronvolt', figures=2))
write('build/sigma_ge.tex', make_SI(sigma_ge, r' ', figures=2))
E_ge_lit = 11.1040001*1000  # literature K-edge energy [eV]
#write('build/sigma_ge_lit.tex', make_SI(Sigma(32,E_ge_lit*e,1,3 ), r' ', figures=2))
sigma_ge_lit = 32 - np.sqrt(E_ge_lit*e/R)
write('build/sigma_ge_lit.tex', make_SI(sigma_ge_lit, r' ', figures=2))
sigma_ge_rel = abs(sigma_ge - sigma_ge_lit)/sigma_ge_lit * 100  # deviation [%]
write('build/sigma_ge_rel.tex', make_SI(sigma_ge_rel, r'\percent', figures=1))
##################Zirkonium##############
# Zirconium absorber: same K-edge procedure as for germanium.
theta = np.genfromtxt('messdaten/mess_zr_winkel.txt', unpack=True)
I = np.genfromtxt('messdaten/mess_zr_rate.txt', unpack=True)
theta = theta/2  # goniometer records 2*theta
plt.clf() # clear actual plot before generating a new one
t_plot = np.linspace(np.amin(theta)-0.1, np.amax(theta)+0.1 , 100)
plt.xlim(np.amin(theta)-0.1, np.amax(theta)+0.1)
plt.ylim(np.amin(I), 300)
plt.axvline(9.2, color='k', linestyle='--')  # flank of the edge [degree]
plt.axvline(10, color='k', linestyle='--')   # flank of the edge [degree]
kante = (10-9.2)/2 + 9.2  # edge position: midpoint between the flanks
plt.axvline(kante, color='b', linestyle='--')
plt.plot(theta, I, 'r.', label=r'Anzahl gemessener Impulse')
plt.xlabel(r'$\Theta \:/\: \si{\degree}$')
plt.ylabel(r'$I \:/\: \text{Impulse}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_zr.pdf')
E_zr_lit = 17.9979992*1000  # literature K-edge energy [eV]
E_zr = Energie(kante)       # measured K-edge energy [eV]
#sigma_zr = Sigma(40,E_zr*e,1,3 )
sigma_zr = 40 - np.sqrt(E_zr*e/R)  # screening constant for Z = 40
write('build/E_zr.tex', make_SI(E_zr*10**(-3), r'\kilo\electronvolt', figures=2))
write('build/sigma_zr.tex', make_SI(sigma_zr, r' ', figures=2))
sigma_zr_lit = 40 - np.sqrt(E_zr_lit*e/R)
#write('build/sigma_zr_lit.tex', make_SI(Sigma(40,E_zr_lit*e,1,3 ), r' ', figures=2))
write('build/sigma_zr_lit.tex', make_SI(sigma_zr_lit, r' ', figures=2))
sigma_zr_rel = abs(sigma_zr - sigma_zr_lit)/sigma_zr_lit * 100  # deviation [%]
write('build/sigma_zr_rel.tex', make_SI(sigma_zr_rel, r'\percent', figures=1))
################Strontium################
# Strontium absorber: same K-edge procedure as above.
theta = np.genfromtxt('messdaten/mess_sr_winkel.txt', unpack=True)
I = np.genfromtxt('messdaten/mess_sr_rate.txt', unpack=True)
theta = theta/2  # goniometer records 2*theta
plt.clf() # clear actual plot before generating a new one
t_plot = np.linspace(np.amin(theta)-0.1, np.amax(theta)+0.1 , 100)
plt.xlim(np.amin(theta)-0.1, np.amax(theta)+0.1)
plt.ylim(np.amin(I), 180)
plt.axvline(10.3, color='k', linestyle='--')  # flank of the edge [degree]
plt.axvline(11, color='k', linestyle='--')    # flank of the edge [degree]
kante = (11-10.3)/2 + 10.3  # edge position: midpoint between the flanks
plt.axvline(kante, color='b', linestyle='--')
plt.plot(theta, I, 'r.', label=r'Anzahl gemessener Impulse')
plt.xlabel(r'$\Theta \:/\: \si{\degree}$')
plt.ylabel(r'$I \:/\: \text{Impulse}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_sr.pdf')
E_sr_lit = 16.1049995*1000  # literature K-edge energy [eV]
E_sr = Energie(kante)       # measured K-edge energy [eV]
#sigma_sr = Sigma(38,E_sr*e,1,3 )
sigma_sr = 38 - np.sqrt(E_sr*e/R)  # screening constant for Z = 38
write('build/E_sr.tex', make_SI(E_sr*10**(-3), r'\kilo\electronvolt', figures=2))
write('build/sigma_sr.tex', make_SI(sigma_sr, r' ', figures=2))
#write('build/sigma_sr_lit.tex', make_SI(Sigma(38,E_sr_lit*e,1,3 ), r' ', figures=2))
sigma_sr_lit = 38 - np.sqrt(E_sr_lit*e/R)
write('build/sigma_sr_lit.tex', make_SI(sigma_sr_lit
, r' ', figures=2))
sigma_sr_rel = abs(sigma_sr - sigma_sr_lit)/sigma_sr_lit * 100  # deviation [%]
write('build/sigma_sr_rel.tex', make_SI(sigma_sr_rel, r'\percent', figures=1))
##############Wismut#############
# Bismuth absorber: both L edges; their energy difference gives sigma_L.
theta = np.genfromtxt('messdaten/mess_wi_winkel.txt', unpack=True)
I = np.genfromtxt('messdaten/mess_wi_rate.txt', unpack=True)
theta = theta/2  # goniometer records 2*theta
plt.clf() # clear actual plot before generating a new one
t_plot = np.linspace(np.amin(theta)-0.1, np.amax(theta)+0.1 , 100)
plt.xlim(np.amin(theta)-0.1, np.amax(theta)+0.1)
plt.ylim(np.amin(I), 160)
plt.axvline(10.9, color='k', linestyle='--')  # first L edge [degree]
plt.axvline(12.9, color='k', linestyle='--')  # second L edge [degree]
#kante = 13.2-11.2
#plt.axvline(kante, color='b', linestyle='--')
# FIX: removed the stray trailing '$' from the label (unbalanced mathtext).
plt.plot(theta, I, 'r.', label=r'Absorbtionsspektrum von Wismut (L-Kanten)')
plt.xlabel(r'$\Theta \:/\: \si{\degree}$')
plt.ylabel(r'$I \:/\: \text{Impulse}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_wi.pdf')
write('build/theta_wi_1.tex', make_SI(10.9, r'\degree', figures=1))
write('build/theta_wi_2.tex', make_SI(12.9, r'\degree', figures=1))
delta_E_wi = Energie(10.9) - Energie(12.9)  # L-edge splitting [eV]
write('build/wi_delta_E.tex', make_SI(delta_E_wi, r'\electronvolt', figures=1))
delta_E_wi = delta_E_wi * e  # convert to [J] for Sigma_L
sigma_wi = Sigma_L(83, delta_E_wi)  # screening constant for Z = 83
write('build/sigma_wi.tex', make_SI(sigma_wi, r' ', figures=2))
delta_E_wi_lit = 15.7080002*1000*e - 13.4180002*1000*e  # literature splitting [J]
sigma_wi_lit = Sigma_L(83, delta_E_wi_lit)
write('build/sigma_wi_lit.tex', make_SI(sigma_wi_lit, r' ', figures=2))
sigma_wi_rel = abs(sigma_wi - sigma_wi_lit)/sigma_wi_lit * 100  # deviation [%]
write('build/sigma_wi_rel.tex', make_SI(sigma_wi_rel, r'\percent', figures=1))
############RYDBERG#########
# Moseley plot: sqrt(E) against the effective charge Z - sigma; the slope
# of the linear fit yields the Rydberg energy/constant.
#Z_array = ([32-1,38-1,40-1])
Z_array = ([32-sigma_ge,38-sigma_sr,40-sigma_zr])
E_array = ([E_ge, E_sr, E_zr])
E_array = np.sqrt(E_array)
plt.clf() # clear actual plot before generating a new one
# FIX: removed the stray trailing '$' from both labels (unbalanced mathtext).
plt.plot(Z_array, E_array, 'r.', label=r'Messwerte')
plt.xlabel(r'$Z$')
plt.ylabel(r'$ \sqrt{E \:/\: \si{\joule} } $')
params = ucurve_fit(reg_linear, Z_array, E_array) # linear fit (params carry uncertainties)
a, b = params
t_plot = np.linspace(30, 42, 1000)
plt.plot(t_plot, a.n*t_plot+b.n, 'b-', label=r'Linearer Fit')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plot_ryd.pdf')
# NOTE(review): the units '\kilo\volt' / '\kilo\hertz' for the fit
# parameters look inconsistent with a sqrt(E)-vs-Z fit -- verify.
write('build/parameter_a.tex', make_SI(a, r'\kilo\volt', figures=1)) # type in Anz. signifikanter Stellen
write('build/parameter_b.tex', make_SI(b, r'\kilo\hertz', figures=2)) # type in Anz. signifikanter Stellen
ebberg = 4/3 *e* a**2/(h*c)  # Rydberg constant from the slope [1/m]
write('build/ebberg_konstante.tex', make_SI(4/3 * a**2, r'\electronvolt', figures=1)) # type in Anz. signifikanter Stellen
write('build/ebberg_konstante_ry.tex', make_SI(4/3 *e* a**2/(h*c), r'\per\metre', figures=1))
write('build/ebberg_konstante_lit.tex', make_SI(Rd, r'\per\metre', figures=1))
ebberg_rel = abs(ebberg - Rd)/Rd * 100  # deviation from the literature value [%]
write('build/ebbergs_rel.tex', make_SI(ebberg_rel.n, r'\percent', figures=1))
|
988,511 | cd526e9f9426fc9fc9040e33661e9a26909af291 | import unittest
import time
from app import BASE_DIR
from script.hr_login import Login
from script.hr_test_emp import TestEmp
from script.hr_test_login import TestIHRMLogin
from tools.HTMLTestRunner import HTMLTestRunner
# Assemble the regression suite and emit an HTML report via HTMLTestRunner.
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Login)) # add the login and employee-management cases
suite.addTest(unittest.makeSuite(TestEmp))
# suite.addTest(unittest.makeSuite(TestIHRMLogin))
#report_path = BASE_DIR + "/report/ihrm{}.html".format(time.strftime("%Y%m%d_%H%M%S"))
report_path = BASE_DIR + "/report/ihrm.html"  # fixed name; timestamped variant kept above
with open(report_path,"wb") as f:
    runner = HTMLTestRunner(f,verbosity=1,title="自动化接口测试",description="v1.0")
    runner.run(suite)
|
988,512 | c369be26cb4f6dca2cc79937d342339bc30c8b10 | from __future__ import print_function, absolute_import, division
import time
import sys
class PID:
    """Discrete PID controller with a fixed sampling interval.

    The integral is accumulated as a rectangle sum and the derivative is
    a backward difference over one time step.
    """

    def __init__(self, dt=0.05, Kp=0, Ki=0, Kd=0):
        self.dt = dt  # sampling interval [s]
        self.Kp = Kp  # proportional gain
        self.Ki = Ki  # integral gain
        self.Kd = Kd  # derivative gain
        self.reset()

    def feedback(self, err):
        """Return the controller output for the current error sample."""
        # FIX: idiomatic None check (was `type(self.err_p) == type(None)`).
        # On the first sample there is no previous error, so the
        # derivative term evaluates to zero.
        if self.err_p is None:
            self.err_p = err
        self.errsum += err*self.dt
        d_err = (err-self.err_p)/self.dt
        self.err_p = err
        return self.Kp*err+self.Ki*self.errsum+self.Kd*d_err

    def reset(self):
        """Clear the integrator and forget the previous error."""
        self.err_p = None
        self.errsum = 0.0
988,513 | ac5d0e51859c97185072630dd5e8189d9820366c | import requests, json, os
from pprint import pprint
def busca_dicionario(p, d):
    """Query the Google Directions API for a route from p to d.

    Args:
        p: origin address string.
        d: destination address string.
    Returns:
        The decoded JSON response as a dict (language pt).
    """
    # NOTE(review): API key hard-coded in source -- should live in config/env.
    api_key = 'AIzaSyDaY5QRWxXSN-2PCncnGv3R3oyjVJffJPE'
    url = "https://maps.googleapis.com/maps/api/directions/json?origin={0}&destination={1}&key={2}&language=pt".format(p, d, api_key)
    response = requests.get(url)
    return json.loads(response.text)
def get_bairros(dicionario):
    """Collect the neighbourhoods (bairros) along a Directions route.

    Reverse-geocodes the end point of every route step and parses the
    formatted addresses, which are expected to look like
    "... - BAIRRO, CIDADE ...".

    Args:
        dicionario: decoded JSON response from the Directions API.
    Returns:
        dict mapping upper-cased bairro name -> upper-cased city name.
    """
    all_bairros = {}
    percurso = (dicionario["routes"][0]["legs"][0]["steps"])
    # FIX: the inner loop reused the outer loop variable `i`; renamed both
    # for clarity, behavior unchanged.
    for step in percurso:
        # NOTE(review): API key hard-coded in source -- should live in config/env.
        geolocation_url = 'https://maps.googleapis.com/maps/api/geocode/json?latlng=%s,%s&key=AIzaSyDaY5QRWxXSN-2PCncnGv3R3oyjVJffJPE' %(step['end_location']['lat'], step['end_location']['lng'])
        location = requests.get(geolocation_url)
        obj = json.loads(location.text)
        for result in obj['results']:
            # FIX: replaced two bare `except:` clauses with the specific
            # exceptions actually expected here.
            try:
                partes = result['formatted_address'].split('-')[1].split(',')
                bairro = partes[0].replace(" ", "").upper()
                cidade = partes[1]
            except (KeyError, IndexError):
                # Address without the expected "… - bairro, cidade" shape.
                continue
            try:
                int(bairro)  # purely numeric tokens are codes, not bairros
            except ValueError:
                if(not all_bairros.get(bairro) and len(bairro) > 2):
                    all_bairros[bairro] = cidade.upper()
    return all_bairros
if __name__== "__main__":
    # Demo run: route between two fixed addresses (presumably in Recife,
    # given the street/neighbourhood names -- TODO confirm).
    p = 'rua araripina 419 santo amaro'
    d = 'rua são matheus iputinga'
    dicionario = busca_dicionario(p, d)
    lista_bairros = get_bairros(dicionario)
    print(lista_bairros)
988,514 | 9a3638d197df206afc415eaf465960829311e54f | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from gettext import gettext as tr
from holidays.calendars.gregorian import (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
MON,
_get_nth_weekday_from,
)
from holidays.groups import ChristianHolidays, InternationalHolidays
from holidays.holiday_base import HolidayBase
class Argentina(HolidayBase, ChristianHolidays, InternationalHolidays):
"""
A subclass of :py:class:`HolidayBase` representing public holidays
in Argentina.
References:
- Based on:
https://es.wikipedia.org/wiki/Anexo:D%C3%ADas_feriados_en_Argentina
- [Ley 24455] Belgrano and San Martin Day as third Monday
- [Ley 27399] - For 2018++
https://www.argentina.gob.ar/normativa/nacional/ley-27399-281835/texto
- [Decreto 1585/2010] - 2011-2013 Bridge Holidays, Movable Holidays Law
- [Decreto 1768/2013] - 2014-2016 Bridge Holidays
- [Decretos 52-80-923/2017] - 2017-2019 Bridge Holidays
- [Decreto 717/2019] - 2020 Bridge Holidays
- [Decreto 297/2020] - Veteran Day moved due to Covid-19
- [Decreto 947/2020] - 2021 Bridge Holidays
- [Decreto 789/2021] - 2022 Bridge Holidays
- [Decreto 764/2022] - 2023 Bridge Holidays
- [Always Update Calendar Year Link]
https://www.argentina.gob.ar/interior/feriados
http://servicios.lanacion.com.ar/feriados
https://www.clarin.com/feriados/
- [Specific Calendar Year]
https://www.lanacion.com.ar/feriados/2024/
https://www.argentina.gob.ar/interior/feriados-nacionales-2023
https://www.argentina.gob.ar/interior/feriados-nacionales-2022
https://www.argentina.gob.ar/interior/feriados-nacionales-2021
https://www.argentina.gob.ar/interior/feriados-nacionales-2020
https://www.cultura.gob.ar/feriados-2019-en-argentina_7326/
https://servicios.lanacion.com.ar/app-mobile/feriados/2019
https://servicios.lanacion.com.ar/app-mobile/feriados/2018
https://servicios.lanacion.com.ar/app-mobile/feriados/2017
https://servicios.lanacion.com.ar/app-mobile/feriados/2016
https://servicios.lanacion.com.ar/app-mobile/feriados/2015
"""
country = "AR"
default_language = "es"
supported_languages = ("en_US", "es", "uk")
# Special Bridge Holidays are given upto 3 days a year
# as long as it's declared 50 days before calendar year's end
# There's no Bridge Holidays declared in 2017
# Bridge Public Holiday.
arg_bridge_public_holiday = tr("Feriado con fines turísticos")
# Bicentenary of the creation and first oath of the national flag.
bicentennial_national_flag = tr(
"Bicentenario de la creación y primera jura de la bandera nacional"
)
# Bicentenary of the Battle of Tucuman.
bicentennial_battle_tucuman = tr("Bicentenario de la Batalla de Tucumán")
# Bicentenary of the inaugural session of the National Constituent Assembly of the year 1813.
bicentennial_assembly_1813 = tr(
"Bicentenario de la sesión inaugural de la Asamblea Nacional Constituyente del año 1813"
)
# Bicentenary of the Battle of Salta.
bicentennial_battle_salta = tr("Bicentenario de la Batalla de Salta")
# National Census Day 2022.
national_census_2022 = tr("Censo nacional 2022")
special_holidays = {
2011: (
(MAR, 25, arg_bridge_public_holiday),
(DEC, 9, arg_bridge_public_holiday),
),
2012: (
(FEB, 27, bicentennial_national_flag),
(APR, 30, arg_bridge_public_holiday),
(SEP, 24, bicentennial_battle_tucuman),
(DEC, 24, arg_bridge_public_holiday),
),
2013: (
(JAN, 31, bicentennial_assembly_1813),
(FEB, 20, bicentennial_battle_salta),
(APR, 1, arg_bridge_public_holiday),
(JUN, 21, arg_bridge_public_holiday),
),
2014: (
(MAY, 2, arg_bridge_public_holiday),
(DEC, 26, arg_bridge_public_holiday),
),
2015: (
(MAR, 23, arg_bridge_public_holiday),
(DEC, 7, arg_bridge_public_holiday),
),
2016: (
(JUL, 8, arg_bridge_public_holiday),
(DEC, 9, arg_bridge_public_holiday),
),
2018: (
(APR, 30, arg_bridge_public_holiday),
(DEC, 24, arg_bridge_public_holiday),
(DEC, 31, arg_bridge_public_holiday),
),
2019: (
(JUL, 8, arg_bridge_public_holiday),
(AUG, 19, arg_bridge_public_holiday),
(OCT, 14, arg_bridge_public_holiday),
),
2020: (
(MAR, 23, arg_bridge_public_holiday),
(JUL, 10, arg_bridge_public_holiday),
(DEC, 7, arg_bridge_public_holiday),
),
2021: (
(MAY, 24, arg_bridge_public_holiday),
(OCT, 8, arg_bridge_public_holiday),
(NOV, 22, arg_bridge_public_holiday),
),
2022: (
(MAY, 18, national_census_2022),
(OCT, 7, arg_bridge_public_holiday),
(NOV, 21, arg_bridge_public_holiday),
(DEC, 9, arg_bridge_public_holiday),
),
2023: (
(MAY, 26, arg_bridge_public_holiday),
(JUN, 19, arg_bridge_public_holiday),
(OCT, 13, arg_bridge_public_holiday),
),
}
    def __init__(self, *args, **kwargs):
        # Initialize the holiday-group mixins before HolidayBase, which
        # drives _populate() during its own construction.
        ChristianHolidays.__init__(self)
        InternationalHolidays.__init__(self)
        super().__init__(*args, **kwargs)
    def _move_holiday(self, dt: date) -> None:
        """
        Move a holiday on *dt* to its observed Monday, in place.

        Movable Holidays Laws:
        - Decreto 1584/2010: 2010-11-03
          - AUG 17, OCT 12, NOV 20 Holidays will always be on MON
        - Decreto 52/2017: 2017-01-23 (Reconfirmed in Ley 27399)
          - If TUE/WED - observed on previous MON
          - If THU/FRI - observed on next MON
        """
        if self.observed:
            dt_observed = None
            if self._is_tuesday(dt) or self._is_wednesday(dt):
                dt_observed = _get_nth_weekday_from(-1, MON, dt)
            elif self._is_thursday(dt) or self._is_friday(dt):
                dt_observed = _get_nth_weekday_from(+1, MON, dt)
            # MON/SAT/SUN dates are left untouched.
            if dt_observed:
                # Re-add under the "(Observado)" name, then drop the original.
                self._add_holiday(self.tr("%s (Observado)") % self[dt], dt_observed)
                self.pop(dt)
    def _populate(self, year):
        """Add all Argentine public holidays for *year* to the instance."""
        super()._populate(year)

        # Fixed Holidays

        # Status: In-Use.
        # New Year's Day.
        self._add_new_years_day(tr("Año Nuevo"))

        # Status: In-Use.
        # Started in 1956, abandoned in 1976.
        # Restarted in 2011 via Decreto 1584/2010.
        if 1956 <= year <= 1975 or year >= 2011:
            # Carnival.
            name = tr("Día de Carnaval")
            self._add_carnival_monday(name)
            self._add_carnival_tuesday(name)

        # Status: In-Use
        # Started in 2006, nearly reclassified as Movable Holidays in 2017
        if year >= 2006:
            self._add_holiday_mar_24(
                # Memory's National Day for the Truth and Justice.
                tr("Día Nacional de la Memoria por la Verdad y la Justicia")
            )

        # Status: In-Use.
        # Started in 1993 as War Veterans Day via Ley 24160.
        # Merged in 2001, confirmed as Fixed Holiday in 2006.
        # Superseded "Day of Argentine Sovereignty over the Malvinas".
        # Got moved temporary in 2020 (Decreto 297/2020).
        if year >= 1993:
            name = (
                # War Veterans Day.
                tr("Día del Veterano de Guerra")
                if year <= 2000
                # Veterans Day and the Fallen in the Malvinas War.
                else tr("Día del Veterano y de los Caidos en la Guerra de Malvinas")
            )
            if year == 2020:
                self._add_holiday_mar_31(name)
            else:
                self._add_holiday_apr_2(name)

        # Good Friday.
        self._add_good_friday(tr("Viernes Santo"))

        if year >= 1930:
            # Labor Day.
            self._add_labor_day(tr("Día del Trabajo"))

        if year >= 1813:
            # May Revolution Day.
            self._add_holiday_may_25(tr("Día de la Revolución de Mayo"))

        # Status: Defunct.
        # Started in 1983 on April 2, moved to June 10 in Decreto 901/1984.
        # Abandoned in 2001.
        # Superseded by "Veterans Day and the Fallen in the Malvinas War".
        if 1983 <= year <= 2000:
            # Day of Argentine Sovereignty over the Malvinas, Sandwich and
            # South Atlantic Islands.
            name = tr(
                "Día de los Derechos Argentinos sobre las Islas Malvinas, "
                "Sandwich y del Atlántico Sur"
            )
            if year == 1983:
                self._add_holiday_apr_2(name)
            else:
                self._add_holiday_jun_10(name)

        # Also called "National Flag Day" (Día de la Bandera Nacional).
        # Status: In-Use.
        # Started in 1938 via Ley 12361 as Fixed Holiday.
        # Set as 3rd MON of JUN via Ley 24455 in Dec 1994.
        # Made Fixed Holiday again in 2011.
        if year >= 1938:
            # Pass to the Immortality of General Don Manuel Belgrano.
            name = tr("Paso a la Inmortalidad del General Don Manuel Belgrano")
            if 1995 <= year <= 2010:
                self._add_holiday_3rd_mon_of_jun(name)
            else:
                self._add_holiday_jun_20(name)

        if year >= 1816:
            # Independence Day.
            self._add_holiday_jul_9(tr("Día de la Independencia"))

        # Immaculate Conception.
        self._add_immaculate_conception_day(tr("Inmaculada Concepción de María"))

        # Christmas.
        self._add_christmas_day(tr("Navidad"))

        # Movable Holidays

        # Status: In-Use.
        # Started in 2014 for Salta, 2016 for the whole country via Ley 27258.
        if year >= 2016:
            jun_17 = self._add_holiday_jun_17(
                # Pass to the Immortality of General Don Martin Miguel de Guemes.
                tr("Paso a la Inmortalidad del General Don Martín Miguel de Güemes"),
            )
            # If Jun 17 is Friday, then it should move to Mon, Jun 20
            # but Jun 20 is Gen. Belgrano holiday
            if not self._is_friday(jun_17):
                self._move_holiday(jun_17)

        # Status: In-Use.
        # Started in 1938 via Ley 12387 on Aug 17.
        # Set as 3rd MON of AUG via Ley 24455 in Dec 1994.
        # Moved to Aug 22 for 2011 (election interfere) via Decreto 521/2011.
        # Pass to the Immortality of General Don Jose de San Martin.
        name = tr("Paso a la Inmortalidad del General Don José de San Martin")
        if year == 2011:
            self._add_holiday_aug_22(name)
        elif 1938 <= year <= 1994:
            self._add_holiday_aug_17(name)
        elif 1995 <= year <= 2010:
            self._add_holiday_3rd_mon_of_aug(name)
        elif year >= 2012:
            self._move_holiday(self._add_holiday_aug_17(name))

        # Status: In-Use.
        # First started in 1917 for Argentina.
        # In 2010 the holiday became movable and its name was changed.
        if year >= 1917:
            name = (
                # Respect for Cultural Diversity Day.
                tr("Día del Respeto a la Diversidad Cultural")
                if year >= 2010
                # Columbus Day.
                else tr("Día de la Raza")
            )
            oct_12 = self._add_columbus_day(name)
            if year >= 2010:
                self._move_holiday(oct_12)

        # Status: In-Use.
        # First observed with no holiday via Ley 20770 in 1974.
        # Started in 2010.
        # Moved to Nov 27 for 2015 (election interfere).
        # Moved to Nov 28 again for 2016.
        if year >= 2010:
            # National Sovereignty Day.
            name = tr("Día de la Soberanía Nacional")
            if year == 2015:
                self._add_holiday_nov_27(name)
            elif year == 2016:
                self._add_holiday_nov_28(name)
            else:
                self._move_holiday(self._add_holiday_nov_20(name))
class AR(Argentina):
    """ISO 3166-1 alpha-2 alias for Argentina."""

    pass
class ARG(Argentina):
    """ISO 3166-1 alpha-3 alias for Argentina."""

    pass
|
988,515 | 8ae441cb69282646af2f00abb26bc1f8729dad48 | import random
import tkinter
import pandas
BACKGROUND_COLOR = "#B1DDC6"

current_card = {}  # the card currently shown on the canvas

# Prefer the user's remaining-words file; fall back to the full word list
# on first run.
# FIX: the to_dict() conversion was duplicated in both the except and the
# else branch; it now runs once after the try/except.
try:
    data = pandas.read_csv("words_to_learn.csv")
except FileNotFoundError:
    data = pandas.read_csv("Japanese_words.csv")
data_dict = data.to_dict(orient="records")
# ----------------------------------------- FUNCTIONS ----------------------------------------- #
def next_card():
    """Show the front (Japanese side) of a random card and restart the
    auto-flip timer."""
    global current_card, flip_timer
    window.after_cancel(flip_timer)  # cancel the pending flip of the old card
    current_card = random.choice(data_dict)
    canvas.itemconfig(language_text, text="Japanese", fill="black")
    canvas.itemconfig(word_text, text=current_card["Japanese"], fill="black")
    canvas.itemconfig(japanese_pronunciation, text="", fill="black")
    canvas.itemconfig(canvas_image, image=card_front_logo)
    flip_timer = window.after(3000, func=flip_card)  # flip after 3 s
def flip_card():
    """Show the back of the current card: translation and pronunciation."""
    canvas.itemconfig(canvas_image, image=card_back_logo)
    canvas.itemconfig(language_text, text="Translation", fill="white")
    canvas.itemconfig(word_text, text=current_card["Translation"], fill="white")
    canvas.itemconfig(japanese_pronunciation, text=current_card["Japanese_Pronunciation"], fill="white")
def is_known():
    """Drop the current card from the pool, persist the remainder to
    words_to_learn.csv, and advance to the next card."""
    data_dict.remove(current_card)
    remaining_data = pandas.DataFrame(data_dict)
    remaining_data.to_csv("words_to_learn.csv", index=False)
    next_card()
# ----------------------------------------- UI SETUP ----------------------------------------- #
window = tkinter.Tk()
window.title("Japanese Flashy")
window.config(bg=BACKGROUND_COLOR, padx=50, pady=50)

# Auto-flip to the translation side 3 s after a card is shown.
flip_timer = window.after(3000, flip_card)

# Card canvas with three text layers: language, word, pronunciation.
canvas = tkinter.Canvas(width=800, height=526, bg=BACKGROUND_COLOR, highlightthickness=0)
card_front_logo = tkinter.PhotoImage(file="./images/card_front.png")
canvas_image = canvas.create_image(400, 263, image=card_front_logo)
canvas.grid(row=0, column=0, columnspan=2)
language_text = canvas.create_text(400, 100, text="", font=("Arial", 40, "italic"))
word_text = canvas.create_text(400, 250, text="", font=("Arial", 40, "bold"))
japanese_pronunciation = canvas.create_text(400, 400, text="", font=("Arial", 40, "bold"))
card_back_logo = tkinter.PhotoImage(file="./images/card_back.png")

# "Right" removes the card from the pool; "wrong" just shows another.
right_image = tkinter.PhotoImage(file="./images/right.png")
known_button = tkinter.Button(image=right_image, highlightthickness=0, bg=BACKGROUND_COLOR, command=is_known, bd=0)
known_button.grid(row=1, column=1)
wrong_image = tkinter.PhotoImage(file="./images/wrong.png")
unknown_button = tkinter.Button(image=wrong_image, highlightthickness=0, bg=BACKGROUND_COLOR, command=next_card,bd=0)
unknown_button.grid(row=1, column=0)

next_card()
window.mainloop()
988,516 | 3e8a4a5a4d17d3a0561e182fbff7969e1fa6bec7 | class INIDefaults:
    class Bullet:
        # Default INI settings for bullet-list rendering.
        # The *_COLOR values are style keys, presumably resolved against a
        # theme/palette elsewhere -- TODO confirm.
        ALIGN = 0
        BACKGROUND_COLOR = 'background.default'
        BACKGROUND_COLOR_ON_SWITCH = 'reverse'
        BULLET = '●'  # glyph drawn before each item
        BULLET_COLOR = 'foreground.default'
        INDENT = 0
        MARGIN = 0
        PAD_RIGHT = 0
        SHIFT = 0
        WORD_COLOR = 'foreground.default'
        WORD_COLOR_ON_SWITCH = 'reverse'
988,517 | 3a7952f0da29c61993910e0b4ef20d1efb581f18 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'particularly_search_singer.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI for the singer-search window.

    NOTE: auto-generated from particularly_search_singer.ui -- per the file
    header, "All changes made in this file will be lost!"; edit the .ui
    file instead of this class.
    """

    def setupUi(self, MainWindow):
        """Create all widgets and install them on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Back button, bottom-left.
        self.backButton = QtWidgets.QPushButton(self.centralwidget)
        self.backButton.setGeometry(QtCore.QRect(20, 480, 91, 41))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.backButton.setFont(font)
        self.backButton.setObjectName("backButton")
        # Search button, bottom-right.
        self.searchButton = QtWidgets.QPushButton(self.centralwidget)
        self.searchButton.setGeometry(QtCore.QRect(670, 480, 91, 41))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.searchButton.setFont(font)
        self.searchButton.setObjectName("searchButton")
        # Query input field.
        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(230, 240, 321, 51))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.lineEdit.setFont(font)
        self.lineEdit.setObjectName("lineEdit")
        # Prompt label above the input (text set at runtime).
        self.mainLabel = QtWidgets.QLabel(self.centralwidget)
        self.mainLabel.setGeometry(QtCore.QRect(260, 170, 261, 41))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.mainLabel.setFont(font)
        self.mainLabel.setText("")
        self.mainLabel.setObjectName("mainLabel")
        # Error/status label below the input (text set at runtime).
        self.errorLabel = QtWidgets.QLabel(self.centralwidget)
        self.errorLabel.setGeometry(QtCore.QRect(230, 340, 361, 131))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.errorLabel.setFont(font)
        self.errorLabel.setText("")
        self.errorLabel.setObjectName("errorLabel")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply translatable display strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.backButton.setText(_translate("MainWindow", "Back"))
        self.searchButton.setText(_translate("MainWindow", "Search"))
|
988,518 | 4791ecde2bc4d8b2e93b3056a41e1da64eb8e6d5 | #-*- coding: utf-8 -*-
import os
import smtplib
import mimetypes
from email import encoders
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.utils import parseaddr, formataddr
def _format_addr(s):
    # Split "Name <addr>" and rebuild it with an RFC 2047 encoded display name.
    # NOTE(review): `unicode` exists only on Python 2 -- this module is py2-only.
    name, addr = parseaddr(s)
    return formataddr(( Header(name, 'utf-8').encode(), addr.encode('utf-8') if isinstance(addr, unicode) else addr))
def send_mail(from_user, to_user, content):
    """Send an HTML mail (optionally with attachments) over SMTP-SSL.

    from_user: dict with 'name', 'addr', 'password', 'smtp_server'.
    to_user:   dict with 'name' and a comma-separated 'addr' string.
    content:   dict with 'subject', 'text' and optional comma-separated
               'files_path'; image attachments are also inlined via cid.
    Returns 1 on success, 0 on any failure (exception is printed).
    """
    msg = MIMEMultipart()
    to_user_addr = to_user['addr'].split(',')
    msg['From'] = _format_addr('%s <%s>' % (from_user['name'].decode('utf8'), from_user['addr']) )
    # NOTE(review): to_user_addr is a *list* here, so the To header renders as
    # "name <['a@b', ...]>" -- ','.join(to_user_addr) was probably intended.
    msg['To'] = _format_addr('%s <%s>' % (to_user['name'].decode('utf8'), to_user_addr) )
    msg['Subject'] = Header(content['subject'].decode('utf8'))
    files_path = content.get('files_path')
    cid = []
    if files_path:
        files_path_list = files_path.split(',')
        for i in range(len(files_path_list)):
            filepath = files_path_list[i]
            filename = os.path.basename(filepath)
            # NOTE(review): guess_type() may return (None, None) for unknown
            # extensions, which would raise AttributeError on .split here.
            filetype = mimetypes.guess_type(filepath)[0].split('/')
            with open(filepath, 'rb') as f:
                # Remember each image attachment's index so it can be inlined
                # in the HTML body via a cid: reference.
                if filetype[0] == 'image':
                    cid.append((i, 1))
                else:
                    cid.append((i, 0))
                # Set the attachment's MIME type and filename
                mime = MIMEBase(filetype[0] , filetype[1], filename=filename)
                # Add the required headers
                mime.add_header('Content-Disposition', 'attachment', filename=filename)
                mime.add_header('Content-ID', '<'+ str(i) +'>')
                mime.add_header('X-Attachment-Id', str(i))
                # Read in the attachment's content
                mime.set_payload(f.read())
                # Base64-encode it
                encoders.encode_base64(mime)
                # Attach it to the MIMEMultipart message
                msg.attach(mime)
    # Build the HTML body, embedding each image attachment inline.
    html = '<html><body><h1>'+ content['text'] +'</h1>'
    for index,flag in cid:
        if flag :
            html += '<p><img src="cid:%s"></p>' % str(index)
    html += '</body></html>'
    msg.attach(MIMEText(html, 'html', 'utf-8'))
    try:
        server = smtplib.SMTP_SSL(from_user['smtp_server'], 465)
        server.login(from_user['addr'], from_user['password'])
        server.sendmail(from_user['addr'], to_user_addr, msg.as_string())
        server.quit()
        return 1
    except Exception,e:
        # Python 2 except syntax; best-effort send, failure is reported as 0.
        print e
        return 0
if __name__ == '__main__':
    # Demo invocation.  NOTE(review): a real account password is hard-coded
    # below -- it should be rotated and moved to configuration/env vars.
    content = {
        'subject': '扫描二维码以登陆微信虚拟人',
        'text': '当前微信虚拟人【duolahanbao】【已掉线】,请管理员及时扫码进行登陆,以免影响业务谢谢。',
        'files_path': '/home/xnr1/xnr_0429/xnr/example_model/fb_xi_jin_ping_si_xiang_other.json,/home/xnr1/xnr_0429/xnr/example_model/fb_xi_si_xiang_3_other.json', # multiple paths supported, comma-separated
    }
    from_user = {
        'name': '虚拟人项目(微信)',
        'addr': '929673096@qq.com',
        'password': 'czlasoaiehchbega',
        'smtp_server': 'smtp.qq.com'
    }
    to_user = {
        'name': '管理员',
        'addr': '929673096@qq.com' # multiple addresses supported, comma-separated
    }
    send_mail(from_user=from_user, to_user=to_user, content=content)
|
988,519 | 2a401ecf44fab5075259d485c234a7bfb2efeb29 | import random, os
running = True        # main-loop flag; cleared by gameloop() when the player quits
games_history = []    # one entry per finished game; persisted to results.txt
# Reload previous results, one stripped line per entry.
# NOTE(review): entries are read back as plain strings but appended as tuples
# during play, so the file mixes both representations across runs.
if 'results.txt' in os.listdir('.'):
    f = open('results.txt', 'r')
    loaded = f.readlines()
    f.close()
    for items in loaded:
        newlist = items.strip('\n')
        games_history.append(newlist)
def initilize():
    """Ask the player for an upper bound and return a random target in [0, bound]."""
    upper_bound = int(input("what would you like the max number to be? "))
    return random.randint(0, upper_bound)
def gameloop(num):
    """Run one guessing game for target `num`; on a win, record the game and
    optionally recurse into a fresh game.  Clears the global `running` flag
    when the player declines a replay."""
    global running
    attempts = 0
    attempted_values = []
    print("input a number guess:")
    while running == True:
        current = int(input(">>"))
        attempts += 1
        attempted_values.append(current)
        if current == num:
            print("you win!")
            games_history.append((str(num) , "number of guesses: " + str(attempts), "guesses: " + str(attempted_values)))
            replay = input("play again? (y/n)").lower()
            if replay == "y":
                gameloop(initilize())
            elif replay == "n":
                print("goodbye!")
                running = False
            return
        elif current > num:
            print("too high")
        else:
            print("too low")
# Play until the user quits, then persist the accumulated history.
gameloop(initilize())
f = open('results.txt', 'w')
for i in games_history:
    f.write(str(i) + '\n')
f.close()
|
988,520 | db5d56de1c9bf5d76cda47b5a1dd73cdc51028cd | import numpy as np
def quaternRotate(v, q):
    """Rotate the column-stacked vectors `v` by quaternion `q` using
    q * (0; v) * conj(q), dropping the scalar row of the result."""
    _, n_cols = v.shape
    pure = np.concatenate((np.zeros((1, n_cols)), v), axis=0)
    rotated = quaternion_multiply(quaternion_multiply(q, pure), quaternConj(q))
    return rotated[1:, :]
def quaternion_multiply(quaternion1, quaternion0):
    """Hamilton product quaternion1 * quaternion0 in (w, x, y, z) order.

    Works elementwise, so each component may be a scalar or an array.
    Returns a float64 numpy array of the four product components.
    """
    w0, x0, y0, z0 = quaternion0
    w1, x1, y1, z1 = quaternion1
    return np.array(
        [w1 * w0 - x1 * x0 - y1 * y0 - z1 * z0,
         w1 * x0 + x1 * w0 + y1 * z0 - z1 * y0,
         w1 * y0 - x1 * z0 + y1 * w0 + z1 * x0,
         w1 * z0 + x1 * y0 - y1 * x0 + z1 * w0],
        dtype=np.float64)
def quaternConj(quaternion):
    """Return the quaternion conjugate (w, -x, -y, -z)."""
    w = quaternion[0]
    return np.array([w, -quaternion[1], -quaternion[2], -quaternion[3]])
|
988,521 | 9766b2dde621b13c3b2de074de2ee2c41fe004c8 | from django.contrib import admin
from .models import *
# Register your models here.
# Admin customisations: each ModelAdmin only sets the changelist columns.
class EventAdmin(admin.ModelAdmin):
    # Columns shown in the Event changelist.
    list_display = ('id','title', 'event_date','event_address','desc')
class BoothAdmin(admin.ModelAdmin):
    list_display = ('id','event', 'number','group_name','desc')
class UserProfileAdmin(admin.ModelAdmin):
    list_display = ('user', 'point')
class User_Favorite_BoothAdmin(admin.ModelAdmin):
    list_display = ('user', 'booth')
class CommentAdmin(admin.ModelAdmin):
    list_display = ('user', 'content')
# Register every model with its customised admin class.
admin.site.register(Event,EventAdmin)
admin.site.register(Booth,BoothAdmin)
admin.site.register(UserProfile,UserProfileAdmin)
admin.site.register(Comment,CommentAdmin)
admin.site.register(User_Favorite_Booth,User_Favorite_BoothAdmin)
988,522 | 59b6663564538d9f94e04b6ed4be3036ec31ccbf | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score, StratifiedShuffleSplit, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Train an SGD classifier that predicts whether the user is looking at the
# screen from three gaze features (x1..x3), then persist scaler and model.
df = pd.read_csv('data/on_screen_look_data.csv')
df.drop_duplicates(keep='first', inplace=True)
df.reset_index(drop=True, inplace=True)
#print(df)
# NOTE(review): the scaler is fitted on the *full* dataset before the
# train/test split, leaking test statistics into training.
min_max = MinMaxScaler()
df[['x1', 'x2', 'x3']] = min_max.fit_transform(df[['x1', 'x2', 'x3']])
#print(df)
# Persist the fitted scaler so inference can normalise the same way.
# NOTE(review): open() handles here and below are never closed explicitly.
pickle.dump(min_max, open('models/looking-at-screen-detector/normalizer.blob', 'wb'))
#print(df.describe())
#df.hist()
#plt.show()
#train_set, test_set = train_test_split(df, test_size = 0.2, random_state=42)
#print(train_set)
#print(test_set)
# Stratified 80/20 split preserving the label distribution of "y".
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_indices, test_indices in split.split(df, df["y"]):
    train_set = df.loc[train_indices]
    test_set = df.loc[test_indices]
#print(train_set)
#print(test_set)
train_x = train_set.drop("y", axis=1)
train_y = train_set["y"].copy()
test_x = test_set.drop("y", axis=1)
test_y = test_set["y"].copy()
#train_x[['x1', 'x2', 'x3']] = min_max.fit_transform(train_x[['x1', 'x2', 'x3']])
print(train_x)
print(train_y)
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(train_x, train_y)
print(test_x)
print(test_x.loc[[0]])
print(test_y.loc[0])
print(sgd_clf.predict(test_x.loc[[0]]))
# Diagnostics: cross-validated accuracy and class balance.
print(cross_val_score(sgd_clf, train_x, train_y, cv=3, scoring='accuracy'))
print(train_set.describe())
print(len(train_set[(train_set['y'] == 1)]))
print(len(train_set[(train_set['y'] == 0)]))
print(cross_val_score(sgd_clf, test_x, test_y, cv=3, scoring='accuracy'))
pickle.dump(sgd_clf, open('models/looking-at-screen-detector/looking-at-screen-detector.blob', 'wb'))
988,523 | d7ca9b8489a51874be92d99ee1fbd2f92a0d490d | from PIL import Image, ImageDraw, ImageFont
from datetime import datetime
import numpy as np
import time
from .device import Device
from .display import Display
from .widgets.cpu import Cpu
from .widgets.mpd import Mpd
from .widgets.linux import Linux
from .widgets.clock import Clock
from .widgets.nvidia import Nvidia
def status_line(img, name):
    """Draw a status bar (widget name + timestamp) along the bottom 11 pixels
    of `img` and return the image.  Uses the module-level `fill` and `font`."""
    draw = ImageDraw.Draw(img)
    now = datetime.now()
    draw.rectangle(((0,53), (127, 63)), fill=fill)
    draw.text((90, 55), name, fill=(255, 255, 255), font=font)
    draw.text((2, 55), now.strftime("%Y/%m/%d %H:%M:%S"), fill=(255, 255, 255), font=font)
    return img
def img_to_bmp(img):
    """Convert an RGB image (PIL Image or HxWx3 array) into a 1-bit bitmap.

    Returns an HxW float array whose entries are 0.0 or 255.0, obtained by
    converting to BT.601 grayscale and thresholding at 128.
    """
    ary = np.array(img)
    # Split the three channels
    r, g, b = np.split(ary, 3, axis=2)
    r = r.reshape(-1)
    # Bug fix: g and b were previously assigned from r, so the green and blue
    # channels were ignored and the "grayscale" was really just the red channel.
    g = g.reshape(-1)
    b = b.reshape(-1)
    # Standard RGB to grayscale (vectorised BT.601 luma weights)
    bitmap = 0.299 * r + 0.587 * g + 0.114 * b
    bitmap = bitmap.reshape([ary.shape[0], ary.shape[1]])
    bitmap = np.dot((bitmap > 128).astype(float), 255)
    return bitmap
def splashscreen():
    """Show an Arch Linux logo with a 'lcd4python' banner on the LCD for 2s."""
    img = Image.new('RGB', (128,64), (255, 255, 255))
    arch_img = Image.open('./images/archlinux.png')
    img.paste(arch_img.resize((64, 64)), (30, 0))
    draw = ImageDraw.Draw(img)
    draw.rectangle(((0, 48), (128, 60)), fill=fill)
    draw.text((34, 49), 'lcd4python', fill=(255, 255, 255))
    device.display.set_buffer(img_to_bmp(img))
    device.render()
    time.sleep(2)
fill = (0, 0, 0)  # status-bar background colour
font = ImageFont.truetype('./lcd4python/fonts/misaki_gothic_2nd.ttf', 8)
device = Device('/dev/ttyUSB1', 115200, Display())
#splashscreen()
# Rotation schedule: [widget, seconds to display, timestamp it became visible].
widgets = [
    [
        Linux(),
        5,
        0
    ],
    [
        Nvidia(),
        5,
        0
    ],
    [
        Cpu(),
        10,
        0
    ],
    [
        Clock(),
        5, # display time
        0, # start display time
    ],
    [
        Mpd(),
        15,
        0
    ]
]
index = 0  # NOTE(review): unused in this chunk
def main():
    """Cycle forever through `widgets`, rendering each one (plus the status
    line) until its display-time budget elapses."""
    while True:
        for widget in widgets:
            while True:
                image = Image.new('RGB', (128, 64), (255, 255, 255))
                device.display.set_buffer(img_to_bmp(status_line(widget[0].render(image), widget[0].name)))
                device.render()
                # widget[2] records when this widget first appeared; rotate to
                # the next widget once widget[1] seconds have elapsed.
                if widget[2] == 0:
                    widget[2] = time.time()
                if ((time.time() - widget[2]) > widget[1]):
                    widget[2] = 0
                    break
# NOTE(review): main() is never invoked in this chunk, and this close() runs
# at import time -- presumably an entry-point call is missing; confirm.
device.close()
|
988,524 | 82801d1f4ce9c723a39fcc4bee4e95c6fe144f8b | import uuid
class Producto:
    """A catalogue product; each instance gets a random UUID identifier."""

    def __init__(self, nombre, categoria, color, marca, precio, *args, **kargs):
        self.nombre = nombre
        self.categoria = categoria
        self.color = color
        self.marca = marca
        self.precio = precio
        self.id = uuid.uuid4()

    def __str__(self):
        campos = (self.nombre, self.categoria, self.color,
                  self.marca, self.precio, self.id)
        return "---".join(str(campo) for campo in campos)

    def __repr__(self):
        return str(self.id)

    def cumple(self, especificacion):
        """True iff every key in the specification matches this product's
        attribute of the same name."""
        atributos = self.__dict__
        return all(
            clave in atributos and atributos[clave] == especificacion.get_value(clave)
            for clave in especificacion.get_keys()
        )
988,525 | 3a0854bd4ef9025e36eb760820af49c649c0f2ca | def DikdortgenAlanCevreHesapla():
uzun_kenar = int(input("Uzun kenarı girin:"))
kisa_kenar = int(input("Kısa kenarı girin:"))
alan = uzun_kenar * kisa_kenar
cevre = (uzun_kenar * 2) + (kisa_kenar * 2)
print ("Alanı: ", alan)
print ("Çevresi: ", cevre )
# Repeat forever; the program is terminated with Ctrl-C (or a bad int input).
while True:
    DikdortgenAlanCevreHesapla();
988,526 | d465ea9f7a3531e9f161c9b53c1710fbc21f3cc6 | from gobject import timeout_add
from scribes_helpers import weak_connect
from .signals import Signals
class Reseter(object):
    """Restores bookmarked lines after the editor resets its buffer."""
    def __init__(self, signals, editor):
        editor.response()
        self.editor = editor
        self.signals = signals
        self.lines = ()      # last bookmark line set captured while updating
        self.update = True   # False while a buffer reset is in progress
        weak_connect(editor, "reset-buffer", self, 'reset_buffer')
        signals.connect_signals(self)
    def reset_buffer(self, editor, operation):
        # "begin": stop tracking changes; any other phase: re-apply the saved
        # bookmarks and resume tracking.
        if operation == "begin":
            self.update = False
        else:
            self.signals.remove_all.emit(False)
            self.signals.bookmark_lines.emit(self.lines)
            self.update = True
        return False
    @Signals.lines
    def lines_cb(self, sender, lines):
        # Remember the current bookmark lines unless a reset is underway.
        if self.update:
            self.lines = lines
        return False
|
988,527 | 765727bbbd7a565638cde5a34762fadb03f78030 | from datetime import datetime
from typing import Type
from bson import ObjectId
from django.urls import reverse
from JellyBot.systemconfig import HostUrl
from models import Model, ExtraContentModel
from mongodb.factory.results import ModelResult, RecordExtraContentResult, WriteOutcome
from tests.base import TestOnModelResult
__all__ = ["TestRecordExtraContentResult"]
class TestRecordExtraContentResult(TestOnModelResult.TestClass):
    """Tests RecordExtraContentResult's model_id and url properties."""
    COID = ObjectId()       # shared channel oid for constructed models
    TS = datetime.utcnow()  # shared timestamp for constructed models
    @classmethod
    def get_result_class(cls) -> Type[ModelResult]:
        # Result class under test, required by the TestOnModelResult harness.
        return RecordExtraContentResult
    @classmethod
    def get_constructed_model(cls) -> Model:
        return ExtraContentModel(Content="AAAAA", Timestamp=TestRecordExtraContentResult.TS,
                                 ChannelOid=TestRecordExtraContentResult.COID)
    def test_get_model_id(self):
        oid = ObjectId()
        mdl = ExtraContentModel(Content="AAAAA", Timestamp=TestRecordExtraContentResult.TS, ChannelOid=ObjectId())
        mdl.set_oid(oid)
        r = RecordExtraContentResult(WriteOutcome.O_INSERTED, model=mdl)
        self.assertEqual(r.model_id, oid)
        r = RecordExtraContentResult(WriteOutcome.X_NOT_EXECUTED)
        # NOTE(review): assertIsNone's 2nd argument is the failure *message*;
        # the `oid` here is ignored -- assertIsNone(r.model_id) was likely meant.
        self.assertIsNone(r.model_id, oid)
    def test_get_url(self):
        oid = ObjectId()
        mdl = ExtraContentModel(Content="AAAAA", Timestamp=TestRecordExtraContentResult.TS, ChannelOid=ObjectId())
        mdl.set_oid(oid)
        r = RecordExtraContentResult(WriteOutcome.O_INSERTED, model=mdl)
        self.assertEqual(r.url, f'{HostUrl}{reverse("page.extra", kwargs={"page_id": str(oid)})}')
        r = RecordExtraContentResult(WriteOutcome.X_NOT_EXECUTED)
        self.assertEqual(r.url, "")
|
988,528 | 196d778f5ba72a443477d9f476605c6b267f4333 | from django.contrib import admin
from .models import Exercise
# Expose the Exercise model in the Django admin with default options.
admin.site.register(Exercise)
988,529 | e4dbf10ef31a77c71ed2d6da82b038129504dba3 | # Generated by Django 2.0.3 on 2019-06-06 20:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add CartItem.unit_price on top of carts.0009."""
    dependencies = [
        ('carts', '0009_auto_20190606_2321'),
    ]
    operations = [
        migrations.AddField(
            model_name='cartitem',
            name='unit_price',
            # NOTE(review): max_digits=1000 is far wider than any realistic
            # price column -- confirm the intended precision.
            field=models.DecimalField(decimal_places=2, default=10.99, max_digits=1000),
        ),
    ]
988,530 | bea19dd3db03b8e0bba0594c7fabc2129d0a6f59 | #-*-coding:utf-8-*-
import paho.mqtt.client as mqtt #Paho-MQTT 패키지 불러오기
from flask import Flask, render_template, request #Flask 패키지 불러오기
app = Flask(__name__)  # Flask application instance
# Create an MQTT client and connect to the local broker
mqttc=mqtt.Client()
mqttc.connect("localhost",1883, 60)
mqttc.loop_start()
# The `led` dict tracks the pin's display name and current state
led = {'name' : 'LED pin', 'state' : 'ON'}
# Visiting the site root runs main() below
@app.route("/")
def main():
    """Render the landing page with the current LED state."""
    # Pass the led dict through to the template
    templateData = {
        'led' : led
    }
    return render_template('main.html', **templateData)
# URL 주소 끝에 “/LED/<action>”을 붙여서 접근시에 action 값에 따라 동작
@app.route("/LED/<action>")
def action(action):
    """Switch the LED over MQTT: /LED/on publishes "1", /LED/off publishes "0",
    then re-render the page with a status message."""
    # Default so the render below never hits an unbound name for unknown actions.
    message = ""
    if action == "on":
        mqttc.publish("inTopic","1")
        led['state'] = "ON"
        # Bug fix: this literal previously ended with a curly quote ( “ ) instead
        # of a closing ", leaving the string unterminated (SyntaxError).
        message = "LED on."
    if action == "off":
        mqttc.publish("inTopic","0")
        led['state'] = "OFF"
        message = "LED off."
    templateData = {
        'message' : message,
        'led' : led
    }
    return render_template('main.html', **templateData)
if __name__ == "__main__":
    # Listen on all interfaces; debug disabled for deployment.
    app.run(host='0.0.0.0', debug=False)
|
988,531 | 9616c478fd73985bfac004b856baba0da199fbdc | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# __author__ someone
import traceback
import os
import json
from multiprocessing import Process
import tornado.ioloop
import tornado.web
#from bson.json_util import dumps
#import bson
from tornado.escape import json_decode
from lib.portscaner import PortScaner
from lib.mongohelp import MongoHelper
from lib.core import subdomain
from lib.core import lsip
from lib.core import wrong_log
from config import WIDTH
import sys
# Python 2 only: force the default codec to UTF-8 for mixed str/unicode data.
reload(sys)
sys.setdefaultencoding('utf-8')
scan_process = []  # child Processes spawned by the scan endpoints
# --- Target / domain CRUD handlers -----------------------------------------
# Convention: handlers answer "0" on any failure, and JSON or "1" on success.
# NOTE(review): the blanket `except:` clauses swallow every error; narrowing
# them would make failures diagnosable.
class show_targets(tornado.web.RequestHandler):
    """GET: all scan targets as a JSON array of names."""
    def get(self):
        try:
            cron = MongoHelper()
            result = cron.show_targets()
            _ = []
            for i in result:
                _.append(i['target'])
            self.write(json.dumps(_))
        except:
            self.write("0")
class show_domains(tornado.web.RequestHandler):
    """GET: domains recorded for the target held in the 'target' cookie."""
    def get(self):
        target = self.get_cookie("target")
        try:
            cron = MongoHelper(target)
            result = cron.show_domains()
            _ = []
            for i in result:
                _.append(i['domain'])
            self.write(json.dumps(_))
        except:
            self.write("0")
class add_target(tornado.web.RequestHandler):
    """GET: create a target and remember it in the 'target' cookie."""
    def get(self):
        target = self.get_argument('target')
        try:
            cron = MongoHelper()
            cron.add_target(target)
            self.set_cookie("target", target)
            self.write("1")
        except:
            self.write("0")
class set_target(tornado.web.RequestHandler):
    """GET: refresh the 'target' cookie with its current value."""
    def get(self):
        target = self.get_cookie('target')
        self.set_cookie("target", target)
        self.write("1")
class update_ip_domain(tornado.web.RequestHandler):
    """POST: update one record's ip/domain mapping."""
    def post(self):
        # NOTE(review): `json_decode` from tornado.escape is itself the decoder
        # function and has no .loads attribute -- this line presumably should be
        # json_decode(self.request.body); confirm before relying on this route.
        data = json_decode.loads(self.request.body)
        target= self.get_cookie("target")
        try:
            cron = MongoHelper(target)
            # NOTE(review): calling add_target with (_id, ip, domain) looks like
            # it should be an update method -- verify against MongoHelper.
            cron.add_target(data['_id'],data['ip'],data['domain'])
            self.write("1")
        except:
            self.write("0")
            traceback.print_exc()
class add_domain(tornado.web.RequestHandler):
    """GET: attach comma-separated domains to the cookie's target."""
    def get(self):
        domain = self.get_argument('domain') #qq.com,tencent.com
        target= self.get_cookie("target")
        try:
            cron = MongoHelper(target)
            cron.add_domain(domain)
            self.write("1")
        except:
            self.write("0")
            traceback.print_exc()
# --- Paginated result getters ----------------------------------------------
# All six handlers share the same shape: read offset/limit, query MongoHelper
# for the cookie's target, answer {"total": N, "rows": [...]} (or "0" on error).
class get_iplist(tornado.web.RequestHandler):
    """GET: page of discovered IPs."""
    def get(self):
        target= self.get_cookie("target")
        offset = int(self.get_argument('offset'))
        limit = int(self.get_argument('limit'))
        try:
            cron = MongoHelper(target)
            _ ,total= cron.get_iplist(offset,limit)
            rows = list(_)
            result = {'total':total,'rows':rows}
            self.write(json.dumps(result))
        except:
            self.write("0")
            traceback.print_exc()
class get_ip_port(tornado.web.RequestHandler):
    """GET: page of ip/port pairs."""
    def get(self):
        target= self.get_cookie("target")
        offset = int(self.get_argument('offset'))
        limit = int(self.get_argument('limit'))
        try:
            cron = MongoHelper(target)
            # NOTE(review): meaning of the leading literal 1 is not visible
            # here -- check MongoHelper.get_ip_port's signature.
            _ ,total = cron.get_ip_port(1,offset,limit)
            rows = list(_)
            result = {'total': total, 'rows': rows}
            self.write(json.dumps(result))
        except:
            self.write("0")
            traceback.print_exc()
class get_service(tornado.web.RequestHandler):
    """GET: page of identified services."""
    def get(self):
        target= self.get_cookie("target")
        offset = int(self.get_argument('offset'))
        limit = int(self.get_argument('limit'))
        try:
            cron = MongoHelper(target)
            _,total = cron.get_service(offset,limit)
            rows = list(_)
            result = {'total': total, 'rows': rows}
            self.write(json.dumps(result))
        except:
            self.write("0")
            traceback.print_exc()
class get_http_vul(tornado.web.RequestHandler):
    """GET: page of HTTP vulnerabilities."""
    def get(self):
        target= self.get_cookie("target")
        offset = int(self.get_argument('offset'))
        limit = int(self.get_argument('limit'))
        try:
            cron = MongoHelper(target)
            _, total = cron.get_http_vul(offset,limit)
            rows = list(_)
            result = {'total': total, 'rows': rows}
            self.write(json.dumps(result))
        except:
            self.write("0")
            traceback.print_exc()
class get_port_vul(tornado.web.RequestHandler):
    """GET: page of port-level vulnerabilities."""
    def get(self):
        target= self.get_cookie("target")
        offset = int(self.get_argument('offset'))
        limit = int(self.get_argument('limit'))
        try:
            cron = MongoHelper(target)
            _, total = cron.get_port_vul(offset,limit)
            rows = list(_)
            result = {'total': total, 'rows': rows}
            self.write(json.dumps(result))
        except:
            self.write("0")
            traceback.print_exc()
class get_tiny_scan(tornado.web.RequestHandler):
    """GET: page of tiny-scan results."""
    def get(self):
        target = self.get_cookie("target")
        offset = int(self.get_argument('offset'))
        limit = int(self.get_argument('limit'))
        try:
            cron = MongoHelper(target)
            _, total = cron.get_tiny_scan(offset, limit)
            rows = list(_)
            result = {'total': total, 'rows': rows}
            self.write(json.dumps(result))
        except:
            self.write("0")
            traceback.print_exc()
class show_progress(tornado.web.RequestHandler):
    """GET: current scan progress for the cookie's target as JSON."""
    def get(self):
        target = self.get_cookie("target")
        try:
            cron = MongoHelper(target)
            result = cron.show_progress()
            self.write(json.dumps(result))
        except:
            self.write("0")
            traceback.print_exc()
class listip(tornado.web.RequestHandler):
    """GET: kick off IP enumeration unless domain work is still queued."""
    def get(self):
        target= self.get_cookie("target")
        cron = MongoHelper(target)
        if (not cron.wait_common('domain').count()):
            p = Process(target=lsip, args=(target,WIDTH,))
            p.start()
            self.write("1")
        else:
            self.write("0")
        pass
class start_sub_domain(tornado.web.RequestHandler):
    """GET: start sub-domain discovery in a background process."""
    def get(self):
        target= self.get_cookie("target")
        p = Process(target=subdomain, args=(target,))
        p.start()
        self.write("1")
#----------------------------------------------------#
# --- Background scan launchers: each endpoint forks a worker Process. -------
def startscan_fun(target):
    # Worker: run a full port scan for `target`.
    p = PortScaner(target)
    p.run()
class start_scan(tornado.web.RequestHandler):
    """GET: start a port scan for the cookie's target."""
    def get(self):
        target= self.get_cookie("target")
        print 'start scan ' , target
        p = Process(target=startscan_fun, args=(target,))
        p.start()
        self.write("1")
#----------------------------------------------------#
def autoscan_func(target):
    # Worker: full pipeline -- subdomains, IP enumeration, then port scan.
    subdomain(target)
    lsip(target,WIDTH)
    p = PortScaner(target)
    p.run()
class autoscan(tornado.web.RequestHandler):
    """GET: run the whole discovery+scan pipeline in the background."""
    def get(self):
        target= self.get_cookie("target")
        p = Process(target=autoscan_func, args=(target,))
        p.start()
        scan_process.append(p)
        self.write("1")
# ----------------------------------------------------#
#----------------------------------------------------#
def scan_again_func(target):
    # Worker: re-run the port scan without re-initialising (second arg False).
    p = PortScaner(target,False)
    p.run()
class scan_again(tornado.web.RequestHandler):
    """GET: re-run the port scan for the cookie's target."""
    def get(self):
        target= self.get_cookie("target")
        p = Process(target=scan_again_func, args=(target,))
        p.start()
        scan_process.append(p)
        self.write("1")
# ----------------------------------------------------#
"""
class send_to_brute(tornado.web.RequestHandler):
def post(self):
data = json_decode.loads(self.request.body)
#{"ip":"xx","port":"808","type":"mongo"}
target = self.get_cookie("target")
command = 'hydra -L users.txt -P password.txt -t 1 -s %s -vV -e ns %s %s' %(data['ip'],data['ip'],'type')
pass
"""
class index(tornado.web.RequestHandler):
def get(self):
self.render('index.html')
def make_app():
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
}
return tornado.web.Application([
(r"/show_targets", show_targets),
(r"/add_target", add_target),
(r"/set_target", set_target),
(r"/update_ip_domain", update_ip_domain),
(r"/add_domain", add_domain),
(r"/show_domains", show_domains),
(r"/get_iplist", get_iplist),
(r"/get_ip_port", get_ip_port),
(r"/get_service", get_service),
(r"/get_http_vul", get_http_vul),
(r"/get_tiny_scan", get_tiny_scan),
(r"/get_port_vul", get_port_vul),
(r"/show_progress", show_progress),
#?order=asc&offset=0&limit=10
(r"/start_sub_domain", start_sub_domain),
(r"/listip", listip),
(r"/start_scan", start_scan),
(r"/autoscan", autoscan),
(r"/scan_again", scan_again),
#(r"/send_to_brute", send_to_brute),
#js css html
(r"/index", index),
], **settings)
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start() |
988,532 | 4cace889197961d4ecafca896e11efde0686e09f | # -*- coding: utf-8 -*-
from config import *
import collections
from classes.Main import Main
if __name__ == "__main__":
main = Main()
"""
Example code to import tweets
"""
# file_path = os.path.realpath(PROJECT_ROOT_DIRECTORY+DATASET_TXT_DIR_NAME+"TTNetTweets2012.txt")
# main.retrieve_tweets(file_path)
"""
Example code to generate arff file with given feature parameters
"""
# main.extract_features_and_generate_arff(n=3, analyzer='char', year='2012')
"""
Example code to plot __years' scores
"""
# root path, ../DataSet-Logs/Word/YearsOnly/TTNet/
# root_path_for_years_itself = PROJECT_ROOT_DIRECTORY + DATASET_LOGS_DIR_NAME + FEATURE_TYPE+\
# LOGS_YEARS_ITSELF_DIR_NAME+MODEL_NAME+'/'
# main.plot_years_scores(root_path_for_years_itself)
"""
Example code to plot 2012 vs rest
"""
# root_path_for_2012_vs_rest = PROJECT_ROOT_DIRECTORY + DATASET_LOGS_DIR_NAME + FEATURE_TYPE + \
# LOGS_2012_VS_REST + MODEL_NAME
# main.plot_2012_vs_rest(root_path_for_2012_vs_rest)
"""
Example code to plot top info gain features' frequencies in __years.
"""
# main.plot_top_feature_frequencies_in_years()
"""
Example code to plot __years' intersection scores with each other
"""
# main.plot_years_intersection_scores()
"""
Example code to make experiment
"""
# main.run_experiment(n=1, analyzer='word')
print("Initalizing.")
all_line_scores_of_all_experiments = main.run_experiment_with_scikit_learn(n=1, analyzer='word')
for line_name, line_points in all_line_scores_of_all_experiments.iteritems():
all_line_scores_of_all_experiments[line_name] = collections.OrderedDict(sorted(line_points.items()))
main.plot_all_experiment_results_with_scikit_learn(all_line_scores_of_all_experiments)
"""
Example code to plot experiment results from Weka
"""
# root_path = PROJECT_ROOT_DIRECTORY + DATASET_LOGS_DIR_NAME + MODEL_NAME + '_ALE_' + FEATURE_TYPE + '_SMO' + '/'
# main.plot_experiment_results(root_path)
"""
Example code to import new tweets from csv
"""
# root_path = PROJECT_ROOT_DIRECTORY + DATASET_CSV_DIR_NAME + MODEL_NAME + "/"
# main.import_new_tweets_from_csv(root_path) |
988,533 | a582d1ec280c92eefe062b2ac2c9b94d17f8edd6 | from __future__ import absolute_import
import os
import h5py
import cgp
# Input geometry and output results live next to this script.
D = os.path.dirname(os.path.abspath(__file__)) + '/'
geometry_file = D + 'data/geometry.h5'
results_file = D + 'data/geometry-results.h5'
def dataset_name(command, cell_order=None, label=None):
    """Build the HDF5 dataset path '<command>[/cell-order-<n>][/<label>]'."""
    parts = [command]
    if cell_order is not None:
        parts.append('cell-order-%d' % cell_order)
    if label is not None:
        parts.append('%d' % label)
    return '/'.join(parts)
# Dump every query the cgp geometry reader supports into one results file.
f = h5py.File(results_file, 'w')
cgp_reader = cgp.GeometryReader(geometry_file, verbose=False)
# Max label per cell order (0..3).
for cell_order in [0, 1, 2, 3]:
    f.create_dataset(dataset_name('max-label', cell_order),
                     data=cgp_reader.maxLabel(cell_order))
# Per-cell sizes; labels are 1-based.
for cell_order in [0, 1, 2, 3]:
    for label in range(1, cgp_reader.maxLabel(cell_order)+1):
        f.create_dataset(dataset_name('size', cell_order, label),
                         data=cgp_reader.size(cell_order, label))
for cell_order in [0, 1, 2]:
    for label in range(1, cgp_reader.maxLabel(cell_order)+1):
        f.create_dataset(dataset_name('bounds', cell_order, label),
                         data=cgp_reader.bounds(cell_order, label))
for cell_order in [1, 2, 3]:
    for label in range(1, cgp_reader.maxLabel(cell_order)+1):
        f.create_dataset(dataset_name('bounded-by', cell_order, label),
                         data=cgp_reader.boundedBy(cell_order, label))
for cell_order in [1, 2, 3]:
    for label in range(1, cgp_reader.maxLabel(cell_order)+1):
        f.create_dataset(dataset_name('adjacent', cell_order, label),
                         data=cgp_reader.adjacent(cell_order, label))
# Point sets per cell order; the larger ones are lzf-compressed.
for label in range(1, cgp_reader.maxLabel(0)+1):
    f.create_dataset(dataset_name('zero-set', None, label),
                     data=cgp_reader.zeroSet(label))
for label in range(1, cgp_reader.maxLabel(1)+1):
    f.create_dataset(dataset_name('one-set', None, label),
                     data=cgp_reader.oneSet(label))
for label in range(1, cgp_reader.maxLabel(2)+1):
    f.create_dataset(dataset_name('two-set', None, label),
                     data=cgp_reader.twoSet(label))
for label in range(1, cgp_reader.maxLabel(3)+1):
    f.create_dataset(dataset_name('three-set', None, label),
                     data=cgp_reader.threeSet(label),
                     compression='lzf')
for cell_order in [1, 2, 3]:
    for label in range(1, cgp_reader.maxLabel(cell_order)+1):
        f.create_dataset(
            dataset_name('topological-point-set', cell_order, label),
            data=cgp_reader.topologicalPointSet(cell_order, label),
            compression='lzf')
f.close()
|
988,534 | a566e96f87af2d58e2f0e162f562f28f01d070aa | '''
Bringing it all together: Festivus!
In this exercise, you will be throwing a party—a Festivus if you will!
You have a list of guests (the names list). Each guest, for whatever reason, has decided to show up to the party in 10-minute increments. For example, Jerry shows up to Festivus 10 minutes into the party's start time, Kramer shows up 20 minutes into the party, and so on and so forth.
We want to write a few simple lines of code, using the built-ins we have covered, to welcome each of your guests and let them know how many minutes late they are to your party. Note that numpy has been imported into your session as np and the names list has been loaded as well.
Let's welcome your guests!
Instructions 1/4
25 XP
1
Use range() to create a list of arrival times (10 through 50 incremented by 10). Create the list arrival_times by unpacking the range object.
2
You realize your clock is three minutes fast. Convert the arrival_times list into a numpy array (called arrival_times_np) and use NumPy broadcasting to subtract three minutes from each arrival time.
3
Use list comprehension with enumerate() to pair each guest in the names list to their updated arrival time in the new_times array. You'll need to use the index variable created from using enumerate() on new_times to index the names list.
4
Use list comprehension with enumerate() to pair each guest in the names list to their updated arrival time in the new_times array. You'll need to use the index variable created from using enumerate() on new_times to index the names list.
'''
# NOTE(review): "SOLUTION" and the bare step numbers 1-4 below are exercise
# markers, not Python -- `SOLUTION` is an undefined name and would raise
# NameError if this file were executed.  `names` and `welcome_guest` are
# provided by the exercise environment, not defined here.  Kept as notes.
SOLUTION
1
# Create a list of arrival times
arrival_times = [*range(10, 60, 10)]
print(arrival_times)
2
# Create a list of arrival times
arrival_times = [*range(10,60,10)]
# Convert arrival_times to an array and update the times
arrival_times_np = np.array(arrival_times)
new_times = arrival_times_np - 3
print(new_times)
3
# Create a list of arrival times
arrival_times = [*range(10,60,10)]
# Convert arrival_times to an array and update the times
arrival_times_np = np.array(arrival_times)
new_times = arrival_times_np - 3
# Use list comprehension and enumerate to pair guests to new times
# NOTE(review): this step shadows the loop variables with the outer names,
# which step 4 cleans up.
guest_arrivals = [(names[arrival_times_np],new_times) for arrival_times_np,new_times in enumerate(new_times)]
print(guest_arrivals)
4
# Create a list of arrival times
arrival_times = [*range(10,60,10)]
# Convert arrival_times to an array and update the times
arrival_times_np = np.array(arrival_times)
new_times = arrival_times_np - 3
# Use list comprehension and enumerate to pair guests to new times
guest_arrivals = [(names[i],time) for i,time in enumerate(new_times)]
# Map the welcome_guest function to each (guest,time) pair
welcome_map = map(welcome_guest, guest_arrivals)
guest_welcomes = [*welcome_map]
print(*guest_welcomes, sep='\n')
988,535 | f901360ead9e788b354a3d94eedff90438d76f5f | import scrapy
import re
from scrapySchool_England.clearSpace import clear_space, clear_lianxu_space
from scrapySchool_England.items import ScrapyschoolEnglandItem1
from scrapySchool_England.getItem import get_item1
from scrapySchool_England.remove_tags import remove_class
from scrapySchool_England.getStartDate import getStartDate
class KingsCollegeLondon_PSpider(scrapy.Spider):
name = "KingsCollegeLondon_P"
start_urls = ["https://www.kcl.ac.uk/study/subject-areas/index.aspx"]
    def parse(self, response):
        """Collect the subject-area links from the index page and crawl each."""
        subject_area_links = response.xpath("//html//tr/td/p[1]/a/@href").extract()
        # print(len(subject_area_links))
        # De-duplicate the extracted hrefs.
        subject_area_links = list(set(subject_area_links))
        # print(len(subject_area_links))
        for sub in subject_area_links:
            url = "https://www.kcl.ac.uk" + sub
            # print(url, "==========================")
            yield scrapy.Request(url, callback=self.parse_url)
    def parse_url(self, response):
        """Filter the subject page for postgraduate taught-course links and
        crawl each course page."""
        links = response.xpath("//div[@id='main']/div[@class='contentpage-main-content']/div[@class='wrapper']/table/tbody/tr/td//a/@href").extract()
        # print(links)
        alllinks = []
        for link in links:
            # Keep only hrefs under the postgraduate taught-courses section;
            # non-matching links become '' and are stripped below.
            strurl = re.findall(r"/study/postgraduate/taught-courses/.*", link)
            alllinks.append(''.join(strurl))
        # print(response.url)
        while '' in alllinks:
            alllinks.remove('')
        # alllinks = ["https://www.kcl.ac.uk/study/postgraduate/taught-courses/web-intelligence-msc.aspx",
        #             "https://www.kcl.ac.uk/study/postgraduate/taught-courses/urban-informatics-msc.aspx",
        #             "https://www.kcl.ac.uk/study/postgraduate/taught-courses/strategic-entrepreneurship-and-innovation-msc.aspx",
        #             "https://www.kcl.ac.uk/study/postgraduate/taught-courses/religion-ma.aspx",
        #             "https://www.kcl.ac.uk/study/postgraduate/taught-courses/digital-marketing-msc.aspx", ]
        # alllinks = ['https://www.kcl.ac.uk/study/postgraduate/taught-courses/global-affairs-msc.aspx']
        for link in alllinks:
            url = "https://www.kcl.ac.uk" + link
            # url = link
            yield scrapy.Request(url, callback=self.parse_data)
def parse_data(self, response):
item = get_item1(ScrapyschoolEnglandItem1)
# item['country'] = "England"
# item["website"] = "https://www.kcl.ac.uk/"
item['university'] = "King's College London"
item['url'] = response.url
# 授课方式
item['teach_type'] = 'taught'
# 学位类型
item['degree_type'] = 2
item['location'] = "Strand, London. WC2R 2LS, United Kingdom"
print("===============================")
print(response.url)
try:
# //div[@id='container']/div[@class='hero clearfix']/div[@class='wrapper']/div[@class='inner']/h1
# 专业、学位类型
programmeDegree = response.xpath("//div[@id='container']/div[@class='hero clearfix']/div[@class='wrapper']/div[@class='inner']/h1//text()").extract()
clear_space(programmeDegree)
programmeDegreeStr = ''.join(programmeDegree).strip()
print(programmeDegreeStr)
# degree_type = list(re.findall(r"(\s\w+)$|(\s\w+\s\(.*\))$|(\s\w+/\w+)$|(\s\w+/\w+/\w+)$", programmeDegreeStr)[0])
degree_type = re.findall(r"(\w+,[\s\w]+,[\s\w]+)$|(\w+/[\w\s]+/[\w\s]+)$|(\s\w+,[\s\w]+)$|(\s\w+/\w+/[\s\w]+)$|(\s\w+/[\w\s]+)$|(PG\sDip)$|(PG\sCert)$|(\s\w+\s\(.*\))$|(\s\w+)$", programmeDegreeStr)
if len(degree_type) > 0:
degree_type = list(degree_type[0])
print("degree_type = ", degree_type)
item['degree_name'] = ''.join(degree_type).strip()
# while '' in degree_type:
# degree_type.remove('')
# # print("degree_type = ", degree_type)
# item['degree_name'] = ''.join(degree_type).strip()
programme = programmeDegreeStr.replace(item['degree_name'], '').strip()
item['programme_en'] = programme
else:
item['programme_en'] = programmeDegreeStr
if item['degree_name'] == "":
print("degree_name 为空")
print("item['degree_name'] = ", item['degree_name'])
print("item['programme_en'] = ", item['programme_en'])
# //div[@id='tabs-key-info']/div[@class='tab tab-1 active-tab']/p[2]/span
duration = response.xpath("//div[@id='tabs-key-info']/div[@class='tab tab-1']/p[2]/span//text()").extract()
durationStr = ''.join(duration)
# print(durationStr)
# duration_re = re.findall(r"([a-zA-Z0-9]+\s)(year|month|week){1}", durationStr, re.I)
duration_re = re.findall(r"([a-zA-Z0-9\.]+\s)(year|month|week|yr|yft){1}|([0-9\.]+)(yr|yft|\-month){1}",
durationStr, re.I)
# print(duration_re)
d_dict = {"One": "1",
"Two": "2",
"Three": "3",
"Four": "4",
"Five": "5",
"Six": "6",
"Seven": "7",
"Eight": "8",
"Nine": "9",
"Ten": "10",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
if len(duration_re) > 0:
d_int = re.findall(r"\d+", ''.join(duration_re[0]))
if len(d_int) > 0:
item['duration'] = int(''.join(d_int))
else:
d = re.findall(
r"(One)|(Two)|(Three)|(Four)|(Five)|(Six)|(Seven)|(Eight)|(Nine)|(Ten)|(one)|(two)|(three)|(four)|(five)|(six)|(seven)|(eight)|(nine)|(ten)",
', '.join(duration_re[0]))
# print("d = ", d)
item['duration'] = int(d_dict.get(''.join(d[0]).strip()))
if "y" in ''.join(duration_re[0]) or "Y" in ''.join(duration_re[0]):
item['duration_per'] = 1
elif "m" in ''.join(duration_re[0]) or "M" in ''.join(duration_re[0]):
item['duration_per'] = 3
elif "w" in ''.join(duration_re[0]) or "W" in ''.join(duration_re[0]):
item['duration_per'] = 4
# print("item['duration'] = ", item['duration'])
# print("item['duration_per'] = ", item['duration_per'])
# //div[@id='tabs-key-info']/div[@class='tab tab-1 active-tab']/p[3]/span
teach_time = response.xpath("//div[@id='tabs-key-info']/div[@class='tab tab-1']/p[3]/span//text()").extract()
# print(teach_time)
if "Full" in ''.join(teach_time):
item['teach_time'] = 'fulltime'
# print("item['teach_time'] = ", item['teach_time'])
# //div[@id='tabs-key-info']/div[@class='tab tab-2']
includeDepartment = response.xpath("//div[@class='tab tab-2']//p[contains(text(), 'Faculty')]/span//text()").extract()
if len(includeDepartment) == 0:
includeDepartment = response.xpath(
"//div[@class='tab tab-2']//p[contains(text(), 'Department')]/span//text()").extract()
clear_space(includeDepartment)
# department = ""
# if "Faculty" in includeDepartment:
# facultyIndex = includeDepartment.index("Faculty")
# department += includeDepartment[facultyIndex+2] + ", "
# if "Department" in includeDepartment:
# departmentIndex = includeDepartment.index("Department")
# department += includeDepartment[departmentIndex+1]
item['department'] = ''.join(includeDepartment).strip()
# if item['department'] == "":
# print("department 为空")
# else:
# print("item['department'] = ", item['department'])
# //div[@id='coursepage-overview']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate']
overview = response.xpath("//div[@id='coursepage-overview']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate']").extract()
item['overview_en'] = remove_class(clear_lianxu_space(overview))
# print("item['overview_en'] = ", item['overview_en'])
# //div[@id='coursepage-course-detail']/div[@class='wrapper clearfix']/div
# modules = response.xpath("//h3[contains(text(),'Course format and assessment')]/preceding-sibling::*").extract()
modules = response.xpath(
"//div[@id='coursepage-course-detail']/div[@class='wrapper clearfix']/div[@class='inner right lop-to-measure']").extract()
item['modules_en'] = remove_class(clear_lianxu_space(modules))
# if item['modules_en'] == "":
# print("modules_en 为空")
# else:
# print("item['modules_en'] = ", item['modules_en'])
assessment_en = response.xpath(
"//b[contains(text(),'Teaching')]/preceding-sibling::*[1]/following-sibling::*[position()<5]|"
"//h3[contains(text(),'Teaching')]/preceding-sibling::*[1]/following-sibling::*|"
"//b[contains(text(),'Teaching')]/../preceding-sibling::*[1]/following-sibling::*[position()<12]|"
"//h3[contains(text(),'Course format and assessment')]/following-sibling::*").extract()
item['assessment_en'] = remove_class(clear_lianxu_space(assessment_en))
# if item['assessment_en'] == "":
# print("assessment为空")
# else:
# print("item['assessment_en'] = ", item['assessment_en'])
# //div[@id='coursepage-fees-and-funding']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate lopped-off']/ul[1]/li[2]
tuition_fee = response.xpath("//div[@id='coursepage-fees-and-funding']/div[@class='wrapper clearfix']/div/ul[1]/li[2]//text()").extract()
# print("tuition_fee = ", ''.join(tuition_fee))
tuition_fee = response.xpath("//li[contains(text(),'Full time overseas fees:')]//text()").extract()
tuition_fee_re = re.findall(r"£\d+,\d+|£\d+|\d+,\d+", ''.join(tuition_fee))
# print(tuition_fee_re)
if len(tuition_fee_re) >= 1:
item['tuition_fee_pre'] = "£"
item['tuition_fee'] = int(tuition_fee_re[0].replace("£", "").replace(",", "").strip())
# print("item['tuition_fee_pre'] = ", item['tuition_fee_pre'])
# print("item['tuition_fee'] = ", item['tuition_fee'])
# //div[@id='coursepage-entry-requirements']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate lopped-off expanded']
entry_requirements = response.xpath("//div[@id='coursepage-entry-requirements']/div[@class='wrapper clearfix']/div[1]//text()").extract()
item['rntry_requirements'] =clear_lianxu_space(entry_requirements)
# print("item['rntry_requirements'] = ", item['rntry_requirements'])
item['require_chinese_en'] = """<p><b>Postgraduate taught courses</b></p>
<p>A four year Bachelor's degree from a recognised university will be considered for programmes requiring a UK Bachelor (Honours) degree at 2:1. </p>
<p>Grade requirements for each course will vary. However, as an approximate guideline our requirements are in the region of:</p>
<table>
<tbody>
<tr><th><b>UK requirement </b></th><th><b>Chinese Universities considered Prestigious (Project 211)</b></th><th><b>Other recognised Chinese universities</b></th></tr>
<tr>
<td>
<p>First Class Bachelor (Honours) degree</p>
</td>
<td>
<p>Average of 88%</p>
</td>
<td>
<p>Average of 90%</p>
</td>
</tr>
<tr>
<td>
<p>High 2:1 Class Bachelor (Honours) degree</p>
</td>
<td>
<p>Average of 85%</p>
</td>
<td>
<p>Average of 88%</p>
</td>
</tr>
<tr>
<td>
<p> 2:1 Class Bachelor (Honours) degree</p>
</td>
<td>
<p>Average of 80%</p>
</td>
<td>
<p>Average of 85%</p>
</td>
</tr>
<tr>
<td>
<p>High 2:2 Class Bachelor (Honours) degree</p>
</td>
<td>
<p>Average of 77% </p>
</td>
<td>
<p>Average of 80%</p>
</td>
</tr>
</tbody>
</table>
<p> If your degree is graded as a Grade Point Average, note that we will normally be looking for a minimum cumulative GPA of 3.3-3.5 on a 4.0 scale.</p>
<p><b>Important note: </b></p>
<ul>
<li>
<p>You will be a stronger candidate for admission if you have high grades and are attending a university considered prestigious (considered to be those considered prestigious by UK NARIC, or within the Project 211 list of institutions).</p>
</li>
<li>
<p>For our most competitive courses at postgraduate level (i.e. those within the King's Business School, the Dickson Poon School of Law, the department of Digital Humanities), please note that offers will usually only be made to applicants from universities considered prestigious.</p>
</li>
<li>
<p>For King's Business School offers will usually be made to applicants from universities considered prestigious and for non-prestigious universities, we generally only consider applicants with 90% or above. </p>
</li>
</ul>"""
# //div[@id='coursepage-entry-requirements']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate lopped-off expanded']
IELTS = response.xpath("//th[contains(text(), 'English Language requirements')]/following-sibling::td[1]//text()").extract()
clear_space(IELTS)
# print(IELTS)
item['ielts_desc'] = ''.join(IELTS).strip()
item['toefl_desc'] = item['ielts_desc']
# print("item['ielts_desc'] = ", item['ielts_desc'])
if item['ielts_desc'] == "Band A":
item["ielts"] = 7.5 # float
item["ielts_l"] = 7.0 # float
item["ielts_s"] = 7.0 # float
item["ielts_r"] = 7.0 # float
item["ielts_w"] = 7.0
item["toefl"] = 100 # float
item["toefl_l"] = 25 # float
item["toefl_s"] = 25 # float
item["toefl_r"] = 25 # float
item["toefl_w"] = 27
elif item['ielts_desc'] == "Band B":
item["ielts"] = 7.0 # float
item["ielts_l"] = 6.5 # float
item["ielts_s"] = 6.5 # float
item["ielts_r"] = 6.5 # float
item["ielts_w"] = 6.5
item["toefl"] = 100 # float
item["toefl_l"] = 23 # float
item["toefl_s"] = 23 # float
item["toefl_r"] = 23 # float
item["toefl_w"] = 25
elif item['ielts_desc'] == "Band C":
item["ielts"] = 7.0 # float
item["ielts_l"] = 6.0 # float
item["ielts_s"] = 6.0 # float
item["ielts_r"] = 6.5 # float
item["ielts_w"] = 6.5
item["toefl"] = 100 # float
item["toefl_l"] = 20 # float
item["toefl_s"] = 20 # float
item["toefl_r"] = 23 # float
item["toefl_w"] = 25
elif item['ielts_desc'] == "Band D":
item["ielts"] = 6.5 # float
item["ielts_l"] = 6.0 # float
item["ielts_s"] = 6.0 # float
item["ielts_r"] = 6.0 # float
item["ielts_w"] = 6.0
item["toefl"] = 92 # float
item["toefl_l"] = 20 # float
item["toefl_s"] = 20 # float
item["toefl_r"] = 20 # float
item["toefl_w"] = 23
elif item['ielts_desc'] == "Band E":
item["ielts"] = 6.0 # float
item["ielts_l"] = 5.5 # float
item["ielts_s"] = 5.5 # float
item["ielts_r"] = 5.5 # float
item["ielts_w"] = 5.5
item["toefl"] = 80 # float
item["toefl_l"] = 20 # float
item["toefl_s"] = 20 # float
item["toefl_r"] = 20 # float
item["toefl_w"] = 20
# //div[@id='coursepage-entry-requirements']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate lopped-off expanded']/div[@class='requirements uk clearfix']/div[@class='copy'][2]/p[1]
application_fee = response.xpath("//h3[contains(text(), 'Application procedure')]/following-sibling::div[1]//text()").extract()
clear_space(application_fee)
# print(''.join(application_fee))
application_fee_re = re.findall(r"application\sfee.*£\d+", ''.join(application_fee))
# print("apply_fee: ", ''.join(application_fee_re))
af = ''.join(application_fee_re).replace("application fee of", "").replace("£", "").strip()
if len(af) != 0:
item['apply_fee'] = int(af)
item['apply_pre'] = "£"
# print("item['apply_fee'] = ", item['apply_fee'])
# //div[@id='coursepage-entry-requirements']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate lopped-off expanded']/div[@class='requirements uk clearfix']/div[@class='copy'][2]/p[1]
application_documents = response.xpath("//h3[contains(text(), 'Personal statement and supporting information')]/following-sibling::div[1]").extract()
item['apply_documents_en'] = remove_class(clear_lianxu_space(application_documents))
# print("item['apply_documents_en'] = ", item['apply_documents_en'])
# //div[@id='coursepage-entry-requirements']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate lopped-off expanded']/div[@class='requirements uk clearfix']/div[@class='copy'][2]/p[1]
deadline = response.xpath("//div[@id='coursepage-entry-requirements']/div[@class='wrapper clearfix']/div[1]/div[@class='requirements uk clearfix']/div[@class='copy'][4]//text()").extract()
clear_space(deadline)
print(deadline)
deadline_str = ''.join(deadline).strip()
item['deadline'] = getStartDate(deadline_str)
# if "-000000" in item['deadline']:
item['deadline'].replace("-000000", "").strip()
print(len(item['deadline']))
if len(item['deadline']) > 10:
item['deadline'] = item['deadline'][:7]
print("item['deadline'] = ", item['deadline'])
# //div[@id='coursepage-career-prospect']/div[@class='wrapper clearfix']/div[@class='inner left lop-to-truncate']
career = response.xpath("//div[@id='coursepage-career-prospect']").extract()
item['career_en'] = remove_class(clear_lianxu_space(career))
# print("item['career_en'] = ", item['career_en'])
apply_proces_en = response.xpath(
"//h3[contains(text(),'Application procedure')]/preceding-sibling::*[1]/following-sibling::*[position()<3]").extract()
item['apply_proces_en'] = remove_class(clear_lianxu_space(apply_proces_en))
# print("item['apply_proces_en'] = ", item['apply_proces_en'])
yield item
except Exception as e:
with open("scrapySchool_England/error/" + item['university'] + str(item['degree_type']) + ".txt", 'a+', encoding="utf-8") as f:
f.write(str(e) + "\n" + response.url + "\n========================\n")
print("异常:", str(e))
print("报错url:", response.url)
|
# Demo loops: fixed greeting, joined digits, labelled values, spaced values.
for _ in (0, 1, 2, 123):
    print("Hello World!!")
for value in (0, 1, 2, 3, 597):
    print(value, end="")
for value in (3, 4, 5, 6):
    print("x 값은 =", value)
for value in (25, 11, 23, 3):
    print(value, end=" ")
988,537 | 9b9b0f88b2c12b020e0bdc9d46e6e2624b2f7624 | from typing import Union, List
from sqlalchemy.ext.asyncio import AsyncConnection
from box import Box
from db import mysql_client
from utils import current_time, obj_id
from core.exceptions import NotFoundError
async def create_votes(session: AsyncConnection, data: List, poll: Box) -> List[Box]:
    """Insert one vote row per entry in *data* for *poll* and return the rows."""
    rows = []
    for vote in data:
        rows.append(
            Box(id=obj_id(),
                creator_id=poll.creator_id,
                last_user_update=poll.creator_id,
                poll_id=poll.id,
                created_at=current_time(),
                updated_at=current_time(),
                count=0,
                **vote.__dict__))
    await mysql_client.create_votes(session=session, payload=rows)
    return rows
async def get_vote(session: AsyncConnection, poll_id: str, vote_id: str) -> Box:
    """Fetch one vote of a poll; raise NotFoundError when no row matches."""
    query = Box(poll_id=poll_id, vote_id=vote_id)
    result = await mysql_client.get_vote(session=session, payload=query)
    row = result.fetchone()
    columns = tuple(result.keys())
    if not row:
        raise NotFoundError
    return Box(zip(columns, row))
async def update_vote(session: AsyncConnection, vote: Box, data: Box, user_id: str) -> Box:
    """Apply *data* on top of *vote* (falsy fields keep the old value)."""
    changes = Box(
        title=data.title if data.title else vote.title,
        image=data.image if data.image else vote.image,
        id=vote.id,
        last_user_update=user_id,
        updated_at=current_time())
    await mysql_client.update_vote(session=session, payload=changes)
    # Merge so the caller sees the stored row with the new fields applied.
    return vote | changes
async def delete_vote(session: AsyncConnection, vote_id: str) -> Box:
    """Delete the vote row with *vote_id*; return the payload that was used."""
    target = Box(id=vote_id)
    await mysql_client.delete_vote(session=session, payload=target)
    return target
|
988,538 | f9d3bdc0f737b2b1b6ce4945304407bfcbf3dfd2 | #Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/iRig_Keys_IO/mixer.py
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import forward_property, liveobj_valid
from ableton.v2.control_surface.components import MixerComponent as MixerComponentBase
from ableton.v2.control_surface.control import ButtonControl
from .scroll import ScrollComponent
class MixerComponent(MixerComponentBase):
    """Mixer extended with encoder-driven track selection and a button that
    toggles record-arm on the currently selected track."""
    # Expose the internal ScrollComponent's encoder as a property of this mixer.
    track_scroll_encoder = forward_property(u'_track_scrolling')(u'scroll_encoder')
    selected_track_arm_button = ButtonControl()
    def __init__(self, *a, **k):
        super(MixerComponent, self).__init__(*a, **k)
        # Wire track selection into a ScrollComponent so one encoder can step
        # through the track list in both directions.
        self._track_scrolling = ScrollComponent(parent=self)
        self._track_scrolling.can_scroll_up = self._can_select_prev_track
        self._track_scrolling.can_scroll_down = self._can_select_next_track
        self._track_scrolling.scroll_up = self._select_prev_track
        self._track_scrolling.scroll_down = self._select_next_track
    @selected_track_arm_button.pressed
    def selected_track_arm_button(self, _):
        """Toggle arm on the selected track (and tracks in the same selection);
        when the song uses exclusive arm, disarm every other armed track."""
        selected_track = self.song.view.selected_track
        if liveobj_valid(selected_track) and selected_track.can_be_armed:
            new_value = not selected_track.arm
            for track in self.song.tracks:
                if track.can_be_armed:
                    if track == selected_track or track.is_part_of_selection and selected_track.is_part_of_selection:
                        track.arm = new_value
                    elif self.song.exclusive_arm and track.arm:
                        track.arm = False
    def _can_select_prev_track(self):
        # Scrolling up is possible unless the first usable track is selected.
        return self.song.view.selected_track != self._provider.tracks_to_use()[0]
    def _can_select_next_track(self):
        # Scrolling down is possible unless the last usable track is selected.
        return self.song.view.selected_track != self._provider.tracks_to_use()[-1]
    def _select_prev_track(self):
        """Select the track before the current one in the provider's list."""
        selected_track = self.song.view.selected_track
        tracks = self._provider.tracks_to_use()
        assert selected_track in tracks
        index = list(tracks).index(selected_track)
        self.song.view.selected_track = tracks[index - 1]
    def _select_next_track(self):
        """Select the track after the current one in the provider's list."""
        selected_track = self.song.view.selected_track
        tracks = self._provider.tracks_to_use()
        assert selected_track in tracks
        index = list(tracks).index(selected_track)
        self.song.view.selected_track = tracks[index + 1]
|
988,539 | f735a8612b0a21e97af4257a7f96a8c018a67049 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import enum
from typing import Iterator, IO
# VM command vocabulary, grouped by operand count / category.
UNARY_OPS = ["neg", "not"]
BINARY_OPS = ["add", "sub", "and", "or"]
CMP_OPS = ["gt", "lt", "eq"]
# Every arithmetic-logical command the translator accepts.
ARITHMETIC_OPS = [*UNARY_OPS, *BINARY_OPS, *CMP_OPS]
class CmdType(enum.Enum):
    """The kinds of VM commands recognized by the parser."""
    ARITH = enum.auto()
    PUSH = enum.auto()
    POP = enum.auto()
    LABEL = enum.auto()
    GOTO = enum.auto()
    IF = enum.auto()
    FUNCTION = enum.auto()
    RET = enum.auto()
    CALL = enum.auto()
# Expected number of whitespace-separated tokens per command type,
# counting the command keyword itself. Every CmdType is listed, so a
# plain lookup (no default) is safe.
EXPECTED_ARGS = {
    CmdType.ARITH: 1,
    CmdType.PUSH: 3,
    CmdType.POP: 3,
    CmdType.FUNCTION: 3,
    CmdType.CALL: 3,
    CmdType.RET: 1,
    CmdType.IF: 2,
    CmdType.LABEL: 2,
    CmdType.GOTO: 2
}
class Command:
    """One parsed VM command: its type plus positional arguments."""
    def __init__(self, cmd_str: str) -> None:
        """Tokenize *cmd_str*, classify the command, and validate its arity.

        Raises ValueError for an unknown keyword or a wrong argument count.
        """
        self.args = cmd_str.split()
        keyword = self.args[0]
        # Keyword -> command type for everything except arithmetic ops.
        keyword_types = {
            "push": CmdType.PUSH,
            "pop": CmdType.POP,
            "label": CmdType.LABEL,
            "goto": CmdType.GOTO,
            "if-goto": CmdType.IF,
            "function": CmdType.FUNCTION,
            "call": CmdType.CALL,
            "return": CmdType.RET,
        }
        if keyword in ARITHMETIC_OPS:
            self.cmd_type = CmdType.ARITH
        elif keyword in keyword_types:
            self.cmd_type = keyword_types[keyword]
        else:
            raise ValueError("Unrecognized command {}".format(keyword))
        # Make sure we have the right number of arguments.
        expected = EXPECTED_ARGS[self.cmd_type]
        if len(self.args) != expected:
            raise ValueError("Expected {} arguments in command {}, but got {}".format(expected, cmd_str, len(self.args)))
    @property
    def arg1(self) -> str:
        """First argument; for arithmetic commands, the keyword itself."""
        if self.cmd_type == CmdType.RET:
            raise RuntimeError("RET command does not have arguments")
        if self.cmd_type == CmdType.ARITH:
            return self.args[0]
        return self.args[1]
    @property
    def arg2(self) -> str:
        """Second argument (push/pop/function/call only)."""
        if self.cmd_type in (CmdType.PUSH, CmdType.POP, CmdType.FUNCTION, CmdType.CALL):
            return self.args[2]
        raise RuntimeError("{} command does not have two arguments".format(self.cmd_type))
class Parser:
    """Streams Command objects from a VM source file, one per advance()."""
    def __init__(self, file: IO[str]) -> None:
        self.file: Iterator[str] = file
    def advance(self) -> None:
        """Read the next command into ``self.cmd``.

        Skips blank lines and full-line comments, strips end-of-line
        comments, then parses the remainder as a Command. Propagates
        StopIteration at end of input.
        """
        cmd_str = next(self.file).strip()
        # Skip comments and empty lines.
        while (not cmd_str) or cmd_str.startswith("//"):
            cmd_str = next(self.file).strip()
        # BUGFIX: the previous `cmd_str, _ = cmd_str.split("//")` raised
        # ValueError (and thus silently kept the comment text) whenever a
        # line contained "//" more than once. partition always yields
        # exactly three parts, so the comment is reliably removed.
        cmd_str = cmd_str.partition("//")[0].strip()
        self.cmd: Command = Command(cmd_str)
988,540 | bee4c4f5177b34f8368bd3e959610492e350f66d | import math
import random
import numpy as np
# Seed the stdlib RNG. NOTE(review): the simulation below draws from
# np.random, which this call does not seed, so runs are not reproducible —
# confirm whether np.random.seed(1) was intended.
random.seed(1)
def Failure():
    """Failure event: one component goes down. Updates the shared state,
    schedules follow-up events, and integrates the S(t) curve."""
    global Clock # simulation clock
    global NextFailure # time of next failure event
    global NextRepair # time of next repair event
    global S # system state
    global Slast # previous value of the system state
    global Tlast # time of previous state change
    global Area # area under S(t) curve
    # Failure event
    # Update state and schedule future events
    S = S - 1;
    if (S == 1):
        # One unit still running: schedule its failure (uniform 1..6 via
        # ceil) and the just-failed unit's repair (2.5 w.p. 0.65, else 1.5).
        NextFailure = Clock + math.ceil(6 * np.random.uniform(0, 1))
        z = np.random.uniform(0,1)
        if(z <= 0.65):
            NextRepair = Clock + 2.5
        else:
            NextRepair = Clock + 1.5
    # Update area under the S(t) curve
    Area = Area + Slast * (Clock - Tlast)
    Tlast = Clock
    Slast = S
def Repair():
    """Repair event: one component comes back up. Updates the shared state,
    schedules follow-up events, and integrates the S(t) curve."""
    global Clock # simulation clock
    global NextFailure # time of next failure event
    global NextRepair # time of next repair event
    global S # system state
    global Slast # previous value of the system state
    global Tlast # time of previous state change
    global Area # area under S(t) curve
    # Repair event
    # Update state and schedule future events
    S = S + 1
    if (S == 1):
        # The other unit is still down: schedule its repair
        # (2.5 w.p. 0.65, else 1.5) and the running unit's failure.
        z = np.random.uniform(0,1)
        if(z <= 0.65):
            NextRepair = Clock + 2.5
        else:
            NextRepair = Clock + 1.5
        NextFailure = Clock + math.ceil(6 * np.random.uniform(0, 1))
    Area = Area + Slast * (Clock - Tlast)
    Tlast = Clock
    Slast = S
def Timer():
    """Advance Clock to the earliest pending event, consume that event's
    slot (reset it to 'infinity'), and return the event name."""
    global Clock
    global NextFailure
    global NextRepair
    infinity = 1000000
    if NextFailure < NextRepair:
        Clock, NextFailure = NextFailure, infinity
        return 'Failure'
    Clock, NextRepair = NextRepair, infinity
    return 'Repair'
Infinity = 1000000
# Replication-level accumulators: SumS integrates average availability,
# SumY accumulates time-to-system-failure across replications.
SumS = 0
SumY = 0
for Rep in range(100):
    # Initialize the state and statistical variables (both components up).
    S = 2
    Slast = 2
    Clock = 0
    Tlast = 0
    Area = 0
    # Schedule the initial failure event
    NextFailure = math.ceil(6 * np.random.uniform(0,1))
    NextRepair = Infinity;
    # Advance time and execute events until the system fails
    while (S != 0):
        NextEvent = Timer()
        if NextEvent == 'Failure':
            Failure()
        else:
            Repair()
    # Accumulate replication statistics
    SumS = SumS + Area / Clock;
    SumY = SumY + Clock;
# Display output
# NOTE(review): SumS (mean availability) is accumulated but never reported.
print('Average failure at time: ', SumY / 100 );
|
988,541 | 8df3cd40b1f3735819380deaf146ed005d895b1c | #!usr/bin/env python
# -*- coding:utf-8 -*-
'''Mask R-CNN coco inference
Using the trained model to detect and segment objects.'''
import os
import sys
import random
import colorsys
import skimage.io
import numpy as np
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.patches import Polygon
import time
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
import mrcnn.model as modellib
from mrcnn.config import Config
from mrcnn import visualize
# Import COCO config
# sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
import coco
class InferenceConfig(coco.CocoConfig):
    """COCO config specialized for single-image inference."""
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # save or not: True writes annotated copies, False displays them instead.
    SAVE = True
if __name__ == '__main__':
    # set the config and show
    config = InferenceConfig()
    config.display()
    # Local path to trained weights file
    MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    COCO_MODEL_PATH = os.path.join(MODEL_DIR, 'mask_rcnn_coco.h5')
    # Directory of images to run detection on
    IMAGE_DIR = '/home/ason/datasets/images'
    '''Create Model and Load Trained Weights'''
    # Create model object in inference mode.
    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
    # Load pre-trained weights by layer name.
    model.load_weights(COCO_MODEL_PATH, by_name=True)
    # COCO Class names
    # Index of the class in the list is its ID. For example, to get ID of
    # the teddy bear class, use: class_names.index('teddy bear')
    class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
                   'bus', 'train', 'truck', 'boat', 'traffic light',
                   'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
                   'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
                   'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
                   'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                   'kite', 'baseball bat', 'baseball glove', 'skateboard',
                   'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
                   'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                   'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
                   'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
                   'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
                   'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
                   'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
                   'teddy bear', 'hair drier', 'toothbrush']
    '''Run Object Detection'''
    # get the path of images folder
    file_names = next(os.walk(IMAGE_DIR))[2]
    id = 0 # image id  (NOTE(review): shadows the builtin id())
    # # Run detection
    all_start = time.time()
    for f in file_names:
        # Load images from the images folder
        input_path = os.path.abspath(os.path.join(IMAGE_DIR, f))
        image = skimage.io.imread(input_path)
        print("input_path:", input_path)
        detect = model.detect
        results = detect([image], verbose=1)
        r = results[0]
        # Save and show results
        if config.SAVE == True:
            # output file path (mirror folder "images_results")
            output_path = input_path.replace("images", "images_results")
            print("output_path:", output_path)
            print('\n')
            visualize.save_instances(output_path = output_path, image=image,
                        boxes=r['rois'], masks=r['masks'], class_ids=r['class_ids'],
                        class_names=class_names, scores=r['scores'], title = "")
        # Show results only
        else:
            visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                        class_names, r['scores'])
        id += 1
    all_end = time.time()
    average_time = (all_end - all_start) / id
    print("inference runtime per image : {:.3f} s".format(average_time))
|
988,542 | 8f52a60ec1b8f8849364753488ebfcd9dc3baca7 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2019/8/13 14:30
# @Author: Tu Xinglong
# @File : DataSource.py
# 内置模块
import os
import sys
import warnings
from datetime import datetime
warnings.filterwarnings("ignore")
import tushare as ts
import pandas as pd
CurrentPath = os.path.abspath(os.path.dirname(__file__)) # 设置绝对路径
Pre_path = os.path.abspath(os.path.dirname(CurrentPath))
sys.path += [Pre_path, Pre_path + '\\Engine']
from IndustryConst import (ZXFirstIndus, ZXSecondIndus, ZXThirdIndus)
"""中信一级行业成分股更新"""
def ZXClass(trading_day):
    """Download ZX (CITIC) level-1 industry constituents and store one HDF5
    file per industry per day.

    trading_day: iterable of "YYYYMMDD" date strings.
    """
    # Output folder
    data_path = "E:\\HistoryData\\MarketInfo\\Daily\\ZXClass\\"
    # CITIC level-1 sector codes: 015001 .. 015029.
    ZX_FirstIndusCode = ['0' + str(x) for x in range(15001, 15030, 1)]
    for induscode in ZX_FirstIndusCode:
        for date in trading_day:
            dt = date[0:4] + "-" + date[4:6] + "-" + date[6:8]
            df = c.sector(induscode, dt)
            # df.Data alternates [code, name, code, name, ...]
            date_time = [
                datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0,
                         0) for i in range(int(len(df.Data) / 2))
            ]
            stock_code = [x[0:6] for x in df.Data[::2]]
            stock_name = df.Data[1::2]
            zx_class = pd.DataFrame([date_time, stock_code, stock_name],
                                    index=['EndDate', 'SecuCode',
                                           'SecuAbbr']).T
            zx_class.index = zx_class['EndDate'].tolist()
            # The original if/else ran the identical to_hdf call on both
            # branches; makedirs(exist_ok=True) collapses it to one write.
            os.makedirs(data_path + induscode, exist_ok=True)
            zx_class.to_hdf(data_path + induscode + '\\' + date + '.h5',
                            date)
    print("中信一级行业更新完毕!")
"""中信一、二、三级行业成分股更新"""
def ZXClass2(trading_day):
    """Download ZX (CITIC) level-1/2/3 industry membership for each trading
    day and store one combined HDF5 file per day."""
    # Output folder
    data_path = "E:\\HistoryData\\MarketInfo\\Daily\\ZXClass2\\"
    for date in trading_day:
        dt = date[0:4] + "-" + date[4:6] + "-" + date[6:8]
        indus_data = []
        for induscode in ZXThirdIndus:
            zx_class = {}
            df = c.sector(induscode, dt)
            # ErrorCode 10000009 appears to mean "no data" — TODO confirm
            # against the EmQuantAPI docs.
            if df.ErrorCode != 10000009:
                # df.Data alternates [code, name, ...]; the level-3 code's
                # prefixes give the level-1 (6 chars) and level-2 (9 chars)
                # codes, resolved to names via the imported mappings.
                zx_class["SecuCode"] = [x[0:6] for x in df.Data[::2]]
                zx_class["SecuAbbr"] = df.Data[1::2]
                zx_class["FirstIndusCode"] = [induscode[0:6]] * (int(
                    len(df.Data) / 2))
                zx_class["FirstIndusName"] = [ZXFirstIndus[induscode[0:6]]
                                              ] * (int(len(df.Data) / 2))
                zx_class["SecondIndusCode"] = [induscode[0:9]] * (int(
                    len(df.Data) / 2))
                zx_class["SecondIndusName"] = [ZXSecondIndus[induscode[0:9]]
                                               ] * (int(len(df.Data) / 2))
                zx_class["ThirdIndusCode"] = [induscode] * (int(
                    len(df.Data) / 2))
                zx_class["ThirdIndusName"] = [ZXThirdIndus[induscode]] * (int(
                    len(df.Data) / 2))
                zx_class = pd.DataFrame(zx_class,
                                        columns=[
                                            'SecuCode', 'SecuAbbr',
                                            'FirstIndusCode', 'FirstIndusName',
                                            'SecondIndusCode',
                                            'SecondIndusName',
                                            'ThirdIndusCode', 'ThirdIndusName'
                                        ])
            else:
                continue
            indus_data.append(zx_class)
        indus_data = pd.concat(indus_data).sort_values(by="SecuCode")
        # NOTE(review): the chained `= date_time =` also binds an unused name.
        indus_data.index = date_time = [
            datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0, 0)
        ] * indus_data.shape[0]
        indus_data.to_hdf(data_path + date + '.h5', date)
    print("中信行业信息更新完毕!")
"""全部A成分股更新"""
def Stock_A(trading_day):
    """Store the all-A-share (sector 001004) constituent list for each day
    as one HDF5 file per day."""
    target_dir = "E:\\HistoryData\\MarketInfo\\Daily\\Stock_A_info\\"
    for date in trading_day:
        iso_date = date[0:4] + "-" + date[4:6] + "-" + date[6:8]
        # Sector 001004 = all A shares; df.Data alternates [code, name, ...]
        df = c.sector("001004", iso_date)
        row_count = int(len(df.Data) / 2)
        stamp = datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0, 0)
        frame = pd.DataFrame(
            [[stamp] * row_count,
             [code[0:6] for code in df.Data[::2]],
             df.Data[1::2]],
            index=['EndDate', 'SecuCode', 'SecuAbbr']).T
        frame.index = frame['EndDate'].tolist()
        frame.to_hdf(target_dir + date + '.h5', date)
    print("全A成分股更新完毕!")
"""A股风险警示股票更新"""
def Stock_ST(trading_day):
    """Store each day's risk-warning (ST) stock list, excluding B shares."""
    # Output folder
    data_path = "E:\\HistoryData\\MarketInfo\\Daily\\Stock_ST\\"
    # Sector 001023 = all risk-warning (ST) stocks
    for date in trading_day:
        dt = date[0:4] + "-" + date[4:6] + "-" + date[6:8]
        df = c.sector("001023", dt)
        # df.Data alternates [code, name, code, name, ...]
        date_time = [
            datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0, 0)
            for i in range(int(len(df.Data) / 2))
        ]
        stock_code = [x[0:6] for x in df.Data[::2]]
        stock_name = df.Data[1::2]
        stock_st = pd.DataFrame([date_time, stock_code, stock_name],
                                index=['EndDate', 'SecuCode', 'SecuAbbr']).T
        stock_st.index = stock_st['EndDate'].tolist()
        # Drop B-share rows (abbreviation contains 'B')
        stock_st = stock_st[~stock_st.SecuAbbr.str.contains('B')]
        stock_st.to_hdf(data_path + date + '.h5', date)
    print("风险警示股票更新完毕!")
"""A股每日涨跌停股票统计"""
def Stock_Limit(trading_day):
    """Record, per day, the stocks that hit the daily up or down price limit."""
    # Output folder
    data_path = "E:\\HistoryData\\MarketInfo\\Daily\\Stock_Limit\\"
    # Queries all A shares (sector 001004) and keeps limit-up/limit-down rows
    for date in trading_day:
        dt = date[0:4] + "-" + date[4:6] + "-" + date[6:8]
        df = c.css(
            c.sector("001004", dt).Data[::2],
            "ISSURGEDLIMIT,ISDECLINELIMIT,NAME", "TradeDate=" + dt)
        data = pd.DataFrame(df.Data,
                            index=["LimitUp", "LimitDown", "SecuAbbr"]).T
        # Keep rows flagged "是" (yes) in either limit direction
        data = data[data.LimitUp.str.contains("是")
                    | data.LimitDown.str.contains("是")]
        data["SecuCode"] = [x[0:6] for x in data.index.tolist()]
        date_time = [
            datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0, 0)
            for i in range(data.shape[0])
        ]
        data["EndDate"] = date_time
        data = data.reindex(columns=[
            'EndDate', 'SecuCode', 'SecuAbbr', "LimitUp", "LimitDown"
        ])
        data.index = data['EndDate'].tolist()
        data.to_hdf(data_path + date + '.h5', date)
    print("每日涨跌停股票更新完毕!")
"""A股每日收益率更新"""
def Stock_Ret(trading_day):
    """Record each day's return field (DIFFERRANGE) for all A shares."""
    # Output folder
    data_path = "E:\\HistoryData\\MarketInfo\\Daily\\Stock_Ret\\"
    # Queries all A shares (sector 001004)
    for date in trading_day:
        dt = date[0:4] + "-" + date[4:6] + "-" + date[6:8]
        df = c.css(
            c.sector("001004", dt).Data[::2], "DIFFERRANGE", "TradeDate=" + dt)
        data = pd.DataFrame(df.Data, index=["Ret"]).T
        data["SecuCode"] = [x[0:6] for x in data.index.tolist()]
        date_time = [
            datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0, 0)
            for i in range(data.shape[0])
        ]
        data["EndDate"] = date_time
        data = data.reindex(columns=['EndDate', 'SecuCode', 'Ret'])
        data.index = data['EndDate'].tolist()
        data.to_hdf(data_path + date + '.h5', date)
    print("全市场股票收益率更新完毕!")
"""每日停复牌信息统计"""
def Stock_Suspend(trading_day):
    """Save the codes of suspended A-shares per trading day to HDF5.

    A stock counts as suspended when its trade status contains
    连续停牌 (continuous suspension) or 停牌一天 (one-day suspension).
    """
    data_path = "E:\\HistoryData\\MarketInfo\\Daily\\Stock_Suspend\\"
    for date in trading_day:
        day = date[0:4] + "-" + date[4:6] + "-" + date[6:8]
        # Cross-section query of the trade status for every A-share.
        raw = c.css(
            c.sector("001004", day).Data[::2], "TRADESTATUS", "TradeDate=" + day)
        frame = pd.DataFrame(raw.Data, index=["Status"]).T
        frame["SecuCode"] = [code[0:6] for code in frame.index.tolist()]
        stamp = datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0, 0)
        frame["EndDate"] = [stamp] * frame.shape[0]
        frame.index = frame['EndDate'].tolist()
        suspended = (frame.Status.str.contains('连续停牌')
                     | frame.Status.str.contains('停牌一天'))
        frame = frame[suspended]
        frame = frame.reindex(columns=['EndDate', 'SecuCode'])
        frame.to_hdf(data_path + date + '.h5', date)
    print("每日停复牌股票更新完毕!")
"""中信行业哑变量因子更新"""
def Industry_ZX(trading_day):
    """Write one 0/1 industry-dummy row per CITIC (中信) level-1 industry and
    trading day.

    Columns are the day's A-share codes (STAR Market 688xxx excluded) and the
    value is 1 iff the stock belongs to that industry on that day. Output goes
    to ZXIndus\\<induscode>\\<date>.h5.
    """
    data_path = "E:\\HistoryData\\MarketInfo\\Daily\\"
    # CITIC level-1 industry codes: '015001' .. '015029'
    ZX_FirstIndusCode = ['0' + str(x) for x in range(15001, 15030, 1)]
    for induscode in ZX_FirstIndusCode:
        out_dir = data_path + "ZXIndus\\" + induscode
        # Create the per-industry folder once up front. The original checked
        # os.path.isdir inside the date loop and duplicated the identical
        # to_hdf call in both branches of the check.
        os.makedirs(out_dir, exist_ok=True)
        for date in trading_day:
            # All A-share codes for the day, excluding STAR Market (688xxx).
            stock_a = [
                x for x in pd.read_hdf(
                    data_path + "Stock_A_info\\" + date +
                    ".h5", date)["SecuCode"].tolist() if x[0:3] != "688"
            ]
            # Industry constituents for the day; a set makes the membership
            # test below O(1) instead of scanning a list per stock.
            zx_members = set(
                pd.read_hdf(
                    data_path + "ZXClass\\" + induscode + "\\" + date + ".h5",
                    date)["SecuCode"].tolist())
            indux = [1 if x in zx_members else 0 for x in stock_a]
            zxindus = pd.DataFrame(indux, index=stock_a).T
            # Single-row frame indexed by the day's midnight timestamp.
            zxindus.index = [
                datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), 0, 0,
                         0)
            ]
            zxindus.to_hdf(out_dir + "\\" + date + ".h5", date)
    print("中信行业哑变量更新完毕!")
if __name__ == "__main__":
    """
    Choice数据源
    """
    # Start a Choice (EmQuant) terminal session; `c` comes from the star import.
    from EmQuantAPI import * # noqa
    c.start() # noqa
    """设置下载日期"""
    # Trading calendar comes from tushare; `ts` and `Pre_path` are presumably
    # defined earlier in the file — confirm against the full module.
    pro = ts.pro_api()
    TradeDate = pd.read_csv(Pre_path + "\\TradeDate.csv")
    # Date span is the first/last row of TradeDate.csv (YYYYMMDD ints).
    start_date = str(TradeDate.iloc[0, 0])
    end_date = str(TradeDate.iloc[-1, 0])
    # Keep only open trading days on the Shanghai exchange.
    trading_day = [
        x for x in pro.trade_cal(exchange='SSE',
                                 start_date=start_date,
                                 end_date=end_date,
                                 is_open='1')['cal_date'].tolist()
    ]
    """行业更新"""
    ZXClass(trading_day)
    ZXClass2(trading_day)
    """股票更新"""
    Stock_A(trading_day)
    Stock_Limit(trading_day)
    Stock_ST(trading_day)
    Stock_Suspend(trading_day)
    Stock_Ret(trading_day)
    """哑变量更新"""
    Industry_ZX(trading_day)
|
988,543 | 75cd55bdc1128437a8f30d35841a6259916b911c | import json
import os
class ERC20:
    """Minimal ERC-20 helper: holds a web3 handle plus the token ABI loaded
    from the abi.json file that ships next to this module."""

    # Parsed ABI; overwritten per instance in __init__.
    abi = None

    def __init__(self, web3) -> None:
        self.web3 = web3
        abi_path = "{dir}/{file}".format(dir=os.path.dirname(__file__), file="abi.json")
        with open(abi_path) as handle:
            self.abi = json.load(handle)
|
988,544 | 38323e8d7a16f70b8ff15b0c59776b251041dbd6 | import json
import core.neural_network.builder.building_strategy as BuildingStrategy
from core.neural_network.drawer.main import draw_net
class IBuilder:
    """Abstract interface for network builders."""

    def build_net(self, building_data):
        """Construct a network from *building_data*; subclasses must override."""
        raise NotImplementedError()
class Builder(IBuilder):
    """Concrete builder that delegates construction to a pluggable strategy."""

    def __init__(self, building_strategy=BuildingStrategy.CoefsToWeightsBuilding()):
        # NOTE: the default strategy instance is created once, when the class
        # definition is executed, and shared by every caller using the default.
        self.building_strategy = building_strategy
        self.building_data = {}

    def build_net(self, building_data):
        """Build a network by forwarding *building_data* to the strategy."""
        return self.building_strategy.build_net(building_data)

    def parse_json(self, path='SimpleTest.json'):
        """Read *path* as UTF-8 JSON, cache it on the instance and return it."""
        with open(path, 'r', encoding='utf-8') as handle:
            raw = handle.read()
        self.building_data = json.loads(raw)
        return self.building_data
# Demo driver: build the network described by SimpleTest.json, print its
# structure and render it with the drawer module.
builder = Builder()
net = builder.build_net(builder.parse_json())
net.print_net()
draw_net(net)
988,545 | 7eaa56638de830329ecf41e1fef90d8a15dfb763 | import time
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, transforms, utils
from tensorboardX import SummaryWriter
from util_ import *
from entropy_model import *
from PIL import Image
from tqdm import tqdm
from data import *
# Command-line configuration for PixelCNN training.
parser = argparse.ArgumentParser()
# data I/O
parser.add_argument('-i', '--data_dir', type=str,
                    default='data', help='Location for the dataset')
parser.add_argument('-o', '--save_dir', type=str, default='models',
                    help='Location for parameter checkpoints and samples')
parser.add_argument('-d', '--dataset', type=str,
                    default='cifar', help='Can be either cifar|mnist')
parser.add_argument('-p', '--print_every', type=int, default=50,
                    help='how many iterations between print statements')
parser.add_argument('-t', '--save_interval', type=int, default=10,
                    help='Every how many epochs to write checkpoint/samples?')
parser.add_argument('-r', '--load_params', type=str, default=None,
                    help='Restore training from previous model checkpoint?')
# model
parser.add_argument('-q', '--nr_resnet', type=int, default=2,
                    help='Number of residual blocks per stage of the model')
parser.add_argument('-n', '--nr_filters', type=int, default=32,
                    help='Number of filters to use across the model. Higher = larger model.')
parser.add_argument('-m', '--nr_logistic_mix', type=int, default=10,
                    help='Number of logistic components in the mixture. Higher = more flexible model')
parser.add_argument('-l', '--lr', type=float,
                    default=0.0002, help='Base learning rate')
parser.add_argument('-e', '--lr_decay', type=float, default=0.999995,
                    help='Learning rate decay, applied every step of the optimization')
parser.add_argument('-b', '--batch_size', type=int, default=64,
                    help='Batch size during training per GPU')
parser.add_argument('-x', '--max_epochs', type=int,
                    default=5000, help='How many epochs to run in total?')
parser.add_argument('-s', '--seed', type=int, default=1,
                    help='Random seed to use')
# Frame-pair indices used by the Moving-MNIST experiment below.
parser.add_argument("--f1", type=int)
parser.add_argument("--f2", type=int)
parser.add_argument('--idx', type=int)
args = parser.parse_args()
# reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
sample_batch_size = 25
obs = (1, 64, 64)  # (channels, height, width) of the modeled images
input_channels = obs[0]
rescaling = lambda x: (x - .5) * 2.  # maps [0, 1] pixels to [-1, 1]
rescaling_inv = lambda x: .5 * x + .5  # inverse of `rescaling`
kwargs = {'num_workers': 10, 'pin_memory': True, 'drop_last': True}
ds_transforms = transforms.Compose([transforms.ToTensor(), rescaling])
loss_op = lambda real, fake: discretized_mix_logistic_loss_1d(real, fake)
def _input(filename):
prices = {}
names = {}
for line in open(filename).readlines():
(name, src, dst, price) = line.rstrip().split()
name = int(name.replace('M', ''))
src = int(src.replace('C', ''))
dst = int(dst.replace('C', ''))
price = int(price)
t = (src, dst)
if t in prices and prices[t] <= price:
continue
prices[t] = price
names[t] = name
return prices, names
def _load(arcs, weights):
g = {}
for (src, dst) in arcs:
if src in g:
g[src][dst] = weights[(src, dst)]
else:
g[src] = {dst: weights[(src, dst)]}
return g
def reverse(graph):
    """Return the transpose graph: r[dst][src] = cost for every edge src->dst."""
    transposed = {}
    for src in graph:
        for dst, cost in graph[src].items():
            transposed.setdefault(dst, {})[src] = cost
    return transposed
def getCycle(n, g, visited=None, cycle=None):
    """Depth-first walk from node *n* in graph *g*.

    Returns the list of nodes in visit order; *visited* and *cycle* are
    threaded through the recursion (fresh containers per top-level call).
    """
    if visited is None:
        visited = set()
    if cycle is None:
        cycle = []
    visited.add(n)
    cycle.append(n)
    # Nodes without outgoing edges simply terminate the walk.
    for successor in g.get(n, ()):
        if successor not in visited:
            cycle = getCycle(successor, g, visited, cycle)
    return cycle
def mergeCycles(cycle, G, RG, g, rg):
    """Cycle-contraction step of Chu-Liu/Edmonds: replace one tree edge of
    *cycle* with the external incoming edge of minimum modified cost.

    G/RG are the full graph and its transpose; g/rg are the current candidate
    tree and its transpose, both mutated in place.
    """
    allInEdges = []
    minInternal = None
    minInternalWeight = 100000
    # find minimal internal edge weight
    for n in cycle:
        for e in RG[n]:
            if e in cycle:
                if minInternal is None or RG[n][e] < minInternalWeight:
                    minInternal = (n, e)
                    minInternalWeight = RG[n][e]
                continue
            else:
                allInEdges.append((n, e))
    # find the incoming edge with minimum modified cost
    minExternal = None
    minModifiedWeight = 0
    for s, t in allInEdges:
        # popitem + reinsert peeks at the current tree edge into s without
        # changing rg.
        u, v = rg[s].popitem()
        rg[s][u] = v
        w = RG[s][t] - (v - minInternalWeight)
        if minExternal is None or minModifiedWeight > w:
            minExternal = (s, t)
            minModifiedWeight = w
    # Swap: remove the old tree edge into minExternal[0] and record the chosen
    # external edge in both rg and g.
    u, w = rg[minExternal[0]].popitem()
    rem = (minExternal[0], u)
    rg[minExternal[0]].clear()
    if minExternal[1] in rg:
        rg[minExternal[1]][minExternal[0]] = w
    else:
        rg[minExternal[1]] = {minExternal[0]: w}
    if rem[1] in g:
        if rem[0] in g[rem[1]]:
            del g[rem[1]][rem[0]]
    if minExternal[1] in g:
        g[minExternal[1]][minExternal[0]] = w
    else:
        g[minExternal[1]] = {minExternal[0]: w}
# --------------------------------------------------------------------------------- #
def mst(root, G):
    """ The Chu-Lui/Edmond's algorithm
    arguments:
    root - the root of the MST
    G - the graph in which the MST lies
    returns: a graph representation of the MST
    Graph representation is the same as the one found at:
    http://code.activestate.com/recipes/119466/
    Explanation is copied verbatim here:
    The input graph G is assumed to have the following
    representation: A vertex can be any object that can
    be used as an index into a dictionary. G is a
    dictionary, indexed by vertices. For any vertex v,
    G[v] is itself a dictionary, indexed by the neighbors
    of v. For any edge v->w, G[v][w] is the length of
    the edge. This is related to the representation in
    <http://www.python.org/doc/essays/graphs.html>
    where Guido van Rossum suggests representing graphs
    as dictionaries mapping vertices to lists of neighbors,
    however dictionaries of edges have many advantages
    over lists: they can store extra information (here,
    the lengths), they support fast existence tests,
    and they allow easy modification of the graph by edge
    insertion and removal. Such modifications are not
    needed here but are important in other graph algorithms.
    Since dictionaries obey iterator protocol, a graph
    represented as described here could be handed without
    modification to an algorithm using Guido's representation.
    Of course, G and G[v] need not be Python dict objects;
    they can be any other object that obeys dict protocol,
    for instance a wrapper in which vertices are URLs
    and a call to G[v] loads the web page and finds its links.
    """
    RG = reverse(G)
    # Drop all edges into the root so it never receives a parent.
    if root in RG:
        RG[root] = {}
    g = {}
    # Greedy step: keep the cheapest incoming edge for every node.
    for n in RG:
        if len(RG[n]) == 0:
            continue
        minimum = 1000000
        s, d = None, None
        for e in RG[n]:
            if RG[n][e] < minimum:
                minimum = RG[n][e]
                s, d = n, e
        if d in g:
            g[d][s] = RG[s][d]
        else:
            g[d] = {s: RG[s][d]}
    # Detect cycles created by the greedy selection.
    cycles = []
    visited = set()
    for n in g:
        if n not in visited:
            cycle = getCycle(n, g, visited)
            cycles.append(cycle)
    rg = reverse(g)
    # Contract every cycle that does not contain the root.
    for cycle in cycles:
        if root in cycle:
            continue
        mergeCycles(cycle, G, RG, g, rg)
    return g
def adj2dict(mat):
    """Convert a square adjacency matrix into the nested-dict graph form
    (self-loops on the diagonal are skipped)."""
    graph = dict()
    size = mat.shape[0]
    for row in range(size):
        for col in range(size):
            if row == col:
                continue
            graph.setdefault(row, {})[col] = float(mat[row, col])
    return graph
def dict2adj(d, num):
    """Return a num x num 0/1 matrix with a 1 wherever *d* contains edge i->j
    (edge weights are discarded)."""
    mat = np.zeros((num, num))
    for src, neighbours in d.items():
        for dst in neighbours:
            mat[src, dst] = 1
    return mat
def loss_fn(recon_x, x):
    """Per-sample summed binary cross-entropy between a 64x64 reconstruction
    and its target, averaged over the batch dimension.

    Uses ``reduction='sum'`` — the exact replacement for the deprecated
    ``size_average=False`` keyword.
    """
    BCE = torch.nn.functional.binary_cross_entropy(
        recon_x.view(-1, 64 * 64), x.view(-1, 64 * 64), reduction='sum')
    return BCE / x.size(0)
print('starting training')
#mat = np.load('mat.npy')
writes = 0
node_num = 20
# Pairwise cost matrix: mat[i, j] will hold the loss of modeling frame j
# conditioned on frame i; initialized high so untrained pairs stay expensive.
mat = np.ones((node_num,node_num)) * 100
train_loss = 0.0
for i in range(node_num):
    a = np.zeros((node_num))
    for j in range(node_num):
        if i== j:
            continue
        # Dataset of (frame i, frame j) pairs from Moving MNIST.
        train_dataset_pair = Moving_MNIST_Frame(i,j)
        train_loader_pair = torch.utils.data.DataLoader(dataset=train_dataset_pair, batch_size=64, shuffle=True)
        # A fresh conditional PixelCNN is trained per (i, j) pair.
        model = PixelCNN_C(nr_resnet=args.nr_resnet, nr_filters=args.nr_filters,
                           input_channels=input_channels, nr_logistic_mix=args.nr_logistic_mix)
        model = model.cuda()
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
        scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=args.lr_decay)
        train_loss = 0.
        for epoch in range(1):
            model.train(True)
            torch.cuda.synchronize()
            time_ = time.time()
            model.train()
            cnt = 0.0
            for batch_idx, (x, y) in enumerate(tqdm(train_loader_pair)):
                # Skip ragged final batches.
                if x.size(0)!=64:
                    continue
                optimizer.zero_grad()
                y = y.cuda()
                y = y.unsqueeze(1)
                x = x.cuda()
                x = x.unsqueeze(1)
                # Predict frame y conditioned on frame x.
                output = model(y, x)
                loss = loss_fn(output,y)
                #loss = loss_op(y, output)
                loss.backward()
                optimizer.step()
                train_loss += loss.data
                cnt += x.size(0)
            # if batch_idx % 50 == 0:
            #     print("idx : {}, loss : {}".format(batch_idx, train_loss / cnt))
            # decrease learning rate
            scheduler.step()
        print('i: {}, j {}, H loss : {}'.format(i, j, train_loss / len(train_loader_pair.dataset)))
        mat[i, j] = train_loss / len(train_loader_pair.dataset)
        a[j] = train_loss / len(train_loader_pair.dataset)
    # Persist the row of conditional losses for source frame i.
    np.save('frames_'+str(i)+'_fix.npy',a)
np.save('sto_mat',mat)
#mat = np.load('fix_mat.npy')
node_num = 20
# Interpret the pairwise-loss matrix as a weighted digraph and search for the
# cheapest arborescence over all possible roots.
g = adj2dict(mat)
min_tree = 1000000
tree_dict = dict()
for i in range(node_num):
    h = mst(int(i), g)
    val = 0.0
    s_ = []
    flag = False
    # Total cost of the candidate tree rooted at i.
    for s in h:
        s_ += [s]
        for k in h[s]:
            val += mat[s, k]
        # if k in s_:
        #     flag = True
    # if flag:
    #     continue
    # Keep the tree only if it improves the best cost by a small margin.
    if val < (min_tree - 0.01):
        min_tree = val
        tree_dict[0] = h
print(tree_dict[0])
T = dict2adj(tree_dict[0], node_num)
print(T)
|
988,546 | a740ede2dcd1b80ddc6b21dc51cff0ec68839f55 | import sys
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
from utils import helpers
from tensorflow.keras.backend import set_learning_phase
from tensorflow.keras.models import load_model
def annotate_image(file_path, coordinates):
    """
    Annotates supplied image from predicted coordinates.

    Args:
        file_path: path
            System path of image to annotate
        coordinates: list
            Predicted body part coordinates for image (only the first entry
            is drawn)
    """
    # Load raw image
    from PIL import Image, ImageDraw
    image = Image.open(file_path)
    image_width, image_height = image.size
    # Marker/segment sizes scale with the larger image dimension.
    image_side = image_width if image_width >= image_height else image_height

    # Annotate image
    image_draw = ImageDraw.Draw(image)
    image_coordinates = coordinates[0]
    image = helpers.display_body_parts(image, image_draw, image_coordinates, image_height=image_height,
                                       image_width=image_width, marker_radius=int(image_side / 150))
    image = helpers.display_segments(image, image_draw, image_coordinates, image_height=image_height,
                                     image_width=image_width, segment_width=int(image_side / 100))

    # Save annotated image next to the original.
    # os.path.splitext strips only the extension; the original
    # file_path.split('.')[0] truncated at the FIRST dot, so a relative path
    # like './utils/golf.jpeg' collapsed to '' and saved '_tracked.png' in cwd.
    image.save(os.path.normpath(os.path.splitext(file_path)[0] + '_tracked.png'))
# EfficientPose variant to load (RT, I, II, III or IV)
model_variant = 'RT'
# Put all Keras layers into inference mode before loading weights.
set_learning_phase(0)
model = load_model(os.path.join('models', 'keras', 'EfficientPose{0}.h5'.format(model_variant.upper())),
                   custom_objects={'BilinearWeights': helpers.keras_BilinearWeights,
                                   'Swish': helpers.Swish(helpers.eswish), 'eswish': helpers.eswish,
                                   'swish1': helpers.swish1})
# file_path = f'./utils/MPII.jpg'
file_path = f'./utils/golf.jpeg'
img = cv2.imread(file_path)
h, w = img.shape[0], img.shape[1]
# OpenCV loads BGR; convert to RGB and rescale pixels to [0, 1].
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
img /= 255.
img = img[np.newaxis, ...]
# The model returns multiple outputs; the last one holds the final heatmaps.
output = model.predict(img)[-1]
# output = output[0]
# output = np.sum(output, axis=-1)
# output = output[..., np.newaxis]
# output = np.repeat(output, 3, axis=-1)
coord = [helpers.extract_coordinates(output[0,...], h, w)]
annotate_image(file_path, coord)
# plt.imshow(output, cmap='hot')
# plt.show()
|
988,547 | 9951fa118724303290be136663c132805d9daa83 | import _init_paths
import os
import numpy as np
import h5py
import time
import datetime
import sg_utils
from caffe.proto import caffe_pb2
import google.protobuf as pb2
import traceback as tb
import code
# Best-effort import: the shared loss-tracking dict only exists when the
# optional loss_tracking_layer module is importable; failure is reported
# but deliberately non-fatal.
try:
    from python_layers.loss_tracking_layer import loss_tracker_dict;
    # global loss_tracker_dict;
except:
    print 'FAILED at loss_tracker_dict'
    time.sleep(1)
__author__ = "Ishan Misra <ishanmisra@gmail.com>"
__date__ = "2016.07.24"
#Utilities to train models and log different aspects
#Based on train_net.py from Ross Girshick's Fast-RCNN codebase
class WatchTrainer():
    """ A class to watch training and keep track of a bunch of things like activations, weight norms, diffs """
    def __init__(self, solverPath, solver=None, checkSolver=True, verbose=True):
        """Wrap an existing caffe *solver* (required) described by *solverPath*."""
        assert( os.path.isfile(solverPath) ), 'solver: %s does not exist'%(solverPath);
        assert( solver is not None), 'none solver is not implemented yet';
        #TODO: add none solver option, if solver is none then init solver using caffe
        self.solverPath = solverPath;
        self.parse_solver();
        self.solver = solver;
        if checkSolver:
            self.check_solver();
        self.logNames = {};
        self.isLogging = False;
        self.prevWts = None;   # weights from the previous logging step (for diffs)
        self.verbose = verbose;
    def parse_solver(self):
        """Parse the solver prototxt; derive experiment name/dir and snapshot path."""
        solverPath = self.solverPath;
        # Experiment name is the prefix of the solver filename before '_'.
        self.expName = os.path.split(solverPath)[-1].split('_')[0];
        self.expDir = os.path.split(solverPath)[0];
        self.solver_param = caffe_pb2.SolverParameter();
        with open(self.solverPath, 'rt') as f:
            pb2.text_format.Merge(f.read(), self.solver_param)
        allLines = [x.strip() for x in open(solverPath,'r')];
        snapPath = self.solver_param.snapshot_prefix;
        snapExp = os.path.split(snapPath)[-1];
        snapPath = os.path.split(snapPath)[0];
        sg_utils.mkdir(snapPath);
        assert( os.path.isdir(snapPath) ), '%s does not exist'%(snapPath);
        self.snapPath = snapPath;
        assert( self.snapPath == os.path.split(self.solver_param.snapshot_prefix)[0] );
    def check_solver(self):
        """Sanity-check that train net and snapshot prefix share the experiment name."""
        #assumes solver has the following first 2 lines
        #train_net: "blah"
        #snapshot: "blah"
        #check if solver points to the correct train proto
        solverPath = self.solverPath;
        expName = os.path.split(solverPath)[-1].split('_')[0];
        allLines = [x.strip() for x in open(solverPath,'r')];
        trainNet = allLines[0].split(':')[1].strip();
        trainNet = os.path.split(trainNet)[-1];
        trainExp = trainNet.split('_')[0].replace('"','');
        assert( expName == trainExp ), 'train proto: %s %s'%(expName, trainExp);
        snapPath = self.solver_param.snapshot_prefix;
        snapExp = os.path.split(snapPath)[-1];
        snapExp = snapExp.split('_')[0];
        assert( expName == snapExp ), 'snapshot name: %s %s'%(expName, snapExp);
        print 'solver paths seem correct'
        print 'will snap to ', snapPath
    def get_time_str(self):
        """Return the current local time as 'YYYY-MM-DD_HH-MM-SS'."""
        ts = time.time();
        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
        return st;
    def init_logging(self):
        """Create timestamped log file names (idempotent) and enable logging."""
        if bool(self.logNames):
            #dict is not empty, so we already have lognames set
            return;
        st = self.get_time_str();
        self.logNames['weight_norms'] = os.path.join(self.expDir,\
            'logs', self.expName + '_' + st + '_weight-norm_.h5')
        self.logNames['weight_activations'] = os.path.join(self.expDir,\
            'logs', self.expName + '_' + st + '_weight-activations_.h5')
        self.logNames['weight_diffs'] = os.path.join(self.expDir,\
            'logs', self.expName + '_' + st + '_weight-diffs_.h5')
        self.logNames['weight_meta'] = os.path.join(self.expDir,\
            'logs', self.expName + '_weight-meta_' + '.pkl')
        self.logNames['loss_tracker'] = os.path.join(self.expDir,\
            'logs', self.expName + '_' + st + '_loss-tracker' + '.pkl')
        self.isLogging = True;
    def model_weight_activations(self):
        """Append the mean activation of every blob to the activations log (h5)."""
        net = self.solver.net;
        layers = net.blobs.keys();
        currActivMeans=[];
        for layer in layers:
            meanActiv = net.blobs[layer].data.mean();
            currActivMeans.append(meanActiv);
        currActivMeans=np.array(currActivMeans);
        # Stack onto any previously logged history before rewriting the file.
        if os.path.isfile(self.logNames['weight_activations']):
            ss = sg_utils.load(self.logNames['weight_activations']);
            currActivMeans = np.dstack( (ss['activmeans'], currActivMeans) );
        fh = h5py.File(self.logNames['weight_activations'],'w');
        fh.create_dataset('activmeans',data=currActivMeans,dtype=np.float32);
        fh.close();
    def model_weight_diffs(self):
        """Append the norm of every parameter's gradient (diff) to the diffs log."""
        net = self.solver.net;
        layers = net.params.keys(); #params is an ordered dict, so keys are ordered
        currwtdiffs = [];
        for layer in layers:
            numObj = len(net.params[layer]);
            means = {};
            medians = {};
            for b in range(numObj):
                wtdiff = net.params[layer][b].diff;
                currwtdiffs.append(np.linalg.norm(wtdiff));
        currwtdiffs = np.array(currwtdiffs);
        if os.path.isfile(self.logNames['weight_diffs']):
            ss = sg_utils.load(self.logNames['weight_diffs']);
            currwtdiffs = np.dstack( (ss['wtdiffs'], currwtdiffs) );
        fh = h5py.File(self.logNames['weight_diffs'],'w')
        fh.create_dataset('wtdiffs',data=currwtdiffs,dtype=np.float32);
        fh.close();
    def model_weight_stats(self):
        """Log mean/norm of every parameter blob, plus the norm of the change
        since the previous logging step when self.prevWts is available."""
        net = self.solver.net;
        outFile = self.logNames['weight_norms'];
        prevWts = self.prevWts;
        layers = net.params.keys(); #params is an ordered dict, so keys are ordered
        currmeans = [];
        currnorms = [];
        currwtdiffnorm = [];
        for layer in layers:
            numObj = len(net.params[layer]);
            means = {};
            medians = {};
            for b in range(numObj):
                wtsshape = net.params[layer][b].data.shape;
                wts = net.params[layer][b].data.astype(np.float32, copy=False);
                if prevWts is not None:
                    wtdiff = prevWts[layer][b] - wts;
                    wtdiffnorm = np.linalg.norm(wtdiff);
                    currwtdiffnorm.append(wtdiffnorm);
                wtsmean = wts.mean();
                wtsnorm = np.linalg.norm(wts);
                currmeans.append(wtsmean);
                currnorms.append(wtsnorm);
        currmeans=np.array(currmeans);
        currnorms=np.array(currnorms);
        currwtdiffnorm=np.array(currwtdiffnorm);
        # Append to the existing history (if any) before rewriting the file.
        if os.path.isfile(outFile):
            ss = sg_utils.load(outFile);
            means = ss['means'];
            norms = ss['norms'];
            means = np.dstack((means, currmeans));
            norms = np.dstack((norms, currnorms));
            if prevWts is not None:
                if 'wtdiffnorms' in ss:
                    wtdiffnorms = ss['wtdiffnorms'];
                    wtdiffnorms = np.dstack((wtdiffnorms, currwtdiffnorm));
                else:
                    wtdiffnorms = currwtdiffnorm;
        else:
            means=currmeans;
            norms=currnorms;
            wtdiffnorms=currwtdiffnorm;
        if self.verbose:
            print '%d writing to weight_norms ... '%(self.solver.iter),
        try:
            fh = h5py.File(outFile,'w'); #overwrite!!
            fh.create_dataset('means',data=means,dtype=np.float32)
            fh.create_dataset('norms',data=norms,dtype=np.float32)
            if prevWts is not None:
                fh.create_dataset('wtdiffnorms',data=wtdiffnorms,dtype=np.float32)
            fh.close();
        except:
            # Drop into an interactive shell for debugging on write failure.
            tb.print_stack();namespace = globals().copy();namespace.update(locals());code.interact(local=namespace)
            try:
                fh.close();
                print 'error when writing to log'
            except:
                print 'error when writing to log'
                pass;
        if self.verbose:
            print 'success';
    def model_track_loss(self):
        """Flush per-sample losses/labels/preds/probs accumulated by the
        loss-tracking layer into the loss_tracker pickle (appending)."""
        if not self.track_indiv_loss:
            return;
        from python_layers.loss_tracking_layer import loss_tracker_dict;
        indiv_losses = np.array(loss_tracker_dict['indiv_losses'])
        indiv_labs = np.array(loss_tracker_dict['indiv_labs']).astype(np.int32)
        indiv_preds = np.array(loss_tracker_dict['indiv_preds']).astype(np.int32)
        indiv_probs = np.array(loss_tracker_dict['indiv_probs']).astype(np.float32)
        if os.path.exists(self.logNames['loss_tracker']):
            dt = sg_utils.load(self.logNames['loss_tracker']);
            indiv_losses = np.concatenate((dt['indiv_losses'], indiv_losses));
            indiv_labs = np.concatenate((dt['indiv_labs'], indiv_labs));
            indiv_preds = np.concatenate((dt['indiv_preds'], indiv_preds));
            indiv_probs = np.concatenate((dt['indiv_probs'], indiv_probs));
        try:
            sg_utils.save(self.logNames['loss_tracker'], [indiv_losses, indiv_labs, indiv_preds, indiv_probs],\
                ['indiv_losses', 'indiv_labs', 'indiv_preds', 'indiv_probs'], overwrite=True)
            # Reset the in-memory accumulators after a successful save.
            loss_tracker_dict['indiv_losses'] = [];
            loss_tracker_dict['indiv_labs'] = [];
            loss_tracker_dict['indiv_probs'] = [];
            loss_tracker_dict['indiv_preds'] = [];
            print 'saved losses'
        except:
            print 'error with loss tracker'
    def get_model_weights(self):
        """Return a deep copy of all parameter blobs as float32 arrays."""
        net = self.solver.net;
        srcWeights = {};
        for layer in net.params:
            srcWeights[layer] = [];
            for b in range(len(net.params[layer])):
                srcWeights[layer].append( net.params[layer][b].data.astype(dtype=np.float32, copy=True));
        return srcWeights;
    def snapshot(self):
        """Write a caffemodel snapshot (iter offset by self.offset_iter);
        returns the filename."""
        net = self.solver.net
        if not os.path.exists(self.snapPath):
            sg_utils.mkdir(self.snapPath);
        filename = self.expName + '_snapshot_' + 'iter_{:d}'.format(self.offset_iter + self.solver.iter) + '.caffemodel';
        filename = os.path.join(self.snapPath, filename);
        net.save(str(filename))
        print 'Wrote snapshot to: {:s}'.format(filename)
        return filename;
    def train_model(self, max_iters, log_iter, snapshot_iter, track_indiv_loss=False, offset_iter=0):
        """Run solver.step in log_iter chunks up to max_iters, logging weight
        stats every log_iter and snapshotting every snapshot_iter."""
        last_snapshot_iter = -1;
        self.offset_iter = offset_iter;
        assert snapshot_iter % log_iter == 0, 'logging and snapshotting must be multiples';
        if self.isLogging:
            # Record the layer names/param shapes once so logs can be decoded.
            layers = self.solver.net.params.keys(); #params is an ordered dict, so keys are ordered
            layer_param_shapes = {};
            for layer in self.solver.net.params:
                layer_param_shapes[layer] = [];
                for b in range(len(self.solver.net.params[layer])):
                    layer_param_shapes[layer].append(self.solver.net.params[layer][b].data.shape)
            sg_utils.save(self.logNames['weight_meta'], [layers, layer_param_shapes], ['layer_names', 'layer_param_shapes'], overwrite=True);
        #setup losstracker
        if track_indiv_loss:
            self.track_indiv_loss = track_indiv_loss;
            check_loss_tracker = True;
        else:
            check_loss_tracker = False;
        #try snapshotting
        tmp = self.offset_iter;
        self.offset_iter = -1;
        print 'trying snapshot'
        filename = self.snapshot();
        # os.remove(filename);
        self.offset_iter = tmp;
        print 'snapshotting worked: %s'%(filename);
        while self.solver.iter < max_iters:
            if self.isLogging and \
                (self.solver.iter % log_iter == 0 or self.solver.iter == 0):
                self.model_weight_stats()
                self.model_weight_activations();
                self.model_weight_diffs();
                self.prevWts = self.get_model_weights();
            self.solver.step(log_iter)
            if self.solver.iter % snapshot_iter == 0 or check_loss_tracker:
                last_snapshot_iter = self.solver.iter
                self.snapshot()
                self.model_track_loss()
                check_loss_tracker = False;
        if last_snapshot_iter != self.solver.iter:
            self.snapshot()
|
988,548 | 2478c48acfe55bf777b0cf4f26ce307df5532d37 | '''
Created on 2021-05-31
@author: Bartosz Walewski
'''
import requests
import json
from PyQt5 import QtGui
import data_operations
# RapidAPI credentials/host for weatherapi-com; sent with every request below.
headers = {
    'x-rapidapi-key': "ENTER_KEY_HERE",
    'x-rapidapi-host': "weatherapi-com.p.rapidapi.com"
}
def get_history_weather(date):
    """Fetch historical weather for *date* for the city cached on disk.

    Returns the decoded JSON response, or None when the request fails.
    File errors reading current_city.json still propagate (unchanged).
    """
    url = "https://weatherapi-com.p.rapidapi.com/history.json"
    # City persisted by the last successful get_weather() call.
    with open("current_city.json", "r") as read:
        city = json.load(read)
    querystring = {"q":city, "dt":date, "lang":"en"}
    try:
        response = requests.request("GET", url, headers=headers, params=querystring)
        return response.json()
    except Exception:
        # A bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # Exception keeps the best-effort None result for real failures only.
        return None
def get_weather(query = "Opole"):
    """Fetch a 3-day forecast for *query* and cache it to disk.

    Side effects: writes current_weather.json (raw forecast) and
    current_city.json (normalized query) for later history lookups.
    Returns the decoded JSON, or None on request/IO failure.
    """
    query = data_operations.query_normalize(query)
    url = "https://weatherapi-com.p.rapidapi.com/forecast.json"
    querystring = {"q": query, "days": "3"}
    try:
        response = requests.request("GET", url, headers=headers, params=querystring)
        with open("current_weather.json", mode='w+') as put:
            put.write(json.dumps(response.json()))
        with open("current_city.json", mode='w+') as put:
            put.write(json.dumps(query))
        return response.json()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        return None
def search_localization(query):
    """Search the weather API for locations matching *query*.

    Returns the decoded JSON list of matches, or None when the request fails.
    """
    query = data_operations.query_normalize(query)
    url = "https://weatherapi-com.p.rapidapi.com/search.json"
    querystring = {"q": query}
    try:
        response = requests.request("GET", url, headers=headers, params=querystring)
        return response.json()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        return None
def get_icon(link):
    """Download the image at *link* and return it as a QImage, or None on failure."""
    try:
        image = QtGui.QImage()
        image.loadFromData(requests.get(link).content)
        return image
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
        return None
988,549 | bc4dd434af9d6b8eb6855a664379870d0a9bbb98 | # Apparently cattle needs python 2.7.8 (2.7.11 doesn't work)
import os
import time
import argparse
import json
from wranglib.cattle import init as initcattle
from wranglib.cattle import report
from AdAnalysis import AdAnalysis
def parse_args():
    """Define and parse the CLI arguments for pushing collected data to Wrangler."""
    cli = argparse.ArgumentParser(
        description='Push data in the specified directory to Wrangler')
    cli.add_argument('data_dir', type=str,
                     help="The directory containing the data to be pushed to Wrangler.")
    cli.add_argument('num_samples_in_set', type=int,
                     help="The number of data samples (page loads) taken for each page.")
    cli.add_argument('log_filename', type=str,
                     help="The filename of the log associated with the data collection.")
    return cli.parse_args()
def wrapUp(start_time, succeeded_list):
    """Persist the list of successfully reported files and print elapsed time."""
    with open('wrangler_log-4.txt', 'w') as log:
        json.dump(succeeded_list, log)
        log.write('\n')
    time_elapsed = time.time() - start_time
    print("time_elapsed: "+str(time_elapsed))
if __name__ == "__main__":
    # Get arguments
    start_time = time.time()
    args = parse_args()
    data_dir = args.data_dir
    num_samples_in_set = args.num_samples_in_set
    log_fname = args.log_filename
    # Open the connection to the Wrangler/cattle backend.
    initcattle(project="web-ads", workingdir=".", port=8088, use_ssl=True)
    ad_compare_dict = {}
    succeeded_list = []
    raw_data_dir = os.path.join(data_dir, "raw")
    summaries_dir = os.path.join(data_dir, "summaries")
    raw_data_file_list = os.listdir(raw_data_dir)
    summaries_file_list = os.listdir(summaries_dir)
    aa = AdAnalysis(summaries_file_list)
    # loop through summary files and build dicts
    for summary_file in summaries_file_list:
        data_dict = {}
        # Each ad-blocking summary anchors a group of four related files.
        if aa.isBlocking(summary_file):
            blocking_summary_file = summary_file
            # map summary files with ad-blocker to the 4 files in that group (non-blocking summary, blocking summary, non-blocking raw, blocking raw)
            nonblocking_summary_file = aa.getAdFileMatch(blocking_summary_file, summaries_file_list)
            ad_compare_dict[blocking_summary_file] = {'blocking_summary': blocking_summary_file, 'nonblocking_summary': nonblocking_summary_file}
            blocking_raw_file = aa.getRawFromSummary(blocking_summary_file, raw_data_file_list)
            nonblocking_raw_file = aa.getAdFileMatch(blocking_raw_file, raw_data_file_list)
            ad_compare_dict[blocking_summary_file]['blocking_raw_file'] = blocking_raw_file
            ad_compare_dict[blocking_summary_file]['nonblocking_raw_file'] = nonblocking_raw_file
            # blocking summary file
            bsf_path = os.path.join(summaries_dir, blocking_summary_file)
            bsf = open(bsf_path, 'r')
            bs_obj = json.load(bsf)
            data_dict['blocking_summary'] = bs_obj
            bsf.close()
            # non-blocking summary file
            nsf_path = os.path.join(summaries_dir, nonblocking_summary_file)
            nsf = open(nsf_path, 'r')
            ns_obj = json.load(nsf)
            data_dict['nonblocking_summary'] = ns_obj
            nsf.close()
            # meta data
            data_dict['meta_data'] = {}
            data_dict['meta_data']['try_num'] = 4
            data_dict['meta_data']['time_submitted'] = time.time()
            data_dict['meta_data']['sample_num'] = aa.getSampleNum(blocking_summary_file)
            data_dict['meta_data']['num_samples_in_set'] = num_samples_in_set
            data_dict['meta_data']['log_fname'] = log_fname
            data_dict['meta_data']['location'] = aa.getLocation(blocking_summary_file)
            data_dict['meta_data']['device'] = aa.getDevice(blocking_summary_file)
            data_dict['meta_data']['networkType'] = aa.getNetworkType(blocking_summary_file)
            data_dict['meta_data']['hostname'] = aa.getHostname(blocking_summary_file)
            data_dict['meta_data']['pageURL'] = bs_obj['pageURL']
            data_dict['meta_data']['page_categories_and_ranks'] = bs_obj['categories_and_ranks']
            data_dict['meta_data']['cutoffTime'] = bs_obj['cutoffTime']
            data_dict['meta_data']['debugPort'] = bs_obj['debugPort']
            data_dict['meta_data']['orig_directory_name'] = data_dir
            # blocking raw file (one JSON event per line)
            brf_path = os.path.join(raw_data_dir, blocking_raw_file)
            brf = open(brf_path, 'r')
            event_list = [json.loads(line) for line in brf.readlines()]
            data_dict['blocking_raw'] = {}
            data_dict['blocking_raw']['event_list'] = event_list
            brf.close()
            # nonblocking raw file
            nrf_path = os.path.join(raw_data_dir, nonblocking_raw_file)
            nrf = open(nrf_path, 'r')
            event_list = [json.loads(line) for line in nrf.readlines()]
            data_dict['nonblocking_raw'] = {}
            data_dict['nonblocking_raw']['event_list'] = event_list
            nrf.close()
            resource = "page-loads" # this is actually the dataset
            isAsync = False
            project = "/web-ads"
            # NOTE(review): `async` became a reserved keyword in Python 3.7,
            # so this call only parses on Python <= 3.6 (the cattle API's
            # historical signature) — confirm the deployment interpreter.
            response = report(resource, data_dict, async=isAsync, project=project)
            if response[0] == False:
                # Retry the report once before recording the failure and
                # flushing progress to disk.
                print("report failed.")
                print(response)
                print("Trying a second time")
                response = report(resource, data_dict, async=isAsync, project=project)
                if response[0] == False:
                    print("report failed again.")
                    print(response)
                    print(data_dict['blocking_summary']['rawDataFile'])
                    wrapUp(start_time, succeeded_list)
            else:
                succeeded_list.append(blocking_summary_file)
    wrapUp(start_time, succeeded_list)
|
988,550 | 0bd51f78823c5a38e232dfbe3a4e2e8207296e6c | from bs4 import BeautifulSoup
import requests
import json
import config
class DataExtraction:
    """Loads a Web-Scraper-style sitemap, fetches its start URL, and derives
    item-access strings for each selector entry (stored in config.output_dict).
    """

    # Bare HTML tags recognized at the head of a selector expression.
    TEXT_TAGS = ('p', 'h1', 'h2', 'a', 'li', 'ul', 'ol', 'span')

    def html_tags(self, element_arr):
        """Return True when the first token of *element_arr* is a bare HTML tag.

        Membership test on a tuple replaces the original eight-clause
        `==` / `or` chain; result values are unchanged (True/False).
        """
        return element_arr[0] in self.TEXT_TAGS

    def get_start_URL(self, file):
        """Load the sitemap JSON from *file* into config and record its first
        start URL."""
        with open(file) as f:
            config.sitemap = json.load(f)  # loads sitemap variable with json
        config.startUrl = config.sitemap['startUrl'][0]

    def get_soup(self, file):
        """Fetch the sitemap's start URL and parse it into config.soup."""
        DataExtraction.get_start_URL(DataExtraction, file)
        config.source = requests.get(config.startUrl).text
        config.soup = BeautifulSoup(config.source, 'lxml')

    def create_extraction_elements(self, file):
        """Convert each sitemap selector into an 'item.<class>...' access
        string, filling config.firstels and config.output_dict.

        Selector strings are split on ', ' (alternatives), then on spaces and
        '.' into tag/class tokens; a leading bare HTML tag is dropped, the
        first class goes to config.firstels, and the rest are rejoined as
        'item.cls1.cls2...'.
        """
        DataExtraction.get_soup(DataExtraction, file)
        for selector in config.sitemap['selectors']:
            # Only non-root selectors are processed.
            if(selector['parentSelectors'][0] != '_root'):
                for selector2 in config.sitemap['selectors']:
                    if(selector['parentSelectors'][0] == selector2['id']):
                        if(selector2['type'] == 'SelectorLink'):
                            print(1)
                selector_arr = [selector['selector']]
                selector_arr = [char for element in selector_arr for char in element.split(', ')]
                for element in selector_arr:
                    element_arr = [element]
                    element_arr = [x for y in element_arr for x in y.split(' ')]
                    element_arr = [x for y in element_arr for x in y.split('.')]
                    # Drop empty tokens produced by leading '.' in selectors.
                    for string in element_arr:
                        if(string == ''):
                            element_arr.pop(element_arr.index(string))
                    if(len(element_arr) == 1):
                        config.firstels[selector['type']].append(element_arr[0])
                        element_arr.pop(0)
                    else:
                        if(DataExtraction.html_tags(DataExtraction, element_arr)):
                            element_arr.pop(0)
                        config.firstels[selector['type']].append(element_arr[0])
                        element_arr.pop(0)
                        string_arr = ['item']
                        while(len(element_arr) > 0):
                            string_arr.append('.')
                            string_arr.append(element_arr[0])
                            element_arr.pop(0)
                        string = ''.join(string_arr)
                        config.output_dict[selector['type']][selector['id']].append(string)
|
988,551 | fce90b25227f407902728bbc7f1d7fc6ff10aceb | import paramiko
from os import path
from .command import SSHCommand, TEST_FAIL, TEST_SUCCESS
class SSHChannelException(Exception):
    """Raised by SSHChannel for connection/authentication failures (wraps paramiko errors)."""
    pass
class SSHChannel:
    """Thin wrapper around paramiko.SSHClient that runs SSHCommand objects,
    optionally elevated with sudo, and streams their output to a listener."""

    def __init__(self, host: str, port: int, username: str, password: str = None, keyfile: str = None):
        """Validate arguments and prepare (but do not open) the SSH client.

        Exactly one of `password` / `keyfile` must be supplied; keyfile wins
        when both are given.
        """
        # error check
        if not isinstance(port, int):
            raise ValueError('Port must be an integer')
        if password is None and keyfile is None:
            raise ValueError('Supply password or keyfile path')
        self.__keyauth = False
        if keyfile is not None:
            self.__keyauth = True
            # NOTE(review): this only checks that the *directory* of the key
            # exists, not the key file itself — confirm that is intended.
            if not path.exists(path.dirname(keyfile)):
                raise ValueError(f'Key path is not a valid path! Got {keyfile}')
        self.host = host
        self.port = port
        self.username = username
        self.password = password # save password for sudo
        self.auth = keyfile or password
        self.client = paramiko.SSHClient()
        # Auto-accept unknown host keys (convenient, but skips host verification).
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    def connect(self):
        """Open the SSH connection, translating paramiko/timeout errors into
        SSHChannelException."""
        try:
            key = None
            if self.__keyauth:
                key = paramiko.RSAKey.from_private_key_file(self.auth)
            self.client.connect(self.host, self.port, self.username, password=self.auth if not self.__keyauth else None,
                                pkey=key, timeout=5)
        except TimeoutError as e:
            raise SSHChannelException(f'Timed out: {e.__str__()}')
        except paramiko.ssh_exception.AuthenticationException as e:
            raise SSHChannelException(f'Auth problem: {e.__str__()}')
        except FileNotFoundError:
            raise SSHChannelException(f'Key file not found.')
        except paramiko.ssh_exception.SSHException as e:
            raise SSHChannelException(f'SSH Error: {e.__str__()}')

    def __execute_command(self, cmd: str, elevated=False, stdin=None):
        """Run `cmd` remotely; with elevated=True wrap it in `sudo -S bash -c`
        and feed the stored password on stdin. Returns (stdin, stdout, stderr)
        channel files; stdin is closed if anything was written to it."""
        text = cmd
        if elevated:
            # -S makes sudo read the password from stdin.
            text = f"sudo -S bash -c \' {cmd} \'"
        sin, sout, serr = self.client.exec_command(text)
        used = False
        if elevated:
            sin.write(self.password + '\n')
            sin.flush()
            used = True
        if stdin is not None:
            for line in stdin:
                sin.write(line + '\n')
                sin.flush()
            used = True
        if used:
            sin.close()
        return sin, sout, serr

    def generate_output(self, listener, sout, serr):
        """Pump remote stdout/stderr line by line into `listener` until the
        command exits, then drain one final line from each stream."""
        while not sout.channel.exit_status_ready() or not serr.channel.exit_status_ready():
            if (line := sout.readline().strip()) != '':
                listener(f'OUT: {line}')
            if (line := serr.readline().strip()) != '':
                listener(f'ERR: {line}')
        # Final drain after exit status is ready (may still hold buffered output).
        if (line := sout.readline().strip()) != '':
            listener(f'OUT: {line}')
        if (line := serr.readline().strip()) != '':
            listener(f'ERR: {line}')

    def rollback_command(self, cmd: SSHCommand, listener=None) -> bool:
        """Run cmd.rollback (if any); True on success or when no rollback exists."""
        if cmd.rollback is None:
            return True
        sin, sout, serr = self.__execute_command(cmd.rollback, elevated=cmd.elevated, stdin=cmd.stdin)
        if listener is not None:
            self.generate_output(listener, sout, serr)
        if sout.channel.recv_exit_status() != 0:
            return False
        return True

    def execute_command(self, cmd: SSHCommand, listener=None) -> bool:
        """Run cmd.cmd; True iff the remote exit status is 0."""
        sin, sout, serr = self.__execute_command(cmd.cmd, elevated=cmd.elevated, stdin=cmd.stdin)
        if listener is not None:
            self.generate_output(listener, sout, serr)
        if sout.channel.recv_exit_status() != 0:
            return False
        return True

    def check_command(self, cmd: SSHCommand) -> bool:
        """Run cmd.test and interpret its output via the TEST_SUCCESS /
        TEST_FAIL markers; False when no test is defined."""
        if cmd.test is None:
            return False
        sin, sout, serr = self.__execute_command(cmd.test, elevated=cmd.elevated, stdin=cmd.stdin)
        data = sout.read().decode()
        if TEST_SUCCESS in data:
            return True
        elif TEST_FAIL in data:
            return False
        else:
            raise ValueError(f'Test for command {cmd} failed to return an expected value.')
988,552 | 3b25174f6323b4e960d3daa2c616523140dc615c | # Generated by Django 2.2.4 on 2019-08-20 13:41
from django.db import migrations, models
def nullEmptyNonces(apps, schema_editor):
    """Data-migration helper: convert empty-string Person.nonce values to NULL
    so the unique constraint added afterwards cannot collide on ''."""
    Person = apps.get_model('issuer', 'Person')
    empty_nonce_rows = Person.objects.filter(nonce='')
    for person in empty_nonce_rows:
        person.nonce = None
        person.save()
class Migration(migrations.Migration):
    """Make Person.nonce nullable and unique in three steps: allow NULL,
    convert existing empty strings to NULL (so duplicates of '' cannot
    violate uniqueness), then add the unique constraint."""

    dependencies = [
        ('issuer', '0006_auto_20190815_1856'),
    ]

    operations = [
        # Step 1: allow NULL so the data migration below has a target value.
        migrations.AlterField(
            model_name='person',
            name='nonce',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        # Step 2: '' -> NULL for all existing rows.
        migrations.RunPython(nullEmptyNonces),
        # Step 3: enforce uniqueness (NULLs are exempt from UNIQUE in SQL).
        migrations.AlterField(
            model_name='person',
            name='nonce',
            field=models.CharField(blank=True, max_length=50, null=True, unique=True)
        )
    ]
|
988,553 | f4d54db839a6ea070d261f8937649f66a4a05a34 | n = int(input())
# "Weird" when n is odd, or when n is an even number in the inclusive range
# [6, 20]; the remaining cases (even 2-5, even > 20) are "Not Weird".
if n % 2 == 1 or 6 <= n <= 20:
    print("Weird")
else:
    print("Not Weird")
"""
Python If-Else - Hacker Rank Solution
Python If-Else - Hacker Rank Solution
Task
Given an integer, n, perform the following conditional actions:
If n is odd, print Weird
If n is even and in the inclusive range of 2 to 5, print Not Weird
If n is even and in the inclusive range of 6 to 20, print Weird
If n is even and greater than 20, print Not Weird
Input Format
A single line containing a positive integer, n .
Constraints
1<= n <= 100
Output Format
Print Weird if the number is weird; otherwise, print Not Weird.
Sample Input 0
3
Sample Output 0
Weird
Sample Input 1
24
Sample Output 1
Not Weird
Explanation
Sample Case 0:n=3
n is odd and odd numbers are weird, so we print Weird.
Sample Case 1:n=24
n>20 and n is even, so it isn't weird. Thus, we print Not Weird.
""" |
988,554 | 7bd9e39a5349a5f7ff9793c90e38ce0bafaa5799 | # 1. Crie uma coleção que suportará números inteiros. Após isso, adicione a esta lista cinco valores inteiros.
# Em sequência, exiba estes valores. Por fim, ordene os valores e remove o elemento da posição 3. Exiba
# novamente a lista.
def exercicio1():
    """Exercise 1: create a list of five ints, print it, then sort it, remove
    the element at position 3 (index 2) and print the resulting list."""
    lista = [2, 1, 3, 4, 0]
    print(lista)
    lista_ordenada = sorted(lista)
    # BUG FIX: list.pop() returns the *removed element*, not the list; the
    # exercise asks to display the list again, so pop first, then print it.
    lista_ordenada.pop(2)
    print(lista_ordenada)
|
988,555 | 4991767f8010193839daa6959c559740d0df72e4 | pendingMessagesMap = dict() # holds an array containing all pending messages by tuples: (client id source, msg)
# Pending messages keyed by *destination* client id; each value is a list of
# (source client id, message content) tuples. (Re-initialised here; both
# definitions run at import time, so the queue always starts empty.)
pendingMessagesMap = dict()
usersMap = dict()  # holds users and their public keys


def register_user(client_id, public_key):
    """Register (or overwrite) the public key for `client_id`."""
    print("Registering user!")
    usersMap[client_id] = public_key


def get_public_key(client_id):
    """Return the public key registered for `client_id`, or None if unknown."""
    print("fetch public key for user!")
    if usersMap.get(client_id) is not None:
        return usersMap[client_id]


def pull_messages(client_id):
    """Return (without clearing) the messages pending for `client_id`,
    creating an empty queue on first access."""
    print("pulling messages!")
    if pendingMessagesMap.get(client_id) is None:
        pendingMessagesMap[client_id] = []
    return pendingMessagesMap[client_id]


def send_message(src_id, dst_id, content):
    """Queue `content` from `src_id` for later delivery to `dst_id`.

    BUG FIX: messages are stored under the *destination* id so that
    pull_messages(dst_id) returns them; the original keyed the queue by the
    sender, so recipients never received anything.
    """
    if pendingMessagesMap.get(dst_id) is None:
        pendingMessagesMap[dst_id] = []
    pendingMessagesMap[dst_id].append((src_id, content))
    print(f"stored a msg from: {src_id} to {dst_id}")
|
988,556 | 385fb75466f922d4ed4305437126a68ecf4faeb0 | __author__ = 'ckomurlu'
import utils.properties
import numpy as np
import types
import datetime
import os
def standard_error_depr(sample):
    """Deprecated: standard error of the mean over *all* elements of `sample`.

    Prefer standard_error(), which works along a chosen axis.
    """
    values = sample.flatten()
    n = values.shape[0]
    return values.std() / n ** .5
def standard_error(sample_array, axis=0):
    """Standard error of the mean along `axis`: std / sqrt(n)."""
    n = sample_array.shape[axis]
    return np.std(sample_array, axis=axis) / n ** .5
def print_experiment_parameters_to_file():
    """Dump every public setting of utils.properties to a timestamped file.

    Dunder names and imported modules are skipped; the 'ts' timestamp entry
    additionally gets a human-readable date appended.
    """
    file_name = utils.properties.outputDirPath + 'experimentParameters' + utils.properties.timeStamp + '.txt'
    if not os.path.exists(utils.properties.outputDirPath):
        os.makedirs(utils.properties.outputDirPath)
    # NOTE(review): opened in binary mode but str is written below — that only
    # works on Python 2; confirm target version (Python 3 needs mode 'w').
    # The handle is also never closed explicitly (relies on GC / interpreter exit).
    f = open(file_name,'wb')
    # f.write('mh_sample')
    # for key in utils.properties.__dict__:
    # print key, utils.properties.__dict__[key]
    properties_dict = utils.properties.__dict__
    for item in dir(utils.properties):
        if not item.startswith('__') and \
                not isinstance(properties_dict[item], types.ModuleType) and \
                item != 'ts':
            f.write(item + ': ' + str(properties_dict[item]) + '\n')
        elif item == 'ts':
            # 'ts' is a unix timestamp; also write it formatted for humans.
            f.write(item + ': ' + str(properties_dict[item]) + ' ' +
                    datetime.datetime.fromtimestamp(properties_dict[item]).strftime('%H:%M %m/%d/%Y') + '\n')
988,557 | 3cadac75e2399bd9fc6b694457e65cf5c654da2e | import os, sys
sys.path.append('/home/ruihan/coiltraine/')
import yaml
import torch
import torch.nn as nn
from network.models.coil_icra import CoILICRA
from coilutils import AttributeDict
# from attribute_dict import AttributeDict
# # Sample from PyTorch docs: https://pytorch.org/tutorials/beginner/saving_loading_models.html#warmstarting-model-using-parameters-from-a-different-model
# # save
# torch.save(modelA.state_dict(), PATH)
# # load
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# modelB = TheModelBClass(*args, **kwargs)
# modelB.load_state_dict(torch.load(PATH, map_location = device), strict=False)
# # Sample load a pretrained model
# load part of the pre trained model
# save
# torch.save(pre_model.state_dict(), PATH)
# # load
# pretrained_dict = torch.load(PATH)
# model = TheModelClass(*args, **kwargs)
# model_dict = model.state_dict()
# # 1. filter out unnecessary keys
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# # 2. overwrite entries in the existing state dict
# model_dict.update(pretrained_dict)
# # 3. load the new state dict
# model.load_state_dict(model_dict)
# Global torch defaults for this script: float32 CUDA tensors everywhere.
torch.set_default_dtype(torch.float32)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# read yaml file
yaml_filename = 'coil_configs.yaml'
with open(yaml_filename, 'r') as f:
    # TODO: combine all know configuraitons into one file and load it into a dict
    yaml_file = yaml.load(f, Loader=yaml.FullLoader)
# Attribute-style access (cfg.KEY) to the parsed configuration dict.
yaml_cfg = AttributeDict(yaml_file)
# # load checkpoint dict
# checkpoint = torch.load(os.path.join('/home/ruihan/scenario_runner/models/CoIL/'+str(180000)+'.pth'))
# # load model
# model = CoILModel(yaml_cfg.MODEL_TYPE, yaml_cfg.MODEL_CONFIGURATION)
# model.cuda()
# checkpoint_iteration = checkpoint['iteration']
# print("Pretrained CoIL loaded ", checkpoint_iteration)
# model.load_state_dict(checkpoint['state_dict'])
# model.eval()
# torch.save(model.state_dict(), '/home/ruihan/scenario_runner/models/CoIL/CoIL_180000.pth' )
# Build an uninitialised CoIL model, then print one branch weight tensor
# before and after loading the pretrained checkpoint to verify the copy.
print("load empty CoIlModel")
modelB = CoILICRA(yaml_cfg.MODEL_CONFIGURATION)
for param_tensor in modelB.state_dict():
    print(param_tensor, "\t", modelB.state_dict()[param_tensor].size())
param_tensor = 'branches.branched_modules.0.layers.0.0.weight'
print(param_tensor, "\t", modelB.state_dict()[param_tensor])
print("try to copy pretrained model to B")
modelB.load_state_dict(torch.load('models/CoIL/CoIL_180000.pth'))
print(param_tensor, "\t", modelB.state_dict()[param_tensor])
modelB.eval()
# TODO: The structure is specified in coil_icra.
# check which module you want to reuse and create your own.
# then load the state_dict with `strict=False`
class FC_coil_cut(nn.Module):
    """Fully-connected head copied from CoIL, adapted for an MLP controller.

    Maps a latent embedding of size nx through one hidden layer of size nh to
    ny control outputs, each squashed into [0, 1] by a sigmoid.
    """

    def __init__(self, nx=106, ny=2, nh=53, p=0.2):
        """
        original coil (512-256-3)
        input: latent_embeddings dim_z = 106
        one hidden layer: 53
        output: dim_u = 2
        p: probability for dropout
        """
        # BUG FIX: the original called super(FC_coil, self).__init__() — a
        # leftover name from the class this was copied from — which raised
        # NameError on instantiation.
        super(FC_coil_cut, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(nx, nh),
            # NOTE(review): Dropout2d on 2-D (batch, features) activations;
            # plain nn.Dropout is the usual choice — confirm intent.
            nn.Dropout2d(p=p),
            nn.ReLU(),
            nn.Linear(nh, ny),
            nn.Dropout2d(p=p)
        )
        self.sig = nn.Sigmoid()
        self.tanh = nn.Tanh()

    def forward(self, x):
        # Flatten everything but the batch dimension.
        x = x.view(x.size(0), -1)
        x = self.layers(x)
        # Per-channel variant kept for reference (throttle=sig, steer=tanh, brake=sig):
        # throttle = self.sig(x[:, 0]).view(x.shape[0],-1)
        # steer = self.tanh(x[:, 1]).view(x.shape[0],-1)
        # brake = self.sig(x[:, 2]).view(x.shape[0],-1)
        # return torch.cat([throttle, steer, brake], dim=1)
        return self.sig(x)
988,558 | 0758033cf6f6d29b6423737d5784b53b70f2c8ab | #!/usr/bin/env python
import os
from setuptools import setup
from setuptools import find_packages
def read(*rnames):
    """Return the text of a file located relative to this setup script.

    Uses a context manager so the file handle is closed deterministically
    (the original leaked the handle returned by open()).
    """
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:
        return f.read()
# Package metadata for the Talkmore SMS distribution.
setup(
    # NOTE(review): distribution names conventionally avoid spaces
    # (e.g. "talkmore-sms") — confirm before publishing.
    name = 'Talkmore SMS',
    version = "0.0.1",
    description = "Talkmore SMS library for Python 3",
    author = "Kristoffer Dalby",
    author_email = "kradalby@kradalby.no",
    url = "https://github.com/kradalby/talkmoresms",
    keywords = ["sms", 'talkmore'],
    classifiers = [
        "Programming Language :: Python :: 3",
        "Development Status :: 4 - Beta",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    # README.md is used verbatim as the long description.
    long_description='\n\n'.join([read('README.md')]),
    test_suite = 'tests',
    install_requires=['requests'],
    packages=['talkmoresms'],
)
|
988,559 | a00a9e1850a530efb7e73d3e3d3743dfc5d8d532 | """Version module."""
# Single source of truth for the package version.
__version__ = '1.2.3'
|
988,560 | eecc78e7136a8f505a141a01081bacf29253df96 | class Solution:
def reverse(self, x: int) -> int: #todo reasearch functions with self
# print("\n")
# #
# # x = 1234
# # print(str(x))
# Code #1
# string_name = "3245"
# # Iterate over the string
# for element in string_name:
# print(element, end=' ')
# #print("\n") |
988,561 | 782e0f847a2eb86756e68396afcc19160ae1cc81 | # Discrete time observation inference
# packages
import numpy as np
from scipy.stats import skellam
from scipy.stats import poisson
from scipy.stats import gaussian_kde
import matplotlib.pyplot as plt
from random import choices
from scipy.integrate import simps
import seaborn as sns
from matplotlib.gridspec import GridSpec
#%% MCMC help functions
def shuffle_reactions(pop0,pop1,S2,r,w):
    """Propose a new reaction-count vector consistent with pop1 - pop0.

    Perturbs the A->BB count r[2] with a symmetric Skellam(w, w) step and
    solves for the remaining counts (the corresponding stoichiometric
    sub-matrix is the identity). Rejection-samples until all counts are
    non-negative. Returns an int array [r_AA, r_AB, r_BB].
    """
    r1 = r[0:2]
    r2_o = r[2]
    # pertubate r2 and update r1
    r2 = r2_o + skellam.rvs(w,w)
    #r1 = np.matmul(np.linalg.inv(S1), pop1-pop0-S2*r2) # not needed, S1 is unity matrix
    r1 = pop1-pop0-S2*r2
    if np.any(r1<0) or r2<0:
        while True:
            # pertubate r2 and update r1
            while True:
                # Draw 5 candidates at once and take the first non-negative one.
                r2 = r2_o + skellam.rvs(w,w,size=5)
                if np.any(r2>=0):
                    r2 = r2[r2>=0][0]
                    break
            r1 = pop1-pop0-S2*r2
            if np.all(r1>=0):
                break
    return np.array(np.append(r1,r2),dtype=int)
def get_reactions(pop0, pop1, S2):
    """Build an initial reaction-count vector explaining the observed change
    pop1 - pop0, then randomise it with shuffle_reactions."""
    da, db = pop1 - pop0
    if da > 0:  # net gain of A cells (db < 0 is not possible)
        guess = np.array([da, db, 0])
    else:
        # Net loss of A: attribute -da events to A->BB, rest to A->AB.
        guess = np.array([0, db + 2 * da, -da])
    return shuffle_reactions(pop0, pop1, S2, guess, 5)
def get_population_list(reacts, pop0):
    """Reconstruct the (N_A, N_B) population trajectory implied by a reaction
    sequence, starting from pop0; row 0 of the result is pop0 itself."""
    # Population change for each reaction type.
    deltas = {0: np.array([1, 0]),    # A -> AA
              1: np.array([0, 1]),    # A -> AB
              2: np.array([-1, 2])}   # A -> BB
    states = np.zeros((len(reacts), 2), dtype=int)
    current = pop0
    for idx in range(len(reacts)):
        rtype = reacts[idx, 0]
        if rtype in deltas:
            current = current + deltas[rtype]
        states[idx, :] = current
    return np.vstack([pop0, states])
def sample_rates(prior,pop0,reacts,dT,SMPL_LIM,SMPL_DENS):
    """Gibbs step: draw one rate per reaction type from its posterior given
    the current sample path.

    The likelihood of rate c is c**k * exp(-c*I) with k the event count and
    I = int_0^dT N_A(t) dt; it is evaluated on the grid
    arange(0, SMPL_LIM, SMPL_DENS), multiplied by `prior` (same grid),
    normalised with Simpson's rule and sampled with random.choices.
    NOTE(review): scipy.integrate.simps was removed in SciPy 1.14 — use
    scipy.integrate.simpson on modern SciPy.
    """
    # We assume a flat prior, thats why Posterior = normalised Lhood
    pop = get_population_list(reacts,pop0) # get population trajectory data
    par_new = []
    for i in range(0,3): # loop over parameter
        r_num = np.sum(reacts[:,0]==i) # number of reactions
        # if reaction happened in interval
        if len(reacts)>0:
            integr = pop[0,0]*reacts[0,1] # integral: int[0,T] g(N) dt, where hazardf = c*g(N)
            for j in range(1,len(reacts[:,1])):
                integr += pop[j,0]*(reacts[j,1]-reacts[j-1,1])
            integr += pop[-1,0]*(dT-reacts[-1,1])
        # if nothing happened -> Lhood_P = Lhood_CD
        else:
            integr = pop[0,0]*dT
        lhd = np.zeros(0)
        for c in np.arange(0,SMPL_LIM,SMPL_DENS):
            lhd = np.append(lhd,(c**r_num)*np.exp(-c*integr))
        post = lhd*prior/simps(lhd*prior, dx=SMPL_DENS) # normalise likelihood
        rate = choices(np.arange(0,SMPL_LIM,SMPL_DENS), post)[0] # draw rate from posterior distr
        par_new.append(rate)
    return par_new
def get_true_lhd(T, reacts, pop0, par):
    """Exact (complete-data) likelihood of the reaction path `reacts` on
    [0, T] under rates par = [L_aa, L_ab, L_bb], starting from pop0.

    Product of event intensities N_A * rate times exponential survival
    factors between events; requires at least one reaction in `reacts`.
    """
    pop = get_population_list(reacts, pop0)
    total_rate = np.sum(par)
    types = reacts[:, 0]
    # First event: intensity at the initial population times its waiting time.
    first_type = int(types[0])
    lhd = pop[0, 0] * par[first_type] * np.exp(-pop[0, 0] * total_rate * reacts[0, 1])
    for k in range(1, len(reacts)):
        dt = reacts[k, 1] - reacts[k - 1, 1]
        lhd = lhd * pop[k, 0] * par[int(types[k])] * np.exp(-pop[k, 0] * total_rate * dt)
    # Survival from the last event until the observation horizon T.
    return lhd * np.exp(-pop[-1, 0] * total_rate * (T - reacts[-1, 1]))
#%% lin Poisson Model
# simulate homogenous Poisson process and then transform time to get lin. inhom. Poisson process
def get_reaction_times_linPois(pop0,pop1,r,dT):
    """Sample sorted event times for r[i] events of each type from an
    inhomogeneous Poisson process whose hazard interpolates *linearly*
    between the A-populations at pop0 and pop1 over [0, dT].

    Homogeneous uniform times are mapped through the inverse cumulative
    hazard (time rescaling); the rate prefactor cancels, so only the
    population sizes enter. Returns an (n, 2) array of (type, time) rows
    sorted by time.
    """
    # calculate reaction hazards for all reactions at T0 and T1
    rtype = np.zeros(0,dtype=int)
    rtime_inhom = np.zeros(0)
    for i in range(0,3):
        rtype = np.append(rtype,i*np.ones(r[i],dtype=int))
        # reaction times are uniform distributed
        rtime_hom = np.random.uniform(0,dT,r[i])
        h0 = pop0[0] # set hazards just to population size, since prefactor (rate) canceles in expression
        h1 = pop1[0]
        # nonlinear trafo of time to get inhom reaction times
        if h0!=h1:
            rtime_inhom = np.append( rtime_inhom, (np.sqrt(h0**2+(h1**2 - h0**2)*rtime_hom/dT)-h0)*dT/(h1-h0) )
        else: # inhom Poisson process == hom Poisson process
            rtime_inhom = np.append( rtime_inhom, rtime_hom)
    reacts = np.array([rtype,rtime_inhom]).transpose()
    reacts = reacts[rtime_inhom.argsort()] # sort in increasing time order
    return reacts
def get_lin_Poisson_lhd(rhaz0, rhaz1, T, reacts):
    """Likelihood of the events in `reacts` under an inhomogeneous Poisson
    process whose per-type hazard interpolates linearly from rhaz0 (t=0)
    to rhaz1 (t=T)."""
    density = 1
    for rtype, rtime in reacts:
        frac = rtime / T
        density = density * ((1 - frac) * rhaz0[int(rtype)] + frac * rhaz1[int(rtype)])
    # Integral of the linear total hazard over [0, T] is T * (h0 + h1) / 2.
    return density * np.exp(-T * (np.sum(rhaz0) + np.sum(rhaz1)) / 2)
def get_lin_Poisson_PMF(rhaz0, rhaz1, r, T):
    """Joint Poisson probability of observing r[i] events of each of the
    three types, with per-type mean T * (rhaz0[i] + rhaz1[i]) / 2."""
    prob = 1
    for i in range(0, 3):
        mean_count = T * (rhaz0[i] + rhaz1[i]) / 2
        prob = prob * poisson.pmf(r[i], mean_count)
    return prob
def get_MH_ratio_linPois(pop0,pop1,reacts,par_new,r,T):
    """Metropolis-Hastings weight q * L_true / L_proposal for a path proposed
    from the linearly interpolating Poisson process.

    q is the Poisson pmf of the event counts r, L_proposal the linear-Poisson
    path density and L_true the exact likelihood. Returns 0 when a zero rate
    makes all three vanish.
    """
    [L_aa,L_ab,L_bb] = par_new
    # Per-type hazards at the start and end of the interval.
    rhaz0 = [ L_aa*pop0[0], L_ab*pop0[0], L_bb*pop0[0] ]
    rhaz1 = [ L_aa*pop1[0], L_ab*pop1[0], L_bb*pop1[0] ]
    # if reaction happened in interval
    if len(reacts)>0:
        q = get_lin_Poisson_PMF(rhaz0,rhaz1,r,T)
        Lhood_P = get_lin_Poisson_lhd(rhaz0,rhaz1,T,reacts)
        Lhood_T = get_true_lhd(T,reacts,pop0,par_new)
    # if nothing happened -> Lhood_P = Lhood_CD
    else:
        q = get_lin_Poisson_PMF(rhaz0,rhaz1,r,T)
        Lhood_P = np.exp(-T*(np.sum(rhaz0)+np.sum(rhaz1))/2)
        Lhood_T = np.exp(-pop1[0]*(L_aa+L_ab+L_bb)*T)
        # With no events, pop0 == pop1, so both expressions must agree.
        if np.abs(1-Lhood_P/Lhood_T)>0.01: # just to make sure...
            print('Error!')
    # if reaction rate given is 0, but still proces observed: return 0
    if q==0 and Lhood_T==0 and Lhood_P==0:
        return 0
    else:
        return q*Lhood_T/Lhood_P
#%% exp Poisson Model
# simulate homogenous Poisson process and then transform time to get exp inhom. Poisson process
def get_reaction_times_expPois(pop0, pop1, r, T):
    """Sample sorted event times for r[i] events of each type from an
    inhomogeneous Poisson process whose hazard interpolates *exponentially*
    between the A-populations at pop0 and pop1 over [0, T].

    Uniform samples are mapped through the inverse cumulative hazard; the
    rate prefactor cancels, so only population sizes enter. Returns an
    (n, 2) array of (type, time) rows sorted by time.
    """
    rtype = np.zeros(0, dtype=int)
    rtime_inhom = np.zeros(0)
    N0 = pop0[0]  # A-type population at start and end of interval
    N1 = pop1[0]
    for i in range(0, 3):
        rtype = np.append(rtype, i * np.ones(r[i], dtype=int))
        unif_sample = np.random.uniform(size=r[i])
        # nonlinear trafo of time to get reaction times for inhom process
        if N0 != N1:
            dlogN = (np.log(N1) - np.log(N0))
            inhom_sample = np.log(1 + unif_sample * (np.exp(dlogN) - 1)) * T / dlogN
            rtime_inhom = np.append(rtime_inhom, inhom_sample)
        else:
            # Homogeneous case: times are uniform on [0, T].
            # BUG FIX: the original appended unif_sample directly, producing
            # times uniform on [0, 1) regardless of the interval length T
            # (the linPois twin correctly samples uniform(0, dT)).
            rtime_inhom = np.append(rtime_inhom, unif_sample * T)
    reacts = np.array([rtype, rtime_inhom]).transpose()
    reacts = reacts[rtime_inhom.argsort()]  # sort in increasing time order
    return reacts
def get_exp_Poisson_lhd(T, reacts, pop0, pop1, par):
    """Likelihood of the events in `reacts` under an inhomogeneous Poisson
    process with exponentially interpolating hazard, i.e. an A-population
    N(t) = N0 * (N1/N0)**(t/T). Expects at least one event."""
    N0 = pop0[0]
    N1 = pop1[0]
    # Product of event intensities rate * N(t_i).
    density = 1.0
    for rtype, rtime in reacts:
        density = density * par[int(rtype)] * N0 * (N1 / N0) ** (rtime / T)
    # Survival exponent: integral of N(t) over [0, T] is T * (N1-N0)/log(N1/N0)
    # (the logarithmic mean), degenerating to T * N0 when N0 == N1.
    if N0 != N1:
        mean_pop = (N1 - N0) / (np.log(N1) - np.log(N0))
    else:
        mean_pop = N0
    return density * np.exp(-T * np.sum(par) * mean_pop)
def get_exp_Poisson_PMF(pop0, pop1, par, r, T):
    """Joint Poisson pmf of the event counts r under the exponentially
    interpolating hazard: per-type mean T * par[i] * mean_pop, where mean_pop
    is the logarithmic mean of the A-populations (N0 when N0 == N1)."""
    N0 = pop0[0]
    N1 = pop1[0]
    if N0 != N1:
        mean_pop = (N1 - N0) / (np.log(N1) - np.log(N0))
    else:
        mean_pop = N0
    prob = 1
    for i in range(0, 3):
        prob = prob * poisson.pmf(r[i], T * par[i] * mean_pop)
    return prob
def get_MH_ratio_expPois(pop0,pop1,reacts,par_new,r,T):
    """Metropolis-Hastings weight q * L_true / L_proposal for a path proposed
    from the exponentially interpolating Poisson process.

    q is the Poisson pmf of the event counts r, L_proposal the exp-Poisson
    path density and L_true the exact likelihood. With no events, proposal
    and true likelihood coincide. Returns 0 when a zero rate makes all
    three vanish.
    """
    # if reaction happened in interval
    if len(reacts)>0:
        q = get_exp_Poisson_PMF(pop0,pop1,par_new,r,T)
        Lhood_P = get_exp_Poisson_lhd(T,reacts,pop0,pop1,par_new)
        Lhood_T = get_true_lhd(T,reacts,pop0,par_new)
    # if nothing happened -> Lhood_P = Lhood_CD
    else:
        q = get_exp_Poisson_PMF(pop0,pop1,par_new,r,T)
        Lhood_P = np.exp(-pop1[0]*np.sum(par_new)*T)
        Lhood_T = np.exp(-pop1[0]*np.sum(par_new)*T)
    # if reaction rate given is 0, but still proces observed: return 0
    if q==0 and Lhood_T==0 and Lhood_P==0:
        return 0
    else:
        return q*Lhood_T/Lhood_P
#%% MCMC function with homogenous spacing
def get_posterior(pop_SS,dT,T,prior,N,burn_in,w,plot,OUTPUTPATH,SMPL_LIM,SMPL_DENS):
    """MCMC (Metropolis-within-Gibbs) over rates and latent reaction paths for
    snapshots pop_SS taken at a *homogeneous* spacing dT over [0, T].

    Alternates (a) a Gibbs draw of the three rates given the current path and
    (b) per-interval path proposals from the exp-Poisson process, accepted
    with a Metropolis-Hastings step. `prior` is evaluated on the grid
    arange(0, SMPL_LIM, SMPL_DENS); w is the Skellam proposal width; `plot`
    saves sampled trajectories under OUTPUTPATH. Returns
    [rate samples after burn_in, trajectory samples, mean acceptance prob].
    """
    # splitting transition matrix into a quadratic, inveratble part S1 and the rest S2, r the same
    # S1 = S[:,0:2] not necessary, since unity matrix here
    S = np.array([[1,0,-1],[0,1,2]])
    S2 = S[:,2]
    # guess initial parameter
    par_init = [1,1,1] #r_old_sum/(T*(pop_SS[0,0]+pop_SS[-1,0])/2) # guess 'good' starting point (~ 1/mean A cells)
    print('Start MCMC with par_init: ',par_init)
    # generate initial trajectory and calculate MH ratio
    reacts_old = []
    ratio_old = np.zeros(len(pop_SS)-1)
    r_old = np.zeros((len(pop_SS)-1,3))
    # loop over intervals (number of snapshots-1)
    for k in range(0,len(pop_SS)-1):
        pop0 = pop_SS[k,:]
        pop1 = pop_SS[k+1,:]
        # determine initial combination of allowed transitions
        r = get_reactions(pop0,pop1,S2)
        # generate trajectories for interval and aggregate reactions and r
        # make sure that initial trajectory is valid!!! (ratio > 0)
        while True:
            path_temp = get_reaction_times_expPois(pop0,pop1,r,dT)
            ratio_temp = get_MH_ratio_expPois(pop0,pop1,path_temp,par_init,r,dT)
            if ratio_temp>0:
                break
        ratio_old[k] = ratio_temp
        reacts_old.append(path_temp)
        r_old[k] = r
    # loop over MCMC iterations
    output_par = np.zeros((0,3))
    output_traj = []
    acc_prob_ls = []
    reacts = reacts_old
    if plot==True:
        plt.figure(figsize=(12,8))
    for i in range(0,N):
        if i%100==0:
            print(i)
        # sample rate constants given current sample path (aggregate sample path to one array first)
        reacts_seq = np.zeros((0,2))
        for j in range(0,len(reacts)):
            temp = reacts[j].copy()
            temp[:,1] += j*dT # adjust times since get_path samples times from [0,dT]
            reacts_seq = np.vstack([reacts_seq,temp])
        par = sample_rates(prior,pop_SS[0,:],reacts_seq,T,SMPL_LIM,SMPL_DENS)
        # just for plotting the sampled trajectories
        if plot==True:
            traj = get_population_list(reacts_seq,pop_SS[0,:])
            Acell = traj[:,0]
            times = np.hstack([0,reacts_seq[:,1]])
            plt.step(times,Acell,c='b',lw=0.5)
            # dashed vertical lines mark the snapshot times
            for l in range(1,int(T/dT)):
                plt.vlines(l*dT,0,np.max(Acell),linestyle='--')
        # generate new trajectories for intervals
        reacts = []
        for k in range(0,len(pop_SS)-1):
            pop0 = pop_SS[k,:]
            pop1 = pop_SS[k+1,:]
            # propose new trajectory given current rate constants
            r = shuffle_reactions(pop0,pop1,S2,r_old[k],w)
            # generate trajectories for interval
            reacts_new = get_reaction_times_expPois(pop0,pop1,r,dT)
            ratio_new = get_MH_ratio_expPois(pop0,pop1,reacts_new,par,r,dT)
            # accept/reject it with MH-step
            acc_prob = np.min([1,ratio_new/ratio_old[k]])
            if np.random.rand()<acc_prob:
                r_old[k] = r
                ratio_old[k] = ratio_new
                reacts_old[k] = reacts_new
            reacts.append(reacts_old[k])
            acc_prob_ls.append(acc_prob)
        # record samples only after the burn-in phase
        if i>=burn_in:
            output_par = np.vstack([output_par,par])
            output_traj.append(reacts_seq)
    print('Finished. Average acceptance prob: ',np.mean(acc_prob_ls))
    if plot==True:
        plt.xlabel('t')
        plt.ylabel(r'$N_{A}$')
        plt.savefig(OUTPUTPATH+'traj_T'+str(T)+'-dT'+str(dT)+'.png', bbox_inches="tight",dpi=300)
    return [output_par,output_traj,np.mean(acc_prob_ls)]
#%% MCMC function with inhom spacing
def get_posterior_inhom(pop_SS,T_arr,prior,N,burn_in,w,plot,OUTPUTPATH,SMPL_LIM,SMPL_DENS):
    """Same MCMC sampler as get_posterior, but for snapshots taken at the
    *inhomogeneous* times T_arr (T_arr[0] = 0, T_arr[-1] = horizon).

    Each interval uses its own length dT = T_arr[k+1] - T_arr[k]; everything
    else mirrors get_posterior. Returns
    [rate samples after burn_in, trajectory samples, mean acceptance prob].
    """
    # splitting transition matrix into a quadratic, inveratble part S1 and the rest S2, r the same
    # S1 = S[:,0:2] not necessary, since unity matrix here
    S = np.array([[1,0,-1],[0,1,2]])
    S2 = S[:,2]
    # guess initial parameter
    par_init = [1,1,1] #r_old_sum/(T*(pop_SS[0,0]+pop_SS[-1,0])/2) # guess 'good' starting point (~ 1/mean A cells)
    print('Start MCMC with par_init: ',par_init)
    # generate initial trajectory and calculate MH ratio
    reacts_old = []
    ratio_old = np.zeros(len(pop_SS)-1)
    r_old = np.zeros((len(pop_SS)-1,3))
    # loop over intervals (number of snapshots-1)
    for k in range(0,len(pop_SS)-1):
        dT = T_arr[k+1]-T_arr[k]
        pop0 = pop_SS[k,:]
        pop1 = pop_SS[k+1,:]
        # determine initial combination of allowed transitions
        r = get_reactions(pop0,pop1,S2)
        # generate trajectories for interval and aggregate reactions and r
        # make sure that initial trajectory is valid!!! (ratio > 0)
        while True:
            path_temp = get_reaction_times_expPois(pop0,pop1,r,dT)
            ratio_temp = get_MH_ratio_expPois(pop0,pop1,path_temp,par_init,r,dT)
            if ratio_temp>0:
                break
        ratio_old[k] = ratio_temp
        reacts_old.append(path_temp)
        r_old[k] = r
    # loop over MCMC iterations
    output_par = np.zeros((0,3))
    output_traj = []
    acc_prob_ls = []
    reacts = reacts_old
    if plot==True:
        plt.figure(figsize=(12,8))
    for i in range(0,N):
        if i%100==0:
            print(i)
        # sample rate constants given current sample path (aggregate sample path to one array first)
        reacts_seq = np.zeros((0,2))
        for j in range(0,len(reacts)):
            temp = reacts[j].copy()
            temp[:,1] += T_arr[j] # adjust times since get_path samples times from [0,dT]
            reacts_seq = np.vstack([reacts_seq,temp])
        par = sample_rates(prior,pop_SS[0,:],reacts_seq,T_arr[-1],SMPL_LIM,SMPL_DENS)
        # just for plotting the sampled trajectories
        if plot==True:
            traj = get_population_list(reacts_seq,pop_SS[0,:])
            Acell = traj[:,0]
            times = np.hstack([0,reacts_seq[:,1]])
            plt.step(times,Acell,c='b',lw=0.5)
            # dashed vertical lines mark the snapshot times
            for l in range(1,len(T_arr)-1):
                plt.vlines(T_arr[l],0,np.max(Acell),linestyle='--')
        # generate new trajectories for intervals
        reacts = []
        for k in range(0,len(pop_SS)-1):
            dT = T_arr[k+1]-T_arr[k]
            pop0 = pop_SS[k,:]
            pop1 = pop_SS[k+1,:]
            # propose new trajectory given current rate constants
            r = shuffle_reactions(pop0,pop1,S2,r_old[k],w)
            # generate trajectories for interval
            reacts_new = get_reaction_times_expPois(pop0,pop1,r,dT)
            ratio_new = get_MH_ratio_expPois(pop0,pop1,reacts_new,par,r,dT)
            # accept/reject it with MH-step
            acc_prob = np.min([1,ratio_new/ratio_old[k]])
            if np.random.rand()<acc_prob:
                r_old[k] = r
                ratio_old[k] = ratio_new
                reacts_old[k] = reacts_new
            reacts.append(reacts_old[k])
            acc_prob_ls.append(acc_prob)
        # record samples only after the burn-in phase
        if i>=burn_in:
            output_par = np.vstack([output_par,par])
            output_traj.append(reacts_seq)
    print('Finished. Average acceptance prob: ',np.mean(acc_prob_ls))
    if plot==True:
        plt.xlabel('t')
        plt.ylabel(r'$N_{A}$')
        plt.savefig(OUTPUTPATH+'traj_T'+str(T_arr)+'.png', bbox_inches="tight",dpi=300)
    return [output_par,output_traj,np.mean(acc_prob_ls)]
# %% for Model data generation and CD Likelihood
def trajectory_X(par,N_init,T):
    """Gillespie (SSA) simulation of the A->AA / A->AB / A->BB branching
    process with rates par = [L_aa, L_ab, L_bb], starting from
    N_init = [N_a, N_b], until time T.

    Returns an (n, 2) array of (reaction type, absolute time) rows; the
    initial placeholder row and the final event beyond T are stripped.
    """
    [L_aa,L_ab,L_bb] = par
    [N_a,N_b] = N_init
    rtype = np.zeros(1)
    rtime = np.zeros(1)
    while True:
        # generate random reaction times
        with np.errstate(divide='ignore', invalid='ignore'): # to filter out the infinite values below
            times = -np.log(np.random.rand(3))/np.array([N_a*L_aa, N_a*L_ab, N_a*L_bb])
        # Impossible reactions (zero hazard -> inf/nan waiting time) are pushed
        # beyond the horizon so they can never be the minimum.
        times[np.logical_or(np.logical_or(times==np.inf, times==0), np.logical_or(times==-np.inf, np.isnan(times)))] = T+1
        t_min = np.min(times)
        rtime = np.append(rtime,rtime[-1]+t_min)
        # A -> AA
        if(t_min == times[0]):
            N_a = N_a+1
            N_b = N_b
            rtype = np.append(rtype,0)
        # A -> AB
        elif(t_min == times[1]):
            N_a = N_a
            N_b = N_b+1
            rtype = np.append(rtype,1)
        # A -> BB
        elif(t_min == times[2]):
            N_a = N_a-1
            N_b = N_b+2
            rtype = np.append(rtype,2)
        if rtime[-1]>T:
            # Drop the index-0 placeholder and the overshooting last event.
            rtype = rtype[1:-1] # first remove last entries
            rtime = rtime[1:-1]
            break
    return np.array([rtype,rtime]).transpose()
# calculate complete-data likelihood (func a bit different from Bayes_TS.py script)
def lhood_CD(p_data, t_data, T, SMPL_LIM):
    """Unnormalised complete-data likelihood curves for the three rates.

    p_data rows hold (N_A, ..., time); t_data rows hold (type, time).
    For each rate i the likelihood of a candidate value c is
    c**k_i * exp(-c * I), with k_i the count of type-i events and
    I = int_0^T N_A(t) dt. Returns [lhd, lh_max] where lhd[i] samples the
    curve on arange(0, SMPL_LIM, 0.01) and lh_max[i] = k_i / I maximises it.
    """
    # Exposure integral I shared by all three rates.
    exposure = 0
    for j in range(1, len(p_data)):
        exposure += p_data[j - 1, 0] * (p_data[j, 2] - p_data[j - 1, 2])
    exposure += p_data[-1, 0] * (T - p_data[-1, 2])
    grid = np.arange(0, SMPL_LIM, 0.01)
    rows = []
    lh_max = np.zeros(3)
    for i in [0, 1, 2]:
        r_num = np.sum(t_data[:, 0] == i)
        print('Number of type ', i, ' reactions: ', r_num)
        print('Max likelihood for rate ', i, ': ', r_num / exposure)
        rows.append((grid ** r_num) * np.exp(-grid * exposure))
        lh_max[i] = r_num / exposure
    lhd = np.array(rows)
    return [lhd, lh_max]
# Returns z-scores for MCMC convergence diagnostics. Converged chain: z should oscillate [-1,1]
# Ref: Geweke. Evaluating the accuracy of sampling-based approaches to calculating posterior moments.
def geweke(ls, first, last, nint):
    """Geweke z-scores for MCMC convergence diagnostics.

    Splits `ls` into `nint` equal segments; for each, compares the mean of
    the first `first` fraction with the mean of the final (1 - last)
    fraction, scaled by the pooled standard deviation. For a converged
    chain, z should oscillate within roughly [-1, 1].
    """
    segments = np.split(ls, nint)
    scores = np.zeros(len(segments))
    for idx, seg in enumerate(segments):
        head = seg[:int(len(seg) * first)]
        tail = seg[int(len(seg) * last):]
        scores[idx] = (np.mean(head) - np.mean(tail)) / np.sqrt(np.var(head) + np.var(tail))
    return scores
# %% Testing shit with homogenous spacing
def plot_all(output_par,mA,OUTPUTPATH,SMPL_LIM,pop,par,traj,T,dT):
    """Diagnostics for one homogeneously-spaced snapshot experiment.

    Compares the MCMC posterior of the three rates (columns 0..2 of
    output_par, assumed shape (n_samples, 3) -- confirm with caller)
    against the complete-data likelihood from the full trajectory, and
    saves comparison, time-series and pairwise-KDE figures to OUTPUTPATH.
    Returns the three rounded variance ratios MCMC/complete-data.
    """
    # Plot complete data Likelihood
    x = np.arange(0,SMPL_LIM,0.01)
    # Gaussian KDEs of the MCMC samples, one per rate parameter.
    k0 = gaussian_kde(output_par[:,0])
    k1 = gaussian_kde(output_par[:,1])
    k2 = gaussian_kde(output_par[:,2])
    # Posterior variance E[c^2] - E[c]^2 via Simpson's rule on the grid.
    v0_MC = simps((x**2)*k0(x),dx=0.01)-(simps(x*k0(x),dx=0.01))**2
    v1_MC = simps((x**2)*k1(x),dx=0.01)-(simps(x*k1(x),dx=0.01))**2
    v2_MC = simps((x**2)*k2(x),dx=0.01)-(simps(x*k2(x),dx=0.01))**2
    # calculate Likelihood given Complete data
    [lhd_CD,lhd_CD_max] = lhood_CD(pop,traj,T,SMPL_LIM)
    lhd_CD[0,:] = lhd_CD[0,:]/simps(lhd_CD[0,:], dx=0.01) # normalise likelihoods
    lhd_CD[1,:] = lhd_CD[1,:]/simps(lhd_CD[1,:], dx=0.01) # (L_func uses 0.01 as sampling density)
    lhd_CD[2,:] = lhd_CD[2,:]/simps(lhd_CD[2,:], dx=0.01)
    v0_CD = simps((x**2)*lhd_CD[0,:],dx=0.01)-(simps(x*lhd_CD[0,:],dx=0.01))**2
    v1_CD = simps((x**2)*lhd_CD[1,:],dx=0.01)-(simps(x*lhd_CD[1,:],dx=0.01))**2
    v2_CD = simps((x**2)*lhd_CD[2,:],dx=0.01)-(simps(x*lhd_CD[2,:],dx=0.01))**2
    # One subplot per rate: complete-data likelihood (red) vs MCMC KDE (blue).
    fig = plt.figure(figsize=(12,4))
    fig.suptitle(r'Snapshot vs complete data: Parameter estimation, Time: [0,'+str(T)+'], Snapshots: '+str(int(T/dT)), fontsize=12,y=1.05)
    ax = plt.subplot(131)
    ax.set_title('Rel Bias: '+str(round(np.abs(lhd_CD_max[0]-x[np.argmax(k0(x))])/lhd_CD_max[0],2))+', Var ratio: '+str(round(v0_MC/v0_CD,2)))
    ax.plot(x,lhd_CD[0,:],c='r')
    ax.plot(x,k0(x))
    ax.fill_between(x, k0(x), interpolate=True, color='blue',alpha=0.25)
    # NOTE(review): plt.vlines here but ax.vlines in the other two panels --
    # same effect since this axes is current, but inconsistent.
    plt.vlines(par[0],0,max(lhd_CD[0,:]))
    if np.max(lhd_CD[0,:])>6*np.max(k0(x)):
        # Clip the y-axis when the CD likelihood dwarfs the posterior.
        ax.set_ylim(0,np.max(k0(x))*3)
    ax.set_xlabel(r'$\lambda_{AA}$')
    ax = plt.subplot(132)
    ax.set_title('Rel Bias: '+str(round(np.abs(lhd_CD_max[1]-x[np.argmax(k1(x))])/lhd_CD_max[1],2))+', Var ratio: '+str(round(v1_MC/v1_CD,2)))
    ax.plot(x,lhd_CD[1,:],c='r')
    ax.plot(x,k1(x))
    ax.fill_between(x, k1(x), interpolate=True, color='blue',alpha=0.25)
    ax.vlines(par[1],0,max(lhd_CD[1,:]))
    if np.max(lhd_CD[1,:])>6*np.max(k1(x)):
        ax.set_ylim(0,np.max(k1(x))*3)
    ax.set_xlabel(r'$\lambda_{AB}$')
    ax = plt.subplot(133)
    ax.set_title('Rel Bias: '+str(round(np.abs(lhd_CD_max[2]-x[np.argmax(k2(x))])/lhd_CD_max[2],2))+', Var ratio: '+str(round(v2_MC/v2_CD,2)))
    ax.plot(x,lhd_CD[2,:],c='r')
    ax.plot(x,k2(x))
    ax.fill_between(x, k2(x), interpolate=True, color='blue',alpha=0.25)
    ax.vlines(par[2],0,max(lhd_CD[2,:]))
    if np.max(lhd_CD[2,:])>6*np.max(k2(x)):
        ax.set_ylim(0,np.max(k2(x))*3)
    ax.set_xlabel(r'$\lambda_{BB}$')
    plt.tight_layout()
    plt.savefig(OUTPUTPATH+'CDLH_T'+str(T)+'-dT'+str(dT)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    # Plot MCMC results
    # Plot time series
    fig = plt.figure(figsize=(10,10))
    fig.suptitle(r'MCMC Parameter timeseries. Avrg Acceptance prob: '+str(int(1000*mA)/10)+'%', fontsize=12)
    gs = GridSpec(3,4)
    # One row per parameter: trace (left 3 cols) + marginal histogram (right col).
    ax_joint = fig.add_subplot(gs[0,0:3])
    ax_marg_y = fig.add_subplot(gs[0,3])
    ax_joint.plot(output_par[:,0],lw=0.3)
    ax_marg_y.hist(output_par[:,0],orientation="horizontal",bins=30)
    plt.setp(ax_marg_y.get_yticklabels(), visible=False)
    plt.setp(ax_marg_y.get_xticklabels(), visible=False)
    plt.setp(ax_joint.get_xticklabels(), visible=False)
    ax_joint.set_ylabel(r'$\lambda_{AA}$')
    ax_joint = fig.add_subplot(gs[1,0:3])
    ax_marg_y = fig.add_subplot(gs[1,3])
    ax_joint.plot(output_par[:,1],lw=0.3)
    ax_marg_y.hist(output_par[:,1],orientation="horizontal",bins=30)
    plt.setp(ax_marg_y.get_yticklabels(), visible=False)
    plt.setp(ax_marg_y.get_xticklabels(), visible=False)
    plt.setp(ax_joint.get_xticklabels(), visible=False)
    ax_joint.set_ylabel(r'$\lambda_{AB}$')
    ax_joint = fig.add_subplot(gs[2,0:3])
    ax_marg_y = fig.add_subplot(gs[2,3])
    ax_joint.plot(output_par[:,2],lw=0.3)
    ax_marg_y.hist(output_par[:,2],orientation="horizontal",bins=30)
    plt.setp(ax_marg_y.get_yticklabels(), visible=False)
    plt.setp(ax_marg_y.get_xticklabels(), visible=False)
    ax_joint.set_xlabel('Iteration')
    ax_joint.set_ylabel(r'$\lambda_{BB}$')
    plt.savefig(OUTPUTPATH+'TS_T'+str(T)+'-dT'+str(dT)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    # Custom the inside plot: options are: “scatter” | “reg” | “resid” | “kde” | “hex”
    # Gaussian KDE (kernel density estimate)
    # Pairwise joint KDEs: red dot = true rates, red x = complete-data MLE.
    ax1 = sns.jointplot(x=output_par[:,0], y=output_par[:,1], kind='kde')
    ax1.set_axis_labels(r'$\lambda_{AA}$', r'$\lambda_{AB}$', fontsize=16)
    ax1.ax_joint.plot([par[0]],[par[1]],'ro')
    ax1.ax_joint.plot([lhd_CD_max[0]],[lhd_CD_max[1]],'rx')
    ax1.ax_marg_x.axvline(par[0], ls='--')
    ax1.ax_marg_y.axhline(par[1], ls='--')
    ax1.savefig(OUTPUTPATH+'MCLH12_T'+str(T)+'-dT'+str(dT)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    ax2 = sns.jointplot(x=output_par[:,0], y=output_par[:,2], kind='kde')
    ax2.set_axis_labels(r'$\lambda_{AA}$', r'$\lambda_{BB}$', fontsize=16)
    ax2.ax_joint.plot([par[0]],[par[2]],'ro')
    ax2.ax_joint.plot([lhd_CD_max[0]],[lhd_CD_max[2]],'rx')
    ax2.ax_marg_x.axvline(par[0], ls='--')
    ax2.ax_marg_y.axhline(par[2], ls='--')
    ax2.savefig(OUTPUTPATH+'MCLH13_T'+str(T)+'-dT'+str(dT)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    ax3 = sns.jointplot(x=output_par[:,1], y=output_par[:,2], kind='kde')
    ax3.set_axis_labels(r'$\lambda_{AB}$', r'$\lambda_{BB}$', fontsize=16)
    ax3.ax_joint.plot([par[1]],[par[2]],'ro')
    ax3.ax_joint.plot([lhd_CD_max[1]],[lhd_CD_max[2]],'rx')
    ax3.ax_marg_x.axvline(par[1], ls='--')
    ax3.ax_marg_y.axhline(par[2], ls='--')
    ax3.savefig(OUTPUTPATH+'MCLH23_T'+str(T)+'-dT'+str(dT)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    return [round(v0_MC/v0_CD,2),round(v1_MC/v1_CD,2),round(v2_MC/v2_CD,2)]
def plot_all_inhom(output_par,mA,OUTPUTPATH,SMPL_LIM,pop,par,traj,T_arr):
    """Same diagnostics as plot_all, for inhomogeneously-spaced snapshots.

    T_arr lists the snapshot times (last entry is the horizon T); it also
    appears verbatim in figure titles and output filenames. Returns the
    three rounded variance ratios MCMC/complete-data.
    """
    # Plot complete data Likelihood
    x = np.arange(0,SMPL_LIM,0.01)
    # Gaussian KDEs of the MCMC samples, one per rate parameter.
    k0 = gaussian_kde(output_par[:,0])
    k1 = gaussian_kde(output_par[:,1])
    k2 = gaussian_kde(output_par[:,2])
    # Posterior variance E[c^2] - E[c]^2 via Simpson's rule on the grid.
    v0_MC = simps((x**2)*k0(x),dx=0.01)-(simps(x*k0(x),dx=0.01))**2
    v1_MC = simps((x**2)*k1(x),dx=0.01)-(simps(x*k1(x),dx=0.01))**2
    v2_MC = simps((x**2)*k2(x),dx=0.01)-(simps(x*k2(x),dx=0.01))**2
    # calculate Likelihood given Complete data
    [lhd_CD,lhd_CD_max] = lhood_CD(pop,traj,T_arr[-1],SMPL_LIM)
    lhd_CD[0,:] = lhd_CD[0,:]/simps(lhd_CD[0,:], dx=0.01) # normalise likelihoods
    lhd_CD[1,:] = lhd_CD[1,:]/simps(lhd_CD[1,:], dx=0.01) # (L_func uses 0.01 as sampling density)
    lhd_CD[2,:] = lhd_CD[2,:]/simps(lhd_CD[2,:], dx=0.01)
    v0_CD = simps((x**2)*lhd_CD[0,:],dx=0.01)-(simps(x*lhd_CD[0,:],dx=0.01))**2
    v1_CD = simps((x**2)*lhd_CD[1,:],dx=0.01)-(simps(x*lhd_CD[1,:],dx=0.01))**2
    v2_CD = simps((x**2)*lhd_CD[2,:],dx=0.01)-(simps(x*lhd_CD[2,:],dx=0.01))**2
    # One subplot per rate: complete-data likelihood (red) vs MCMC KDE (blue).
    fig = plt.figure(figsize=(12,4))
    fig.suptitle(r'Snapshot vs complete data: Parameter estimation, Snapshots: '+str(T_arr), fontsize=12,y=1.05)
    ax = plt.subplot(131)
    ax.set_title('Rel Bias: '+str(round(np.abs(lhd_CD_max[0]-x[np.argmax(k0(x))])/lhd_CD_max[0],2))+', Var ratio: '+str(round(v0_MC/v0_CD,2)))
    ax.plot(x,lhd_CD[0,:],c='r')
    ax.plot(x,k0(x))
    ax.fill_between(x, k0(x), interpolate=True, color='blue',alpha=0.25)
    # NOTE(review): plt.vlines here but ax.vlines in the other two panels --
    # same effect since this axes is current, but inconsistent.
    plt.vlines(par[0],0,max(lhd_CD[0,:]))
    if np.max(lhd_CD[0,:])>6*np.max(k0(x)):
        # Clip the y-axis when the CD likelihood dwarfs the posterior.
        ax.set_ylim(0,np.max(k0(x))*3)
    ax.set_xlabel(r'$\lambda_{AA}$')
    ax = plt.subplot(132)
    ax.set_title('Rel Bias: '+str(round(np.abs(lhd_CD_max[1]-x[np.argmax(k1(x))])/lhd_CD_max[1],2))+', Var ratio: '+str(round(v1_MC/v1_CD,2)))
    ax.plot(x,lhd_CD[1,:],c='r')
    ax.plot(x,k1(x))
    ax.fill_between(x, k1(x), interpolate=True, color='blue',alpha=0.25)
    ax.vlines(par[1],0,max(lhd_CD[1,:]))
    if np.max(lhd_CD[1,:])>6*np.max(k1(x)):
        ax.set_ylim(0,np.max(k1(x))*3)
    ax.set_xlabel(r'$\lambda_{AB}$')
    ax = plt.subplot(133)
    ax.set_title('Rel Bias: '+str(round(np.abs(lhd_CD_max[2]-x[np.argmax(k2(x))])/lhd_CD_max[2],2))+', Var ratio: '+str(round(v2_MC/v2_CD,2)))
    ax.plot(x,lhd_CD[2,:],c='r')
    ax.plot(x,k2(x))
    ax.fill_between(x, k2(x), interpolate=True, color='blue',alpha=0.25)
    ax.vlines(par[2],0,max(lhd_CD[2,:]))
    if np.max(lhd_CD[2,:])>6*np.max(k2(x)):
        ax.set_ylim(0,np.max(k2(x))*3)
    ax.set_xlabel(r'$\lambda_{BB}$')
    plt.tight_layout()
    plt.savefig(OUTPUTPATH+'CDLH_T'+str(T_arr)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    # Plot MCMC results
    # Plot time series
    fig = plt.figure(figsize=(10,10))
    fig.suptitle(r'MCMC Parameter timeseries. Avrg Acceptance prob: '+str(int(1000*mA)/10)+'%', fontsize=12)
    gs = GridSpec(3,4)
    # One row per parameter: trace (left 3 cols) + marginal histogram (right col).
    ax_joint = fig.add_subplot(gs[0,0:3])
    ax_marg_y = fig.add_subplot(gs[0,3])
    ax_joint.plot(output_par[:,0],lw=0.3)
    ax_marg_y.hist(output_par[:,0],orientation="horizontal",bins=30)
    plt.setp(ax_marg_y.get_yticklabels(), visible=False)
    plt.setp(ax_marg_y.get_xticklabels(), visible=False)
    plt.setp(ax_joint.get_xticklabels(), visible=False)
    ax_joint.set_ylabel(r'$\lambda_{AA}$')
    ax_joint = fig.add_subplot(gs[1,0:3])
    ax_marg_y = fig.add_subplot(gs[1,3])
    ax_joint.plot(output_par[:,1],lw=0.3)
    ax_marg_y.hist(output_par[:,1],orientation="horizontal",bins=30)
    plt.setp(ax_marg_y.get_yticklabels(), visible=False)
    plt.setp(ax_marg_y.get_xticklabels(), visible=False)
    plt.setp(ax_joint.get_xticklabels(), visible=False)
    ax_joint.set_ylabel(r'$\lambda_{AB}$')
    ax_joint = fig.add_subplot(gs[2,0:3])
    ax_marg_y = fig.add_subplot(gs[2,3])
    ax_joint.plot(output_par[:,2],lw=0.3)
    ax_marg_y.hist(output_par[:,2],orientation="horizontal",bins=30)
    plt.setp(ax_marg_y.get_yticklabels(), visible=False)
    plt.setp(ax_marg_y.get_xticklabels(), visible=False)
    ax_joint.set_xlabel('Iteration')
    ax_joint.set_ylabel(r'$\lambda_{BB}$')
    plt.savefig(OUTPUTPATH+'TS_T'+str(T_arr)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    # Custom the inside plot: options are: “scatter” | “reg” | “resid” | “kde” | “hex”
    # Gaussian KDE (kernel density estimate)
    # Pairwise joint KDEs: red dot = true rates, red x = complete-data MLE.
    ax1 = sns.jointplot(x=output_par[:,0], y=output_par[:,1], kind='kde')
    ax1.set_axis_labels(r'$\lambda_{AA}$', r'$\lambda_{AB}$', fontsize=16)
    ax1.ax_joint.plot([par[0]],[par[1]],'ro')
    ax1.ax_joint.plot([lhd_CD_max[0]],[lhd_CD_max[1]],'rx')
    ax1.ax_marg_x.axvline(par[0], ls='--')
    ax1.ax_marg_y.axhline(par[1], ls='--')
    ax1.savefig(OUTPUTPATH+'MCLH12_T'+str(T_arr)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    ax2 = sns.jointplot(x=output_par[:,0], y=output_par[:,2], kind='kde')
    ax2.set_axis_labels(r'$\lambda_{AA}$', r'$\lambda_{BB}$', fontsize=16)
    ax2.ax_joint.plot([par[0]],[par[2]],'ro')
    ax2.ax_joint.plot([lhd_CD_max[0]],[lhd_CD_max[2]],'rx')
    ax2.ax_marg_x.axvline(par[0], ls='--')
    ax2.ax_marg_y.axhline(par[2], ls='--')
    ax2.savefig(OUTPUTPATH+'MCLH13_T'+str(T_arr)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    ax3 = sns.jointplot(x=output_par[:,1], y=output_par[:,2], kind='kde')
    ax3.set_axis_labels(r'$\lambda_{AB}$', r'$\lambda_{BB}$', fontsize=16)
    ax3.ax_joint.plot([par[1]],[par[2]],'ro')
    ax3.ax_joint.plot([lhd_CD_max[1]],[lhd_CD_max[2]],'rx')
    ax3.ax_marg_x.axvline(par[1], ls='--')
    ax3.ax_marg_y.axhline(par[2], ls='--')
    ax3.savefig(OUTPUTPATH+'MCLH23_T'+str(T_arr)+'_aa'+str(par[0])+'_ab'+str(par[1])+'_bb'+str(par[2])+'.png', bbox_inches="tight",dpi=300)
    return [round(v0_MC/v0_CD,2),round(v1_MC/v1_CD,2),round(v2_MC/v2_CD,2)]
|
988,562 | 3b653f4b5c4a6ccf060572f2a8f314c72306a5a0 | import requests
from spellchecker import SpellChecker
def escape_meme_text(text):
    """
    Replaces special characters in text for use with the
    memegen.link API
    """
    # Rules are applied sequentially; none of them overlap, so order is
    # only cosmetic here.
    rules = (
        (" ", "_"),
        ("?", "~q"),
        ("%", "~p"),
        ("#", "~h"),
        ("/", "~s"),
        ("''", "\""),
    )
    for old, new in rules:
        text = text.replace(old, new)
    return text
def generate_meme(top_text, bottom_text, meme_type):
    """Build a memegen.link image URL for the given template and captions,
    download it, and return the raw JPEG bytes."""
    top_text, bottom_text = escape_meme_text(top_text), escape_meme_text(bottom_text)
    url = f"https://memegen.link/{meme_type}/{top_text}/{bottom_text}.jpg"
    response = requests.get(url)
    return response.content
def correct_spelling(text):
    """Spell-correct each space-separated token of *text* and re-join."""
    checker = SpellChecker()
    corrected = [checker.correction(word) for word in text.split(" ")]
    return " ".join(corrected)
|
988,563 | fb58f9af5f0c7f5b03cb4465ec9c19b4784a690a | class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        """Plain binary-tree node: payload value plus optional child links."""
        self.val = val      # node value
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
class Solution:
    def findTarget(self, root: "TreeNode", k: int):
        """Return True if two distinct BST nodes sum to k, else False.

        Returns None (falsy) for an empty tree, preserving the original
        early-exit behaviour.

        Bug fix: the previous two-iterator implementation popped from the
        wrong stack in the right-hand iterator (`if not leftNode:` where it
        had to test rightNode) and used malformed `while` conditions, so it
        could crash or return wrong answers. Replaced with an in-order
        traversal (ascending values for a BST) plus a two-pointer scan.
        """
        if not root:
            return
        # In-order traversal -> ascending list of node values.
        values = []
        stack = []
        node = root
        while stack or node:
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            values.append(node.val)
            node = node.right
        # Two-pointer scan over the sorted values; lo < hi guarantees the
        # two nodes are distinct.
        lo, hi = 0, len(values) - 1
        while lo < hi:
            total = values[lo] + values[hi]
            if total == k:
                return True
            if total < k:
                lo += 1
            else:
                hi -= 1
        return False
|
988,564 | a5f08c6d7fb0a7baee13d646e45846a601a86130 | # https://matplotlib.org/gallery/user_interfaces/embedding_in_gtk3_sgskip.html
# Though a FigureCanvasGTK3Agg is used, the default renderer is still TkAgg
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import matplotlib as mpl
# Force the GTK3/Agg backend before any figure/canvas is created.
mpl.use('GTK3Agg')
print(mpl.rcParams['backend'])
from matplotlib.backends.backend_gtk3agg import (
    FigureCanvasGTK3Agg as FigureCanvas)
from matplotlib.figure import Figure
import numpy as np
# Top-level window wired to quit the GTK main loop on close.
win = Gtk.Window()
win.connect("delete-event", Gtk.main_quit)
win.set_default_size(400, 300)
win.set_title("Embedding in GTK")
# Build a simple sine figure to embed.
f = Figure(figsize=(5, 4), dpi=100)
a = f.add_subplot(111)
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2*np.pi*t)
a.plot(t, s)
sw = Gtk.ScrolledWindow()
win.add(sw)
# A scrolled window border goes outside the scrollbars and viewport
sw.set_border_width(10)
# The canvas is oversized (800x600 vs the 400x300 window) so the
# scrollbars actually have something to scroll.
canvas = FigureCanvas(f)  # a Gtk.DrawingArea
canvas.set_size_request(800, 600)
# win.add(canvas)
sw.add_with_viewport(canvas)
win.show_all()
Gtk.main()
|
988,565 | 07ceb5ebdb77a4b62a717d3079c23829c574ef44 | CMA = input("input :")
# Strip thousands separators (e.g. "1,234" -> "1234") before parsing.
cma = CMA.replace(",", "")
print(int(cma) + 100)  # echo the parsed number plus 100
|
988,566 | 828ddbd8893ae2b38aaa2b9386fb7b33cd19351b | #!/usr/bin/python3
# stefan (dot) huber (at) stusta (dot) de
# http://adventofcode.com/2017/day/10
def getInput(input):
    """Read the puzzle file and return all lines concatenated, stripped
    of surrounding whitespace/newlines."""
    with open(input, 'r') as inputfile:
        return "".join(line.strip() for line in inputfile)
def inputPart1(inpt):
    """Part 1: parse the comma-separated length list into integers."""
    return [int(token) for token in inpt.split(',')]
def inputPart2(inpt):
    """Part 2: ASCII code of every input character, then the fixed
    standard suffix lengths 17, 31, 73, 47, 23."""
    return [ord(ch) for ch in inpt] + [17, 31, 73, 47, 23]
def reverseSublist(lst, start, end):
    """Reverse lst[start..end] inclusive, in place, wrapping past the end
    of the list (circular buffer semantics). Returns lst for chaining."""
    for i in range(int((end - start + 1) / 2)):
        a = (start + i) % len(lst)
        b = (end - i) % len(lst)
        lst[a], lst[b] = lst[b], lst[a]  # tuple swap instead of a temp var
    return lst

def sparseHash(size, lengths, rounds):
    """Run the knot-hash tying rounds and return the resulting list.

    size:    number of marks on the circle (0..size-1)
    lengths: reversal lengths applied in order each round
    rounds:  how many times the whole length sequence is applied
    (Fixed: dropped an unused `result` accumulator and its misleading
    comment; pos/skipSize intentionally persist across rounds.)
    """
    pos = 0       # current pointer position
    skipSize = 0  # grows by one after every reversal
    numbers = list(range(size))
    for _ in range(rounds):
        for length in lengths:
            # reverse numbers[pos : pos+length] (circularly)
            numbers = reverseSublist(numbers, pos, pos + length - 1)
            pos = pos + length + skipSize
            skipSize = skipSize + 1
    return numbers
# to check the process, you can multiply the first tho numbers.
def hashPart1(lst):
    """Part-1 check value: product of the first two numbers in the list."""
    first, second = lst[0], lst[1]
    return first * second
# Once the rounds are complete, you will be left with the numbers from 0 to 255
# in some order, called the sparse hash. Your next task is to reduce these to a
# list of only 16 numbers called the dense hash. To do this, use numeric
# bitwise XOR to combine each consecutive block of 16 numbers in the sparse
# hash (there are 16 such blocks in a list of 256 numbers). So, the first
# element in the dense hash is the first sixteen elements of the sparse hash
# XOR'd together, the second element in the dense hash is the second sixteen
# elements of the sparse hash XOR'd together, etc.
def densifyHash(sparse_hash, factor):
    """Collapse each consecutive block of `factor` sparse-hash values into
    their bitwise XOR, producing the dense hash. Any trailing partial
    block is dropped, as before."""
    dense = []
    block_count = int(len(sparse_hash) / factor)
    for block in range(block_count):
        acc = 0  # XOR identity, so no first-element special case is needed
        for value in sparse_hash[block * factor:(block + 1) * factor]:
            acc = acc ^ value
        dense.append(acc)
    return dense
def hexOfHash(dense_hash):
result = ""
for i in range(len(dense_hash)):
number = dense_hash[i]
result = result + ("%0.2x" % number)
return result
def main():
    # Both parts share the same puzzle input file; only parsing differs.
    inpt = getInput("input")
    inptPart1 = inputPart1(inpt)
    sparse_hash = sparseHash(256, inptPart1, 1)
    print("knot hash for part 1:", hashPart1(sparse_hash))
    # Part 2: 64 rounds over ASCII-code lengths, then densify + hex-encode.
    inptPart2 = inputPart2(inpt)
    sparse_hash = sparseHash(256, inptPart2, 64)
    dense_hash = densifyHash(sparse_hash, 16)
    hash_representation = hexOfHash(dense_hash)
    print("knot hash for part 2: %s"% hash_representation)

if __name__ == "__main__":
    main()
|
988,567 | 47139fdfc1a36cf503880de89291af90d12f23a0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 BroadTech IT Solutions.
# (http://wwww.broadtech-innovations.com)
# contact@boradtech-innovations.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import models, fields
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED
import time
class pos_order(models.Model):
    """Extends point-of-sale orders with refund ("returned") tracking."""
    _inherit = 'pos.order'
    # Set when the order is a refund of an earlier order.
    return_order = fields.Boolean('Returned', readonly=True, help='To identify the order is returned or not!')
    def create(self, cr, user, vals, context=None):
        # Old-API create: if any order line references an existing order
        # (i.e. this is a refund), tag the new order and prefix its POS
        # reference with "Refund".
        for val in vals.get('lines'):
            for key in val:
                if isinstance(key, dict):
                    order_id = key.get('order_id')
                    if order_id:
                        refund_reference = self.browse(cr, user, order_id, context).pos_reference
                        if refund_reference:
                            vals.update({'pos_reference': 'Refund'+' '+refund_reference,
                                'return_order': True})
        # NOTE(review): reaches into the cursor's private psycopg2
        # connection to relax the isolation level -- confirm this is
        # intentional and still required.
        cr._cnx.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
        return super(pos_order, self).create(cr, user, vals, context)
    def _order_fields(self, cr, uid, ui_order, context=None):
        # Maps the UI order payload onto model field values. The local
        # `fields` dict shadows the imported `fields` module inside this
        # method only.
        fields = {
            'name': ui_order['name'],
            'user_id': ui_order['user_id'] or False,
            'session_id': ui_order['pos_session_id'],
            'lines': ui_order['lines'],
            'pos_reference':ui_order['name'],
            'partner_id': ui_order['partner_id'] or False,
        }
        if ui_order['return_status'] == 'active':
            fields.update({'return_order': ui_order['return_order']})
        return fields
    def create_from_ui(self, cr, uid, orders, context=None):
        # Keep only new orders
        submitted_references = [o['data']['name'] for o in orders]
        existing_order_ids = self.search(cr, uid, [('pos_reference', 'in', submitted_references)], context=context)
        existing_orders = self.read(cr, uid, existing_order_ids, ['pos_reference'], context=context)
        existing_references = set([o['pos_reference'] for o in existing_orders])
        orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]
        order_ids = []
        for tmp_order in orders_to_save:
            to_invoice = tmp_order['to_invoice']
            order = tmp_order['data']
            if order['return_status'] == 'active':
                order.update({'return_order': True})
            order_id = self._process_order(cr, uid, order, context=context)
            order_ids.append(order_id)
            try:
                # Best-effort workflow transition; failures are logged, not fatal.
                self.signal_workflow(cr, uid, [order_id], 'paid')
            except Exception as e:
                # NOTE(review): `_logger` and `tools` are not imported in the
                # visible header -- confirm they exist at module level.
                _logger.error('Could not fully process the POS Order: %s', tools.ustr(e))
            if to_invoice:
                self.action_invoice(cr, uid, [order_id], context)
                order_obj = self.browse(cr, uid, order_id, context)
                self.pool['account.invoice'].signal_workflow(cr, uid, [order_obj.invoice_id.id], 'invoice_open')
        return order_ids
pos_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
988,568 | d4898bb5b8c54dffba660e6cae8b36789f13f3e7 |
# coding: utf-8
# In[185]:
# bulk of Dependencies
import tweepy
import base64
import pandas as pd
import requests
import time
import json
import dateutil.parser as parser
# API Keys
from config import (consumer_key,
consumer_secret,
access_token,
access_token_secret)
from config2 import geo_api_key
from flask import Flask, jsonify, render_template
import pymongo
# In[186]:
#set up connection to mlab
import pymongo
app = Flask(__name__)
DB_NAME = 'manufacture_consent'
DB_HOST = 'ds255403.mlab.com'
DB_PORT = 55403
DB_USER = 'edwardwisejr'
DB_PASS = 'Flender77!'
connection = pymongo.MongoClient(DB_HOST, DB_PORT)
db = connection[DB_NAME]
db.authenticate(DB_USER, DB_PASS)
# In[76]:
# get bearer token for twitter api
bearer_token_credentials = base64.urlsafe_b64encode(
'{}:{}'.format(consumer_key, consumer_secret).encode('ascii')).decode('ascii')
url = 'https://api.twitter.com/oauth2/token'
headers = {
'Authorization': 'Basic {}'.format(bearer_token_credentials),
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
data = 'grant_type=client_credentials'
response = requests.post(url, headers=headers, data=data)
response_data = response.json()
if response_data['token_type'] == 'bearer':
bearer_token = response_data['access_token']
else:
raise RuntimeError('unexpected token type: {}'.format(response_data['token_type']))
# In[95]:
#fromDate= '201811210000'
#toDate= '201811220000'
# Premium full-archive search against the "dev" environment label.
endpoint = "https://api.twitter.com/1.1/tweets/search/fullarchive/dev.json"
headers = {"Authorization":'Bearer {}'.format(bearer_token), "Content-Type": "application/json"}
data ='{"query":"drain swamp place:wisconsin", "fromDate": "201602010000", "toDate": "201602280000"}'
#'{"query":"clinton email place:wisconsin", "fromDate": "201811210000", "toDate": "201811220000"}'
#''{query:{} place:wisconsin, fromDate: {}, toDate: {}}".format(quer, fromDate, toDate)
public_tweets = requests.post(endpoint,data=data,headers=headers).json()
# In[108]:
# NOTE(review): this second query overwrites the first response -- only the
# "economy" search below is actually ingested by the loop that follows.
endpoint = "https://api.twitter.com/1.1/tweets/search/fullarchive/dev.json"
headers = {"Authorization":'Bearer {}'.format(bearer_token), "Content-Type": "application/json"}
data ='{"query":"economy place:wisconsin", "fromDate": "201611010000", "toDate": "201611300000"}'
#'{"query":"clinton email place:wisconsin", "fromDate": "201811210000", "toDate": "2018112320000"}'
#''{query:{} place:wisconsin, fromDate: {}, toDate: {}}".format(quer, fromDate, toDate)
public_tweets = requests.post(endpoint,data=data,headers=headers).json()
# Locations already geocoded in mongo; anything new is geocoded afterwards.
locations_collection = db['mc_locations']
locations = locations_collection.find({}, {"location": 1, "_id": 0}).distinct("location")
query = "economy"
new_locations=[]
events = {}
tweets_collection = db['mc_tweets']
import pprint as pp
def _tweet_record(tweet, buzz_word):
    """Flatten one API tweet dict into the document schema stored in mongo.

    Replaces four copy-pasted branches (retweet x extended-text) with one
    builder; the produced documents are field-for-field identical. Using
    .get() for retweeted_status also tolerates a null value, which the
    original would have crashed on.
    """
    created = time.strftime('%Y-%m-%d %H:%M:%S',
                            time.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'))
    if 'extended_tweet' in tweet:
        text = tweet['extended_tweet']['full_text']
    else:
        text = tweet['text']
    retweeted = tweet.get('retweeted_status')
    return {
        "buzz_word": buzz_word,
        "source": "Public",
        "date": created,
        "year": parser.parse(created).year,
        "name": tweet['user']['name'],
        "screen_name": tweet['user']['screen_name'],
        "text": text,
        "location": tweet['user']['location'],
        "retweet_location": retweeted['user']['location'] if retweeted else None,
        "retweet_user": retweeted['user']['name'] if retweeted else None,
    }

for tweet in public_tweets['results']:
    # Remember unseen user locations so they can be geocoded afterwards.
    if tweet['user']['location'] not in locations:
        new_locations.append(tweet['user']['location'])
    stuff = _tweet_record(tweet, query)
    events[tweet['id']] = stuff
    post_id = tweets_collection.insert_one(stuff).inserted_id
geojson = []
place=[]
places={}
# Geocode every newly-seen user location via the Google Maps API and
# cache the coordinates in the mc_locations collection.
for location in new_locations:
    geocode_url = "https://maps.googleapis.com/maps/api/geocode/json?address={}".format(location)
    geocode_url = geocode_url + "&key={}".format(geo_api_key)
    results = requests.get(geocode_url)
    results = results.json()
    for place in results['results']:
        if len(place) > 0:
            spot={
                "location": location
                ,"lat": str(place['geometry']['location']['lat'])
                ,"long": str(place['geometry']['location']['lng'])
                ,"address": place['formatted_address']}
            post_id = locations_collection.insert_one(spot).inserted_id
# In[97]:
# NOTE(review): verbatim duplicate of the geocoding loop above -- running
# both cells double-inserts every new location.
for location in new_locations:
    geocode_url = "https://maps.googleapis.com/maps/api/geocode/json?address={}".format(location)
    geocode_url = geocode_url + "&key={}".format(geo_api_key)
    results = requests.get(geocode_url)
    results = results.json()
    for place in results['results']:
        if len(place) > 0:
            spot={
                "location": location
                ,"lat": str(place['geometry']['location']['lat'])
                ,"long": str(place['geometry']['location']['lng'])
                ,"address": place['formatted_address']}
            post_id = locations_collection.insert_one(spot).inserted_id
# In[1]:
#records = json.loads(df.T.to_json()).values()
#db.myCollection.insert(records)
tweets_df=pd.DataFrame(list(tweets_collection.find({})))
tweets_df  # notebook cell echo; no effect when run as a script
# In[115]:
places_df = pd.DataFrame(list(locations_collection.find({})))
# Reset the scratch state for the next buzz-word ingest run.
locations_collection = db['mc_locations']
locations = locations_collection.find({}, {"location": 1, "_id": 0}).distinct("location")
query = "wall/immigration"
new_locations=[]
events = {}
tweets_collection = db['mc_tweets']
# In[190]:
locations_collection = db['mc_locations']
locations_df = pd.DataFrame(list(locations_collection.find({})))
tweets_collection=db['mc_tweets']
tweets_df =pd.DataFrame(list(tweets_collection.find({})))
# Join tweets to their geocoded locations and keep only plotting columns.
tweets_to_plot_df = pd.merge(tweets_df, places_df, on="location", how="left")
tweets_to_plot_df = tweets_to_plot_df[['buzz_word', 'date', 'source', 'lat', 'long']]
#tweets_to_plot_json = tweets_to_plot_df.to_json(orient='index')
# with open('data.txt', 'w') as outfile:
# json.dump(tweets_to_plot_json , outfile)
# Bug fix: `to_csv(test.csv)` passed the bare name `test` (NameError at
# runtime); the intended argument is the literal filename.
tweets_to_plot_df.to_csv('test.csv')
# In[118]:
# Mirror the plotting frame into mongo as well.
records = json.loads(tweets_to_plot_df.T.to_json()).values()
db.mc_tweets_to_plot.insert(records)
# In[36]:
#tweets_collection = db['mc_tweets']tweets_df
tweets_collection = db['mc_tweets']
tweets=pd.DataFrame(list(tweets_collection.find({})))
# Replace non-printable / non-ASCII characters with spaces before export.
tweets['name'] = tweets['name'] .apply(lambda x: ''.join([" " if ord(i) < 32 or ord(i) > 126 else i for i in x]))
tweets['text'] = tweets['text'] .apply(lambda x: ''.join([" " if ord(i) < 32 or ord(i) > 126 else i for i in x]))
tweets = tweets[['buzz_word','text','date','location','name','retweet_location','retweet_user','screen_name','source','year']]
# In[37]:
records = json.loads(tweets.to_json()).values()
# In[50]:
db.mc_test.insert(records)
# In[156]:
#test =db.mc_test.find(filter={"location": "Massachusetts"})
tweets_collection=db['mc_tweets']
#tweets_collection.update_many({"buzz_word":"wall/immigration"},{ '$set': { "buzz_word": "wall or immigration" }})
tweets_df =pd.DataFrame(list(tweets_collection.find({"screen_name":"wxow"})))
tweets_df
# In[ ]:
# Bug fix: this was a bare `"..." OR "..."` expression -- a SyntaxError
# that made the whole file unparseable. Kept as a string constant so the
# search-term scratch list survives; feed it into an API `query` field.
LOCAL_NEWS_HANDLES = '"@kbjr6news" OR "@WAOW" OR "@MeTV" OR "@nbc26" OR "@fox6now" OR "@WKOW" OR "@wqow" OR "@tmj4" OR "@tbn" OR "@wxow"'
# In[157]:
#endpoint2 = "https://api.twitter.com/1.1/tweets/search/30day/dev.json"
# Probe query against a single local-news handle (inspected, not stored).
headers = {"Authorization":'Bearer {}'.format(bearer_token), "Content-Type": "application/json"}
data ='{"query":"@kbjr6news", "fromDate": "201611010000", "toDate": "201611300000"}'
#'{"query":"clinton email place:wisconsin", "fromDate": "201811210000", "toDate": "2018112320000"}'
#''{query:{} place:wisconsin, fromDate: {}, toDate: {}}".format(quer, fromDate, toDate)
public_tweets = requests.post(endpoint,data=data,headers=headers).json()
public_tweets
# In[ ]:
public_tweets
# In[169]:
endpoint = "https://api.twitter.com/1.1/tweets/search/fullarchive/dev.json"
headers = {"Authorization":'Bearer {}'.format(bearer_token), "Content-Type": "application/json"}
data ='{"query":"economy from:kbjr6news", "fromDate": "201601010000", "toDate": "201611300000"}'
#'{"query":"clinton email place:wisconsin", "fromDate": "201811210000", "toDate": "2018112320000"}'
#''{query:{} place:wisconsin, fromDate: {}, toDate: {}}".format(quer, fromDate, toDate)
public_tweets = requests.post(endpoint,data=data,headers=headers).json()
locations_collection = db['mc_locations']
locations = locations_collection.find({}, {"location": 1, "_id": 0}).distinct("location")
# NOTE(review): buzz_word label does not match the "economy from:kbjr6news"
# query above -- confirm which one is intended.
query = "clinton email or hillary email"
new_locations=[]
events = {}
tweets_collection = db['mc_test']
import pprint as pp
def _tweet_doc(tweet, buzz_word):
    """Flatten one API tweet into the mongo document schema.

    Second ingest cell; the four copy-pasted retweet/extended-text
    branches are collapsed into one builder producing identical documents.
    """
    created = time.strftime('%Y-%m-%d %H:%M:%S',
                            time.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'))
    text = tweet['extended_tweet']['full_text'] if 'extended_tweet' in tweet else tweet['text']
    rt = tweet.get('retweeted_status')
    return {
        "buzz_word": buzz_word,
        "source": "Public",
        "date": created,
        "year": parser.parse(created).year,
        "name": tweet['user']['name'],
        "screen_name": tweet['user']['screen_name'],
        "text": text,
        "location": tweet['user']['location'],
        "retweet_location": rt['user']['location'] if rt else None,
        "retweet_user": rt['user']['name'] if rt else None,
    }

for tweet in public_tweets['results']:
    # Remember unseen user locations so they can be geocoded afterwards.
    if tweet['user']['location'] not in locations:
        new_locations.append(tweet['user']['location'])
    stuff = _tweet_doc(tweet, query)
    events[tweet['id']] = stuff
    post_id = tweets_collection.insert_one(stuff).inserted_id
geojson = []
place=[]
places={}
stuff  # notebook echo of the last inserted document
# In[170]:
public_tweets  # notebook echo of the raw API response
# In[172]:
import plotly as py
import plotly.graph_objs as go
import pandas as pd
import numpy as np
N = 40
x = np.linspace(0, 1, N)
y = np.random.randn(N)
df = pd.DataFrame({'x': x, 'y': y})
df.head()
data = [
go.Bar(
x=df['x'], # assign x as the dataframe column 'x'
y=df['y']
)
]
# IPython notebook
# py.iplot(data, filename='pandas-bar-chart')
url = py.plot(data, filename='pandas-bar-chart')
# In[197]:
data=db['mc_tweets_to_plot']
data_df =pd.DataFrame(list(data.find({})))
#tweets_to_plot_json = tweets_to_plot_df.to_json(orient='index')
# with open('data.txt', 'w') as outfile:
# json.dump(tweets_to_plot_json , outfile)
data_df.to_csv('twitterdata.csv')
|
988,569 | 92d14c1c38c3fc2b5d6327591583c12fad1dd0ed | #!/usr/bin/env python
# Step 1. Generate phonetic name
# Step 2. Figure out how to spell it.
#//**********************************************************************
#//
#// Name Generator
#//
#// C - starting consonant
#// V - vowel
#// D - multi-consant
#// c - end consonant
#// d - ending multi-consonant
#//
#//**********************************************************************
# Weighted templates for phonetic names. Each entry is (pattern, weight);
# letters in a pattern are expanded via the tables below:
#   C/D = starting (multi-)consonant, V = vowel, c/d = ending (multi-)consonant.
# NOTE(review): WeightedTuple is defined elsewhere — presumably a weighted
# random chooser; confirm its sampling semantics.
nameTypes = WeightedTuple( [
    "Cv", 1,
    "CVc", 1,
    "CVcCVc", 1,
    "CVcCVd", 1,
    "CVcDVc", 1,
    "CVcDVd", 1,
    "CVCv", 1,
    "CVCVc", 1,
    "CVCVCv", 1,
    "CVCVd", 1,
    "CVd", 1,
    "DVcCVc", 1,
    "DVcCVd", 1,
    "DVcDVc", 1,
    "DVcDVd", 1,
    "DVd", 1,
    "Vc", 1,
    "VCCv", 1,
    "VCv", 1,
    "VCVc", 1,
    "VCVCv", 1,
    "VCVDv", 1,
    "Vd", 1,
    "VDVc", 1,
    "VDVCv", 1,
    "VDVDv", 1,
] )
# 'C': single consonants allowed at a syllable start, weighted by frequency.
startingConsonant = WeightedTuple( [
    "B", 1,
    "CH", 2,
    "D", 2,
    "F", 1,
    "G", 2,
    "J", 1,
    "K", 1,
    "L", 3,
    "M", 3,
    "N", 3,
    "P", 2,
    "R", 4,
    "S", 4,
    "SH", 2,
    "T", 4,
    "TH", 3,
    "V", 2,
    "W", 1,
    "Y", 1,
    "Z", 1,
] )
# 'D': consonant clusters allowed at a syllable start (uniform weights).
startingMultiConsonant = WeightedTuple( [
    "BL", 1,
    "BR", 1,
    "DR", 1,
    "FL", 1,
    "FR", 1,
    "GL", 1,
    "GR", 1,
    "KL", 1,
    "KR", 1,
    "PL", 1,
    "PR", 1,
    "SK", 1,
    "SKL", 1,
    "SKR", 1,
    "ST", 1,
    "STR", 1,
    "THR", 1,
    "TR", 1,
] )
# 'V': vowels; case presumably distinguishes two pronunciations ("au" is a
# diphthong) — TODO confirm how the spelling step interprets case.
vowel = WeightedTuple( [
    "A", 2,
    "E", 2,
    "I", 1,
    "O", 3,
    "U", 1,
    "a", 3,
    "e", 4,
    "i", 3,
    "o", 2,
    "u", 2,
    "au", 1,
] )
# 'c': single consonants allowed at a syllable end.
endingConsonant = WeightedTuple( [
    "B", 1,
    "CH", 1,
    "D", 2,
    "F", 1,
    "G", 2,
    "J", 1,
    "K", 2,
    "L", 3,
    "M", 3,
    "N", 3,
    "P", 1,
    "R", 4,
    "S", 4,
    "SH", 2,
    "T", 4,
    "TH", 4,
    "V", 2,
    "Z", 1,
] )
# 'd': consonant clusters allowed at a syllable end.
endingMultiConsonant = WeightedTuple( [
    "LB", 1,
    "LCH", 2,
    "LD", 3,
    "LG", 1,
    "LJ", 1,
    "LK", 2,
    "LM", 2,
    "LN", 2,
    "LP", 2,
    "LS", 2,
    "LSH", 2,
    "LST", 2,
    "LT", 3,
    "LTH", 2,
    "NCH", 2,
    "ND", 3,
    "NG", 3,
    "NJ", 2,
    "NK", 2,
    "NS", 2,
    "NSH", 1,
    "NST", 2,
    "NT", 5,
    "NTH", 2,
    "RCH", 2,
    "RD", 1,
    "RG", 2,
    "RJ", 1,
    "RLB", 1,
    "RLD", 1,
    "RLG", 1,
    "RLJ", 1,
    "RLK", 1,
    "RLM", 1,
    "RLN", 1,
    "RLSH", 1,
    "RLST", 1,
    "RLTH", 1,
    "RM", 1,
    "RN", 2,
    "RP", 2,
    "RS", 2,
    "RSH", 1,
    "RST", 2,
    "RT", 2,
    "RTH", 2,
    "SHK", 1,
    "SHT", 1,
    "SK", 2,
    "ST", 5,
] )
|
988,570 | 4d4f228e15a7a28bb8ed70e514f36bae6e2a4c0b | mystring = 'hello'
mylist = [letter for letter in mystring]
print(mylist)
# Demonstrates list construction: mapping, filtering, conditional
# expressions, and nested loops. Each result is printed as it is built.

# Squares of 0..10, built with an explicit loop.
mylist = []
for num in range(11):
    mylist.append(num ** 2)
print(mylist)

# Even numbers 0..10.
mylist = list(range(0, 11, 2))
print(mylist)

# Celsius -> Fahrenheit conversion table.
celcius = [0, 10, 23, 34]
fahrenheit = []
for temp in celcius:
    fahrenheit.append((9 / 5) * temp + 32)
print(fahrenheit)

# Keep even numbers, replace odd ones with the marker string 'ODD'.
results = ['ODD' if x % 2 != 0 else x for x in range(0, 11)]
print(results)

# Cartesian products of two small lists, first as a nested comprehension...
mylist = [x * y for x in [2, 4, 6] for y in [100, 200, 300]]
print(mylist)

# ...then as explicit nested loops.
mylist = []
for x in [2, 4, 6]:
    for y in [1, 10, 100]:
        mylist.append(x * y)
print(mylist)
988,571 | 08791666fdebb10c7eca5623b527851811d1bee9 | """Return order of pop, push ops to make given arr
:input:
8
4
3
6
8
7
5
2
1
:return:
+
+
+
+
-
-
+
+
-
+
+
-
-
-
-
-
url: https://www.acmicpc.net/problem/1874
"""
import sys
sys.setrecursionlimit(10 ** 6)
class Stack:
    """Minimal LIFO stack with an O(1) length counter.

    Args:
        arr: Optional iterable of initial items, bottom-to-top order.
    """

    def __init__(self, arr=()):
        # BUG FIX: the default used to be a mutable `[]`. `list(arr)` copied
        # it, so no state actually leaked, but an immutable default removes
        # the mutable-default-argument trap entirely.
        self._stack = list(arr)
        self._len = len(self._stack)

    def pop(self):
        """Remove and return the top item; raise ValueError when empty."""
        if self._len == 0:
            raise ValueError("Stack is empty")
        v = self._stack.pop()
        self._len -= 1
        return v

    def push(self, v):
        """Push `v` onto the top of the stack."""
        self._stack.append(v)
        self._len += 1

    def is_empty(self):
        """Return True when the stack holds no items."""
        return self._len == 0
def calc_order(arr):
    """Compute the push/pop sequence that reproduces `arr` with one stack.

    Values 1..N are pushed in increasing order; pops may happen at any time.
    Returns (True, orders) where orders is a list of '+' (push) / '-' (pop)
    operations, or (False, []) when `arr` cannot be produced.

    Rewritten iteratively: the original recursed once per element (depth N),
    which can exhaust the interpreter's C stack for large inputs even with a
    raised recursion limit. Also guards against popping an empty stack,
    which previously raised ValueError instead of reporting impossibility.
    """
    orders = []
    stack = []  # a plain list is an O(1) append/pop stack
    now = 1     # next value to push
    for target in arr:
        # Push until `target` is on top of the stack.
        while now <= target:
            stack.append(now)
            orders.append('+')
            now += 1
        # The top of the stack must be exactly `target`, else impossible.
        if not stack or stack.pop() != target:
            return (False, [])
        orders.append('-')
    return (True, orders)
if __name__ == '__main__':
    # Read the element count, then one target value per line.
    count = int(input())
    targets = [int(input()) for _ in range(count)]
    ok, ops = calc_order(targets)
    if not ok:
        print('NO')
    else:
        for op in ops:
            print(op)
|
988,572 | 25f5dfefd652623046e4e8c91d703a318d0f48a0 | import sys
import collections
# For each test case: given arrival times and departure times, report the
# maximum number of guests present simultaneously and the earliest time at
# which that maximum occurs. Arrivals at a given time are counted before
# departures at the same time.
for _ in range(int(input())):
    size = int(input())  # consumed; the counts come from the lists below
    arrivals = collections.Counter(map(int, input().split()))
    departures = collections.Counter(map(int, input().split()))
    best_count = 0
    best_time = 0
    current = 0
    for moment in sorted(set(arrivals) | set(departures)):
        current += arrivals.get(moment, 0)
        # Strict '>' keeps the earliest time among ties.
        if current > best_count:
            best_count = current
            best_time = moment
        current -= departures.get(moment, 0)
    print(best_count, best_time)
|
988,573 | e25023ba806b46a00ba95ac6fa220e17c3764496 | from django.db import models
from django.db.models.constraints import UniqueConstraint
from django.contrib.postgres.fields import ArrayField
from django.db.models.fields import BigIntegerField
class SmsUser(models.Model):
    """Account that owns an SMS quota; `email` is the natural key."""
    email = models.EmailField(max_length=100, unique=True)
    # NOTE(review): stored via a plain CharField — presumably hashed upstream;
    # confirm, since Django's auth password hashers are not used here.
    password = models.CharField(max_length=50)
    is_admin = models.BooleanField(default=False, blank=True)
    # NOTE(review): remaining (or consumed?) message count — confirm semantics.
    sms_count = models.BigIntegerField()

    def __str__(self):
        return self.email
class SmsLogModel(models.Model):
    """Per-user log of sent SMS messages and recipient numbers."""
    user_id = models.UUIDField(unique=True)
    message = ArrayField(ArrayField(models.CharField(max_length=100)))
    count = models.IntegerField(null=True)
    # BUG FIX: the base field was built as
    # `BigIntegerField(models.BigIntegerField())`, which passed a field
    # *instance* as verbose_name. The element type is a plain bigint.
    contact_no = ArrayField(models.BigIntegerField())

    def __str__(self):
        # BUG FIX: __str__ must return a str; user_id is a uuid.UUID instance,
        # so returning it directly raised TypeError.
        return str(self.user_id)
|
988,574 | a40ac6dde22875f9ab8a95156ff10fcca0827ddd | # 请定义一个队列并实现函数 max_value 得到队列里的最大值,要求函数max_value、push_back 和 pop_front 的均摊时间复杂度都是O(1)。
#
# 若队列为空,pop_front 和 max_value 需要返回 -1
#
# 示例 1:
#
# 输入:
# ["MaxQueue","push_back","push_back","max_value","pop_front","max_value"]
# [[],[1],[2],[],[],[]]
# 输出: [null,null,null,2,1,2]
# 示例 2:
#
# 输入:
# ["MaxQueue","pop_front","max_value"]
# [[],[],[]]
# 输出: [null,-1,-1]
from collections import deque
class MaxQueue:
    """FIFO queue that reports its maximum in amortized O(1).

    A secondary, monotonically non-increasing deque holds the candidates
    for the maximum; its head is always the current maximum. Both
    `max_value` and `pop_front` return -1 when the queue is empty.
    """

    def __init__(self):
        self.queue = deque()      # all values, FIFO order
        self.max_queue = deque()  # non-increasing candidates for the max

    def max_value(self) -> int:
        """Return the largest value currently queued, or -1 when empty."""
        if not self.max_queue:
            return -1
        return self.max_queue[0]

    def push_back(self, value: int) -> None:
        """Enqueue `value`, evicting smaller max-candidates from the back."""
        # Any candidate smaller than `value` can never be the maximum again,
        # because `value` will outlive it in the queue.
        while self.max_queue and self.max_queue[-1] < value:
            self.max_queue.pop()
        self.max_queue.append(value)
        self.queue.append(value)

    def pop_front(self) -> int:
        """Dequeue and return the front value, or -1 when empty."""
        if not self.queue:
            return -1
        front = self.queue.popleft()
        # If the departing value is the current maximum, retire it too.
        if self.max_queue and front == self.max_queue[0]:
            self.max_queue.popleft()
        return front
988,575 | 0a34517c7a9e968ac4e1a30ece6e2bab06d9013e | """
FapolicydRules - file ``/etc/fapolicyd/rules.d/*.rules``
========================================================
"""
from insights import parser
from insights.core import LogFileOutput
from insights.specs import Specs
@parser(Specs.fapolicyd_rules)
class FapolicydRules(LogFileOutput):
    """
    Parse the content of ``/etc/fapolicyd/rules.d/*.rules`` file.

    .. note::
        The rules do not require to get the parsed result currently.
        It just need to check if it contains specific lines, so use
        :class:`insights.core.LogFileOutput` as the base class.

    Sample input::

        deny_audit perm=any pattern=ld_so : all
        deny_audit perm=any pattern=ld_preload : all

    Examples:
        >>> from insights.parsers.fapolicyd_rules import FapolicydRules
        >>> FapolicydRules.last_scan('ld_so_deny_audit_test', 'deny_audit perm=any pattern=ld_so : all')
        >>> type(fapolicyd_rules)
        <class 'insights.parsers.fapolicyd_rules.FapolicydRules'>
        >>> fapolicyd_rules.ld_so_deny_audit_test.get('raw_message')
        'deny_audit perm=any pattern=ld_so : all'
    """
    # Intentionally empty: LogFileOutput provides everything needed.
    # `last_scan` registers a scanner whose result appears as an attribute
    # (see the `ld_so_deny_audit_test` access in the example above).
    pass
|
988,576 | 4cd4d9ef0ea2f6f07e1fba070875f898ddcdeb38 | from pyvolcafm import *
# Builds a 32-voice bank for a Volca FM-style synth and writes it as SysEx.
# NOTE(review): `xrange` means this is Python 2 code.
voice = Voice()  # NOTE(review): dead store — immediately rebuilt in the loop
voices = []
for i in xrange(32):
    voice = Voice()
    voice.algo = 6
    # Detune left unset for all six operators (placeholder loop).
    for j in xrange(6):
        # voice.operators[j].detu =
        pass
    # Odd-numbered operators: softer envelope levels, output level 65,
    # frequency coarse 1 — presumably the modulators for algorithm 6.
    for j in [1, 3, 5]:
        voice.operators[j].egl = [50, 80, 90, 50]
        voice.operators[j].egr = [40, 40, 40, 40]
        voice.operators[j].olvl = 65
        voice.operators[j].frec = 1
    # Even-numbered operators: full-level envelope, frequency coarse 2 —
    # presumably the carriers. TODO confirm against the algorithm layout.
    for j in [0, 2, 4]:
        voice.operators[j].egl = [99, 80, 60, 0]
        voice.operators[j].egr = [40, 40, 40, 40]
        voice.operators[j].olvl = 99
        voice.operators[j].frec = 2
    # Pitch envelope levels/rates shared by every voice.
    voice.ptl = [70, 60, 50, 50]
    voice.ptr = [98, 50, 50, 50]
    voice.name = 'VOICE '+str(i)
    voices.append(voice)
write_sysex_file('voice.syx', packed_stream_from_bank(voices))
|
988,577 | 8711880206c06a158ea7a4648c6370d34d658f52 | import ael
def getroot(prf,*rest):
    """Walk up the portfolio ownership chain and return the prfid of the
    child whose parent has prfid '9806'; fall back to '1' when there is no
    parent (or no portfolio).

    NOTE(review): getparent() returns `owner_prfnbr`, yet `.prfid` is read
    from the result — verify the ael API returns a Portfolio object here.
    The recursive call drops *rest, which is accepted but unused.
    """
    par = getparent(prf)
    '''
    if par.prfid != '9806':
        return getroot(par)
    else:
        if prf:
            return prf.prfid
        else:
            return '1'
    '''
    if par:
        if par.prfid != '9806':
            return getroot(par)
        else:
            if prf:
                return prf.prfid
            else:
                return '1'
    else:
        return '1'
def getparent(prf):
    """Return the owner (parent) of `prf` via its first PortfolioLink row.

    NOTE(review): raises IndexError when the portfolio has no link row, and
    returns `owner_prfnbr` although callers read `.prfid` from the result —
    confirm whether ael resolves this to a Portfolio object.
    """
    p = ael.PortfolioLink.select('member_prfnbr = %d' %(prf.prfnbr))
    return p[0].owner_prfnbr
'''
print ael.Portfolio['4440798'].prfid
print 'Fin - ', getroot(ael.Portfolio['4440798'],None)
'''
# Static portfolio-id -> trading-desk mapping used by mapping() below.
# Keys are prfid strings (some include the book name); values are desk names.
mapd = {'43034': 'DELTA_ONE','47076': 'DELTA_ONE','Delta One 1 47464': 'DELTA_ONE','Delta One 2 47605': 'DELTA_ONE','Delta One 3 47613': 'DELTA_ONE','Delta One 4 47621': 'DELTA_ONE','47035': 'DELTA_ONE',
        '47159': 'ALSI_TRADING','47001 STRADDLES': 'ALSI_TRADING',
        '44404': 'Single Stock Trading','44255': 'Single Stock Trading','47209 Book Build': 'Single Stock Trading','47696': 'Single Stock Trading',
        '47670': 'Single Stock Trading','47688': 'Single Stock Trading', '47662 EQ_SA_PairsOption': 'Single Stock Trading',
        '47506 NRD': 'Linear Trading','47589': 'Linear Trading','47324': 'Linear Trading','42945 CFD Misdeals': 'Linear Trading','47043':'Linear Trading',
        '47738': 'Linear Trading','47472': 'Single Stock Trading', '47787_RAFRES': 'Linear Trading',
        '42846': 'Client Trading','47597 Client Trades': 'Client Trading',
        '43042': 'SMALL_CAP','47167': 'SMALL_CAP','47373': 'SMALL_CAP',
        '47415': 'Structured Note Products',
        '44263':'Structured Transactions','44271 Telkom Delta':'Structured Transactions','47142':'Structured Transactions','47233':'Structured Transactions',
        '49007 EQ_SA_Opportunity':'Arbitrage Baskets','49015 EQ_SA_Pairs':'Arbitrage Baskets','49031 Eq_SA_TrendA':'Arbitrage Baskets',
        '49072':'Arbitrage Baskets','49114 EQ_SA_StockVol':'Arbitrage Baskets','49023 EQ_SA_RelativeVol':'Arbitrage Baskets',
        'EQ_SA_PairsOption':'Arbitrage Baskets', '47654 EQ_SA_PairsAuction':'Arbitrage Baskets','47795_RAFIND':'Linear Trading', '47803_RAFFIN':'Linear Trading',
        'BRADS':'DELTA_ONE', '47720 EQ-SA-DELTA1':'Arbitrage Baskets','49023':'GRAVEYARD', '49007':'GRAVEYARD', '49015':'GRAVEYARD',
        '47233 OML':'Structured Transactions', '49114':'GRAVEYARD', '47811 Pairs Trading':'DELTA_ONE','47829 USD Equity Swaps':'DELTA_ONE',
        'Delta One 1 45062':'DELTA_ONE','Delta One 2 45070':'DELTA_ONE','Delta One 4 45096':'DELTA_ONE' }
# Subset of desk names — presumably the desks included in some report.
plist = ['Structured Note Products', 'Structured Transactions', 'Single Stock Trading', 'Alsi Trading', 'DELTA_ONE', 'Linear Trading']
def mapping(port, *rest):
    """Map a portfolio's prfid to its desk name via `mapd`.

    Unmapped portfolios are logged and fall back to the raw prfid.
    """
    try:
        return mapd[(port.prfid)]
    except KeyError:
        # BUG FIX: was a bare `except:`, which also swallowed unrelated
        # errors (e.g. a missing `prfid` attribute, KeyboardInterrupt).
        # Only a missing mapping is an expected miss.
        ael.log(port.prfid)
        return port.prfid
|
988,578 | 56f39ce5421912d45d47a7d4ec49116e522abdd3 | from brawlbracket.app import app
# Import smoke test: the banner proves `brawlbracket.app` imported cleanly.
print('\n\n\n----------------test----------------\n\n\n')
988,579 | 74b024ae0aeb5a57966788b12f4df64b33039aab | '''
Problem:
The set [1,2,3,…,n] contains a total of n! unique permutations.
By listing and labeling all of the permutations in order, We get the following sequence (ie, for n = 3):
"123"
"132"
"213"
"231"
"312"
"321"
Given n and k, return the kth permutation sequence.
Note: Given n will be between 1 and 9 inclusive.
'''
'''
Solution:
1. 以某一数字开头的排列有(n-1)! 个。例如: 123, 132, 以1开头的是 2!个
2. 所以第一位数字就可以用 (k-1) / (n-1)! 来确定 .这里K-1的原因是,序列号我们应从0开始计算,否则在边界时无法计算。
3. 第二位数字。假设前面取余后为m,则第二位数字是 第 m/(n-2)! 个未使用的数字。
4. 不断重复2,3,取余并且对(n-k)!进行除法,直至计算完毕
'''
class Solution:
    def getPermutation(self, n, k):
        """Return the k-th (1-indexed) permutation of the digits "12...n".

        Uses the factorial number system: with i items remaining, each
        choice of leading digit accounts for (i-1)! permutations, so the
        digit index is k // (i-1)! and the remainder recurses.

        BUG FIXES vs. the original: `math` was never imported (NameError at
        first call), and `(k - 1) / factorial(...)` plus indexing/removing
        on `range(...)` relied on Python 2 semantics (integer `/`,
        list-returning `range`) that break on Python 3.
        """
        from math import factorial  # local import: module has no imports

        remaining = list(range(1, n + 1))  # unused digits, ascending
        k -= 1  # switch to 0-based rank
        digits = []
        for i in range(n - 1, -1, -1):
            index, k = divmod(k, factorial(i))
            digits.append(str(remaining.pop(index)))
        return "".join(digits)
|
988,580 | 678ce0b5ec08b095e5ba7d31a20a0dbb42378472 | import os.path
# Filesystem layout for the application: everything lives under ~/.Dominik
# (dictionaries, sqlite databases, log files).
# NOTE(review): requires $HOME — raises KeyError on platforms without it.
DIR_DEFAULT = os.path.join(os.environ['HOME'], '.Dominik')
DIR_DIC = os.path.join(DIR_DEFAULT, 'dictionary')
DIR_DIC_YML = os.path.join(DIR_DEFAULT, 'dictionary', 'yml')
DIR_SQLITE = os.path.join(DIR_DEFAULT, 'sqlite')
DIR_LOG = os.path.join(DIR_DEFAULT, 'log')
DATABASE_URI_CHAT_DEFAULT = 'sqlite:///' + os.path.join(DIR_SQLITE, 'chat.db')
# Create the directory tree at import time; failures are reported but not
# fatal (a later open will surface the real problem).
try:
    os.makedirs(DIR_DEFAULT, exist_ok=True)
    os.makedirs(DIR_DIC, exist_ok=True)
    os.makedirs(DIR_DIC_YML, exist_ok=True)
    os.makedirs(os.path.join(DIR_DIC_YML, 'formally'), exist_ok=True)
    os.makedirs(os.path.join(DIR_DIC_YML, 'informally'), exist_ok=True)
    os.makedirs(DIR_SQLITE, exist_ok=True)
    os.makedirs(DIR_LOG, exist_ok=True)
except OSError as e:
    print(e)
# Touch the log file so later appends cannot fail on a missing file.
try:
    open(os.path.join(DIR_LOG, 'app.log'), 'a').close()
except OSError as e:
    print(e)
988,581 | 0d3f63ba72f7a313f3754fad188a509a66767185 | from django.contrib.auth.models import User
from django.http import JsonResponse
from security.models import SystemUser
from utils.security.generators import generate_account_id
from utils.security.utils import *
from utils.security.generators import _generate_jwt_token
from django.utils.datetime_safe import date
def create_sys_account(userImage, fullName, password):
    """Create a SystemUser with a generated id "SYS<first><last>/<year>"
    and return a success JsonResponse.

    NOTE(review): fullName must be non-empty (fullName[0]/fullName[-1]
    raise IndexError otherwise), and the generated userId is not checked
    for uniqueness before save — confirm upstream validation.
    """
    todays_date = date.today()
    userId = "SYS" + fullName[0] + fullName[-1] + "/" + str(todays_date.year)
    userModel = SystemUser(
        userId= userId,
        userImage= userImage,
        fullName= fullName,
        password= hash_password(password),
    )
    userModel.save()
    return JsonResponse({
        'code': 0,
        'userId': userId,
        'message': 'System Account created',
    })
def authenticate_sys_admin(userId, password):
    """Check credentials for a system admin; on success mint a unique JWT,
    store it on the user row, and return it.

    Returns a JsonResponse: code 0 with a token on success, code 1 on any
    failure (unknown user or wrong password).
    """
    if SystemUser.objects.filter(userId=userId).exists():
        user = SystemUser.objects.get(userId=userId)
        if check_password(user.password, password):
            # BUG FIX: the original assigned the *function object*
            # (`result = _generate_jwt_token`, no call), so the uniqueness
            # loop compared tokens against a function and the token that was
            # finally stored was generated once with no uniqueness re-check.
            token = _generate_jwt_token()
            while SystemUser.objects.filter(token=token).exists():
                token = _generate_jwt_token()
            SystemUser.objects.filter(userId=userId).update(token=token)
            return JsonResponse({
                'code': 0,
                'token': token,
            })
        else:
            return JsonResponse({
                'code': 1,
                'message': 'Authentication Failed'
            })
    else:
        # NOTE(review): the trailing '1' looks like debug residue, but the
        # distinct message is kept to avoid changing observable behavior.
        return JsonResponse({
            'code': 1,
            'message': 'Authentication Failed1'
        })
|
988,582 | a98c7e5ec1f3c0bb25cdedbe2b893d7bbb4990a5 | import unittest
from pychembldb import chembldb, Cell
class CellTest(unittest.TestCase):
    """Field-by-field checks of the first Cell row in the ChEMBL database.

    Requires a populated chembldb connection; each test asserts one column
    of the fixture row (cell_id == 1, the DC3F hamster lung line).
    """

    def setUp(self):
        self.target = chembldb.query(Cell).get(1)

    def test_cell_id(self):
        self.assertEqual(self.target.cell_id, 1)

    def test_cell_name(self):
        self.assertEqual(self.target.cell_name, "DC3F")

    def test_cell_description(self):
        self.assertEqual(self.target.cell_description, "DC3F")

    def test_cell_source_tissue(self):
        self.assertEqual(self.target.cell_source_tissue, "Lung")

    def test_cell_source_organism(self):
        self.assertEqual(self.target.cell_source_organism, "Cricetulus griseus")

    def test_cell_source_tax_id(self):
        self.assertEqual(self.target.cell_source_tax_id, 10029)

    def test_clo_id(self):
        self.assertEqual(self.target.clo_id, None)

    def test_efo_id(self):
        self.assertEqual(self.target.efo_id, None)

    def test_cellosaurus_id(self):
        self.assertEqual(self.target.cellosaurus_id, "CVCL_4704")

    def test_cl_lincs_id(self):
        self.assertEqual(self.target.cl_lincs_id, None)

    # NOTE(review): name is presumably a typo for test_chembl_id; kept as-is
    # since renaming would change the reported test id.
    def test_cembl_id(self):
        self.assertEqual(self.target.chembl_id, "CHEMBL3307241")
|
988,583 | 878d14b919f1e568dd985a13716fae44324e96ff | # Module: say
# Description: Uses powershell and a TTS engine to make your computer say something
# Usage: !say "Something to say"
# Dependencies: time, os
import os, asyncio, configs
async def say(ctx, txt):
    """Speak `txt` aloud via the platform TTS engine and echo it to chat.

    SECURITY FIX: `txt` comes from chat users and used to be interpolated
    into an `os.system` shell string (command injection, e.g.
    `!say "x'); rm -rf ~ #"`). Commands now run via subprocess with an
    argument list and shell=False.
    """
    import subprocess  # local import: keeps the module's import line intact

    if configs.operating_sys == "Windows":
        await ctx.send("Saying: " + txt)
        # PowerShell single-quoted strings escape ' by doubling it, so the
        # text cannot break out of the Speak('...') literal.
        safe = txt.replace("'", "''")
        subprocess.run([
            "powershell",
            "-Command",
            "Add-Type -AssemblyName System.Speech; "
            "$synth = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer; "
            "$synth.Speak('" + safe + "')",
        ])
    elif configs.operating_sys == "Linux":
        await ctx.send("Saying: " + txt)
        subprocess.run(["spd-say", txt])
    else:
        await ctx.send("Can't use TTS")
    await asyncio.sleep(3)
|
988,584 | 45ef956f03a8f532582877af71ecfaf185d5d001 | class Node(object):
def __init__(self, name, andrewID):
self.name = name
self.andrewID = andrewID
    def __hash__(self):
        # Hash and equality are both keyed on `name` only, so two Nodes with
        # the same name are interchangeable in sets/dicts (andrewID ignored).
        return hash(self.name)
def __eq__(self, other):
return isinstance(other, Node) and self.name == other.name
    def __repr__(self):
        # Displayed as the bare name (no class wrapper) — used for UI output.
        return self.name
class Queue(object):
    """Named FIFO queue of people for an office-hours style app.

    Args:
        name: Unique queue name; also the identity for hashing/equality.
        descriptions: Free-form description text.
        location: Where the queue is held.
        index: Display/order index.
        category: Queue category label.
        course, coursenum: Optional course association.
    """

    def __init__(self, name, descriptions, location, index, category, course = None, coursenum = None):
        self.name = name
        self.descriptions = descriptions
        self.location = location
        self.course = course
        self.coursenum = coursenum
        self.list = []
        self.index = index
        self.category = category

    def enq(self, person, index=None):
        """Append `person`, or insert at `index` when it is in range.

        Out-of-range indices are silently ignored (original behavior kept).
        """
        if index is None:
            self.list.append(person)
        elif 0 <= index <= len(self.list):
            # BUG FIX: the bound used to read `len(list)` — the builtin
            # type, not the member — so every indexed enqueue raised
            # TypeError instead of inserting.
            self.list.insert(index, person)

    def deq(self, person=None):
        """Dequeue the front person, or remove `person` when given."""
        if person is None:
            self.list = self.list[1:]
        if person in self.list:
            self.list.remove(person)

    def size(self):
        """Return the number of people waiting."""
        return len(self.list)

    def getQueueList(self):
        """Return the underlying (mutable) list of queued people."""
        return self.list

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return isinstance(other, Queue) and self.name == other.name

    def __repr__(self):
        return "Queue(" + self.name + ", " + str(self.list) + ")"
988,585 | 102eea560916b13b4140b17730b523c7703d2fe8 | from flask import Flask, jsonify, Response, request
from wrapper import *
app = Flask(__name__)
@app.route('/multiple-issue',methods=['GET'])
def multipleIssue():
    # Returns the multiple-issues payload from the wrapper module.
    # The method check is redundant — the route only accepts GET — and a
    # non-GET would fall through to an implicit None.
    if request.method == 'GET':
        return getMultipleIssues()
@app.route('/img',methods=['GET'])
def image():
    # ?issue=<theme> selects which image URL the wrapper returns
    # (None when the query parameter is absent).
    theme = request.args.get("issue")
    if request.method == 'GET':
        return getImageUrl(theme)
if __name__ == '__main__':
    # Flask development server; use a WSGI server in production.
    app.run()
988,586 | 5dd2e38b3ff766ae1e52ff81de85dedf3ccf67d1 | from django.shortcuts import render, redirect, get_object_or_404
from django.views import generic
from django import forms
from django.template import Template
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import auth
from django.utils.datastructures import MultiValueDict
from django.urls import reverse
from django.views.generic.edit import FormMixin, ModelFormMixin
from meets.models import Meet, Attendee, Assignment
from teams.models import Team, TeamMember
from userprofile.models import UserProfile
# CWOrg's library
from cwlog import logger
import datetime
from common.utils import get_today, get_month_range
class DashboardView(LoginRequiredMixin, generic.DetailView):
    '''Summary of the logged-in user's activities: profile, upcoming meets
    (next 5), and team memberships, rendered as table rows.'''
    template_name = "dashboard/dashboard.html"

    def __init__(self, *args, **kwargs):
        # No-op override: only chains to the parent initializer.
        super(DashboardView, self).__init__(*args, **kwargs)
        pass

    def get_object(self):
        # The dashboard always shows the requesting user's own profile;
        # no pk/slug from the URL is consulted.
        user = self.request.user
        return UserProfile.objects.get(user=user)

    def get_object_data(self, **kwargs):
        """List of object fields to display.

        Choice fields values are expanded to readable choice label.
        """
        # NOTE(review): currently identical to get_object(); the choice-label
        # expansion described above is not implemented yet.
        user = self.request.user
        return UserProfile.objects.get(user=user)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        user = self.request.user
        context['user'] = user
        today = get_today()
        daterange = get_month_range()  # NOTE(review): computed but unused
        teams = Team.objects.filter(teammember__member=user)
        context['teams'] = teams
        # Next five meets for any of the user's teams, soonest first.
        meets = Meet.objects.filter(team__teammember__member=user).filter(starttime__gte=today).order_by('starttime')[:5]
        attendees = Attendee.objects.filter(meet__in=meets)  # NOTE(review): unused
        # Header tuples: (model field, column key, css class, title).
        meet_headers = [ ("name", "meet", "", "Meet"),
                         ("team", "team", "", "Team"),
                         ("starttime", "datetime", "", "Date"),
                         ]
        context['meet_headers'] = meet_headers
        upcoming = []
        for meet in meets:
            mrow = []
            # Only the first cell of each row carries the detail-page URL.
            url = "meets/{}".format( meet.slug)
            for field, col, cls, ttl in meet_headers:
                value = getattr(meet, field)
                mrow.append( (value, url) )
                url = None
                pass
            upcoming.append(mrow)
            pass
        context['meet_data'] = upcoming
        # Assemble the team list
        team_headers = [ ("name", "name", "", "Team"),
                         ("owner", "owner", "", "Owner"),
                         ]
        context['team_headers'] = team_headers
        myteams = []
        for team in teams:
            mrow = []
            url = "teams/{}".format(team.slug)
            for field, col, cls, ttl in team_headers:
                value = getattr(team, field)
                mrow.append( (value, url) )
                url = None
                pass
            myteams.append(mrow)
            pass
        # NOTE(review): context['teams'] is set twice — the queryset assigned
        # above is overwritten here by the table rows; confirm the template
        # expects rows rather than Team objects.
        context['teams'] = myteams
        return context

    def has_object_permission(self, request, obj):
        # work when your access /item/item_id/
        # Instance must have an attribute named `owner`.
        return obj.user == request.user

    def has_update_permission(self, request, obj):
        return self.has_object_permission(request, obj)
    pass
|
988,587 | 1ae7a36d922338f32def2e1b3ed6a6e1572137b2 | import argparse
import datetime
import math
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import shape
from shape import ShapeDataset, MixShapeDataset
from shape_train import RnnModel_fc, RnnModel_fcn, RnnModel_fcn2, RnnModel_n2d, RnnModel_d2n
def str2bool(v):
    """Interpret 'true' or '1' (case-insensitive) as True; anything else is False."""
    return v.lower() in {'true', '1'}
def validate(net, data_loader, render_fn, net_type, use_cuda, save_dir ):
    """Used to monitor progress on a validation set & optionally plot solution."""
    # if render_fn is not None:
    #     if not os.path.exists(save_dir):
    #         os.makedirs(save_dir)
    net.eval()
    for batch_idx, batch in enumerate(data_loader):
        # Each batch is (rendered data, sampled points, ground truth).
        data, points, gt = batch
        # points, gt = batch
        if use_cuda:
            data = data.cuda()
            points = points.cuda()
            gt = gt.cuda().detach()
        # Full forward pass through the dataset
        with torch.no_grad():
            # 'fc' nets consume the dense representation; all others take
            # the sampled point cloud.
            if net_type == 'fc':
                output = net(data)
            else:
                output = net(points)
        if render_fn is not None:
            name = 'net_%03d.png'%(batch_idx)
            path = os.path.join(save_dir, name)
            # render_fn(data, output, gt, path )
            # NOTE(review): assumes save_dir exists — creation is handled by
            # the caller (train() makes the render dir per epoch).
            render_fn(data, output, gt, path )
    # Restore training mode before returning to the caller.
    net.train()
def train(net, train_data, valid_data,
          batch_size, lr,
          max_grad_norm, epoch_num,
          **kwargs):
    """Constructs the main actor & critic networks, and performs all training."""
    # Timestamp string for run naming (currently unused — see save_dir below).
    date = datetime.datetime.now()
    now = '%s' % date.date()
    now += '-%s' % date.hour
    now += '-%s' % date.minute
    now = str(now)
    save_dir = os.path.join('shape', 'curve',
                            str( kwargs['dim_num_max'] ) + 'd-' + kwargs['net_type'] + '-' + kwargs['multi_dim'] + '-note-' + kwargs['note'] ) #+ '-' + now)
    checkpoint_dir = os.path.join(save_dir, 'checkpoints')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    net_optim = optim.Adam(net.parameters(), lr=lr)
    loss_function=nn.MSELoss()
    train_loader = DataLoader(train_data, batch_size, shuffle=True, num_workers=0)
    # One big batch for validation: the whole set at once.
    valid_loader = DataLoader(valid_data, len(valid_data), shuffle=False, num_workers=0)
    best_reward = np.inf   # NOTE(review): never updated — reward tracking is vestigial
    my_rewards = []        # NOTE(review): unused
    my_losses = []
    train_size = kwargs['train_size']
    # Log roughly every 100 batches, clamped to [1, batches-per-epoch].
    log_step = int(train_size / batch_size)
    if log_step > 100:
        log_step = int(100)
    if log_step == 0:
        log_step = int(1)
    for epoch in range(epoch_num):
        net.train()
        times, losses, rewards = [], [], []
        epoch_start = time.time()
        start = epoch_start
        valid_dir = os.path.join(save_dir, 'render', '%s' % epoch)
        if not os.path.exists(valid_dir):
            os.makedirs(valid_dir)
        for batch_idx, batch in enumerate(train_loader):
            # --just_test skips optimization and goes straight to validation.
            if kwargs['just_test'] == True:
                continue
            data, points, gt = batch
            # points, gt = batch
            use_cuda = kwargs['use_cuda']
            if use_cuda:
                data = data.cuda()
                points = points.cuda()
                gt = gt.cuda()
            # Full forward pass through the dataset
            if kwargs['net_type'] == 'fc':
                output = net(data)
            else:
                output = net(points)
            loss = loss_function( output, gt.detach() )
            net_optim.zero_grad()
            loss.backward()
            # Clip gradients to stabilize the RNN training.
            torch.nn.utils.clip_grad_norm_(net.parameters(), max_grad_norm)
            net_optim.step()
            losses.append(torch.mean(loss.detach()).item())
            if (batch_idx + 1) % log_step == 0:
                end = time.time()
                times.append(end - start)
                start = end
                mean_loss = np.mean(losses[-log_step:])
                my_losses.append(mean_loss)
                print('  Epoch %d  Batch %d/%d, loss: %2.4f, took: %2.4fs' %
                      (epoch, batch_idx, len(train_loader), mean_loss,
                       times[-1]))
        mean_loss = np.mean(losses)
        # NOTE(review): `rewards` is never filled, so this is np.mean([]) —
        # nan plus a RuntimeWarning; the value is also never used.
        mean_reward = np.mean(rewards)
        # Save the weights
        epoch_dir = os.path.join(checkpoint_dir, '%s' % epoch)
        if not os.path.exists(epoch_dir):
            os.makedirs(epoch_dir)
        save_path = os.path.join(epoch_dir, 'net.pt')
        torch.save(net.state_dict(), save_path)
        # Save rendering of validation set dims
        valid_dir = os.path.join(save_dir, 'render', '%s' % epoch)
        validate(
            net,
            valid_loader,
            render_fn=kwargs['render_fn'],
            net_type=kwargs['net_type'],
            use_cuda=kwargs['use_cuda'],
            save_dir=valid_dir
        )
        print('Epoch %d,  mean epoch loss: %2.4f, took: %2.4fs '\
              '(%2.4fs / %d batches) ' % \
              (epoch, mean_loss, time.time() - epoch_start, np.mean(times), log_step ) )
        # '(%2.4fs / %d batches) | shape: %s' % \
        # (epoch, mean_loss, time.time() - epoch_start, np.mean(times), log_step, data.shape ) )
    # Persist the loss curve (plot + raw values) for the whole run.
    plt.close('all')
    plt.title('Loss')
    plt.plot(range(len(my_losses)), my_losses, '-')
    plt.savefig(save_dir + '/loss.png' , bbox_inches='tight', dpi=400)
    np.savetxt(save_dir + '/losses.txt', my_losses)
def load_and_train(args):
    """Build the datasets and model from CLI args, then run train().

    Creates (or reuses) the mixed shape dataset for every dimension count in
    [dim_num_min, dim_num_max] stepped by dim_num_step, picks the network
    class by --net_type, and optionally resumes from --checkpoint.
    """
    def num_range_list( num_min, num_max, num_step ):
        # Inclusive range of dimension counts; collapses to one entry when
        # min == max.
        if num_max == num_min:
            return [num_max]
        return [
            i for i in range(
                num_min, num_max+1, num_step
            )
        ]
    dim_num_range = num_range_list( args.dim_num_min, args.dim_num_max, args.dim_num_step )
    use_cuda = args.use_cuda
    print('Loading data...')
    # Split the requested sizes evenly across the dimension counts.
    total_type_num = len(dim_num_range)
    each_train_size = int( args.train_size / total_type_num )
    each_valid_size = int( args.valid_size / total_type_num )
    train_files, valid_files = shape.create_mix_dataset(
        dim_num_range,
        each_train_size,
        each_valid_size,
        args.sample_num,
        args.standard,
        seed=args.seed,
    )
    # --just_generate: produce dataset files only, no training.
    if args.just_generate == True:
        return
    train_data = MixShapeDataset( train_files, args.batch_size, args.train_size, args.seed )
    valid_data = MixShapeDataset( valid_files, args.valid_size, args.valid_size, args.seed + 1 )
    input_size = 2
    # Select the model variant; 'fcn2' consumes 4-channel inputs.
    if args.net_type == 'n2d':
        RnnModel = RnnModel_n2d
    elif args.net_type == 'd2n':
        RnnModel = RnnModel_d2n
    elif args.net_type == 'fc':
        RnnModel = RnnModel_fc
    elif args.net_type == 'fcn':
        RnnModel = RnnModel_fcn
    elif args.net_type == 'fcn2':
        RnnModel = RnnModel_fcn2
        input_size = 4
    net = RnnModel(
        input_size,
        args.hidden_size,
        args.num_layers,
        args.dropout,
    )
    if use_cuda:
        net = net.cuda()
    kwargs = vars(args)
    kwargs['train_data'] = train_data
    kwargs['valid_data'] = valid_data
    kwargs['render_fn'] = shape.render
    # 'M' (multi-dimension mix) vs 'S' (single dimension) — used in run names.
    multi_str = ''
    if len(dim_num_range) > 1:
        multi_str += 'M'
    else:
        multi_str += 'S'
    kwargs['multi_dim'] = multi_str
    print(multi_str)
    if args.checkpoint:
        path = os.path.join(args.checkpoint, 'net.pt')
        net.load_state_dict(torch.load(path))
        print('Loading pre-train model', path)
    train(net, **kwargs)
if __name__ == '__main__':
    # CLI for the shape-fitting experiments; defaults are debug-sized.
    parser = argparse.ArgumentParser(description='Combinatorial Optimization')
    parser.add_argument('--seed', default=12345, type=int)
    parser.add_argument('--checkpoint', default=None)
    parser.add_argument('--lr', default=5e-4, type=float)
    parser.add_argument('--max_grad_norm', default=2., type=float)
    parser.add_argument('--batch_size', default=4, type=int)
    parser.add_argument('--dropout', default=0.1, type=float)
    parser.add_argument('--layers', dest='num_layers', default=1, type=int)
    parser.add_argument('--epoch_num', default=1, type=int)
    parser.add_argument('--use_cuda', default=True, type=str2bool)
    parser.add_argument('--cuda', default='0', type=str)
    parser.add_argument('--train_size',default=32, type=int)
    parser.add_argument('--valid_size', default=32, type=int)
    parser.add_argument('--net_type', default='fc', type=str)
    # Range of shape dimension counts mixed into the dataset.
    parser.add_argument('--dim_num_min', default=16, type=int)
    parser.add_argument('--dim_num_max', default=16, type=int)
    parser.add_argument('--dim_num_step', default=2, type=int)
    parser.add_argument('--sample_num', default=80, type=int)
    parser.add_argument('--standard', default=0.1, type=float)
    parser.add_argument('--hidden_size', default=128, type=int)
    parser.add_argument('--just_test', default=False, type=str2bool)
    parser.add_argument('--just_generate', default=False, type=str2bool)
    parser.add_argument('--note', default='debug', type=str)
    args = parser.parse_args()
    print('Sample num: %s' % args.sample_num)
    print('Dim min num: %s' % args.dim_num_min)
    print('Dim max num: %s' % args.dim_num_max)
    print('Net type: %s' % args.net_type)
    print('Note: %s' % args.note)
    # Restrict visible GPUs before any CUDA context is created.
    if args.use_cuda:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    load_and_train(args)
|
988,588 | 3fb98eb68b61ad9968065ebf555fe75c62f98c16 | # -*- coding: utf-8 -*-
"""
Created on Mon May 3 10:09:13 2021
@author: Keyur Asodariya
Time com:- O(n^1.585)
"""
# Read the two (arbitrarily large) integers to multiply.
X=int(input("Enter number 1 :"))
Y=int(input("Enter number 2 :"))
def karatsuba(X, Y):
    """Multiply X and Y with Karatsuba's divide-and-conquer scheme.

    Three recursive sub-multiplications replace the four of the schoolbook
    split, giving O(n^1.585) digit operations.
    """
    # Single-digit (or negative) factor: multiply directly.
    if X < 10 or Y < 10:
        return X * Y
    # Split both numbers at half the longer digit count; the width is made
    # even so the recombination exponents line up (10**width == (10**half)**2).
    width = max(len(str(X)), len(str(Y)))
    if width % 2 != 0:
        width -= 1
    half = width // 2
    high1, low1 = divmod(X, 10 ** half)
    high2, low2 = divmod(Y, 10 ** half)
    hh = karatsuba(high1, high2)
    ll = karatsuba(low1, low2)
    # (a+b)(c+d) - ac - bd == ad + bc: one multiplication for the cross term.
    cross = karatsuba(high1 + low1, high2 + low2) - hh - ll
    return hh * 10 ** width + cross * 10 ** half + ll
# Print the product computed by the Karatsuba routine above.
print(karatsuba(X,Y))
988,589 | 6751cbdad700f3cc793eda01f4412f2c71ab5823 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerMeta(type):
    """Metaclass turning a class into a sorted container of its constants.

    Classes using it gain `.all()` (sorted values of every non-dunder class
    attribute), `str(cls)` (string of that list), and `in` membership tests
    against the values.
    """

    def all(cls):
        values = (getattr(cls, name) for name in dir(cls) if not name.startswith('__'))
        return sorted(values)

    def __str__(cls):
        return str(cls.all())

    def __contains__(cls, item):
        return item in cls.all()
# TODO: consider moving all these enums into some appropriate section of the code, rather than having them be global
# like this. (e.g. instead set METHODS = {'euler': Euler, ...} in methods/__init__.py)
class METHODS(metaclass=ContainerMeta):
    # SDE solver names accepted by the integration entry points.
    euler = 'euler'
    milstein = 'milstein'
    srk = 'srk'
    midpoint = 'midpoint'
    reversible_heun = 'reversible_heun'
    adjoint_reversible_heun = 'adjoint_reversible_heun'
    heun = 'heun'
    log_ode_midpoint = 'log_ode'  # attribute name differs from its value
    euler_heun = 'euler_heun'
euler_heun = 'euler_heun'
class NOISE_TYPES(metaclass=ContainerMeta):  # noqa
    # Structure of the diffusion term, from least to most constrained.
    general = 'general'
    diagonal = 'diagonal'
    scalar = 'scalar'
    additive = 'additive'
class SDE_TYPES(metaclass=ContainerMeta):  # noqa
    # Stochastic-integral convention under which the SDE is interpreted.
    ito = 'ito'
    stratonovich = 'stratonovich'
class LEVY_AREA_APPROXIMATIONS(metaclass=ContainerMeta):  # noqa
    none = 'none'  # Don't compute any Levy area approximation
    space_time = 'space-time'  # Only compute an (exact) space-time Levy area
    davie = 'davie'  # Compute Davie's approximation to Levy area
    foster = 'foster'  # Compute Foster's correction to Davie's approximation to Levy area
class METHOD_OPTIONS(metaclass=ContainerMeta):  # noqa
    # Keys accepted in the per-method options dict.
    grad_free = 'grad_free'
|
988,590 | 6eb88ef84533d7e52566087d55b995d8364e94fe | import numpy as np
import serial
import cv2
pi = 3.14159265359  # NOTE(review): truncated pi; math.pi would be exact to double precision
def distance(ax, ay, bx, by):
    """Euclidean distance between points (ax, ay) and (bx, by)."""
    dx = ax - bx
    dy = ay - by
    return np.sqrt(dx ** 2 + dy ** 2)
def getangle(xx, yy, dxx, dyy):
    """Heading (radians) from point (xx, yy) toward (dxx, dyy).

    Hand-rolled atan2: arctan of the slope, corrected per half-plane using
    the module-level `pi` constant.
    """
    if dxx == xx:
        # Vertical line: straight "down" (negative) when dyy < yy.
        return -pi / 2 if dyy < yy else pi / 2
    if dxx < xx:
        # Target is to the left: shift arctan into the correct half-plane.
        base = np.arctan((dyy - yy) / (dxx - xx))
        return base - pi if dyy < yy else base + pi
    # Target is to the right: plain arctan suffices.
    return np.arctan((dyy - yy) / (dxx - xx))
def getangledist(xx, yy, dxx, dyy, aa):
    """Relative bearing (wrapped to [-pi, pi]) and distance from the robot
    at (xx, yy) with heading `aa` to the target point (dxx, dyy)."""
    bearing = getangle(xx, yy, dxx, dyy) - aa
    # Wrap the bearing into [-pi, pi].
    if bearing > pi:
        bearing -= 2 * pi
    elif bearing < -pi:
        bearing += 2 * pi
    return bearing, distance(dxx, dyy, xx, yy)
def switch(de):
    """Decide whether driving in reverse is shorter for turn angle `de`.

    Returns (adjusted_angle, reverse_flag): when |de| >= pi/2 the angle is
    folded by pi and the reverse flag is set to 1, otherwise 0.
    """
    if -pi / 2 < de < pi / 2:
        return de, 0
    # Fold the angle into the forward half-plane and flag reverse driving.
    return (de - pi, 1) if de > 0 else (de + pi, 1)
def goto(de, re, spd):
    """Convert a heading error into (left, right) wheel speeds.

    Large errors (|de| > pi/6) spin in place at +/-100; smaller errors steer
    proportionally around the base speed. When re == 1 the speeds are swapped
    and negated so the robot drives tail-first.
    """
    if de > pi/6:
        left, right = -100, 100
    elif de < -pi/6:
        left, right = 100, -100
    else:
        left = int(spd + 100 * de)
        right = int(spd - 100 * de)
    if re == 1:
        left, right = -right, -left
    return left, right
def gotopro(de, re, dis):
    """Pick a base speed from the remaining distance, then delegate to goto().

    Far away (> 15) drive fast, moderately close (> 3) slow down, otherwise
    stop forward motion and only correct the heading.
    """
    if dis > 15:
        base_speed = 200
    elif dis > 3:
        base_speed = 100
    else:
        base_speed = 0
    return goto(de, re, base_speed)
def getpre(xx, yy, aa, arr):
    """Choose which predicted track point to chase.

    Walks the track until the first index whose distance satisfies
    2 * dist <= 16 * (index + 1); the result is capped at index 19.
    """
    angles = np.zeros(20)
    dists = np.zeros(20)
    for idx, point in enumerate(arr):
        angles[idx], dists[idx] = getangledist(xx, yy, point[0], point[1], aa)
    target = 0
    while 2 * dists[target] > 16 * (target + 1):
        target += 1
        if target == 19:
            break
    return target
def getdir(dxx, dyy, dxxo, dyyo):
    """Quadrant code (1-4) of the dot's displacement between frames.

    Returns 0 when either coordinate did not change (movement along an axis or
    no movement at all).
    """
    moved_pos_x = dxx > dxxo
    moved_neg_x = dxx < dxxo
    if dyy < dyyo:
        if moved_pos_x:
            return 1
        if moved_neg_x:
            return 2
    elif dyy > dyyo:
        if moved_neg_x:
            return 3
        if moved_pos_x:
            return 4
    return 0
def gettrack(dtt, dxx, dyy, tsp):
    """Predict the dot's path for 160 steps of size tsp, sampling every 8th point.

    dtt is the quadrant code produced by getdir(); it selects the initial
    direction along the global diagonal angle b. The dot bounces off the walls
    of the x in [17, 223], y in [17, 163] box. Returns a 20-entry track list.
    """
    if dtt == 1:
        step_x, step_y = np.cos(b), -np.sin(b)
    elif dtt == 2:
        step_x, step_y = -np.cos(b), -np.sin(b)
    elif dtt == 3:
        step_x, step_y = -np.cos(b), np.sin(b)
    elif dtt == 4:
        step_x, step_y = np.cos(b), np.sin(b)
    else:
        step_x, step_y = 0, 0
    track = [[0 for _ in range(2)] for _ in range(20)]
    for step in range(0, 160):
        # Reflect the direction at the arena walls.
        if dxx > 223:
            step_x = -np.cos(b)
        elif dxx < 17:
            step_x = np.cos(b)
        if dyy > 163:
            step_y = -np.sin(b)
        elif dyy < 17:
            step_y = np.sin(b)
        dxx += tsp * step_x
        dyy += tsp * step_y
        if step % 8 == 0:
            track[step // 8] = (dxx, dyy)
    return track
def read():
    """Read one 9-byte pose frame from the tracker serial port.

    Returns (cx, cy, dx, dy, heading): the midpoint of the head/tail markers,
    the target dot position, and the head->tail heading via getangle().
    NOTE(review): readline() returns bytes, so string[1] etc. are raw byte
    values (0-255) used directly as pixel coordinates; the protocol is assumed
    to be a newline-terminated 9-byte packet — confirm against the sender.
    """
    string = readport.readline()
    # Discard malformed reads until a complete 9-byte frame arrives.
    while len(string) != 9:
        string = readport.readline()
    hx = string[1]  # head marker x
    hy = string[2]  # head marker y
    tx = string[3]  # tail marker x
    ty = string[4]  # tail marker y
    dx = string[5]  # target dot x
    dy = string[6]  # target dot y
    return (hx+tx)/2, (hy+ty)/2, dx, dy, getangle(tx, ty, hx, hy)
def write(ls, rs):
    """Send a 3-byte motor command: [sign code, |left|, |right|].

    The first byte encodes the sign combination of the wheel speeds
    (1 = ++, 2 = -+, 3 = +-, 4 = --), followed by the two magnitudes.
    NOTE(review): any zero speed falls through to the stop command (1, 1, 1),
    and a magnitude above 255 would fail to encode in latin-1 — confirm goto()
    can never produce one.
    """
    if ls > 0 and rs > 0:
        strsend = chr(1) + chr(abs(ls)) + chr(abs(rs))
    elif ls < 0 and rs > 0:
        strsend = chr(2) + chr(abs(ls)) + chr(abs(rs))
    elif ls > 0 and rs < 0:
        strsend = chr(3) + chr(abs(ls)) + chr(abs(rs))
    elif ls < 0 and rs < 0:
        strsend = chr(4) + chr(abs(ls)) + chr(abs(rs))
    else:
        strsend = chr(1) + chr(1) + chr(1)
    writeport.write(strsend.encode(encoding='latin1'))
def draw():
    """Render the debug view: predicted track, robot markers, and target dot."""
    global x, y, a, dx, dy, going, mm
    img = np.zeros((180, 240, 3))
    # Draw the predicted track only once it is valid (get == 0); the point
    # currently being chased (index mm) is highlighted in red.
    if get == 0:
        for i in range(0, len(going)):
            if i == mm:
                cv2.circle(img, (int(going[i][0]), 180 - int(going[i][1])), 2, (0, 0, 255), 2)
            else:
                cv2.circle(img, (int(going[i][0]), 180 - int(going[i][1])), 2, (255, 255, 255), 2)
    # Reconstruct head/tail marker positions from the pose; the y coordinate
    # is flipped (180 - y) because image rows grow downward.
    hx = x + 10 * np.cos(a)
    hy = y + 10 * np.sin(a)
    tx = x - 10 * np.cos(a)
    ty = y - 10 * np.sin(a)
    cv2.circle(img, (int(hx), int(180 - hy)), 3, (0, 255, 255), 2)
    cv2.circle(img, (int(tx), int(180 - ty)), 3, (0, 255, 0), 2)
    cv2.line(img, (int(hx), 180 - int(hy)), (int(tx), 180 - int(ty)), (255, 255, 255))
    cv2.circle(img, (dx, 180 - dy), 3, (255, 0, 0), 2)
    cv2.line(img, (int(x), 180 - int(y)), (dx, 180 - dy), (255, 255, 255))
    cv2.imshow('car', img)
    cv2.waitKey(1)
# --- Control-loop state ------------------------------------------------------
x = 120       # robot centre x (midpoint of head/tail markers)
y = 90        # robot centre y
dx = 120      # target dot x (current frame)
dy = 90       # target dot y
idx = 120     # x of the point currently being chased
idy = 90      # y of the point currently being chased
a = 0         # robot heading (radians)
delta = 0     # heading error toward the chased point
dist = 0      # distance to the chased point
b = pi/4      # diagonal angle used by the track predictor (gettrack)
lspeed = 0    # last commanded left wheel speed
rspeed = 0    # last commanded right wheel speed
get = 1       # track state: 1 = dot jumped, 2 = recompute track, 0 = track valid
num = 0
rev = 0       # drive-in-reverse flag from switch()
mode = 0      # 1 = chase the dot directly, otherwise chase the predicted track
frame = 0     # frames elapsed since the current track was computed
mm = 0        # index of the track point currently being chased
readport = serial.Serial('COM21', 9600, timeout=50)   # tracker input
writeport = serial.Serial('COM29', 9600, timeout=50)  # motor controller output
while 1:
    # get positions
    dxo = dx
    dyo = dy
    x, y, dx, dy, a = read()
    # Recompute the track once the chase has outlived it; restart completely
    # when the dot jumped more than 10 px between frames.
    if frame > 8 * (mm + 1):
        get = 2
    if distance(dxo, dyo, dx, dy) > 10:
        get = 1
    # get track
    if mode == 1:
        idx = dx
        idy = dy
    else:
        if get == 1:
            get = 2
        elif get == 2:
            dt = getdir(dx, dy, dxo, dyo)
            going = gettrack(dt, dx, dy, 2)
            mm = getpre(x, y, a, going)
            idx, idy = going[mm]
            get = 0
            frame = 0
        else:
            frame += 1
    # calculate angle
    delta, dist = getangledist(x, y, idx, idy, a)
    # head/tail switch
    delta, rev = switch(delta)
    # get speed
    # lspeed, rspeed = goto(delta, rev, 6)
    lspeed, rspeed = gotopro(delta, rev, dist)
    write(lspeed, rspeed)
    draw()
988,591 | faf73df787ae8b2208c8383ab1cb9567573b21f9 | """ Enforce the nocase collation on the email table
Revision ID: 049fed905da7
Revises: 49d77a93118e
Create Date: 2018-04-21 13:23:56.571524
"""
# revision identifiers, used by Alembic.
revision = '049fed905da7'
down_revision = '49d77a93118e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Re-declare user.email as a non-nullable VARCHAR(255) via a batch rewrite
    # (required for SQLite ALTER support).
    # NOTE(review): the revision docstring says this enforces the "nocase"
    # collation, but no collation is passed here — presumably the collation is
    # attached to the type elsewhere; confirm against the model definition.
    with op.batch_alter_table('user') as batch:
        batch.alter_column('email', type_=sa.String(length=255), nullable=False)
def downgrade():
    # NOTE(review): identical to upgrade() — this migration is effectively
    # irreversible (re-applying the same column definition). That may be
    # intentional for a collation-only change; confirm.
    with op.batch_alter_table('user') as batch:
        batch.alter_column('email', type_=sa.String(length=255), nullable=False)
|
988,592 | 1bf724f02c54b128a008a8dfe1f8b4985e262f0c | from random import choice
from random import random
from model.utils import *
class Ant:
    """A single ant that incrementally builds a tour over the given graph."""

    def __init__(self, graph):
        # The tour starts from a uniformly random node.
        self.__graph = graph
        self.__path = [choice(graph.nodes)]

    def computeProduct(self, node):
        """Attractiveness of moving from the current node to *node*:
        trace^alpha * (1/cost)^beta."""
        current = self.__path[-1]
        settings = self.__graph.settings
        pheromone = self.__graph.trace[current][node] ** settings['alpha']
        desirability = 1 / self.__graph[current][node] ** settings['beta']
        return pheromone * desirability

    def addMove(self):
        """Append the next city: greedy with probability q0, roulette otherwise."""
        candidates = [n for n in self.__graph.nodes if n not in self.__path]
        if not candidates:
            return
        scored = [(n, self.computeProduct(n)) for n in candidates]
        if random() < self.__graph.settings['q0']:
            best_node, _ = max(scored, key=lambda entry: entry[1])
            self.__path.append(best_node)
            return
        # Roulette-wheel selection over normalized cumulative scores.
        total = sum(score for _, score in scored)
        cumulative = []
        running = 0
        for node, score in scored:
            running = running + score / total
            cumulative.append((node, running))
        r = random()
        pick = 0
        while r > cumulative[pick][1]:
            pick += 1
        self.__path.append(cumulative[pick][0])

    @property
    def fitness(self):
        """Tour quality as computed by the shared fitness() helper."""
        return fitness(self.__path, self.__graph)

    @property
    def path(self):
        """The list of visited nodes, in visit order."""
        return self.__path
|
988,593 | dc3ca11c1c567df72dd2823a6950c6051feee90c | #! /usr/bin/env python
# encoding: utf-8
import os
import hashlib
class HttpResolver(object):
    """Resolves a dependency by downloading a single file over HTTP."""

    def __init__(self, url_download, dependency, source, cwd):
        """Construct a new instance.

        :param url_download: An UrlDownload instance
        :param dependency: The dependency instance.
        :param source: The URL of the dependency as a string
        :param cwd: Current working directory as a string; new folders are
            created under it.
        """
        self.url_download = url_download
        self.dependency = dependency
        self.source = source
        self.cwd = cwd

    def resolve(self):
        """Fetch the dependency if necessary.

        :return: The path to the resolved dependency as a string.
        """
        # Record which source this dependency was resolved from.
        self.dependency.current_source = self.source
        # A short SHA1 prefix of the URL keeps per-source folders unique yet readable.
        digest = hashlib.sha1(self.source.encode("utf-8")).hexdigest()
        folder_path = os.path.join(self.cwd, "http-" + digest[:6])
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        filename = self.dependency.filename or None
        file_path = self.url_download.download(
            cwd=folder_path, source=self.source, filename=filename
        )
        assert os.path.isfile(file_path), "We should have a valid path here!"
        return file_path
|
988,594 | 2d7a1fd80f1cf61c402495bcd3678991895fefa3 | """
This module is not operational. Sorry.
All the features in this module will automatically be extracted by the FeatureExtractor object.
They must all receive a line and a context, and return a value.
"""
# import nltk
# import en_core_web_md
# from nltk.sentiment.vader import SentimentIntensityAnalyzer
#load word probabilities
#word_probabilities = {}
#
#word_prevalence_path = os.path.join(os.path.dirname(__file__), 'data', 'word_prevalence.txt')
#with open(word_prevalence_path) as f:
# for line in f:
# word, prob = line.split()
# word_probabilities[word] = float(prob)
#
def _extract_full_correspondence_from_context(line, context):
    """Join the last two speaker turns found in *context* into text blocks.

    Returns (previous_turn_text, current_turn_text); either is '' when the
    context contains fewer than one / two speaker changes.
    """
    # TODO maybe the context should only be BACKWARDS!
    current_turn = ''
    previous_turn = ''
    try:
        turn_starts = [i + 1
                       for i in range(len(context) - 1)
                       if context[i].character != context[i + 1].character]
        current_turn = " ".join(entry.txt for entry in context[turn_starts[-1]:])
        previous_turn = " ".join(entry.txt for entry in context[turn_starts[-2]:turn_starts[-1]])
    except IndexError:
        # Fewer than two turn boundaries: keep whatever was computed so far.
        pass
    return previous_turn, current_turn
def num_of_words(line, context):
    """Single-element feature list: whitespace-token count of the line's text.

    (A temporary feature used to exercise the extractor design.)
    """
    word_count = len(line.txt.split())
    return [('num_of_word', word_count)]
def character_speaking(line, context):
    """Feature: the identifier of the character who speaks this line."""
    speaker = line.character
    return [('character_speaking', speaker)]
#
#def word_prevalence(line, context):
# value = 0
# words = re.split(r'[\s\,\.\?\!\;\:"]', line.txt.lower())
# for word in words:
# if word:
# try:
# value += log(word_probabilities[word])
# except KeyError:
# value += log(word_probabilities['UNK'])
# return [('word_prevalence', value)]
#
#
#def rarest_word_probability(line, context):
# words = re.split(r'[\s\,\.\?\!\;\:"]', line.txt.lower())
# words = [word for word in words if word]
# line_word_probabilities = [word_probabilities[word] for word in words if word in word_probabilities]
# if not line_word_probabilities:
# line_word_probabilities.append(0)
# return [('rarest_word_probability', min(line_word_probabilities))]
#
#
#def sentiment_and_semantical_differences(line, context):
# # sentiments
# prev_dialog, this_dialog = _extract_full_correspondence_from_context(line, context)
# sid = SentimentIntensityAnalyzer()
# prev_dialog_ss = sid.polarity_scores(prev_dialog)
# this_dialog_ss = sid.polarity_scores(this_dialog)
#
# # semantic similarity
# doc1, doc2 = nlp(this_dialog), nlp(prev_dialog)
# return [('vader_sent_prev_dialog_neg', prev_dialog_ss['neg']),
# ('vader_sent_prev_dialog_neu', prev_dialog_ss['neu']),
# ('vader_sent_prev_dialog_pos', prev_dialog_ss['pos']),
# ('vader_sent_prev_dialog_comp', prev_dialog_ss['compound']),
# ('vader_sent_dialog_neg', this_dialog_ss['neg']),
# ('vader_sent_dialog_neu', this_dialog_ss['neu']),
# ('vader_sent_dialog_pos', this_dialog_ss['pos']),
# ('vader_sent_dialog_comp', this_dialog_ss['compound']),
# ('semantical_similarity', doc1.similarity(doc2))
# ]
#
#
#
#def word_prevalence(line, context):
|
988,595 | ad5f5a178c5a6a0fc927ae2142b5b484edc1ea2c | import os
import re
from datetime import timedelta
import numpy as np
from dateutil import rrule, parser
from library import yaml
from library.xlwings_pd import OperatingExcelPd
# --- Regex patterns over the exported QQ chat log ---------------------------
CONT_PAT = r"(?<=[\>|\)])\n(.*)"  # message body: the line after the "name<id>" header
SIGN_PAT1 = r"打[卡|开](.*)次(.*)"  # check-in written as "...<times>次<average>"
SIGN_PAT2 = r"打[卡|开](.*)\D(.*)次"  # check-in written as "...<average>...<times>次"
NICK_PAT = r":\d{2} (.*)[\(|\<]([^\)|\>]*)"  # sender nickname + QQ number
DATE_PAT = r"(\d{4}-\d{2}-\d{2}) (\d{1,2}:\d{1,2}:\d{1,2})"  # message timestamp
YAML_PATH = "./config.yaml"
# Mapping from Chinese numerals to Arabic-digit strings.
num_dict = {"一": "1", "二": "2", "三": "3", "四": "4", "五": "5", "六": "6", "七": "7", "八": "8", "九": "9", "十": "10",
            "十一": "11", "十二": "12", "十三": "13", "十四": "14", "十五": "15"}
miss_list = []  # messages whose sender could not be matched to a roster row
susp_list = []  # matched messages whose body could not be parsed into numbers
# Load file paths and word filters from the YAML configuration.
with open(YAML_PATH, 'r', encoding='utf-8') as f:
    conf = yaml.safe_load(f)
TXT_NAME = conf['txt']['name']
TXT_PATH = conf['txt']['path'] + TXT_NAME
EXCEL_NAME = conf['excel']['name']
SHEET_NAME = conf['excel']['sheet']
EXCEL_PATH = conf['excel']['path'] + EXCEL_NAME
NESS_WORDS = conf['necessaryWords']
FORB_WORDS = conf['forbiddenWords']
# Read the whole chat log into memory.
with open(TXT_PATH, "r", encoding="utf-8") as fo:
    txt_file = fo.read()
# Roster spreadsheet: map QQ numbers and nicknames to 1-based row numbers.
ro = OperatingExcelPd(EXCEL_PATH, SHEET_NAME)
dp = ro.return_dp()
QQ_dict = dp.to_dict("dict")['QQ']
QQ_dict = {str(v): int(k) + 1 for k, v in QQ_dict.items()}
nick_dict = dp.to_dict("dict")['nickname']
nick_dict = {v: int(k) + 1 for k, v in nick_dict.items()}
Origin_Date = ro.read_one_cell(1, 3)  # first date covered by the sheet (see run())
Update_Date = ro.read_one_cell(2, 3)  # last already-processed date (see run())
print(Origin_Date)
def char_trans(char):
    """Translate a Chinese numeral to its Arabic-digit string.

    :param char: the string to translate (e.g. "三")
    :return: the mapped digit string, or the input unchanged when no mapping exists
    """
    # dict.get with a default replaces the membership-test-then-index pattern
    # (single lookup instead of two).
    return num_dict.get(char, char)
def inter_trans(inter):
    """Parse a human-entered quantity into a number.

    Handles trailing punctuation, Chinese numerals (via char_trans), and the
    ten-thousand markers "w" / "万" / "。" (e.g. "1w2" -> 12000.0).

    :param inter: the raw string to parse
    :return: the parsed value as a float
    :raises ValueError: when the cleaned string is not numeric
    """
    inter = inter.lower()
    inter = inter.strip("分,, .。")
    inter = char_trans(inter)
    # BUG FIX: the original had `else: return float(inter)` attached to the
    # `if symbol in inter` INSIDE the loop, so it bailed out as soon as the
    # first symbol ("w") was absent and never checked "万"/"。". Now all
    # markers are tried before falling back to a plain float parse.
    for symbol in ("w", "万", "。"):
        if symbol in inter:
            raw_int = char_trans(inter.split(symbol)[0])
            if "." in raw_int:
                return float(raw_int) * 10000
            raw_float = char_trans(inter.split(symbol)[1])
            return float(raw_int + "." + raw_float) * 10000
    return float(inter)
def filter_words(content, nessary=NESS_WORDS):
    """Return True when *content* should be skipped: it contains any forbidden
    word, or is missing any of the required words."""
    if any(word in content for word in FORB_WORDS):
        return True
    return any(word not in content for word in nessary)
def formatter_str(string):
    """Extract (times, average) from a check-in message body.

    Tries the "<times>次<average>" pattern first; when it does not match (or
    yields an empty average), falls back to the "<average>...<times>次" form.
    Averages below 10000 are assumed to be written in units of 万.
    """
    try:
        match = re.search(SIGN_PAT1, string)
        times_raw = match.group(1)
        average_raw = match.group(2)
        if average_raw == "":
            raise AttributeError
    except AttributeError:
        # Note the swapped group order in the fallback pattern.
        match = re.search(SIGN_PAT2, string)
        times_raw = match.group(2)
        average_raw = match.group(1)
    times = inter_trans(times_raw)
    average = inter_trans(average_raw)
    if average < 10000:
        average = average * 10000
    return times, average
def run(Excel_File):
    """
    Main processing loop: parse every check-in message in the chat log and
    write (times, average, total, timestamp) into the roster spreadsheet.

    :param Excel_File: an OperatingExcelPd wrapper around the roster workbook
    :return: None
    """
    print(f"Current First Date: {Origin_Date}")
    li = txt_file.split("\n\n")  # messages are separated by blank lines
    for content in li:
        if filter_words(content): continue
        column_no = 4  # first data column; each day occupies 6 columns
        date = re.search(DATE_PAT, content).group(1)
        time = re.search(DATE_PAT, content).group(2)
        # Days elapsed since the sheet's first date / last processed date.
        diff_date = rrule.rrule(rrule.DAILY, dtstart=parser.parse(str(Origin_Date)),
                                until=parser.parse(f'{date} {time}')).count() - 1
        modify_date = rrule.rrule(rrule.DAILY, dtstart=parser.parse(str(Update_Date)),
                                  until=parser.parse(f'{date} {time}')).count() - 1
        # Only handle a one-week window past Origin_Date, skip already-processed days.
        if diff_date > 6 or modify_date < 0:
            continue
        column_no += diff_date * 6
        if column_no < 0:
            continue
        if "2019-09-12 22:47:51" in content:
            # Debug hook left in place for a specific problematic message.
            print(1)
            pass
        nick = re.search(NICK_PAT, content).group(1)
        QQ = re.search(NICK_PAT, content).group(2)
        time = "0" + time if len(time) == 7 else time  # zero-pad H:MM:SS
        line_no_QQ = QQ_dict.get(QQ) or QQ_dict.get(str(QQ))
        line_no_nick = nick_dict.get(nick)
        # `(a or b) is None` is True only when both lookups failed.
        if (line_no_QQ or line_no_nick) is None:
            miss_list.append(content)
            continue
        elif line_no_QQ is None and line_no_nick is not None:
            # Known nickname but a new QQ number: update the stored QQ.
            line_no = line_no_nick
            print(f"QQ have been changed, auto changing\n{QQ}\t{nick}")
            Excel_File.w_data(line_no, 1, QQ)
        elif line_no_nick is None and line_no_QQ is not None:
            # Known QQ but a new nickname: update the stored nickname.
            line_no = line_no_QQ
            if nick.strip() == "":
                print("nick is None")
            else:
                print(f"nick have been changed, auto changing\n{QQ}\t{nick}")
                Excel_File.w_data(line_no, 2, nick)
        else:
            line_no = line_no_QQ
        # Skip members whose cell for this day is already filled in.
        if not np.isnan(Excel_File.read_one_cell(line_no, column_no)):
            continue
        Excel_File.w_data(line_no, column_no + 3, date + " " + time)
        try:
            words = re.search(CONT_PAT, content).group(1)
            if filter_words(words, ["打", "次"]): continue
            (times, average) = formatter_str(content)
            total = times * average
        except (AttributeError, ValueError):
            # Unparsable body: record the raw text and remember it for the report.
            # NOTE(review): if the CONT_PAT search itself failed, `words` is
            # unbound (or stale from a previous message) here — confirm.
            Excel_File.w_data(line_no, column_no + 5, words)
            susp_list.append(content)
            continue
        print(f"{nick}\n\t\t{date} {time}\t{words}\n")
        Excel_File.w_data(line_no, column_no, times)
        Excel_File.w_data(line_no, column_no + 1, average)
        Excel_File.w_data(line_no, column_no + 2, total)
    # Post-process the spreadsheet.
    # `"line_no" in dir()` tests whether the loop processed at least one message.
    if "line_no" in dir():
        Excel_File.w_data(2, 3, parser.parse(date + " " + time) + timedelta(days=-1))
        unsigned = Excel_File.dp.loc[np.isnan(Excel_File.dp[f"次数.{diff_date + 1}"]), ["QQ", "nickname"]]
    Excel_File.save_to_excel()
    with open(f"./Error_list_{SHEET_NAME}.txt", "w", encoding="utf-8") as f:
        f.write("以下为未解析的成功记录数据".center(60, "*") + "\n")
        for member in susp_list:
            f.write(member)
            f.write("\n" + "".center(30, "-") + "\n")
        f.write("\n\n" + "以下为未找到该成员的消息记录".center(60, "*") + "\n")
        for member in miss_list:
            f.write(member)
            f.write("\n" + "".center(30, "-") + "\n")
        # f.write("下为暂时还未打卡名单:".center(60, "*") + "\n")
    # NOTE(review): if no message was processed above, `unsigned` is never
    # bound and the lines below raise NameError — confirm that is acceptable.
    print("以下为暂时还未打卡名单:(已复制于您粘贴板,可在其它处粘贴使用)")
    unsigned.to_clipboard()
    print(unsigned)
    unsigned.to_csv("./unsigned.csv")
if __name__ == '__main__':
    run(ro)
    # Windows-only: keep the console window open after finishing.
    os.system("pause")
|
988,596 | eec9ff36e9e96db44820cd548d0ff43572e461aa | import json
import scrapy
BASE_IFOOD_URL = 'https://www.ifood.com.br/delivery/'
BASE_AVATAR_URL = 'https://static-images.ifood.com.br/image/upload/f_auto,t_high/logosgde/'
BASE_URL = 'https://marketplace.ifood.com.br/v1/merchants?latitude=-23.19529&longitude=-45.90321&channel=IFOOD'
class Restaurant(scrapy.Item):
    """Scraped record for a single iFood merchant."""
    name = scrapy.Field()
    rating = scrapy.Field()
    price_range = scrapy.Field()
    delivery_time = scrapy.Field()
    delivery_fee = scrapy.Field()
    distance = scrapy.Field()
    category = scrapy.Field()
    avatar = scrapy.Field()
    url = scrapy.Field()

    @staticmethod
    def parse_avatar(item):
        """Return the absolute logo URL for a merchant, or '' when it has none."""
        logo_names = [res['fileName'] for res in item['resources']
                      if res['type'].lower() == 'logo']
        # Last logo wins, mirroring the original scan order; an empty file
        # name still yields ''.
        avatar = logo_names[-1] if logo_names else ''
        return BASE_AVATAR_URL + avatar if avatar else avatar
class IfoodSpider(scrapy.Spider):
    """Crawls the iFood marketplace listing and yields Restaurant items."""
    name = 'ifood'
    start_urls = [f'{BASE_URL}&size=0']

    def parse(self, response):
        """The size=0 bootstrap request only carries the total merchant count;
        fan out one request per 100-item page."""
        payload = json.loads(response.text)
        total = payload['total']
        pages_count = (total + 99) // 100  # ceiling division over page size 100
        for page in range(pages_count):
            page_url = f'{BASE_URL}&size=100&page={page}'
            yield scrapy.Request(page_url, callback=self.parse_page)

    def parse_page(self, response):
        """Turn one page of the merchants JSON into Restaurant items."""
        merchants = json.loads(response.text)['merchants']
        for merchant in merchants:
            yield Restaurant({
                'name': merchant['name'],
                'rating': merchant['userRating'],
                'price_range': merchant['priceRange'],
                'delivery_time': merchant['deliveryTime'],
                'delivery_fee': merchant['deliveryFee']['value'],
                'distance': merchant['distance'],
                'category': merchant['mainCategory']['name'],
                'avatar': Restaurant.parse_avatar(merchant),
                'url': f"{BASE_IFOOD_URL}{merchant['slug']}/{merchant['id']}",
            })
|
988,597 | 7956419d9ce6a24458d2d575382ba8c645206935 | import sys
from random import randint, random, uniform, choice, shuffle
import numpy as np
from .individual import Individual
from .problem import Problem
from .algorithm_genetic import GeneralEvolutionaryAlgorithm
from .operators import RandomGenerator, PmMutator, ParetoDominance, EpsilonDominance, crowding_distance, \
NonUniformMutation, UniformMutator, CopySelector, IntegerGenerator
from .archive import Archive
from copy import copy, deepcopy
import time
import math
class SwarmAlgorithm(GeneralEvolutionaryAlgorithm):
    """Shared base for the multi-objective particle-swarm optimizers below:
    holds the leader/dominance plumbing and the velocity helper math."""

    def __init__(self, problem: Problem, name="General Swarm-based Algorithm"):
        super().__init__(problem, name)
        # self.options.declare(name='v_max', default=, lower=0., desc='maximum_allowed_speed')
        self.global_best = None  # swarm algorithms share the information about who the leader is
        self.dominance = ParetoDominance()  # some dominance must be defined for every multi-objective swarm

    def init_pvelocity(self, population):
        # Hook: subclasses initialise per-particle velocities.
        pass

    def init_pbest(self, population):
        # Every particle starts with itself as its personal best.
        for individual in population:
            individual.features['best_cost'] = individual.costs_signed
            individual.features['best_vector'] = individual.vector

    def khi(self, c1: float, c2: float) -> float:
        """
        Constriction coefficient [1].

        [1] Ebarhart and Kennedym Empirical study of particle swarm optimization,” in Proc. IEEE Int. Congr.
        Evolutionary Computation, vol. 3, 1999, pp. 101–106.

        :param c1: specific parameter to control the particle best component.
        :param c2: specific parameter to control the global best component.
        :return: float, constriction coefficient
        """
        rho = c1 + c2
        if rho <= 4:
            result = 1.0
        else:
            result = 2.0 / (2.0 - rho - (rho ** 2.0 - 4.0 * rho) ** 0.5)
        return result

    def speed_constriction(self, velocity, u_bound, l_bound) -> float:
        """
        Velocity constriction factor [1]: clamps a velocity component to
        +/- half of the parameter's feasible range.

        .. Ref:
            [1] Nebro, Antonio J., et al. "SMPSO: A new PSO-based metaheuristic for multi-objective optimization."
            2009 IEEE Symposium on Computational Intelligence in Multi-Criteria Decision-Making (MCDM). IEEE, 2009.

        :param velocity: velocity for the i^th component
        :param u_bound: upper bound of the parameter
        :param l_bound: lower bound of the parameter
        :return: the clamped velocity
        """
        delta_i = (u_bound - l_bound) / 2.
        # user defined max speed
        velocity = min(velocity, delta_i)
        velocity = max(velocity, -delta_i)
        return velocity

    def select_leader(self):
        # Hook: subclasses pick the global-best particle.
        pass

    def inertia_weight(self):
        # Hook: subclasses define the momentum weight.
        pass

    def update_global_best(self, offsprings):
        pass

    def update_velocity(self, population):
        pass

    def update_position(self, population):
        pass

    def update_particle_best(self, population):
        # Replace the personal best unless the comparison returns 2.
        # NOTE(review): assumes compare(...) == 2 means "current best dominates
        # the new position" — confirm against ParetoDominance.
        for particle in population:
            flag = self.dominance.compare(particle.costs_signed, particle.features['best_cost'])
            if flag != 2:
                particle.features['best_cost'] = particle.costs_signed
                particle.features['best_vector'] = particle.vector

    def turbulence(self, particles, current_step=0):
        # Hook: subclasses apply their mutation scheme.
        pass

    def run(self):
        pass
class OMOPSO(SwarmAlgorithm):
    """
    Implementation of OMOPSO, a multi-objective particle swarm optimizer (MOPSO).
    OMOPSO uses Crowding distance, Mutation and ε-Dominance.
    According to [3], OMOPSO is one of the top-performing PSO algorithms.

    [1] Margarita Reyes SierraCarlos A. Coello Coello
        Improving PSO-Based Multi-objective Optimization Using Crowding, Mutation and ∈-Dominance
        DOI https://doi.org/10.1007/978-3-540-31880-4_35
    [2] S. Mostaghim ; J. Teich :
        Strategies for finding good local guides in multi-objective particle swarm optimization (MOPSO)
        DOI: 10.1109/SIS.2003.1202243
    [3] Durillo, J. J., J. Garcia-Nieto, A. J. Nebro, C. A. Coello Coello, F. Luna, and E. Alba (2009).
        Multi-Objective Particle Swarm Optimizers: An Experimental Comparison.
        Evolutionary Multi-Criterion Optimization, pp. 495-509
    """

    def __init__(self, problem: Problem, name="OMOPSO"):
        super().__init__(problem, name)
        # NOTE(review): the trailing comma makes this statement a 1-tuple
        # expression — harmless, but it looks accidental.
        self.options.declare(name='prob_mutation', default=0.1, lower=0,
                             desc='prob_mutation'),
        self.options.declare(name='epsilons', default=0.01, lower=1e-6,
                             desc='prob_epsilons')
        self.n = self.options['max_population_size']
        self.selector = CopySelector(self.problem.parameters)
        self.dominance = ParetoDominance()
        # Per-particle bookkeeping stored on individual.features.
        self.individual_features['velocity'] = dict()
        self.individual_features['best_cost'] = dict()
        self.individual_features['best_vector'] = dict()
        # Add front_number feature
        self.individual_features['front_number'] = 0
        # set random generator
        self.generator = RandomGenerator(self.problem.parameters, self.individual_features)
        self.leaders = Archive()
        # The external archive keeps only ε-non-dominated solutions.
        self.archive = Archive(dominance=EpsilonDominance(epsilons=self.options['epsilons']))
        self.non_uniform_mutator = NonUniformMutation(self.problem.parameters, self.options['prob_mutation'],
                                                      self.options['max_population_number'])
        self.uniform_mutator = UniformMutator(self.problem.parameters, self.options['prob_mutation'],
                                              self.options['max_population_number'])
        # constants for the speed and the position calculation
        self.c1_min = 1.5
        self.c1_max = 2.0
        self.c2_min = 1.5
        self.c2_max = 2.0
        self.r1_min = 0.0
        self.r1_max = 1.0
        self.r2_min = 0.0
        self.r2_max = 1.0
        self.min_weight = 0.1
        self.max_weight = 0.5

    def inertia_weight(self):
        # Random momentum weight in [0.1, 0.5], re-drawn on every call.
        return uniform(self.min_weight, self.max_weight)

    def init_pvelocity(self, population):
        """
        Inits the particle velocity and its allowed maximum speed.

        :param population: list of individuals
        :return
        """
        for individual in population:
            # the initial speed is set to zero
            individual.features['velocity'] = [0] * len(individual.vector)
        return

    def turbulence(self, particles, current_step=0):
        """
        OMOPSO applies a combination of uniform and nonuniform
        mutation to the particle swarm(uniform mutation to the first 30 % of
        the swarm, non - uniform to the next 30 %, and no mutation on the particles)
        """
        for i in range(len(particles)):
            if i % 3 == 0:
                mutated = self.uniform_mutator.mutate(particles[i])
            elif i % 3 == 1:
                mutated = self.non_uniform_mutator.mutate(particles[i], current_step)
            # NOTE(review): when i % 3 == 2, `mutated` still holds the previous
            # particle's result, so that particle is overwritten with its
            # neighbour's mutated vector rather than being left untouched as the
            # docstring describes — confirm against upstream.
            particles[i].vector = copy(mutated.vector)
        return

    def update_velocity(self, individuals):
        for individual in individuals:
            # Reset is overwritten component-by-component below.
            individual.features['velocity'] = [0] * len(individual.vector)
            # Each particle draws its own leader and coefficients.
            global_best = self.select_leader()
            r1 = round(uniform(self.r1_min, self.r1_max), 1)
            r2 = round(uniform(self.r2_min, self.r2_max), 1)
            c1 = round(uniform(self.c1_min, self.c1_max), 1)
            c2 = round(uniform(self.c2_min, self.c2_max), 1)
            for i in range(0, len(individual.vector)):
                # NOTE(review): the momentum term multiplies the POSITION, not a
                # previous velocity (which was just zeroed) — unusual for PSO;
                # confirm this matches the intended formulation.
                momentum = self.inertia_weight() * individual.vector[i]
                v_cog = c1 * r1 * (individual.features['best_vector'][i] - individual.vector[i])
                v_soc = c2 * r2 * (global_best.vector[i] - individual.vector[i])
                v = self.khi(c1, c2) * (momentum + v_cog + v_soc)
                individual.features['velocity'][i] = self.speed_constriction(v, self.parameters[i]['bounds'][1],
                                                                             self.parameters[i]['bounds'][0])

    def update_position(self, individuals):
        for individual in individuals:
            for parameter, i in zip(self.parameters, range(len(individual.vector))):
                individual.vector[i] = individual.vector[i] + individual.features['velocity'][i]
                # adjust maximum position if necessary; the velocity is reflected
                if individual.vector[i] > parameter['bounds'][1]:
                    individual.vector[i] = parameter['bounds'][1]
                    individual.features['velocity'][i] *= -1
                # adjust minimum position if necessary
                if individual.vector[i] < parameter['bounds'][0]:
                    individual.vector[i] = parameter['bounds'][0]
                    individual.features['velocity'][i] *= -1

    def update_global_best(self, swarm):
        """ Manages the leader class in OMOPSO. """
        # the fitness of the particles are calculated by their crowding distance
        crowding_distance(swarm)
        # the length of the leaders archive cannot be longer than the number of the initial population
        self.leaders += swarm
        self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')
        self.archive += swarm
        return

    def select_leader(self):
        """
        There are different possibilities to select the global best solution.
        The leader class in this concept contains everybody after the initialization, every individual expected as a
        leader, we select 2 from them and select the non-dominated as the global best.
        :return:
        """
        if self.leaders.size() == 1:
            return self.leaders.rand_choice()
        candidates = self.leaders.rand_sample(2)
        # randomly favourize one of them
        # best_global = choice(candidates)
        # should select those which has bigger fitness
        # # if one of them dominates, it will be selected as global best
        # dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)
        #
        # if dom == 1:
        #     best_global = candidates[0]
        #
        # if dom == 2:
        #     best_global = candidates[1]
        # NOTE(review): despite the docstring, the tournament is decided by
        # crowding distance, not by dominance (the dominance branch is
        # commented out above).
        if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:
            best_global = candidates[1]
        else:
            best_global = candidates[0]
        return best_global

    def run(self):
        t_s = time.time()
        self.problem.logger.info("PSO: {}/{}".format(self.options['max_population_number'],
                                                     self.options['max_population_size']))
        # update mutators (options may have changed since __init__)
        self.non_uniform_mutator = NonUniformMutation(self.problem.parameters, self.options['prob_mutation'],
                                                      self.options['max_population_number'])
        self.uniform_mutator = UniformMutator(self.problem.parameters, self.options['prob_mutation'],
                                              self.options['max_population_number'])
        # initialize the swarm
        self.generator.init(self.options['max_population_size'])
        individuals = self.generator.generate()
        for individual in individuals:
            # append to problem
            self.problem.individuals.append(individual)
            # add to population
            individual.population_id = 0
        self.evaluate(individuals)
        self.init_pvelocity(individuals)
        self.init_pbest(individuals)
        self.update_global_best(individuals)
        # sync to datastore
        for individual in individuals:
            self.problem.data_store.sync_individual(individual)
        it = 0
        while it < self.options['max_population_number']:
            # Move, perturb and re-evaluate a copy of the swarm each generation.
            offsprings = self.selector.select(individuals)
            self.update_velocity(offsprings)
            self.update_position(offsprings)
            self.turbulence(offsprings, it)
            self.evaluate(offsprings)
            self.update_particle_best(offsprings)
            self.update_global_best(offsprings)
            # update individuals
            individuals = offsprings
            for individual in individuals:
                # add to population
                individual.population_id = it + 1
                # append to problem
                self.problem.individuals.append(individual)
                # sync to datastore
                self.problem.data_store.sync_individual(individual)
            it += 1
        t = time.time() - t_s
        self.problem.logger.info("PSO: elapsed time: {} s".format(t))
        # sync changed individual informations
        self.problem.data_store.sync_all()
class SMPSO(SwarmAlgorithm):
"""
Implementation of SMPSP, a multi-objective particle swarm optimizer (MOPSO).
OMOPSO uses Crowding distance, Mutation and ε-Dominance.
According to [3], SMPSP is one of the top-performing PSO algorithms. There are 3 key-differences between OMOPS§ and
SMPSO, the mutator is polynomial mutation, the moment component of the velocity is constant, the values of the C1
and C2 values are [1.5, 2.5], while in the case of OMOPSO they are selected from [1.5, 2.0].
Here, instead of reversing the values from the borders, their speed are reduced by multiplying it by
0.001 [3].
Both of the SMPSO and OMOPSO can be defined with and without epsilon-dominance archive.
[1] Margarita Reyes SierraCarlos A. Coello Coello
Improving PSO-Based Multi-objective Optimization Using Crowding, Mutation and ∈-Dominance
DOI https://doi.org/10.1007/978-3-540-31880-4_35
[2] S. Mostaghim ; J. Teich :
Strategies for finding good local guides in multi-objective particle swarm optimization (MOPSO)
DOI: 10.1109/SIS.2003.1202243
[3] Durillo, J. J., J. Garcia-Nieto, A. J. Nebro, C. A. Coello Coello, F. Luna, and E. Alba (2009).
Multi-Objective Particle Swarm Optimizers: An Experimental Comparison.
Evolutionary Multi-Criterion Optimization, pp. 495-509
"""
    def __init__(self, problem: Problem, name="SMPSO Algorithm"):
        super().__init__(problem, name)
        # NOTE(review): trailing comma makes this a 1-tuple expression — harmless.
        self.options.declare(name='prob_mutation', default=0.1, lower=0,
                             desc='prob_mutation'),
        self.n = self.options['max_population_size']
        # Per-particle bookkeeping stored on individual.features.
        self.individual_features['velocity'] = dict()
        self.individual_features['best_cost'] = dict()
        self.individual_features['best_vector'] = dict()
        # Add front_number feature
        self.individual_features['front_number'] = 0
        self.selector = CopySelector(self.problem.parameters)
        self.dominance = ParetoDominance()
        # set random generator
        self.generator = RandomGenerator(self.problem.parameters, self.individual_features)
        self.leaders = Archive()
        # SMPSO uses polynomial mutation (vs. OMOPSO's uniform/non-uniform pair).
        self.mutator = PmMutator(self.problem.parameters, self.options['prob_mutation'])
        # constants for the speed and the position calculation; c-ranges are
        # [1.5, 2.5] per the class docstring (OMOPSO uses [1.5, 2.0])
        self.c1_min = 1.5
        self.c1_max = 2.5
        self.c2_min = 1.5
        self.c2_max = 2.5
        self.r1_min = 0.0
        self.r1_max = 1.0
        self.r2_min = 0.0
        self.r2_max = 1.0
        # min == max, so the inertia weight is effectively the constant 0.1.
        self.min_weight = 0.1
        self.max_weight = 0.1
    def inertia_weight(self):
        # min_weight == max_weight == 0.1, so this is effectively the constant
        # inertia 0.1 (SMPSO uses a fixed momentum term, cf. the class docstring).
        return uniform(self.min_weight, self.max_weight)
def init_pvelocity(self, individuals):
"""
Inits the particle velocity and its allowed maximum speed.
:param population: list of individuals
:return
"""
for individual in individuals:
# the initial speed is set to zero
individual.features['velocity'] = [0] * len(individual.vector)
return
    def turbulence(self, particles, current_step=0):
        """ SMPSO applies polynomial mutation on 15% of the particles """
        # Every sixth particle (~17% of the swarm) is mutated in place.
        # NOTE(review): indentation reconstructed — the vector assignment is
        # taken to be inside the `if`, so untouched particles keep their
        # vectors; confirm against upstream.
        for i in range(len(particles)):
            if i % 6 == 0:
                mutated = self.mutator.mutate(particles[i])
                particles[i].vector = copy(mutated.vector)
    def update_velocity(self, individuals):
        for individual in individuals:
            # Reset is overwritten component-by-component below.
            individual.features['velocity'] = [0] * len(individual.vector)
            # Each particle draws its own leader and coefficients.
            global_best = self.select_leader()
            r1 = round(uniform(self.r1_min, self.r1_max), 1)
            r2 = round(uniform(self.r2_min, self.r2_max), 1)
            c1 = round(uniform(self.c1_min, self.c1_max), 1)
            c2 = round(uniform(self.c2_min, self.c2_max), 1)
            for i in range(0, len(individual.vector)):
                # NOTE(review): the momentum term multiplies the POSITION, not a
                # previous velocity (which was just zeroed) — unusual for PSO;
                # confirm this matches the intended formulation.
                momentum = self.inertia_weight() * individual.vector[i]
                v_cog = c1 * r1 * (individual.features['best_vector'][i] - individual.vector[i])
                v_soc = c2 * r2 * (global_best.vector[i] - individual.vector[i])
                v = self.khi(c1, c2) * (momentum + v_cog + v_soc)
                individual.features['velocity'][i] = self.speed_constriction(v, self.parameters[i]['bounds'][1],
                                                                             self.parameters[i]['bounds'][0])
def update_position(self, individuals):
for individual in individuals:
for parameter, i in zip(self.parameters, range(len(individual.vector))):
individual.vector[i] = individual.vector[i] + individual.features['velocity'][i]
# adjust maximum position if necessary
if individual.vector[i] > parameter['bounds'][1]:
individual.vector[i] = parameter['bounds'][1]
individual.features['velocity'][i] *= 0.001
# adjust minimum position if necessary
if individual.vector[i] < parameter['bounds'][0]:
individual.vector[i] = parameter['bounds'][0]
individual.features['velocity'][i] *= 0.001
    def update_global_best(self, swarm):
        """ Manages the leader archive (same scheme as OMOPSO, but SMPSO keeps
        no separate ε-dominance archive). """
        # the fitness of the particles are calculated by their crowding distance
        crowding_distance(swarm)
        # the length of the leaders archive cannot be longer than the number of the initial population
        self.leaders += swarm
        self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')
        # self.problem.archive += swarm
        return
def select_leader(self):
"""
There are different possibilities to select the global best solution.
The leader class in this concept contains everybody after the initialization, every individual expected as a
leader, we select 2 from them and select the non-dominated as the global best.
:return:
"""
if self.leaders.size() == 1:
return self.leaders.rand_choice()
candidates = self.leaders.rand_sample(2)
# randomly favourize one of them
# best_global = choice(candidates)
# should select those which has bigger fitness
# # if one of them dominates, it will be selected as global best
# dom = self.dominance.compare(candidates[0].costs_signed, candidates[1].costs_signed)
#
# if dom == 1:
# best_global = candidates[0]
#
# if dom == 2:
# best_global = candidates[1]
if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:
best_global = candidates[1]
else:
best_global = candidates[0]
return best_global
    def run(self):
        """Execute the PSO main loop.

        Generates and evaluates the initial swarm, seeds personal bests and
        the leaders archive, then for max_population_number generations
        performs velocity/position updates, turbulence (mutation),
        evaluation and leader maintenance; every individual is appended to
        the problem and synced to the data store.
        """
        t_s = time.time()
        self.problem.logger.info("PSO: {}/{}".format(self.options['max_population_number'],
                                                     self.options['max_population_size']))
        # initialize the swarm
        self.generator.init(self.options['max_population_size'])
        individuals = self.generator.generate()
        for individual in individuals:
            # append to problem
            self.problem.individuals.append(individual)
            # add to population
            individual.population_id = 0
        self.evaluate(individuals)
        # zero velocities and seed each particle's personal best
        self.init_pvelocity(individuals)
        self.init_pbest(individuals)
        self.update_global_best(individuals)
        # sync to datastore
        for individual in individuals:
            self.problem.data_store.sync_individual(individual)
        it = 0
        while it < self.options['max_population_number']:
            offsprings = self.selector.select(individuals)
            self.update_velocity(offsprings)
            self.update_position(offsprings)
            # perturb a fraction of the swarm to maintain diversity
            self.turbulence(offsprings, it)
            self.evaluate(offsprings)
            self.update_particle_best(offsprings)
            self.update_global_best(offsprings)
            # update individuals
            individuals = offsprings
            for individual in individuals:
                # add to population
                individual.population_id = it + 1
                # append to problem
                self.problem.individuals.append(individual)
                # sync to datastore
                self.problem.data_store.sync_individual(individual)
            it += 1
        t = time.time() - t_s
        self.problem.logger.info("PSO: elapsed time: {} s".format(t))
        # sync changed individual informations
        self.problem.data_store.sync_all()
class PSOGA(SwarmAlgorithm):
    """
    Hybrid Particle Swarm Optimization / Genetic Algorithm.

    Every generation the swarm is first advanced with PSO velocity and
    position updates, then GA operators (binary tournament selection, SBX
    crossover and polynomial mutation) produce two additional offspring that
    are merged into the swarm.  Design vectors are kept integer-valued by
    rounding after every variation step.
    """

    def __init__(self, problem: Problem, name="PSOGA Algorithm"):
        super().__init__(problem, name)
        self.options.declare(name='prob_cross', default=1.0, lower=0,
                             desc='prob_cross')
        self.options.declare(name='prob_mutation', default=0.1, lower=0,
                             desc='prob_mutation')
        self.n = self.options['max_population_size']
        # per-particle bookkeeping used by the PSO half of the algorithm
        self.individual_features['velocity'] = dict()
        self.individual_features['best_cost'] = dict()
        self.individual_features['best_vector'] = dict()
        # bookkeeping used by non-dominated sorting / crowding distance
        self.individual_features['dominate'] = []
        self.individual_features['crowding_distance'] = 0
        self.individual_features['domination_counter'] = 0
        # Add front_number feature
        self.individual_features['front_number'] = 0
        self.selector = CopySelector(self.problem.parameters)
        self.dominance = ParetoDominance()
        # integer-valued random generator for the initial swarm
        self.generator = IntegerGenerator(self.problem.parameters, self.individual_features)
        self.leaders = Archive()
        self.mutator = PmMutator(self.problem.parameters, self.options['prob_mutation'])
        # constants for the velocity and position calculation
        self.c1_min = 1.5
        self.c1_max = 2.5
        self.c2_min = 1.5
        self.c2_max = 2.5
        self.r1_min = 0.0
        self.r1_max = 1.0
        self.r2_min = 0.0
        self.r2_max = 1.0
        self.min_weight = 0.1
        self.max_weight = 0.1
        # distribution index / probability shared by SBX crossover and
        # polynomial mutation
        self.distribution_index = 1
        self.probability = 1

    def inertia_weight(self):
        """Return a random inertia weight drawn from [min_weight, max_weight]."""
        return uniform(self.min_weight, self.max_weight)

    def init_pvelocity(self, individuals):
        """Initialise every particle's 'velocity' feature to a zero vector."""
        for individual in individuals:
            individual.features['velocity'] = [0] * len(individual.vector)

    def makeinteger(self, individual):
        """Round every component of a list-like vector to the nearest integer, in place."""
        for i in range(len(individual)):
            individual[i] = np.rint(individual[i]).astype(int)

    def crossover(self, particles):
        """Simulated Binary Crossover (SBX) between the first two particles.

        Reference:
        Title: An Efficient Constraint Handling Method for Genetic Algorithms
        Author: Kalyanmoy Deb (Appendix A, page 30)
        URL: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.33.7291&rep=rep1&type=pdf

        Returns two integer-rounded children.  With probability
        (1 - self.probability) the children are clones of the parents.

        Fixes over the previous revision:
        * the per-variable 50% recombination decision and the betaq branch
          now use fresh/consistent random draws (the old code reused a single
          ``u`` for the crossover-rate test, every per-variable test and the
          betaq branch, so either all variables changed or none);
        * the clone paths no longer alias the parent vectors, so the final
          in-place integer rounding cannot mutate the parents.
        """
        parent1 = particles[0]
        parent2 = particles[1]
        # work on copies so the parents are never modified
        x1 = deepcopy(parent1.vector)
        x2 = deepcopy(parent2.vector)
        if random() <= self.probability:
            for i, param in enumerate(self.parameters):
                lb = param['bounds'][0]
                ub = param['bounds'][1]
                # each variable has an independent 50% chance of recombining
                if random() <= 0.5:
                    if x1[i] > x2[i]:
                        y1, y2 = x2[i], x1[i]
                    else:
                        y1, y2 = x1[i], x2[i]
                    # only recombine when the parent values actually differ
                    if abs(x2[i] - x1[i]) > sys.float_info.epsilon:
                        rand = random()
                        # first child: spread factor from the lower-bound side
                        beta = 1.0 + (2.0 * (y1 - lb) / (y2 - y1))
                        alpha = 2.0 - pow(beta, -(self.distribution_index + 1.0))
                        if rand <= (1.0 / alpha):
                            betaq = pow(rand * alpha, (1.0 / (self.distribution_index + 1.0)))
                        else:
                            betaq = pow(1.0 / (2.0 - rand * alpha), 1.0 / (self.distribution_index + 1.0))
                        c1 = 0.5 * ((1 + betaq) * y1 + (1 - betaq) * y2)
                        # second child: spread factor from the upper-bound side
                        beta = 1.0 + (2.0 * (ub - y2) / (y2 - y1))
                        alpha = 2.0 - pow(beta, -(self.distribution_index + 1.0))
                        if rand <= (1.0 / alpha):
                            betaq = pow(rand * alpha, (1.0 / (self.distribution_index + 1.0)))
                        else:
                            betaq = pow(1.0 / (2.0 - rand * alpha), 1.0 / (self.distribution_index + 1.0))
                        c2 = 0.5 * ((1 - betaq) * y1 + (1 + betaq) * y2)
                        # clamp both children to the parameter bounds
                        c1 = max(lb, min(c1, ub))
                        c2 = max(lb, min(c2, ub))
                        # randomly swap which child receives which value
                        if random() <= 0.5:
                            x1[i], x2[i] = c2, c1
                        else:
                            x1[i], x2[i] = c1, c2
        self.makeinteger(x1)
        self.makeinteger(x2)
        return Individual(list(x1), parent1.features), Individual(list(x2), parent2.features)

    def mutation(self, particle, current_step=0):
        """Polynomial mutation of a single particle.

        Reference: Analyzing Mutation Schemes for Real-Parameter Genetic
        Algorithms, https://www.iitk.ac.in/kangal/papers/k2012016.pdf, sec. 2.

        Each variable is perturbed with probability self.probability; the
        result is clamped to the parameter bounds and rounded to integers.

        Fixes over the previous revision: the module-level ``global deltal,
        deltar`` declarations are replaced by a local variable, and the
        unmutated branch now reads ``particle.vector[i]`` (the old
        ``particle[i].vector`` raised TypeError because Individual is not
        indexable).
        """
        y = []
        for i, param in enumerate(self.parameters):
            if uniform(0, 1) < self.probability:
                lb = param['bounds'][0]
                ub = param['bounds'][1]
                x = particle.vector[i]
                u = uniform(0, 1)
                if u <= 0.5:
                    # perturb toward the lower bound
                    delta = pow(2 * u, 1 / (self.distribution_index + 1)) - 1
                    x = x + delta * (x - lb)
                else:
                    # perturb toward the upper bound
                    delta = 1 - pow(2 * (1 - u), 1 / (self.distribution_index + 1))
                    x = x + delta * (ub - x)
                # clamp the mutated value to the bounds
                x = max(lb, min(x, ub))
                y.append(x)
            else:
                # keep the original value unchanged
                y.append(particle.vector[i])
        self.makeinteger(y)
        return Individual(y, particle.features)

    def update_velocity(self, individuals):
        """Update velocity: w*x + c1*r1*(pbest - x) + c2*r2*(gbest - x).

        r1, r2, c1, c2 are redrawn per particle and the result is clamped by
        speed_constriction().  Fixed a copy-paste bug where r2 and c2 were
        sampled from the r1/c1 bounds (the configured bounds are currently
        identical, so observable behaviour is unchanged).
        """
        for individual in individuals:
            individual.features['velocity'] = [0] * len(individual.vector)
            global_best = self.select_leader()
            r1 = round(uniform(self.r1_min, self.r1_max), 1)
            r2 = round(uniform(self.r2_min, self.r2_max), 1)
            c1 = round(uniform(self.c1_min, self.c1_max), 1)
            c2 = round(uniform(self.c2_min, self.c2_max), 1)
            for i in range(0, len(individual.vector)):
                lb = self.parameters[i]['bounds'][0]
                ub = self.parameters[i]['bounds'][1]
                w = self.khi(c1, c2)
                # NOTE(review): momentum scales the position, not the previous
                # velocity; canonical PSO uses w * v[i] -- confirm intended.
                momentum = w * individual.vector[i]
                v_cog = c1 * r1 * (individual.features['best_vector'][i] - individual.vector[i])
                v_soc = c2 * r2 * (global_best.vector[i] - individual.vector[i])
                v = momentum + v_cog + v_soc
                individual.features['velocity'][i] = self.speed_constriction(v, ub, lb)

    def select_leader(self):
        """Binary tournament on the leaders archive by crowding distance."""
        if self.leaders.size() == 1:
            return self.leaders.rand_choice()
        candidates = self.leaders.rand_sample(2)
        if candidates[1].features['crowding_distance'] > candidates[0].features['crowding_distance']:
            return candidates[1]
        return candidates[0]

    def update_position(self, individuals):
        """Advance particles by their velocity, reflecting at the bounds.

        A velocity component that pushes a coordinate outside its bounds is
        negated (velocity reflection); the position vector is then rounded to
        integers in place.
        """
        for individual in individuals:
            for parameter, i in zip(self.parameters, range(len(individual.vector))):
                individual.vector[i] = individual.vector[i] + individual.features['velocity'][i]
                # reflect at the upper bound
                if individual.vector[i] > parameter['bounds'][1]:
                    individual.vector[i] = parameter['bounds'][1]
                    individual.features['velocity'][i] *= -1
                # reflect at the lower bound
                if individual.vector[i] < parameter['bounds'][0]:
                    individual.vector[i] = parameter['bounds'][0]
                    individual.features['velocity'][i] *= -1
            self.makeinteger(individual.vector)

    def update_global_best(self, swarm):
        """Merge the swarm into the leaders archive, truncated by crowding distance."""
        crowding_distance(swarm)
        self.leaders += swarm
        self.leaders.truncate(self.options['max_population_size'], 'crowding_distance')

    def tournamentselection(self, parents):
        """Binary tournament selection.

        Picks tournamentSize individuals at random (all of them when the
        population is smaller than the tournament) and returns them sorted by
        costs in descending order, so the best candidate comes first.
        """
        tournamentSize = 2
        popSize = len(parents)
        selected = []
        if tournamentSize > popSize:
            for p in parents:
                selected.append(p)
        else:
            indices = list(range(popSize))
            shuffle(indices)
            for i in range(tournamentSize):
                selected.append(parents[indices[i]])
        selected = sorted(selected, key=lambda c: c.costs, reverse=True)
        return selected

    def run(self):
        """Main optimisation loop: PSO update followed by GA variation.

        Each generation applies velocity/position updates to the whole swarm,
        then tournament selection, SBX crossover and polynomial mutation to
        create two extra offspring, evaluates everything and refreshes the
        personal bests and the leaders archive.
        """
        start = time.time()
        self.problem.logger.info("PSOGA: {}/{}".format(self.options['max_population_number'],
                                                       self.options['max_population_size']))
        # initialization of swarm
        # NOTE(review): this generator.init is called with two arguments here
        # but with one elsewhere in the file -- confirm the expected signature.
        self.generator.init(self.options['max_population_size'], self.parameters)
        individuals = self.generator.generate()
        for individual in individuals:
            # append to problem
            self.problem.individuals.append(individual)
            # add to population
            individual.population_id = 0
        self.evaluate(individuals)
        self.init_pvelocity(individuals)
        self.init_pbest(individuals)
        self.update_global_best(individuals)
        # sync to datastore
        for individual in individuals:
            self.problem.data_store.sync_individual(individual)
        for it in range(self.options['max_population_number']):
            offsprings = self.selector.select(individuals)
            # PSO operators
            self.update_velocity(offsprings)
            self.update_position(offsprings)
            self.evaluate(offsprings)
            # GA operators: tournament -> SBX crossover -> polynomial mutation
            selected = self.tournamentselection(offsprings)
            offspring1, offspring2 = self.crossover(selected)
            offspring1 = self.mutation(offspring1, it)
            offspring2 = self.mutation(offspring2, it)
            offsprings.append(offspring1)
            offsprings.append(offspring2)
            self.evaluate(offsprings)
            self.update_particle_best(offsprings)
            self.update_global_best(offsprings)
            # update individuals
            individuals = offsprings
            for individual in individuals:
                # add to population
                individual.population_id = it + 1
                # append to problem
                self.problem.individuals.append(individual)
                # sync to datastore
                self.problem.data_store.sync_individual(individual)
        t = time.time() - start
        self.problem.logger.info("PSOGA: elapsed time: {} s".format(t))
        # sync changed individual informations
        self.problem.data_store.sync_all()
# ........................
#
# ........................
# class PSO_V1(SwarmAlgorithm):
# """
#
# X. Li. A Non-dominated Sorting Particle Swarm Optimizer for Multiobjective
# Optimization. In Genetic and Evolutionary Computation - GECCO 2003, volume
# 2723 of LNCS, pages 37–48, 2003.
#
# This algorithm is a variant of the original PSO, published by Eberhart(2000), the origin of this modified
# algorithm, which constriction factor was introduced by Clercs in 1999.
#
# The code is based on SHI and EBERHARTS algorithm.
#
# Empirical study of particle swarm optimization,” in Proc. IEEE Int. Congr. Evolutionary Computation, vol. 3,
# 1999, pp. 101–106.
# """
#
# def __init__(self, problem: Problem, name="Particle Swarm Algorithm - with time varieting inertia weight"):
# super().__init__(problem, name)
# self.n = self.options['max_population_size']
# self.mutator = SwarmStepTVIW(self.problem.parameters, self.options['max_population_number'])
# self.selector = DummySelector(self.problem.parameters, self.problem.signs)
#
# def run(self):
# # set random generator
# self.generator = RandomGenerator(self.problem.parameters)
# self.generator.init(self.options['max_population_size'])
#
# population = self.gen_initial_population()
# self.evaluate(population.individuals)
# self.add_features(population.individuals)
#
# for individual in population.individuals:
# self.mutator.evaluate_best_individual(
# individual) # TODO: all evaluating should be derived from Evaluator class
#
# self.selector.fast_nondominated_sorting(population.individuals)
# self.populations.append(population)
#
# t_s = time.time()
# self.problem.logger.info("PSO: {}/{}".format(self.options['max_population_number'],
# self.options['max_population_size']))
#
# i = 0
# while i < self.options['max_population_number']:
# offsprings = self.selector.select(population.individuals)
#
# pareto_front = []
# for individual in offsprings:
# if individual.features['front_number'] == 1:
# pareto_front.append(individual)
#
# for individual in offsprings:
# index = randint(0, len(pareto_front) - 1) # takes random individual from Pareto front
# best_individual = pareto_front[index]
# if best_individual is not individual:
# self.mutator.update(best_individual)
# self.mutator.mutate(individual)
#
# self.evaluate(offsprings)
# self.add_features(offsprings)
#
# for individual in offsprings:
# self.mutator.evaluate_best_individual(individual)
#
# self.selector.fast_nondominated_sorting(offsprings)
# population = Population(offsprings)
# self.populations.append(population)
#
# i += 1
#
# t = time.time() - t_s
# self.problem.logger.info("PSO: elapsed time: {} s".format(t))
|
988,598 | fef0d38df4ef197b8d70cab342b5cc3e603f8d24 | #!/usr/bin/env python3
# _*_ coding=utf-8 _*_
# Scrape every "wikitable" table from the Wikipedia article on U.S. state
# historical populations and write each one to its own CSV file
# (table1.csv, table2.csv, ...).
import csv
from urllib.request import urlopen
from urllib.error import HTTPError

from bs4 import BeautifulSoup

try:
    html = urlopen("https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_historical_population?oldformat=true")
except HTTPError:
    # fixed: the original fell through after the error and then used the
    # undefined `html`, raising NameError
    print("not found")
    exit(1)
bsObj = BeautifulSoup(html, "html.parser")
# findAll returns a (possibly empty) list, never None, so test for emptiness
# (the original `is None` check could never trigger)
tables = bsObj.findAll("table", {"class": "wikitable"})
if not tables:
    print("no table")
    exit(1)
for i, table in enumerate(tables, start=1):
    fileName = "table%s.csv" % i
    rows = table.findAll("tr")
    # newline='' lets csv.writer control the line endings itself
    with open(fileName, 'wt', newline='', encoding='utf-8') as csvFile:
        writer = csv.writer(csvFile)
        for row in rows:
            # a row's cells may be headers (th) or data (td)
            csvRow = [cell.get_text() for cell in row.findAll(['td', 'th'])]
            writer.writerow(csvRow)
988,599 | c9a3dc10d8b0c93b16b4caf125f35e0171355960 | from re import template
from fastapi import FastAPI, Body
from starlette.templating import Jinja2Templates
from starlette.requests import Request
import sqlalchemy
# Database handle used by every route below to read/write the Post table.
# SECURITY NOTE(review): credentials are hard-coded in source -- move them to
# an environment variable or config file before sharing/deploying.
engine = sqlalchemy.create_engine(
    'mysql+pymysql://root:Makt0112pc-49466@localhost:3306/db_fastapi')
app = FastAPI(
    title='FastAPIでつくるtoDoアプリケーション',
    description='FastAPIチュートリアル:FastAPI(とstarlette)でシンプルなtoDoアプリを作りましょう.',
    version='0.9 beta'
)
# Jinja2 template loader; templates live in the ./templates directory.
templates = Jinja2Templates(directory="templates")
jinja_env = templates.env
# Render index.html with a sample context value.
# NOTE(review): this handler has no @app.get decorator, so it is never
# registered as a route -- presumably a leftover; confirm whether '/' should
# serve it.
def index(request: Request):
    return templates.TemplateResponse('index.html',
                                      {'request': request, 'abc': 'タイトル', }
                                      )
@app.get('/home')
def home(request: Request):
    """Render home.html with every row of the Post table."""
    posts = engine.execute("SELECT * FROM Post")
    context = {'request': request, 'lists': posts}
    return templates.TemplateResponse('home.html', context)
@app.get('/new')
def new(request: Request):
    """Render the form page for creating a new post.

    Renamed from ``home``: the original reused the name of the '/home'
    handler defined earlier in this file. Routing still worked because
    FastAPI registers handlers via the decorator, but the duplicate module
    name shadowed the first function and was error-prone.
    """
    return templates.TemplateResponse('new.html',
                                      {'request': request}
                                      )
@app.post("/create")
def create(username: str = Body(...), content: str = Body(...)):
engine.execute(
f"INSERT INTO Post (username,content) VALUES ('{username}','{content}')")
@app.get("/delete_wearning")
def delete_wearning(request: Request):
return templates.TemplateResponse('warning.html',
{'request': request}
)
@app.get("/delete")
def delete(request: Request):
engine.execute(
' DELETE FROM Post'
)
return templates.TemplateResponse('home.html',
{'request': request}
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.