text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
#coding=utf-8
def BinarySearch(a, target):
    """
    Classic iterative binary search.

    Return the index of `target` in the sorted list `a`, or -1 when it
    is absent.
    """
    low = 0
    high = len(a) - 1
    # In languages with fixed-width integers, low + high can overflow
    # (e.g. past 2**31 - 1) and turn negative, so the midpoint is
    # computed as low + (high - low) / 2 instead.
    # (Translated from the original Chinese comment.)
    while low <= high:
        # // keeps mid an int under Python 3; the original `/` produces
        # a float there and a[mid] raises TypeError.
        mid = low + ((high - low) // 2)
        midVal = a[mid]
        if midVal < target:     # <
            low = mid + 1
        elif midVal > target:   # >
            high = mid - 1
        else:                   # ==
            return mid
    return -1
if __name__ == '__main__':
    array = [2, 12, 34, 56, 71, 77, 84, 86, 87, 94, 98, 102, 112, 142, 156, 174, 183, 196, 199, 201, 226]
    # print() call form runs under both Python 2 and 3; the old
    # `print "...", x` statement is a SyntaxError on Python 3.
    print("The target position is: %d" % BinarySearch(array, 87))
|
# encoding: utf-8
import os
import re
from contextlib import closing
import datetime
import urllib
import json
from flask import Flask
from flask import Markup
from flask import render_template
from flask import request
from flask import session
from flask import redirect
from flask import url_for
from flask import escape
from flask import g
import unicodecsv
from jinja2 import evalcontextfilter
from jinja2 import Markup
from jinja2 import escape
from flask import abort
from flask import send_from_directory
from flask.ext.sqlalchemy import SQLAlchemy
from yourtopia.core import app, db
import indexpreview
# Matches runs of two or more consecutive line breaks — used by the
# nl2br template filter to split text into paragraphs.
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
class Usercreated(db.Model):
    """A user-created index weighting, optionally published with name/description."""
    id = db.Column(db.Integer, primary_key=True)
    user_name = db.Column(db.String(100))  # display name; stays NULL until shared
    user_url = db.Column(db.String(150))   # optional link supplied by the user
    description = db.Column(db.Text)
    weights = db.Column(db.Text)           # JSON-encoded mapping of weights
    created_at = db.Column(db.DateTime)
    user_ip = db.Column(db.String(15))     # anonymized IPv4 (last octet zeroed)
    country = db.Column(db.String(3))
    version = db.Column(db.Integer)
    def __init__(self, weights, user_ip, country, version):
        """Store a fresh entry; `weights` arrives as a dict and is JSON-serialized."""
        self.created_at = datetime.datetime.utcnow()
        self.weights = json.dumps(weights)
        self.user_ip = user_ip
        self.country = country
        self.version = version
    def __repr__(self):
        return '<Usercreated %s>' % self.id
    def to_dict(self):
        """Return a JSON-encodable dict of the data in this object"""
        # Note: `weights` stays a JSON *string* here; callers decode it.
        return {
            'id': self.id,
            'user_name': self.user_name,
            'user_url': self.user_url,
            'description': self.description,
            'weights': self.weights,
            'created_at': self.created_at.isoformat(' '),
            'user_ip': self.user_ip,
            'country': self.country,
            'version': self.version
        }
@app.route('/')
def home():
    """Render the landing page with the category headline strings."""
    headlines = extract_i18n_keys(i18n, r'category_headline.*')
    return render_template('home.html', metadata=metadata,
                           i18n_strings=headlines)
@app.route('/browse/', defaults={'page': 1})
@app.route('/browse/<int:page>/')
def browse(page):
    """Paginated listing of published (named) user-created datasets."""
    per_page = app.config['BROWSE_PERPAGE']
    offset = (page - 1) * per_page if page > 1 else 0
    total = Usercreated.query.filter(Usercreated.user_name != None).count()
    # Fetch one extra row so we can tell whether a further page exists.
    entries = get_usercreated_entries(per_page + 1, offset)
    if not entries:
        abort(404)
    show_next = len(entries) > per_page
    if show_next:
        entries.pop()
    show_prev = offset > 0
    return render_template('browse.html',
                           entries=entries, page=page,
                           show_next=show_next,
                           show_prev=show_prev,
                           total=total)
@app.route('/about/')
def about():
    """Static about page (metadata is shown in the template)."""
    return render_template('about.html', metadata=metadata)
@app.route('/i/<int:id>/')
def details(id):
    """Public detail page for a single user-created dataset."""
    # Absolute URL of this page, used for the sharing widgets.
    url = '%s://%s%s' % (request.scheme, request.host, request.path)
    headlines = extract_i18n_keys(i18n, r'category_headline.*')
    entry = get_usercreated_entries(id=id)
    return render_template('details.html', entry=entry.to_dict(),
                           i18n_strings=headlines, url=url)
@app.route('/edit/<int:id>/')
def edit_single(id):
    """Edit form for dataset <id>; only its creator (per session) may access."""
    # Missing key or mismatching id both mean "not the owner".
    if session.get('dataset_id') != id:
        abort(401)
    entry = Usercreated.query.get(id)
    strings = extract_i18n_keys(i18n, r'category_headline.*')
    strings.update(extract_i18n_keys(i18n, r'sharing_.*'))
    return render_template('edit.html', id=id, entry=entry.to_dict(),
                           i18n_strings=strings)
@app.route('/edit/', methods=['POST'])
def edit():
    """
    This view function receives the user-created model
    via POST and redirects the user to finalize the
    sharing process. The user's dataset's ID is stored
    in the session.
    """
    anonymized_ip = anonymize_ip(request.remote_addr)
    if 'id' not in request.form:
        # first save
        id = add_usercreated_entry(request.form['data'], anonymized_ip)
        create_preview_images(id)
        # Remember ownership so only this visitor may edit the dataset.
        session['dataset_id'] = id
        return redirect(url_for('edit_single', id=id))
    else:
        # second/subsequent save (publishing/sharing)
        # NOTE(review): raises KeyError (500) when the session expired;
        # presumably a 401 would be friendlier — confirm intended.
        id = session['dataset_id']
        if id != int(request.form['id']):
            # user is probably creating a second dataset
            abort(401)
        user_name = None
        user_url = None
        description = None
        # Placeholder text left in a form field counts as "not filled in".
        if 'description' in request.form:
            if request.form['description'] != i18n['sharing_textfield_default'][session['lang']]:
                description = request.form['description']
        if 'user_name' in request.form:
            if request.form['user_name'] != i18n['sharing_userfield_default'][session['lang']]:
                user_name = request.form['user_name']
        if 'user_url' in request.form:
            user_url = request.form['user_url']
        update_usercreated_entry(id=id,
            user_name=user_name,
            user_url=user_url,
            description=description)
        return redirect(url_for('details', id=id))
@app.route('/thumbs/<int:id>/<string:filename>')
def thumb(id, filename):
    """Serve a preview thumbnail, generating it on first access."""
    folder_path = indexpreview.get_folder_path(id, app.config['THUMBS_PATH'])
    if not os.access(os.path.join(folder_path, filename), os.F_OK):
        # Thumbnail not rendered yet — create it, then serve from disk.
        create_preview_images(id)
    return send_from_directory(folder_path, filename, mimetype="image/png")
def create_preview_images(id):
    """
    Create and save the preview image for a usercreated entry.
    """
    entry = get_usercreated_entries(id=id).to_dict()
    weights = json.loads(entry['weights'])
    # Only main-category weights feed the preview: keys whose last
    # character is a digit belong to sub-categories and are skipped.
    # Values are taken in key-sorted order.
    weight_values = [weights[key] for key in sorted(weights)
                     if not key[-1].isdigit()]
    img = indexpreview.create_preview_image(weight_values)
    indexpreview.save_image_versions(img, id, app.config['THUMBS_PATH'])
def get_usercreated_entries(num=1, offset=0, id=None):
    """
    Read user-generated datasets from the database.

    With `id` given, return that single entry (or None); otherwise
    return up to `num` published entries starting at `offset`,
    newest first.
    """
    if id is not None:
        return Usercreated.query.get(id)
    published = Usercreated.query.filter(Usercreated.user_name != None)
    return published.order_by('id DESC').limit(num).offset(offset).all()
def add_usercreated_entry(json_data, ip):
    """
    Write the user-generated data to the database and return the new ID.
    """
    data = json.loads(json_data)
    # Keep only entries whose key's final underscore-part is 'weight'.
    weights = {key: value for key, value in data.items()
               if key.split('_')[-1] == 'weight'}
    uc = Usercreated(weights, ip, data['country'], data['version'])
    db.session.add(uc)
    db.session.commit()
    return uc.id
def update_usercreated_entry(id, user_name, user_url, description):
    """
    Fill in the shareable fields of a previously created dataset.
    """
    entry = Usercreated.query.get(id)
    entry.description = description
    entry.user_url = user_url
    entry.user_name = user_name
    db.session.commit()
def import_series_metadata(path):
    """
    Read per-series metadata from an elasticsearch-style JSON dump.

    Returns a dict keyed by series id, or None when the file cannot be
    read (the error is logged).
    """
    try:
        # `with` guarantees the handle is closed; the previous version
        # leaked it and its bare `except:` swallowed every exception.
        with open(path) as f:
            raw = json.loads(f.read())
    except (IOError, OSError):
        app.logger.error('File ' + path + ' could not be opened.')
        return
    metadata = {}
    for item in raw['hits']['hits']:
        entry = {
            'id': item['_source']['id'],
            'label': {},
            'description': {},
            'type': item['_source']['type'],
            'format': item['_source']['format'],
            'source_url': None,
            'icon': item['_source']['icon'],
            'high_is_good': None
        }
        # Keys like 'label@en' carry language-dependent strings.
        for key in item['_source'].keys():
            if '@' in key:
                (name, lang) = key.split('@')
                entry[name][lang] = item['_source'][key]
        # First http(s) URL inside the free-text source field, if any.
        url_find = re.search(r'(http[s]*://.+)\b', item['_source']['source'])
        if url_find is not None:
            entry['source_url'] = url_find.group(1)
        metadata[item['_source']['id']] = entry
    return metadata
def import_i18n_strings(path):
    """
    Read internationalization strings from a CSV data source and return
    them as a dict keyed by string id.

    The first row holds the field names; one of them must be
    'string_id', the others are language codes.
    """
    i18n = {}
    # `with` closes the handle; the previous version leaked it.
    with open(path) as f:
        reader = unicodecsv.reader(f, encoding='utf-8', delimiter=",",
            quotechar='"', quoting=unicodecsv.QUOTE_MINIMAL)
        fieldnames = []
        for rowcount, row in enumerate(reader):
            if rowcount == 0:
                fieldnames = row
                continue
            my_id = ''
            my_strings = {}
            for fieldcount, field in enumerate(row):
                if fieldnames[fieldcount] == 'string_id':
                    my_id = field
                else:
                    my_strings[fieldnames[fieldcount]] = field
            i18n[my_id] = my_strings
    return i18n
def extract_i18n_keys(thedict, regex_pattern):
    """
    Return the entries of `thedict` whose keys match `regex_pattern`
    (anchored at the start of the key, as with re.match) as a new dict.
    Useful for passing only part of the i18n data structure to a view.
    """
    return {key: value for key, value in thedict.items()
            if re.match(regex_pattern, key) is not None}
def set_language():
    """
    Pick the request language: an explicit ?lang= URL parameter wins,
    then (for new sessions) the best supported Accept-Language entry,
    then the language already stored in the session. The result is
    written back to session['lang'].
    """
    lang_url_param = request.args.get('lang', '')
    if lang_url_param != '' and lang_url_param in app.config['LANG_PRIORITIES']:
        lang = lang_url_param
    elif 'lang' not in session:
        lang = app.config['LANG_PRIORITIES'][0]
        # Take the *first* user-agent language we support. The previous
        # version kept overwriting `lang` without breaking, so the
        # least-preferred matching language won.
        for user_lang, quality in request.accept_languages:
            if user_lang in app.config['LANG_PRIORITIES']:
                lang = user_lang
                break
    else:
        lang = session['lang']
    session['lang'] = lang
def anonymize_ip(ip):
    """
    Replace the last of the four numeric packets of an IPv4 address
    with zero. Addresses that are not dotted quads (e.g. IPv6) are
    returned unchanged; the previous version raised IndexError on them.
    """
    parts = ip.split('.')
    if len(parts) != 4:
        return ip
    parts[3] = '0'
    return '.'.join(parts)
@app.template_filter('i18n')
def i18n_filter(s, lang="en"):
    """
    Output the key in the user's selected language
    """
    # NOTE(review): the `lang` parameter is never used — the session
    # language always wins; confirm whether callers rely on it.
    if s not in i18n:
        app.logger.debug("Key is invalid: " + s)
        return 'Key "' + s + '" is invalid'
    if session['lang'] not in i18n[s]:
        app.logger.debug("Key is not translated: " + s)
        return 'Key "' + s + '" not available in "' + session['lang'] + '"'
    # NOTE(review): this tests whether the *language code* is empty, yet
    # the message talks about the key's value — presumably meant to test
    # i18n[s][session['lang']] == ''; confirm before changing.
    if session['lang'] == '':
        app.logger.debug("Key is empty: " + s)
        return 'Key "' + s + '" in "' + session['lang'] + '" is empty'
    #app.logger.debug(['i18n_filter', i18n[s][session['lang']]])
    return Markup(i18n[s][session['lang']])
@app.template_filter('dateformat')
def dateformat_filter(s, format='%Y-%m-%d'):
    """
    Output a date according to a given format.

    Accepts a datetime object or a string in ISO-like form, with or
    without a fractional-seconds part.
    """
    if not isinstance(s, datetime.datetime):
        try:
            s = datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S.%f')
        except ValueError:
            # No fractional part — parse without microseconds. (The old
            # bare `except:` also hid unrelated errors such as TypeError.)
            s = datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
    # strip off microseconds if they exist
    s = s.replace(microsecond=0)
    return Markup(s.strftime(format))
@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
    """Convert plain-text line breaks to HTML: blank-line-separated
    chunks become <p> blocks and single newlines become <br>."""
    # escape() runs first so user text cannot inject HTML; the <p>/<br>
    # added afterwards is the only markup in the result.
    result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br>\n') \
        for p in _paragraph_re.split(escape(value)))
    if eval_ctx.autoescape:
        # Mark as safe so Jinja doesn't escape our generated tags again.
        result = Markup(result)
    return result
@app.template_filter('urlencode')
def urlencode_filter(s):
    """URL-encode a (possibly Markup) string for use in query strings."""
    # Bug fix: the old code compared type(s) against the *string*
    # 'Markup', which is never true, so Markup values were never
    # unescaped before encoding.
    if isinstance(s, Markup):
        s = s.unescape()
    s = s.encode('utf8')
    s = urllib.quote_plus(s)
    #app.logger.debug(['urlencode', s])
    return Markup(s)
#@app.teardown_request
#def teardown_request(exception):
#    if hasattr(g, 'db'):
#        g.db.close()
# --- module-level bootstrap: runs once on import ---
db.create_all()  # create tables if they do not exist yet
app.before_request(set_language)  # negotiate language before every request
# SECURITY(review): secret key is hard-coded in source; it should be
# moved to configuration / an environment variable.
app.secret_key = 'A0ZrhkdsjhkjlksgnkjnsdgkjnmN]LWX/,?RT'
i18n = import_i18n_strings(app.config['I18N_STRINGS_PATH'])
metadata = import_series_metadata(app.config['METADATA_PATH'])
if __name__ == '__main__':
    app.run(debug=app.config['DEBUG'], host=app.config['HOST'],
        port=app.config['PORT'])
|
# This example describe how to integrate ODEs with scipy.integrate module, and how
# to use the matplotlib module to plot trajectories, direction fields and other
# useful information.
#
# == Presentation of the Lokta-Volterra Model ==
#
# We will have a look at the Lokta-Volterra model, also known as the
# predator-prey equations, which are a pair of first order, non-linear, differential
# equations frequently used to describe the dynamics of biological systems in
# which two species interact, one a predator and one its prey. They were proposed
# independently by Alfred J. Lotka in 1925 and Vito Volterra in 1926:
# du/dt = a*u - b*u*v
# dv/dt = -c*v + d*b*u*v
#
# with the following notations:
#
# * u: number of preys (for example, rabbits)
#
# * v: number of predators (for example, foxes)
#
# * a, b, c, d are constant parameters defining the behavior of the population:
#
# + a is the natural growing rate of rabbits, when there's no fox
#
# + b is the natural dying rate of rabbits, due to predation
#
# + c is the natural dying rate of fox, when there's no rabbit
#
# + d is the factor describing how many caught rabbits let create a new fox
#
# We will use X=[u, v] to describe the state of both populations.
#
# Definition of the equations:
#
from numpy import *
import numpy
import matplotlib.pylab as pylab
from matplotlib.widgets import Slider, Button, RadioButtons
from scipy import integrate
# Definition of parameters
# a is the natural growing rate of rabbits, when there's no fox
# b is the natural dying rate of rabbits, due to predation
# c is the natural dying rate of fox, when there's no rabbit
# d is the factor describing how many caught rabbits let create a new fox
a = 1.
b = 0.1
c = 1.5
d = 0.75
def dX_dt(X, t, a, b, c, d):
    """Return the growth rates [du/dt, dv/dt] of the prey (X[0]) and
    predator (X[1]) populations for the Lotka-Volterra model."""
    u, v = X[0], X[1]
    return array([a * u - b * u * v,
                  -c * v + d * b * u * v])
def d2X_dt2(X, t, a, b, c, d):
    """Return the Jacobian matrix of dX_dt evaluated at state X."""
    u, v = X[0], X[1]
    return array([[a - b * v, -b * u],
                  [b * d * v, -c + b * d * u]])
#
# === Population equilibrium ===
#
# Before using !SciPy to integrate this system, we will have a closer look on
# position equilibrium. Equilibrium occurs when the growth rate is equal to 0.
# This gives two fixed points:
#
# Fixed points of the system (population equilibria).
X_f0 = array([ 0. , 0.])
X_f1 = array([c / (d * b), a / b])
# Sanity check: the growth rate vanishes at both fixed points.
all(dX_dt(X_f0, 0, a, b, c, d) == zeros(2)) and all(dX_dt(X_f1, 0, a, b, c, d) == zeros(2)) # => True
#
# === Stability of the fixed points ===
# Near theses two points, the system can be linearized:
# dX_dt = A_f*X where A is the Jacobian matrix evaluated at the corresponding point.
# We have to define the Jacobian matrix:
#
#
# So, near X_f0, which represents the extinction of both species, we have:
# A_f0 = d2X_dt2(X_f0) # >>> array([[ 1. , -0. ],
# # [ 0. , -1.5]])
#
# Near X_f0, the number of rabbits increase and the population of foxes decrease.
# The origin is a [http://en.wikipedia.org/wiki/Saddle_point saddle point].
#
# Near X_f1, we have:
# Jacobian at the non-trivial equilibrium X_f1.
A_f1 = d2X_dt2(X_f1, 0, a, b, c, d)
# whose eigenvalues are +/- sqrt(c*a).j:
lambda1, lambda2 = linalg.eigvals(A_f1) # >>> (1.22474j, -1.22474j)
# They are imaginary number, so the fox and rabbit populations are periodic and
# their period is given by:
T_f1 = 2*pi/abs(lambda1) # >>> 5.130199
#
# == Integrating the ODE using scipy.integate ==
#
# Now we will use the scipy.integrate module to integrate the ODEs.
# This module offers a method named odeint, very easy to use to integrate ODEs:
#
t = linspace(0, 15, 1000) # time
X0 = array([10, 5]) # initials conditions: 10 rabbits and 5 foxes
# odeint integrates dX_dt over t; extra model parameters go in via `args`.
X, infodict = integrate.odeint(dX_dt, X0, args=(a, b, c, d), t=t, full_output=True)
infodict['message'] # >>> 'Integration successful.'
#
# `infodict` is optional, and you can omit the `full_output` argument if you don't want it.
# Type "info(odeint)" if you want more information about odeint inputs and outputs.
#
# We can now use Matplotlib to plot the evolution of both populations:
#
# Unpack the integrated trajectories: column 0 = prey, column 1 = predators.
rabbits, foxes = X.T
f1, ax = pylab.subplots()
# Leave room at the bottom of the figure for the four parameter sliders.
pylab.subplots_adjust(left=0.15, bottom=0.55)
plot1, = pylab.plot(t, rabbits, lw=2, color='red', label='Ofiary')
plot2, = pylab.plot(t, foxes, lw=2, color='blue', label='Drapieżniki')
pylab.grid()
pylab.legend(loc='best')
pylab.xlabel('Czas')
pylab.ylabel('Populacja')
pylab.title('Model Lotka-Volterra')
pylab.axis([0, 15, 0, 100])
axcolor = 'lightgoldenrodyellow'
# One axes per slider (a, b, c, d), stacked below the main plot.
axa = pylab.axes([0.1, 0.35, 0.75, 0.03], facecolor=axcolor, title='Współczynnik narodzin ofiar')
axb = pylab.axes([0.1, 0.25, 0.75, 0.03], facecolor=axcolor, title='Współczynnik śmierci ofiar')
axc = pylab.axes([0.1, 0.15, 0.75, 0.03], facecolor=axcolor, title='Współczynnik śmierci drapieżników')
axd = pylab.axes([0.1, 0.05, 0.75, 0.03], facecolor=axcolor, title='Efektywność uśmiercania ofiar')
# Slider initial values mirror the module-level parameter defaults.
sa = Slider(axa, 'a', 0.1, 10.0, valinit=1.)
sb = Slider(axb, 'b', 0.01, 1.0, valinit=0.1)
sc = Slider(axc, 'c', 0.1, 10.0, valinit=1.5)
sd = Slider(axd, 'd', 0.1, 10.0, valinit=0.75)
def update(val):
    # Slider callback: re-integrate the system with the current slider
    # values and redraw both population curves in place.
    a = sa.val
    b = sb.val
    c = sc.val
    d = sd.val
    X, infodict = integrate.odeint(dX_dt, X0, args=(a, b, c, d), t=t, full_output=True)
    rabbits, foxes = X.T
    plot1.set_ydata(rabbits)
    plot2.set_ydata(foxes)
    f1.canvas.draw_idle()
# Re-run the simulation whenever any slider moves.
sa.on_changed(update)
sb.on_changed(update)
sc.on_changed(update)
sd.on_changed(update)
pylab.show()
# NOTE(review): saving happens only after the interactive window is
# closed, so the file captures the last slider state — confirm intended.
f1.savefig('rabbits_foxes.png')
#
#
# The populations are indeed periodic, and their period is near to the T_f1 we calculated.
#
|
import sys
sys.path.append('Config/')
import config
from abc import ABCMeta, abstractmethod
import threading, time, logging, datetime, json
import gspread, cls_GSS
from oauth2client.service_account import ServiceAccountCredentials
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
class Keeper:
    """Base class for storage back-ends (Google Sheets, Mongo, ...)."""
    # NOTE(review): @abstractmethod below is inert because Keeper does not
    # use ABCMeta as its metaclass (e.g. `class Keeper(metaclass=ABCMeta)`),
    # so subclasses are NOT actually forced to implement insert_gs. Also,
    # the parameter name `input` shadows the builtin of the same name.
    def __init__(self,input):
        pass
    @abstractmethod
    def insert_gs(self):
        # Insert a record into the backing store (to be overridden).
        pass
class GSKeeper(Keeper):
    """Keeper backed by a Google Spreadsheet via the cls_GSS wrapper."""
    def __init__(self, inp):
        # `inp` is expected to expose config_info_gs — TODO confirm
        # against the Config/config module.
        self.config = inp
    def gss(self):
        # Builds a fresh spreadsheet wrapper on every call.
        return cls_GSS.GoogleSpeadSheet(self.config.config_info_gs)
class MEKeeper(Keeper):
    """Placeholder back-end; not implemented yet."""
    pass
# if __name__ == '__main__':
# input = {
# 'fileId':'',
# 'app_key_path':'',
# } |
import csv

# Restaurant name -> phone number pairs, one per line, colon-separated.
numbers = """Zaika:+919116666156
Yo Zing:+917983653992
ChaapHut:+919950699999
Tea Tradition: +917340000547
Tandoor:+911416530007
Saras:+917357549601
Login:+919116666156
Let's Go Live:+917742603072
Kebab Nation:+919983087222
HealthBar:+917073991323
Dev Sweets and Snacks:+919001641663
Delight:+917240422018
Cruncheezz:+917665423182
Crazy Chef:+919521099336
Chilling point:+919351516665
Chatkara:+917983653992
Cafe Dialog:+917229906333"""

# Open the output file once; the previous version re-opened (and
# re-closed) it in append mode for every single row.
with open('./day.csv', 'a', newline = '') as day:
    writer = csv.writer(day, delimiter = ',')
    for number in numbers.split('\n'):
        print(number)
        k,v = number.split(':')
        writer.writerow([k,v])
|
# Bug fixes: `from timer import slee` was a typo for time.sleep,
# Pool's keyword is `processes` (not `process`), and `print results`
# is Python 2 syntax in an otherwise Python 3 script.
from time import sleep
from multiprocessing import Pool


def start_function_for_processing(n):
    """Square n after a short delay; runs inside a worker process."""
    sleep(0.5)
    result_sent_back_to_parent = n * n
    return result_sent_back_to_parent


if __name__ == '__main__':
    with Pool(processes=5) as p:
        results = p.map(start_function_for_processing, range(200), chunksize=10)
    print(results)
import torch
import torch.nn as nn
import matplotlib
matplotlib.use('agg')
############################################################################
# Helper Utilities
############################################################################
def weights_init_normal(m):
    """
    Kaiming-initialize the weights of module `m` (intended for use with
    Module.apply). Modules carrying a `no_init` attribute are treated as
    pre-trained and left untouched.
    """
    classname = m.__class__.__name__
    if hasattr(m, 'no_init'):
        print(f'Skipping Init on Pre-trained:{classname}')
    else:
        if 'ConvTrans' == classname:
            # NOTE(review): exact-name match — concrete transposed-conv
            # classes (e.g. ConvTranspose2d) do NOT hit this branch and
            # fall through to the Conv branch below; confirm intended.
            pass
        elif 'Linear' in classname:
            #TODO - CHECK IF ORTHAGONAL IS BETTER
            # kaiming_normal_ is the in-place, non-deprecated spelling;
            # nn.init.kaiming_normal was deprecated long ago.
            nn.init.kaiming_normal_(m.weight.data)
        elif 'Conv2d' in classname or 'ConvTrans' in classname:
            nn.init.kaiming_normal_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
def weights_init_icnr(m):
    """Apply ICNR initialization to PreShuffConv modules (for Module.apply);
    every other module type is left unchanged."""
    name = m.__class__.__name__
    if 'PreShuffConv' not in name:
        return
    print(name)
    m.init_icnr()
    print('new icnr init')
def mft(tensor):
    """
    Return the mean of `tensor` as a float32 scalar tensor.

    Uses torch.as_tensor so list/tuple/ndarray/tensor inputs all work;
    the old torch.FloatTensor(...) call built an *uninitialized* tensor
    when handed a plain int, yielding a garbage mean.
    """
    return torch.mean(torch.as_tensor(tensor, dtype=torch.float32))
|
"""analyze.py:
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2017-, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def main():
    """Plot a histogram of the 'tau' column from the curated ephys CSV."""
    df = pd.read_csv('./article_ephys_metadata_curated.csv', sep='\t')
    cols = list(df.columns)
    print(sorted(cols))
    print( [ x for x in cols if 'volt' in x] )
    # The 3x3 subplot grid anticipates up to nine columns even though
    # only 'tau' is plotted at the moment.
    for i, x in enumerate(['tau']):
        plt.subplot(3, 3, i+1)
        # `x` is rebound from column name to the column's data here.
        x = df[x]
        x = x.dropna()
        plt.hist(x, bins=20)
        plt.title(f'mean={np.mean(x):.2f}', fontsize=10)
    plt.show()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Oriëntatie op AI
Opgave: recursie
(c) 2019 Hogeschool Utrecht
Tijmen Muller (tijmen.muller@hu.nl)
Let op! Je mag voor deze opgave geen extra modules importeren met 'import'.
"""
def faculteit(n):
    """Compute n! recursively."""
    # Base case
    if n == 0:
        return 1
    # Recursive step: n! = n * (n-1)!.
    # (The original returned faculteit(0), i.e. always 1.)
    else:
        return n * faculteit(n - 1)
def exponent(n):
    """Compute 2**n recursively."""
    # Base case: 2**0 == 1
    if n == 0:
        return 1
    # Recursive step: 2**n == 2 * 2**(n-1)
    return 2 * exponent(n - 1)
def som(lst):
    """Compute the sum of all elements of the given list recursively."""
    # Base case: the empty list sums to 0.
    if not lst:
        return 0
    # Recursive step: first element plus the sum of the rest.
    return lst[0] + som(lst[1:])
def palindroom(woord):
    """Determine recursively whether the given word (str) is a palindrome.

    Comparison is case-sensitive, matching the test expectations below.
    """
    # Base case: empty and single-character strings are palindromes.
    if len(woord) < 2:
        return True
    # Recursive step: outer characters must match and the inside must
    # itself be a palindrome.
    return woord[0] == woord[-1] and palindroom(woord[1:-1])
"""
==========================[ HU TESTRAAMWERK ]================================
Onderstaand staan de tests voor je code -- hieronder mag je niets wijzigen!
Je kunt je code testen door deze file te runnen of met behulp van pytest.
"""
import math
import random
def test_faculteit():
    # Compare against math.factorial for n = 0..5.
    for i in range(6):
        assert faculteit(i) == math.factorial(i), \
            f"Fout: faculteit({i}) geeft {faculteit(i)} in plaats van {math.factorial(i)}"
def test_exponent():
    # Compare against the ** operator for n = 0..9.
    for i in range(10):
        assert exponent(i) == 2**i, \
            f"Fout: exponent({i}) geeft {exponent(i)} in plaats van {2**i}"
def test_som():
    # Compare against builtin sum() on random lists of length 0..5.
    for i in range(6):
        lst_test = random.sample(range(-10, 11), i)
        # Fixed the error message: the original f-string contained a
        # stray comma ({som(lst_test),}) which formatted a 1-tuple.
        assert som(lst_test) == sum(lst_test), \
            f"Fout: som({lst_test}) geeft {som(lst_test)} in plaats van {sum(lst_test)}"
def test_palindroom():
    # (word, expected) pairs; note the case-sensitive final case.
    testcases = [
        ("", True),
        ("radar", True),
        ("maandnaam", True),
        ("pollepel", False),
        ("Maandnaam", False)
    ]
    for testcase, res in testcases:
        # `is res` requires real booleans, not merely truthy values.
        assert palindroom(testcase) is res, \
            f"Fout: palindroom({testcase}) geeft {palindroom(testcase)} in plaats van {res}"
if __name__ == '__main__':
    try:
        # "\x1b[0;32m" switches the terminal text to green; failures
        # below switch it to red, and "\x1b[0;30m" resets to black.
        print("\x1b[0;32m")
        test_faculteit()
        print("Je functie faculteit() doorstaat de tests!")
        test_exponent()
        print("Je functie exponent() doorstaat de tests!")
        test_som()
        print("Je functie som() doorstaat de tests!")
        test_palindroom()
        print("Je functie palindroom() doorstaat de tests!")
        print("\x1b[0;30m")
        # Interactive demo once all tests pass.
        x = input("Geef een woord: ")
        print(f"'{x}' is {'' if palindroom(x) else 'g'}een palindroom!")
    except AssertionError as ae:
        print("\x1b[0;31m")
        print(ae)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
import ma.views.user
import ma.views.driver
admin.autodiscover()
# NOTE(review): django.conf.urls.patterns() is deprecated and was removed
# in Django 1.10; on newer Django, urlpatterns must be a plain list of
# url()/path() entries — confirm the project's Django version.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'jiaoke.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # User account endpoints.
    url(r'^user_register$', ma.views.user.userRegister),
    url(r'^user_login$', ma.views.user.userLogin),
    url(r'^user_logout$', ma.views.user.userLogout),
    url(r'^user_get_info$', ma.views.user.userGetInfo),
    #driver
    url(r'^driver/update_location$', ma.views.driver.driverUpdateLocation),
    url(r'^driver/get_location$',ma.views.driver.driverGetLocation),
    url(r'^driver/get_order$',ma.views.driver.driverGetOrder),
    url(r'^driver/update_order$',ma.views.driver.driverUpdateOrder),
)
|
# Fibonacci數列f1,f2,….,fn ,f1 =1; f2 =1;當n>2時,fn = fn-1+fn-2 ;設計一程式輸入
# 一整數n(1<n<100),找出fn 。(請上傳 Fibonacci.py)
# Read an integer n (1 < n < 100) and print the n-th Fibonacci number,
# where f1 = f2 = 1.
n = int(input("輸入n:"))
fib = [1, 1]
while len(fib) < n:
    fib.append(fib[-1] + fib[-2])
print(fib[n - 1])
# Read a number as digits and replace each digit with its nine's
# complement when that is smaller, except that the leading digit is
# never turned into 0; print the resulting minimal number.
digits = [int(ch) for ch in input()]
for idx, d in enumerate(digits):
    flipped = 9 - d
    if flipped < d:
        if idx == 0 and flipped == 0:
            continue
        digits[idx] = flipped
print(int(''.join(str(d) for d in digits)))
|
import json
import math
import Functions
# Load the crawl results: the URL list and the precomputed inverted index.
with open('urls.json', 'r') as file_handle:
    links = json.load(file_handle)
with open('inverted_index.json', 'r') as file_handle:
    inverted_index = json.load(file_handle)
doc_count = len(links)
# Read the lemmatized text of every document into memory.
lemmas = {}
for filename in links:
    print(filename)
    f = open(filename + 'lemmatized' + '.txt', 'r')
    text = f.read()
    f.close()
    lemmas_in_file = text.split()
    lemmas[filename] = lemmas_in_file
# Per-document term counts (labelled "TF", though the counts are raw and
# not length-normalized — TODO confirm that is intended).
tokens_tf = {}
for filename in lemmas:
    print("calculate TF for " + filename + "/" + str(doc_count))
    for lemma in lemmas[filename]:
        if lemma not in tokens_tf:
            # First sighting: initialize a zero count for every document.
            tokens_tf[lemma] = {}
            for filename_temp in links:
                tokens_tf[lemma][filename_temp] = 0
        tokens_tf[lemma][filename] += 1
Functions.write_to_file_results(tokens_tf, "TF", "{}")
# Document frequency: sum the per-document entries of the inverted index.
tokens_df = {}
print("calculate DF")
for lemma in inverted_index:
    if lemma not in tokens_df:
        tokens_df[lemma] = 0
    for index, value in enumerate(inverted_index[lemma]):
        tokens_df[lemma] += value
Functions.write_to_file_results(tokens_df, "DF", "{}", for_each_document=False)
# Inverse document frequency.
tokens_idf = {}
print("calculate IDF")
for lemma in inverted_index:
    tokens_idf[lemma] = math.log(doc_count / tokens_df[lemma])
Functions.write_to_file_results(tokens_idf, "IDF", "{:.4f}", for_each_document=False)
# TF*IDF per (lemma, document) pair.
tf_idf = {}
print("calculate TF*IDF")
index = 1
print(tokens_tf)
for lemma in tokens_tf:
    for filename in tokens_tf[lemma]:
        print(str(index))
        print(lemma)
        index += 1
        if lemma not in tf_idf:
            tf_idf[lemma] = []
        # NOTE(review): one specific lemma is special-cased and skipped
        # here — looks like leftover debugging; confirm before removing.
        if lemma != 'наиль':
            tf_idf[lemma].append(tokens_tf[lemma][filename] * tokens_idf[lemma])
Functions.write_to_file_tf_idf(tf_idf)
|
from flask import Flask, render_template, request, redirect
from flask_table import Table, Col
from PyDictionary import PyDictionary
import json
import time
import sys
import sqlite3 as sql
import random
from mw_get_def import ProcessWords
app = Flask(__name__)
@app.route('/')
def home():
    """Landing page of the vocab app."""
    return render_template('home.html')
@app.route('/enternew', methods=['POST','GET'])
def new_term():
    """
    Add terms. Form type 'a' adds a single term (fetching a definition
    via ProcessWords when none is given), 'b' a pasted list of terms,
    and anything else a file of terms.
    """
    if request.method == 'POST':
        if request.form['type']=="a":
            if request.form['term']=="":
                msg = "** Please enter a term to be defined, jackass. **"
            elif request.form['defin']=="":
                # No definition supplied: let ProcessWords look one up.
                term = request.form['term']
                term_list = []
                term_list.append(term)
                ProcessWords(term_list)
                msg="Term Added."
            else:
                # Fully specified term: insert directly.
                term = request.form['term']
                pos = request.form['pos']
                defin = request.form['defin']
                if defin:
                    con = sql.connect("vocab.db")
                    cur = con.cursor()
                    # Manual id allocation: max(id)+1.
                    cur.execute("select max(id) from defined")
                    row = cur.fetchone()
                    next_id = int(row[0]) + 1
                    # NOTE(review): inserts into column `correct2` while
                    # addrec() uses `correct` — confirm which is right.
                    cur.execute("INSERT INTO defined (id, term,part_of_speech,definition, quizzed, correct2, date_added) VALUES (?,?,?,?,0,0,datetime(\"now\"))",
                        (next_id, term, pos, defin))
                    con.commit()
                    msg = "** Record for "+term+" as a " + pos + " successfully added to definition list. **"
                    con.close()
            return render_template('new_term2.html', msg=msg)
        elif request.form['type']=="b":
            # NOTE(review): `dictionary`, `undefs`, `con` and `cur` are
            # unused in this branch and the connection is never closed.
            dictionary=PyDictionary()
            undefs = []
            con = sql.connect('vocab.db')
            cur = con.cursor()
            term_list = request.form['term_list'].splitlines()
            ProcessWords(term_list)
            msg = "Second submit (B) returned"
            return render_template('new_term2.html', msg=msg)
        else:
            # Bulk import from a file path supplied in the form.
            term_file = request.form["term_file"]
            msg = "Third submit (C) returned. File was "+term_file
            word_list = open(term_file, 'r').read().splitlines()
            ProcessWords(word_list)
            return render_template('new_term2.html', msg=msg)
    else:
        return render_template('new_term2.html')
@app.route('/editterm/<termid>/')
def edit_term(termid):
    """Show the edit form pre-filled with the record for <termid>."""
    con = sql.connect("vocab.db")
    cur = con.cursor()
    # termid = int(termid)
    cur.execute("select * from defined where id = ?", (termid,))
    row = cur.fetchone()
    #print(row, file=sys.stderr)
    # Positional columns: 1 = term, 2 = part_of_speech, 3 = definition.
    term = row[1]
    pos = row[2]
    defin = row[3]
    # NOTE(review): `con` is never closed — the connection leaks per request.
    return render_template('edit_term.html', termid=termid, term=term, pos=pos, defin=defin)
@app.route('/editnewterm/<term>/')
def edit_newterm(term):
    """Create a bare record for <term> and open the edit form on it."""
    con = sql.connect("vocab.db")
    cur = con.cursor()
    #term = request.args.get("term")
    # Manual id allocation: max(id)+1.
    # NOTE(review): int(row[0]) raises TypeError when the table is empty
    # (max(id) is NULL) — presumably never happens in practice; confirm.
    cur.execute("select max(id) from defined")
    row = cur.fetchone()
    next_id = int(row[0]) + 1
    cur.execute("insert into defined (id, term)values(?,?)", (next_id, term))
    con.commit()
    # NOTE(review): `con` is never closed.
    return render_template('edit_term.html', termid=next_id, term=term)
@app.route('/addrec', methods=['POST', 'GET'])
# @app.route('/addrec/<termid>/')
def addrec():
    """
    Insert a fully-specified term posted from the add form and render
    result.html with a success or error message.

    The previous version returned from inside `finally`, which silently
    swallowed *every* exception (even SystemExit/KeyboardInterrupt), and
    its trailing con.close() sat after the return and never ran.
    """
    if request.method == 'POST':
        try:
            term = request.form['term']
            pos = request.form['pos']
            defin = request.form['defin']
            # `with` commits on success and rolls back on error.
            with sql.connect("vocab.db") as con:
                cur = con.cursor()
                # Manual id allocation: max(id)+1.
                cur.execute("select max(id) from defined")
                row = cur.fetchone()
                next_id = int(row[0]) + 1
                cur.execute(
                    "INSERT INTO defined (id, term,part_of_speech,definition, quizzed, correct, date_added) VALUES (?,?,?,?,0,0,datetime(\"now\"))",
                    (next_id, term, pos, defin))
                con.commit()
            msg = "Record successfully added to definition list."
        except Exception:
            msg = "error in insert operation"
        return render_template("result.html", msg=msg)
@app.route('/reporting')
def reporting():
    """Overall quiz accuracy plus per-term stats for imperfect terms."""
    con = sql.connect("vocab.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    # Overall percentage correct across all answered quiz questions.
    cur.execute("select round(sum(correct)*100.0/count(correct)*1.0,1), count(correct) from quizquestion")
    row = cur.fetchone();
    oall = str(row[0]) + "% correct on " + str(row[1]) + " questions."
    # Per-term accuracy, restricted to terms not answered 100% correctly.
    cur.execute(
        'select term, sum(correct) as cor, count(correct) as tot, sum(correct)*1.0/count(correct)*1.0 as pct from quizquestion, defined where term_id = id group by term having pct < 1.0 order by pct, tot desc')
    rows = cur.fetchall();
    con.close()
    return render_template("reporting.html", oall=oall, rows=rows)
@app.route('/editrec', methods=['POST', 'GET'])
def editrec():
    """
    Update term, part of speech and definition for an existing record,
    then render result.html with a status message.
    """
    termid = request.form['termid']
    term = request.form['term']
    pos = request.form['pos']
    defin = request.form['defin']
    con = sql.connect("vocab.db")
    try:
        cur = con.cursor()
        cur.execute("Update defined set definition = ?, term = ?, part_of_speech = ? WHERE id = ?", (defin, term, pos, termid))
        con.commit()
    finally:
        # The old code returned first, so its con.close() was unreachable
        # and the connection leaked on every call.
        con.close()
    msg = "Definition successfully updated"
    return render_template("result.html", msg=msg)
@app.route('/defined', methods=['POST','GET'])
@app.route('/defined/<alpha>')
def deflist(alpha=None):
    """
    List defined terms: all of them, those matching a POSTed search
    string, or those starting with the letter(s) in <alpha>.
    """
    con = sql.connect("vocab.db")
    con.row_factory = sql.Row
    cur = con.cursor()
    if (alpha is None):
        if request.method == 'POST':
            # Parameterized LIKE — the previous string-concatenated
            # query was open to SQL injection via the search field.
            srch_term = request.form['srch_term']
            cur.execute("select * from defined where term like(?) order by term",
                        ('%' + srch_term + '%',))
        else:
            cur.execute("select * from defined order by term")
    else:
        # Prefix match on the URL letter(s), parameterized as well.
        cur.execute("select * from defined where term like(?) order by term",
                    (alpha + '%',))
    rows = cur.fetchall()
    num_words = len(rows)
    return render_template("list.html", rows=rows, num_words=num_words)
@app.route('/undefined')
def udeflist():
    """List terms still lacking a definition, pruning any now defined."""
    con = sql.connect("vocab.db")
    # Drop undefined entries that have since gained a definition.
    con.execute("delete from undefined where term IN(select distinct term from defined)")
    con.commit()
    con.row_factory = sql.Row
    cur = con.cursor()
    cur.execute("select * from undefined order by term")
    rows = cur.fetchall();
    num_words = len(rows)
    # NOTE(review): `con` is never closed.
    return render_template("list2.html", rows=rows, num_words=num_words)
@app.route('/flashcard')
def flashcard():
    """Show the front of a flashcard for one randomly chosen term."""
    con = sql.connect("vocab.db")
    con.row_factory = sql.Row
    cursor = con.cursor()
    # SQLite picks the random row server-side.
    cursor.execute("select * from defined order by random() limit 1")
    card = cursor.fetchone()
    return render_template("cardfront.html", row=card)
@app.route('/edit', defaults={'term': "xxxxxxxxxx", 'pos': "xxxxxxxxxx"})
@app.route('/edit/<term>/<pos>/')
def edit(term, pos):
    """
    Edit a term selected by (term, part-of-speech). The literal
    'xxxxxxxxxx' default acts as a "nothing selected" sentinel and
    falls back to the full term list.
    """
    if term != 'xxxxxxxxxx' and pos != 'xxxxxxxxxx':
        con = sql.connect("vocab.db")
        con.row_factory = sql.Row
        cur = con.cursor()
        cur.execute("select definition from defined where term = ? and part_of_speech = ?", (term, pos))
        row = cur.fetchone();
        defin = str(row["definition"])
        # NOTE(review): `con` is never closed here.
        return render_template("edit_term.html", term=term, pos=pos, defin=defin)
    else:
        return deflist()
@app.route('/quiz', defaults={'nq': 10}, methods=['POST', 'GET'])
@app.route('/quiz/<nq>/', methods=['POST', 'GET'])
def quiz(nq):
    """
    GET: build a multiple-choice quiz of nq questions and render it.
    POST: grade the submitted answers and render the results page.
    """
    if request.method == 'GET':
        nq = int(nq)
        con = sql.connect("vocab.db")
        cur = con.cursor()
        # Record the quiz itself; its rowid keys the questions.
        cur.execute("insert into quiz(q_num) values (?)", [nq])
        quiz_id = cur.lastrowid
        con.commit()
        quizd = {}
        for z in range(nq):
            ## instantiate a single question
            # NOTE(review): a fresh connection is opened per question and
            # never closed — consider hoisting/closing.
            con = sql.connect("vocab.db")
            con.row_factory = sql.Row
            cur = con.cursor()
            # favor words that have not yet been quizzed by a certain percentage
            new_favor_factor = cur.execute("select favor_new from config")
            new_favor_factor = new_favor_factor.fetchone()[0]
            if random.randint(1, 100) < new_favor_factor:
                ## get all term ids that are not in quizquestions
                x = "select * from defined where id not in (select distinct term_id from quizquestion) order by random() limit 1"
            else:
                got_wrong_factor = cur.execute("select favor_wrong from config")
                got_wrong_factor = got_wrong_factor.fetchone()[0]
                if random.randint(1, 100) > got_wrong_factor:
                    x = "select * from defined where id in (select distinct term_id from quizquestion) order by random() limit 1"
                else:
                    # Favor terms that were answered wrongly before.
                    x = "select * from defined where id in (select term_id from quizquestion group by term_id having sum(correct)/count(correct)< 1) order by random() limit 1"
            cur.execute(x)
            row = cur.fetchone()
            termid = str(row["id"])
            term = str(row["term"])
            pos = str(row["part_of_speech"])
            correct = (str(row["definition"]).capitalize())
            #print(termid, term, pos, correct)
            ## Get distractors
            # Three other definitions with the same part of speech.
            cur.execute(
                "select definition, id from defined where id != ? and part_of_speech = ? order by random() limit 3",
                (termid, pos))
            rows = cur.fetchall()
            # Map definition text -> 1 for the right answer, 0 otherwise.
            options = {correct: 1}
            for i in range(0, 3):
                options[str(rows[i][0])] = 0
            # shuffle
            keys = list(options.keys())
            random.shuffle(keys)
            x = [(key, options[key]) for key in keys]
            x = [list(i) for i in x]
            # Tag the four choices A-D; the term id rides along at the end.
            x[0].insert(0, 'A')
            x[1].insert(0, 'B')
            x[2].insert(0, 'C')
            x[3].insert(0, 'D')
            x.append(termid)
            quizd[(term)] = x
        #print(quizd)
        return render_template("quiz.html", quizd=quizd, quiz_id=quiz_id)
    else:
        #print(request.form)
        quiz_id = request.form['quiz_id']
        con = sql.connect("vocab.db")
        con.row_factory = sql.Row
        cur = con.cursor()
        # Every form key other than quiz_id is a term id; the value
        # 'NULL' marks a correct pick, anything else is the wrong guess.
        for x in request.form:
            if x != 'quiz_id':
                if request.form[x] == 'NULL':
                    cur.execute("insert into quizquestion(quiz_id, term_id, correct) values(?,?,1)", (quiz_id, x))
                    con.commit()
                else:
                    cur.execute("insert into quizquestion(quiz_id, term_id, correct, guess) values(?,?,0,?)", (quiz_id, x, request.form[x]))
                    con.commit()
        cur.execute("select defined.id, term from defined, quizquestion where defined.id = quizquestion.term_id and quiz_id = ? and quizquestion.correct = 1 order by term", (quiz_id,))
        rows_r = cur.fetchall()
        nr = len(rows_r)
        cur.execute("select defined.id, term, defined.definition, guess from defined, quizquestion where defined.id = quizquestion.term_id and quiz_id = ? and quizquestion.correct = 0 order by term", (quiz_id,))
        rows_w = cur.fetchall()
        nw = len(rows_w)
        # NOTE(review): ZeroDivisionError if the quiz had no questions.
        pct_r = round(nr*100/(nr + nw))
        return render_template("quiz_results.html", rows_r = rows_r, rows_w = rows_w, quiz_id = quiz_id, pct_r = pct_r)
# Bug fix: the original registered the identical route decorator twice;
# one registration is sufficient.
@app.route('/del_udef', methods=['POST', 'GET'])
def delete_udef():
    """Delete an undefined term (?term=) and re-render the undefined list."""
    term = request.args.get("term")
    con = sql.connect("vocab.db")
    cur = con.cursor()
    cur.execute("delete from undefined where term = ?", (term,))
    con.commit()
    con.close()  # bug fix: close the connection instead of leaking it
    return udeflist()
@app.route('/del_def', methods=['POST', 'GET'])
def delete_term():
    """Delete a defined term by its ?termid= and re-render the definition list."""
    term_id = request.args.get("termid")
    connection = sql.connect("vocab.db")
    connection.cursor().execute("delete from defined where id = ?", (term_id,))
    connection.commit()
    return deflist()
# Run the Flask development server when executed directly (debug mode only;
# not suitable for production).
if __name__ == '__main__':
    app.run(debug=True)
|
def read_sudoku(filename: str) -> List[List[str]]:
    """Read a Sudoku puzzle from *filename* as a 9x9 grid of '1'-'9'/'.' strings."""
    with open(filename) as f:
        raw = f.read()
    # Keep only the characters encoding cells; everything else is layout noise.
    cells = [ch for ch in raw if ch in '123456789.']
    return group(cells, 9)
def display(grid: List[List[str]]) -> None:
    """Pretty-print the Sudoku grid with 3x3 box separators."""
    width = 2
    divider = '+'.join(['-' * (width * 3)] * 3)
    for r in range(9):
        pieces = []
        for c in range(9):
            pieces.append(grid[r][c].center(width))
            if str(c) in '25':
                pieces.append('|')  # vertical box boundary after cols 2 and 5
        print(''.join(pieces))
        if str(r) in '25':
            print(divider)  # horizontal box boundary after rows 2 and 5
    print()
def group(values: List[str], n: int) -> List[List[str]]:
    """
    Split *values* into consecutive chunks of *n* elements.
    >>> group([1,2,3,4], 2)
    [[1, 2], [3, 4]]
    >>> group([1,2,3,4,5,6,7,8,9], 3)
    [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    """
    # Bug fix: the original sliced an undefined name `row` instead of the
    # `values` parameter, raising NameError on every call.
    return [values[i:i + n] for i in range(0, len(values), n)]
def get_row(grid: List[List[str]], pos: Tuple[int, int]) -> List[str]:
    """Return the full row containing position *pos* (only the row index matters).
    >>> get_row([['1', '2', '.'], ['4', '5', '6'], ['7', '8', '9']], (0, 0))
    ['1', '2', '.']
    >>> get_row([['1', '2', '3'], ['4', '.', '6'], ['7', '8', '9']], (1, 0))
    ['4', '.', '6']
    """
    row_index, _ = pos
    return grid[row_index]
def get_col(grid: List[List[str]], pos: Tuple[int, int]) -> List[str]:
    """Return the full column containing position *pos* (only the column index matters).
    >>> get_col([['1', '2', '.'], ['4', '5', '6'], ['7', '8', '9']], (0, 0))
    ['1', '4', '7']
    >>> get_col([['1', '2', '3'], ['4', '.', '6'], ['7', '8', '9']], (0, 1))
    ['2', '.', '8']
    """
    _, col_index = pos
    return [row[col_index] for row in grid[:len(grid)]]
def get_block(grid: List[List[str]], pos: Tuple[int, int]) -> List[str]:
    """Return all nine values of the 3x3 box containing position *pos*,
    row by row, left to right.
    """
    # Top-left corner of the enclosing 3x3 box.
    top = (pos[0] // 3) * 3
    left = (pos[1] // 3) * 3
    box = []
    for r in range(top, top + 3):
        box.extend(grid[r][left:left + 3])
    return box
def find_empty_positions(grid: List[List[str]]) -> Optional[Tuple[int, int]]:
    """Return the first free ('.') position scanning row-major, or (-1, -1)
    when the grid is full (note: the sentinel, not None, despite the hint —
    solve() checks for -1).
    >>> find_empty_positions([['1', '2', '.'], ['4', '5', '6'], ['7', '8', '9']])
    (0, 2)
    """
    size = len(grid)
    for r in range(size):
        for c in range(size):
            if grid[r][c] == '.':
                return (r, c)
    return (-1, -1)
def find_possible_values(grid: List[List[str]], pos: Tuple[int, int]) -> Set[str]:
    """Return the set of digits ('1'-'9') that could legally go at *pos*:
    everything not already used in its row, column, or 3x3 box.
    """
    candidates = {str(d) for d in range(1, 10)}
    used = set(get_row(grid, pos)) | set(get_col(grid, pos)) | set(get_block(grid, pos))
    return candidates - used
def solve(grid: List[List[str]]) -> Optional[List[List[str]]]:
    """Solve the puzzle in-place by backtracking; return the grid or None.

    Strategy: find the first empty cell, try each legal digit, recurse on
    the rest of the puzzle, and undo the guess on failure.
    """
    row, col = find_empty_positions(grid)
    if row == -1:
        return grid  # no empty cell left: solved
    for candidate in find_possible_values(grid, (row, col)):
        grid[row][col] = candidate
        result = solve(grid)
        if result is not None:
            return result
        grid[row][col] = '.'  # backtrack: undo the failed guess
    return None  # no candidate works for this cell
def check_solution(solution: List[List[str]]) -> bool:
    """Return True iff *solution* is complete and valid: no cell has any
    remaining possible value (every row/column/box already uses all digits)."""
    for r in range(9):
        for c in range(9):
            if find_possible_values(solution, (r, c)):
                return False
    return True
import random
from typing import Tuple, List, Set, Optional
def generate_sudoku(N: int) -> List[List[str]]:
    """ Генерация судоку заполненного на N элементов
    >>> grid = generate_sudoku(40)
    >>> sum(1 for row in grid for e in row if e == '.')
    41
    >>> solution = solve(grid)
    >>> check_solution(solution)
    True
    >>> grid = generate_sudoku(1000)
    >>> sum(1 for row in grid for e in row if e == '.')
    0
    >>> grid = generate_sudoku(0)
    >>> sum(1 for row in grid for e in row if e == '.')
    81
    """
    # Bug fixes: the original nested a second `def generate_sudoku` inside
    # itself and never called it, so the outer call returned None; the field
    # literal `[['.']*9] for i in range(9)]` was a syntax error; and
    # random.sample() on a set is rejected by Python 3.11+.
    digits = [str(d) for d in range(1, 10)]
    field = [['.'] * 9 for _ in range(9)]
    if N <= 0:
        return field  # fully empty puzzle
    # Seed the first row with a random permutation, then complete the grid.
    field[0] = random.sample(digits, 9)
    solve(field)
    if N > 81:
        return field  # keep every cell filled
    # Blank out 81-N randomly chosen cells.
    for cell in random.sample(range(81), 81 - N):
        field[cell // 9][cell % 9] = '.'
    return field
|
#
# @lc app=leetcode.cn id=26 lang=python3
#
# [26] 删除排序数组中的重复项
#
# @lc code=start
class Solution:
    """LeetCode 26: remove duplicates from a sorted array in place.

    Three equivalent O(n)/O(1) approaches; each returns the new length and
    leaves the unique values in the array's prefix.
    """

    # Count duplicates seen so far and shift each element left by that count.
    def removeDuplicates1(self, nums: List[int]) -> int:
        shift = 0
        for idx in range(1, len(nums)):
            if (nums[idx - 1] == nums[idx]):
                shift += 1
            nums[idx - shift] = nums[idx]
        return len(nums) - shift

    # Count unique elements and write each new value right after them.
    def removeDuplicates2(self, nums: List[int]) -> int:
        uniques = 0
        for idx in range(1, len(nums)):
            if nums[idx - 1] != nums[idx]:
                uniques += 1
                nums[uniques] = nums[idx]
        return uniques + 1

    # Two pointers: `tail` marks the last unique slot, `idx` scans ahead.
    def removeDuplicates(self, nums: List[int]) -> int:
        tail = 0
        for idx in range(1, len(nums)):
            if nums[tail] != nums[idx]:
                tail += 1
                nums[tail] = nums[idx]
        return tail + 1
# @lc code=end
|
import pyodbc
import connections as conn
# Migrate makeup products from the old schema into the new one, creating any
# missing attribute/category/brand rows along the way.
cursor_new = conn.conn_new.cursor()
cursor_old = conn.conn_old.cursor()
# row layout: 0=Makeup_ID 1=Category_ID 2=Brand_ID 3=Attribute_ID 4=Name
#             5=Volume 6=ASIN 7=Attribute_Name 8=Brand_Name 9=Category_Name
oldMakeUp = cursor_old.execute('SELECT M.Makeup_ID, M.Makeup_Category_ID, M.Makeup_Brand_ID, M.Makeup_Attribute_ID, M.Makeup_Name, M.Makeup_Volume, M.ASIN, MA.Makeup_Attribute_Name, MB.Makeup_Brand_Name, MC.Makeup_Category_Name FROM Makeup AS M JOIN Makeup_Attributes AS MA on M.Makeup_Attribute_ID = MA.Makeup_Attribute_ID JOIN Makeup_Brands AS MB on M.Makeup_Brand_ID = MB.Makeup_Brand_ID JOIN Makeup_Categories AS MC on M.Makeup_Category_ID = MC.Makeup_Category_ID')
count = 0
for row in oldMakeUp:
    # --- Attribute: look up by name, inserting it first when unknown. ---
    MK_Attribute_ID = 0
    Attribute = cursor_new.execute('SELECT Makeup_Attribute_ID FROM Makeup_Attributes Where Makeup_Attribute_Name = ?', row[7])
    for Attributerow in Attribute:
        MK_Attribute_ID = Attributerow[0]
    if(MK_Attribute_ID == 0):
        cursor_new.execute('Insert Into Makeup_Attributes Values(?)', row[7])
        Attribute = cursor_new.execute(
            'SELECT Makeup_Attribute_ID FROM Makeup_Attributes Where Makeup_Attribute_Name = ?', row[7])
        for Attributerow in Attribute:
            MK_Attribute_ID = Attributerow[0]
    # --- Category: same upsert pattern (3 = makeup department). ---
    MK_Cat_ID = 0
    # Bug fix: the lookup used row[7] (the attribute name) instead of row[9]
    # (the category name), matching the re-query after the insert below.
    Category = cursor_new.execute('SELECT Cat_ID FROM Category Where Cat_Name = ?', row[9])
    for Categoryrow in Category:
        MK_Cat_ID = Categoryrow[0]
    if(MK_Cat_ID == 0):
        cursor_new.execute('Insert Into Category Values(3,?)', row[9])
        Category = cursor_new.execute(
            'SELECT Cat_ID FROM Category Where Cat_Name = ?', row[9])
        for Categoryrow in Category:
            MK_Cat_ID = Categoryrow[0]
    # --- Brand: same upsert pattern. ---
    Mk_Brand_ID = 0
    Brand = cursor_new.execute('SELECT Brand_ID FROM Brand Where Brand_Name = ?', row[8])
    for Brandrow in Brand:
        Mk_Brand_ID = Brandrow[0]
    if(Mk_Brand_ID == 0):
        cursor_new.execute('Insert Into Brand Values(?)', row[8])
        Brand = cursor_new.execute(
            'SELECT Brand_ID FROM Brand Where Brand_Name = ?', row[8])
        for Brandrow in Brand:
            Mk_Brand_ID = Brandrow[0]
    # Insert the product itself with the resolved foreign keys.
    cursor_new.execute('Insert Into Product_Info (Prod_SKU, Dept_ID, Cat_ID, Brand_ID, Makeup_Attribute_ID, Prod_Name, Prod_ASIN, Prod_Volume) Values (?,3,?,?,?,?,?,?)',
                       row[0], MK_Cat_ID, Mk_Brand_ID, MK_Attribute_ID, row[4], row[6], row[5])
    count = count + 1
conn.conn_new.commit()
# Bug fix: 'interted' -> 'inserted' in the summary message.
print(str(count) + ' rows inserted successfully!')
import os
import onedrivesdk
from django.conf import settings
BASE_DIR = settings.BASE_DIR # Comment this line out when creating authentication without app running.
'''
Configurations for the Microsoft One Drive API. API tokens are provided for convenient
recreation of the web server. Please don't abuse the limits on the API.
'''
# NOTE(review): client_secret/client_id are committed in source; they should
# live in environment variables or the Django settings, not in the repo.
redirect_uri = 'https://localhost:8080/'
client_secret = 'MICROSOFT_ONE_DRIVE_CLIENT_SECRET'
client_id='MICROSOFT_ONE_DRIVE_CLIENT_ID'
api_base_url='https://api.onedrive.com/v1.0/'
# Scopes: sign-in, long-lived refresh tokens, full read/write drive access.
scopes=['wl.signin', 'wl.offline_access', 'onedrive.readwrite']
# Shared provider instances reused by every client created in this module.
http_provider = onedrivesdk.HttpProvider()
auth_provider = onedrivesdk.AuthProvider(
    http_provider=http_provider,
    client_id=client_id,
    scopes=scopes)
'''
Upload a file onto the one drive sdk, loading a user's information
'''
def uploadFile(directory, filename, local_path):
    """Upload *local_path* as *filename* into the root-level OneDrive folder
    named *directory* (every matching folder, if names repeat)."""
    client = load_authenticated_session()
    for entry in client.item(drive='me', id='root').children.get():
        if entry.name != directory:
            continue
        target = client.item(drive='me', id=entry.id).children[filename]
        target.upload(local_path)
'''
Load the pickled credentials for a Microsoft One Drive Account from the application's folder
'''
def load_authenticated_session():
    """Restore the pickled OneDrive session from the app folder and refresh
    its access token; return a ready-to-use client."""
    session_path = os.path.join(BASE_DIR, "api/controllers/tmp/onedrive_session.pickle")
    client = onedrivesdk.OneDriveClient(api_base_url, auth_provider, http_provider)
    client.auth_provider.load_session(path=session_path)
    client.auth_provider.refresh_token()
    return client
'''
Create and pickle credentials for a Microsoft One Drive account. Implements the Oauth2 protocol in a command
line interface, and saves the refresh token, so no need to reauthenticate the application.
'''
def create_authenticated_session():
    """Run the interactive OAuth2 flow on the command line and pickle the
    resulting session (including the refresh token) to ./tmp/."""
    client = onedrivesdk.OneDriveClient(api_base_url, auth_provider, http_provider)
    auth_url = client.auth_provider.get_auth_url(redirect_uri)
    # Walk the user through the manual copy/paste OAuth dance.
    print('Paste this URL into your browser, approve the app\'s access.')
    print('Copy everything in the address bar after "code=", and paste it below. Put code in quotations.')
    print(auth_url)
    code = input('Paste code here: ')
    client.auth_provider.authenticate(code, redirect_uri, client_secret)
    client.auth_provider.save_session(path='./tmp/onedrive_session.pickle')
'''
Run this executable to generate a credentials file for the application to use. Comment out BASE_DIR
when running this script.
'''
# Standalone use: generate the credentials pickle (comment out BASE_DIR above
# when running this file outside the Django app).
if __name__ == "__main__":
    create_authenticated_session()
|
from yourapplication import app
@app.route('/')
def index():
    """Landing page."""
    greeting = 'Hello World!'
    return greeting
@app.route("/blog")
def blog():
return "This is the blog page"
|
import os.path
import numpy as np
from invoke import run
import argparse
import time
"""
This python script runs the pdal pipeline merge-pipe-v0.json for a list of ground and object las files.
The top level directory must be specified by hand. merge-pipe-v0.json should be located in the directory
from which this script is run.
merge-pipe-v0.json has the form:
{
"pipeline":[
{
"tag":"ground_laz", reads the ground las file
"type":"readers.las",
"filename":"ground_file"
},
{
"type":"filters.assign", sets the Classification value to ground ==2
"assignment":"Classification[:]=2",
"tag":"ground_classed"
},
{
"tag":"objects_laz", reads the object file
"type":"readers.las",
"filename":"objects_file"
},
{
"type":"filters.assign", sets the claqssification value to unclassified
"assignment":"Classification[:]=1",
"tag":"objects_classed"
},
{
"tag":"merging", merges the two files
"type":"filters.merge",
"inputs":["ground_classed" , "objects_classed"]
},
{
"tag":"output_merged", writes output
"type":"writers.las",
"filename":"merged_file",
"forward":"all"
}
]
}
See pdal.io for a documentation of the individual pdal filters
"""
"""
++++++++++++++++++++++++++++++++++
Define utility functions
"""
# create pdal pipeline command to run the merge-pipe-v0.json pipeline using filenames specified at runtime
def run_pipe_cmd(ingroundfile, inobjectfile, mergedfile):
    """Build the shell command that runs merge-pipe-v0.json, overriding the
    ground/object input stems and the merged output stem (all without .laz)."""
    pieces = [
        "pdal pipeline 'merge-pipe-v0.json'",
        "--writers.las.filename=" + mergedfile + ".laz",
        "--stage.ground_laz.filename=" + ingroundfile + ".laz",
        "--stage.objects_laz.filename=" + inobjectfile + ".laz",
        "--nostream",
    ]
    return " ".join(pieces)
# extract unique tile names from a directory containing AHN2 files (ground files in this case located in the subdirectory terrain
def get_parent_list(data_path):
    """Collect the unique parent tile names under <data_path>/terrain.

    File names look like 'g<tile>_<subtile>.laz': the leading character is
    stripped and everything before the first '_' identifies the tile.
    """
    terrain_dir = data_path + '/terrain'
    unique_names = {fname.split('_')[0][1:] for fname in os.listdir(terrain_dir)}
    return list(unique_names)
# construct file names at subtile level during run time. This assumes that the subdirectory merged has been created previously (manually)
def set_file_names(datapath, tile, index):
    """Build the ground/object/merged file stems for one subtile and check
    whether both inputs exist.

    Returns (files_exist, ground, object, merged); files_exist is 0 when both
    input .laz files are present (legacy flag kept for merge_loop).
    Assumes the 'merged' subdirectory was created beforehand.
    """
    # Bug fix: the original ignored its `datapath` parameter and silently read
    # the module-level global `data_path` instead.
    suffix = tile + '_' + str(index).zfill(2)  # zero-pad single-digit subtiles
    ground_file_name = datapath + '/terrain/' + 'g' + suffix
    object_file_name = datapath + '/objects/' + 'u' + suffix
    merged_file_name = datapath + '/merged/' + 'ahn2_' + suffix
    files_exist = 0
    if os.path.isfile(ground_file_name + '.laz') and os.path.isfile(object_file_name + '.laz'):
        print('mergeing files for tile:' + tile + ' subtile:' + str(index) + ' ')
    else:
        print('subtile ' + str(index) + ' does not exist for tile ' + tile + ' .')
        files_exist = 1
    return files_exist, ground_file_name, object_file_name, merged_file_name
# loop over all tiles with their subtiles and run pipeline merging ground and object files in each case
def merge_loop(parent_tile_list, data_path):
    """Run the merge pipeline for every subtile (1-25) of every parent tile,
    printing per-subtile and per-tile timings."""
    for tile in parent_tile_list:
        tile_start_time = time.time()
        for i in range(25):
            subtile_start_time = time.time()
            index = i + 1
            files_exist, ground_file, object_file, merged_file = set_file_names(data_path, tile, index)
            if files_exist == 0:
                run_merge_cmd = run_pipe_cmd(ground_file, object_file, merged_file)
                # warn=True stops invoke from raising, so the loop can
                # continue past individual pipeline failures.
                result_merge_pipe = run(run_merge_cmd, hide=True, warn=True)
                if result_merge_pipe.ok != True:
                    print('pipeline failure for tile: '+tile+' subtile: '+str(index))
            else:
                print('subtile '+str(index)+' does not exist.')
            subtile_end_time = time.time()
            subtile_diff_time = subtile_end_time - subtile_start_time
            # Bug fix: '% sec' is a broken format spec (space-flagged %s plus a
            # literal 'ec'); '%s sec' prints the intended "<t> sec".
            print(('total time for subtile: %s sec') % (subtile_diff_time))
        tile_end_time = time.time()
        tile_diff_time = tile_end_time - tile_start_time
        print(('total time for tile: %s sec') % (tile_diff_time))
"""
++++++++++++++++++++++++
Main()
"""
start_time = time.time()
#set data path manually before running this script
data_path = '/path/to/top-level/data_directory'
#get tile list from the terrain subdirectory
parent_tile_list = get_parent_list(data_path)
#execute merge for all subtiles of tiles in tile list
merge_loop(parent_tile_list,data_path)
full_time = time.time()
total_diff_time = full_time - start_time
print('done')
# Bug fix: '% sec' was a broken format spec; '%s sec' prints the elapsed time.
print(('total time : %s sec') % (total_diff_time))
|
import websocket
import sys
import datetime
def basic_test():
ws = websocket.create_connection(sys.argv[2])
print "* [Starting basic test]"
print "* [Load string ~ 1kB]"
hit_factor = 300
load_string = "LOAD_TEST"*100
then = datetime.datetime.now()
for i in range(1,hit_factor):
ws.send(load_string)
ws.recv()
now = datetime.datetime.now()
print "* [Done]"
delta = now - then
print "* [Took {0}s]".format(delta.total_seconds())
print "* [Sent {0}kB]".format(hit_factor * len(load_string) / 1000)
print "* [Throughtput {0}kBps]".format(hit_factor*2/delta.total_seconds())
ws.close()
def parse_args():
    """Dispatch on the first CLI argument; only the 'basic' load test exists."""
    if sys.argv[1] == 'basic':
        basic_test()
# Script entry point: dispatch immediately when the file is executed.
parse_args()
|
import sys
from skimage import io, transform, feature, img_as_ubyte
import numpy as np
def preprocess(filename):
    """Load *filename* as an 8-bit grayscale image, resizing to 768x1024 when
    the height is not 768 (the old iPad screenshot size).

    NOTE(review): `as_grey` is the legacy kwarg spelling; newer scikit-image
    versions renamed it to `as_gray` — confirm the pinned version.
    """
    image = img_as_ubyte(io.imread(filename, as_grey = True))
    if image.shape[0] != 768:
        print(image.shape)
        print("WARN: Resizing image to old iPad Size. TODO> Move forward to retina images!")
        return img_as_ubyte(transform.resize(image, (768, 1024)))
    return image
def yield_slits(image):
    """Yield horizontal strips ("slits") of *image* delimited by strong edge rows.

    A row with more than 800 Canny edge pixels is treated as a separator; when
    two separators are 51-59 rows apart, the 52 rows above the current one are
    yielded. The 800/50/60/52 constants look tuned to a specific 768x1024
    screenshot layout — TODO confirm before reusing on other inputs.
    """
    ff = feature.canny(image)
    pi = 0
    for i in range(len(ff)):
        cnt = np.count_nonzero(ff[i])
        if cnt > 800:
            diff = i - pi
            if diff > 50 and diff < 60:
                slit = image[i-52:i]
                yield slit
            # Remember the latest separator row whether or not we yielded.
            pi = i
def parse(filename):
    """Preprocess *filename* and return a generator over its slits."""
    return yield_slits(preprocess(filename))
def check_and_parse(filename):
    """
    Check whether enemy or not, and returns slit.
    filename : file object or url, which can be read from imread
    """
    image = preprocess(filename)
    # Heuristic: a dark pixel (<150) at the fixed location (55, 600) marks an
    # enemy screenshot — presumably tied to the 768x1024 layout; verify.
    isEnemy = (image[55][600] < 150)
    for it in yield_slits(image):
        yield (isEnemy, it)
if __name__ == "__main__":
io.use_plugin('pil')
check_and_parse("9.png")
for it in parse("9.png"):
print(it)
|
import pandas as pd
import numpy as np
# Supplementary predictions; drop the index column written by to_csv.
df_sup = pd.read_csv('predictions_402.csv')
df_sup = df_sup.iloc[:, 1:]
# 0/1 arrangement mask, same shape as df_sup; drop its index column too.
df = pd.read_csv('arrange.csv')
df = df.iloc[:, 1:]
# for i in range(len(df)):
#     for j in range(len(df.iloc[i])):
#         df.iloc[i, j] = True if df.iloc[i, j] == 1 else False
# Hide every entry flagged in `df` (True -> NaN), then blank remaining zeros.
df_sup = df_sup.mask(np.array(df, dtype=bool))
df_sup = df_sup.replace(0, np.nan)
df_sup.to_csv('prob3_sup.csv')
# print(df['0'])
|
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, Http404
from account.decorator import unauthenticated_user, allowed_users, admin_only
from . models import EmailEntry
from .forms import EmailEntryForm, EmailEntryUpdateForm
# Create your views here.
@login_required
def custormer_page(request, *args, **kwargs):
    """Render the plain user landing page (login required)."""
    # NOTE(review): "custormer" is a typo for "customer"; renaming requires a
    # matching change wherever the URLconf references this view.
    return render(request, 'user_page.html', {})
@login_required
@admin_only
def email_entry_update_view(request, id=None, *args, **kwargs):
    """Edit an existing EmailEntry; 404 when the id is unknown."""
    try:
        entry = EmailEntry.objects.get(id=id)
    except EmailEntry.DoesNotExist:
        raise Http404
    # Bound to POST data when present, otherwise an unbound edit form.
    update_form = EmailEntryUpdateForm(request.POST or None, instance=entry)
    if update_form.is_valid():
        update_form.save()
    return render(request, 'emails/update.html', {'form': update_form, 'obj': entry})
@login_required
@allowed_users(['admin'])
def email_entry_create_view(request, *args, **kwargs):
    """Create an EmailEntry; on success re-render with a fresh form and an
    'added' flag for the template."""
    context = {}
    if request.user.is_authenticated:
        context['some_cool_staff'] = "whatever"
    # Bug fix: removed a leftover debug print of the user object.
    form = EmailEntryForm(request.POST or None)
    context['form'] = form
    if form.is_valid():
        form.save()
        # Replace the bound form so the template renders an empty one.
        context['form'] = EmailEntryForm()
        context['added'] = True
    return render(request, 'home.html', context)
@login_required
@allowed_users(['admin'])
def email_entry_list_view(request, *args, **kwargs):
    """List every EmailEntry."""
    context = {'object_list': EmailEntry.objects.all()}
    return render(request, 'emails/list.html', context)
@login_required
@allowed_users(['admin'])
def email_entry_detail_view(request, id=None, *args, **kwargs):
    """Show one EmailEntry; 404 when the id is unknown."""
    try:
        entry = EmailEntry.objects.get(id=id)
    except EmailEntry.DoesNotExist:
        raise Http404
    return render(request, 'emails/detail.html', {'obj': entry})
@login_required
@admin_only
def email_entry_delete_view(request, id=None, *args, **kwargs):
    """Confirm (GET) and perform (POST) deletion of an EmailEntry."""
    try:
        entry = EmailEntry.objects.get(id=id)
    except EmailEntry.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        entry.delete()
        return redirect('/emails')
    return render(request, 'emails/delete.html', {'obj': entry})
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et:wrap:ai:fileencoding=utf-8:
__author__ = "Tiago Alves Macambira < first . last @ chaordicsystems.com>"
__copyright__ = "Copyright (C) 2013 Chaordic Systems S/A"
__license__ = "Public Domain"
from mrjob.job import MRJob
class TwitterNumFollowersApp(MRJob):
    """Count occurrences per attribute in tab-separated '<user>\\t<attr>' lines."""

    def steps(self):
        # Bug fix: the original wired up self.map_identity/self.reducer_identity,
        # neither of which exists on this class; use the counter pair defined below.
        return [self.mr(mapper=self.map_counter,
                        reducer=self.reducer_counter),
                ]

    def map_counter(self, key, value):
        # Bug fix: the separator was written as "/t" (literal slash-t);
        # tab-separated input needs "\t".
        attr = value.split("\t")[1]
        # Bug fix: `type(attr) == 'str'` compared a type object to the string
        # 'str' and was always False, so nothing was ever emitted.
        if isinstance(attr, str):
            yield (attr, 1)

    def reducer_counter(self, key, values):
        yield (key, sum(values))

    def map_to_same_reducer(self, key, value):
        # Emit everything under the user key (unused by steps()).
        user = value.split("\t")[0]
        yield (user, 1)
# mrjob convention: run the job when executed directly.
if __name__ == '__main__':
    TwitterNumFollowersApp.run()
|
from django import forms
from Books.models import Book
from django.forms import ModelForm
#class BookCreateForm(ModelForm):
# book_name = forms.CharField(max_length=120)
# author = forms.CharField(max_length=120)
# price = forms.IntegerField()
# pages = forms.IntegerField()
class BookCreateForm(ModelForm):
    """Create form for Book with duplicate-name, price, and page-count checks."""

    class Meta:
        model = Book
        fields = "__all__"

    def clean(self):
        """Cross-field validation; attaches errors to the offending fields."""
        cleaned_data = super().clean()
        book_name = cleaned_data.get('book_name')
        price = cleaned_data.get('price')
        pages = cleaned_data.get('pages')
        if book_name and Book.objects.filter(book_name=book_name).exists():
            self.add_error('book_name', "Book with same name already exist")
        # Bug fix: guard against None — when field-level validation already
        # failed, cleaned_data.get() returns None and `<` would raise TypeError.
        if price is not None and price < 100:
            self.add_error('price', "Book with this price does not exist")
        if pages is not None and pages <= 50:
            # Bug fix: this error was attached to 'book_name'; it belongs on 'pages'.
            self.add_error('pages', "Page Number Should greater")
        return cleaned_data
class BookUpdate(ModelForm):
    """Update form exposing every Book field (no extra validation)."""
    class Meta:
        model = Book
        fields = "__all__"
|
'''
Created on 12-mei-2012
@author: Erik Vandeputte
'''
import pylast
from pyechonest import song
#API_KEY and API_SECRET
API_KEY = "23d4d080ab66300840b2f6cc49151fbb"
API_SECRET = "02b5d7e670df35c99b0c09f50e365239"
def get_tempo(artist, title):
    """Look up a song's tempo via the EchoNest search API; None when not found."""
    matches = song.search(artist=artist, title=title, results=1, buckets=['audio_summary'])
    if not matches:
        return None
    return matches[0].audio_summary['tempo']
def get_data():
    """Return the loved tracks of the hard-coded last.fm user."""
    network = pylast.LastFMNetwork(api_key=API_KEY, api_secret=API_SECRET)
    return network.get_user("perikvdp").get_loved_tracks()
if __name__ == '__main__':
    # Demo: look up the tempo for one hard-coded track (Python 2 print syntax).
    artist = "Lady Gaga"
    title = "Poker face"
    print artist,title, "tempo: ",get_tempo(artist,title)
    '''tracks = get_data()
    for lovedtrack in tracks:
        track = lovedtrack[0]
        artist = track.artist
        title = track.title
        print artist,title, "tempo: ",get_tempo(artist,title)'''
# Sum the integers from the starting number up to (but not including) the
# ending number, echoing the running total after each addition.
running_total = 0
start = int(input("enter the starting number "))
stop = int(input("enter the ending number "))
for value in range(start, stop):
    running_total += value
    print(running_total)
print("sum is", running_total)
"""Copyright 2014 Uli Fahrer
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from pipeline import CasUtil, Annotation
class Normalizer(object):
    """Collapses elongated word tokens: any run of more than two repeated
    letters is shortened to two."""

    def normalize_word_token(self, token):
        """Return *token* with runs of >2 repeated letters shortened to 2.

        Repeats are matched case-insensitively — this fixes the old TODO
        about 'lOoOoOol'-style elongations slipping through — but surviving
        characters keep their original case. Special tokens (mentions,
        hashtags, URLs, anything containing a non-letter) pass unchanged.
        """
        if self.is_special_token(token):
            return token
        kept = []
        run_length = 0
        previous = None
        for ch in token:
            # Bug fix for the TODO: compare case-insensitively so
            # alternating-case elongations are collapsed too.
            lowered = ch.lower()
            run_length = run_length + 1 if lowered == previous else 0
            previous = lowered
            if run_length >= 2:
                continue  # third (or later) repeat in a row: drop it
            kept.append(ch)
        return "".join(kept)

    @staticmethod
    def is_special_token(c):
        """True for tokens to leave untouched: non-alphabetic content,
        @mentions, #hashtags, and URLs."""
        return any([not c.isalpha(), c.startswith("@"), c.startswith("#"), c.startswith("http")])

    def process(self, cas):
        """Annotate every Token whose normalized form differs as an 'Error'
        annotation carrying the suggested replacement."""
        for token_annot in CasUtil.get_annotations(cas, "Token"):
            token = token_annot.get_covered_text()
            normalized = self.normalize_word_token(token)
            if normalized != token:
                norm_annot = Annotation(cas.get_view(), token_annot.begin, token_annot.end, "Error", normalized)
                cas.add_fs_annotation(norm_annot)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 2 18:25:33 2021
@author: jujharbedi
"""
# Importing flask framework to deploy project
from flask import Flask, render_template, request, redirect, url_for, session, Response
import requests
from time import sleep
from concurrent.futures import ThreadPoolExecutor
# Importing libraries for data analaysis
import random
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib.figure import Figure
from matplotlib.patches import Circle, Rectangle, Arc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config['TEMPLATES_AUTO_RELOAD'] = True
# Import data
dataset = pd.read_csv('./static/data/shot_data.csv')
# Placeholder for raw dataset
# NOTE(review): `raw = dataset` aliases the same DataFrame — the column drops
# below mutate `raw` too; use dataset.copy() if a pristine copy is intended.
raw = dataset
raw.head()
# All column headers in dataset
# NOTE(review): "action_type " carries a trailing space — presumably matching
# the CSV header verbatim; confirm against the file.
data_columns = ["action_type ", "combined_shot_type", "game_event_id", "game_id",
                "lat","loc_x", "loc_y" ,"lon", "minutes_remaining", "period",
                "playoffs", "season", "seconds_remaining", "shot_distance",
                "shot_made_flag","shot_type", "shot_zone_area", "shot_zone_basic",
                "shot_zone_range", "team_id", "team_name", "game_date",
                "matchup", "opponent", "shot_id"]
# Column headers we want to keep
keep_columns = ["action_type ", "combined_shot_type", "game_event_id", "game_id",
                "lat", "loc_x", "loc_y" ,"lon", "period", "playoffs", "season",
                "shot_distance", "shot_made_flag", "shot_zone_area", "shot_zone_basic", "shot_zone_range", "opponent",
                "shot_id"]
# Drop every column not whitelisted above.
for i in range(len(data_columns)):
    if not data_columns[i] in keep_columns:
        dataset.drop(data_columns[i], inplace = True, axis = 1)
# Creating temp variables for plotting
datapoints = dataset.values
#Removing nan values for shot_flag (data cleaning)
dataset = dataset[pd.notnull(dataset['shot_made_flag'])]
##------ Got this function for drawing a basketball court from here: http://savvastjortjoglou.com/nba-shot-sharts.html ------#
def draw_court(ax=None, color='black', lw=2, outer_lines=False):
    """Draw an NBA half court onto *ax* (or the current axes) and return it.

    Coordinates follow the shot-chart convention: 1 unit = 0.1 ft with the
    hoop at the origin. Adapted from
    http://savvastjortjoglou.com/nba-shot-sharts.html
    """
    # If an axes object isn't provided to plot onto, just get current one
    if ax is None:
        ax = plt.gca()
    # Create the various parts of an NBA basketball court
    # Create the basketball hoop
    # Diameter of a hoop is 18" so it has a radius of 9", which is a value
    # 7.5 in our coordinate system
    hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)
    # Create backboard
    backboard = Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color)
    # The paint
    # Create the outer box 0f the paint, width=16ft, height=19ft
    outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
                          fill=False)
    # Create the inner box of the paint, widt=12ft, height=19ft
    inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
                          fill=False)
    # Create free throw top arc
    top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
                         linewidth=lw, color=color, fill=False)
    # Create free throw bottom arc
    bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
                            linewidth=lw, color=color, linestyle='dashed')
    # Restricted Zone, it is an arc with 4ft radius from center of the hoop
    restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
                     color=color)
    # Three point line
    # Create the side 3pt lines, they are 14ft long before they begin to arc
    corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw,
                               color=color)
    corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color)
    # 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop
    # I just played around with the theta values until they lined up with the
    # threes
    three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
                    color=color)
    # Center Court
    center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
                           linewidth=lw, color=color)
    center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
                           linewidth=lw, color=color)
    # List of the court elements to be plotted onto the axes
    court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
                      bottom_free_throw, restricted, corner_three_a,
                      corner_three_b, three_arc, center_outer_arc,
                      center_inner_arc]
    if outer_lines:
        # Draw the half court line, baseline and side out bound lines
        outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw,
                                color=color, fill=False)
        court_elements.append(outer_lines)
    # Add the court elements onto the axes
    for element in court_elements:
        ax.add_patch(element)
    return ax
# Creating a function to plot x and y coordinates based on shot zones
def scatter_plot_by_category(plot, feat):
    """Scatter the shot (loc_x, loc_y) points on *plot*, one viridis color
    per category of the *feat* column, with a legend."""
    alpha = 0.1
    grouped = dataset.groupby(feat)
    # One evenly spaced viridis color per category.
    palette = cm.get_cmap('viridis')(np.linspace(0, 1, len(grouped)))
    for (category, frame), rgba in zip(grouped, palette):
        plot.scatter(frame.loc_x, frame.loc_y, color=rgba, alpha=alpha, label=category)
    plot.legend(markerscale=2)
# Creating figure for Shots by Shot Zone
# Figure 1: three side-by-side scatter charts, one per shot-zone column.
figure1 = plt.figure(1, figsize=(20,10))
# Subplot for shots by shot_zone_area
plt1 = plt.subplot(131)
scatter_plot_by_category(plt1, 'shot_zone_area')
plt.title('Shots by shot_zone_area', fontsize=20)
draw_court(outer_lines=True)
# Subplot for shots by shot_zone_basic
plt2 = plt.subplot(132)
scatter_plot_by_category(plt2, 'shot_zone_basic')
plt.title('Shots by shot_zone_basic', fontsize=20)
draw_court(outer_lines=True)
# Subplot for shots by shot_zone_range
plt3 = plt.subplot(133)
scatter_plot_by_category(plt3, 'shot_zone_range')
plt.title('Shots by shot_zone_range', fontsize=20)
draw_court(outer_lines=True)
# Saving figure as a image file
figure1.savefig('./static/img/shotZonePlot.png')
# Plotting Made vs Missed Shots by Year
data_made = dataset.shot_made_flag == 1
data_missed = dataset.shot_made_flag == 0
# Per-season counts of missed and made shots, combined side by side.
shot_missed = dataset[data_missed].season.value_counts()
shot_success = dataset[data_made].season.value_counts()
shots = pd.concat([shot_success,shot_missed],axis=1)
shots.columns=['Success','Missed']
figure2 = plt.figure(2, figsize=(22,9))
shots.plot(ax=figure2.add_subplot(111), kind='bar',stacked=False,rot=1,color=['#008000','#FF0000'])
plt.xlabel('Season')
plt.ylabel('Number of shots')
plt.legend(fontsize=15)
plt.title("Made vs Missed Shots by Year", fontsize=20)
# Saving figure as a image file
figure2.savefig('./static/img/madeVsMissedShots.png')
def getPieChart(i, zone):
    """Build and save a pie chart of shot counts for one shot-zone column.

    Args:
        i: matplotlib figure number (must be unique per chart).
        zone: shot-zone column name in `dataset` ('shot_zone_range',
              'shot_zone_area' or 'shot_zone_basic').

    Raises:
        ValueError: if `zone` is not one of the known shot-zone columns.
    """
    # Shot counts per zone value
    pieData = dataset[zone].value_counts()
    # Each column labels back-court (beyond half court) shots differently; drop
    # them and pick the title/output path per column. The elif/else chain
    # guarantees title/filePath are always bound — the original used three
    # independent ifs and raised NameError for an unknown zone.
    if "range" in zone:
        pieData = pieData.drop("Back Court Shot")
        title = "Shots Made by Distance"
        filePath = "./static/img/pieChartDistance.png"
    elif "area" in zone:
        pieData = pieData.drop("Back Court(BC)")
        title = "Shots Made by Location"
        filePath = "./static/img/pieChartLocation.png"
    elif "basic" in zone:
        pieData = pieData.drop("Backcourt")
        title = "Shots Made by Type"
        filePath = "./static/img/pieChartBasic.png"
    else:
        raise ValueError("Unknown shot zone column: %r" % zone)
    figure3 = plt.figure(i, figsize=[11,8])
    # Getting labels for pie chart
    labels = pieData.keys()
    numLabels = len(labels)
    plt.pie(x=pieData, autopct="%.1f%%", explode=[0.05]*numLabels, labels=labels, pctdistance=.5)
    plt.title(title, fontsize=20)
    # Saving figure as an image file
    figure3.savefig(filePath)
# Draw one pie chart per shot-zone categorisation. Figure numbers start at 6
# so they never collide with the figures created above.
shotZones = ["shot_zone_range", "shot_zone_area", "shot_zone_basic"]
for i, zone in enumerate(shotZones, start=6):
    getPieChart(i, zone)
# Plotting shots made vs missed per shot-zone value
def plotAccuracyByZone(zone):
    """Save a grouped made/missed bar chart for one shot-zone column.

    Args:
        zone: shot-zone column name ('shot_zone_range', 'shot_zone_area' or
              'shot_zone_basic').

    Returns:
        Path of the saved image file.

    Raises:
        ValueError: if `zone` is not a known shot-zone column. This matters
        because `home()` passes a user-supplied query parameter straight in.
    """
    # Count shots per (zone value, made flag) pair
    data = dataset[[zone, "shot_made_flag"]]
    data = data.groupby(by=[zone, "shot_made_flag"]).size()
    # Placeholders for bar-chart columns
    zones = []
    made_shots = []
    missed_shots = []
    for index, count in enumerate(data):
        # Skip back-court (beyond half court) shots; label differs per column
        if "Backcourt" in data.keys()[index][0] or "Back Court" in data.keys()[index][0]:
            continue
        # Each zone value contributes two consecutive rows (missed=0.0 then
        # made=1.0); record its label once per pair.
        # NOTE(review): this pairing assumes every zone has both made and
        # missed shots present — confirm for sparse zones.
        if index % 2 == 0:
            zones.append(data.keys()[index][0])
        if data.keys()[index][1] == 1.0:
            made_shots.append(count)
        else:
            missed_shots.append(count)
    # elif/else chain guarantees title/filePath are always bound — the
    # original's three independent ifs raised NameError for an unknown zone.
    if "range" in zone:
        title = "Shots Made by Distance"
        filePath = "./static/img/distance.png"
    elif "area" in zone:
        title = "Shots Made by Location"
        filePath = "./static/img/location.png"
    elif "basic" in zone:
        title = "Shots Made by Type"
        filePath = "./static/img/basic.png"
    else:
        raise ValueError("Unknown shot zone column: %r" % zone)
    figure = plt.figure(figsize=[11,8])
    # Width of the bars
    width = 0.35
    plt.bar(zones, made_shots, width, label='Shots Made', color="green")
    plt.bar(zones, missed_shots, width, bottom=made_shots, label='Shots Missed', color="red")
    plt.ylabel("Shot Attempts")
    plt.title(title, fontsize=20)
    plt.legend(loc='upper center')
    # Make space for and rotate the x-axis tick labels
    figure.autofmt_xdate()
    # Saving figure as an image file
    figure.savefig(filePath)
    # Clear the current figure so repeated calls don't accumulate artists
    plt.clf()
    return filePath
# DOCS https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
# Single worker thread: at most one background import of prediction.py at a time.
executor = ThreadPoolExecutor(1)
@app.route("/")
def home():
    """Landing page: sample-data table plus an accuracy-by-zone chart.

    When called via AJAX (a `jsdata` query parameter is present) it only
    returns the path of the freshly rendered chart image.
    """
    # Warm up the expensive prediction module on the background worker
    executor.submit(loadFile)
    # Zone selected in the stacked-bar-chart dropdown, if any
    zone = request.args.get('jsdata')
    if zone:
        # AJAX update: re-plot for the requested zone and answer with the path
        return plotAccuracyByZone(zone)
    # Full page load: default chart plus a 30-row sample of the raw data
    path = plotAccuracyByZone("shot_zone_range")
    table = raw.head(30)
    return render_template("index.html",
                           tables=[table.to_html(index = False, classes='table table-bordered table-striped table-hover', header="true")],
                           titles=table.columns.values,
                           chart = path)
@app.route("/prediction")
def prediction():
    """Render the prediction page: encoded data, feature importances and predictions."""
    if request.method == "GET":
        # prediction.py is expensive, so it is imported lazily on first use
        from prediction import dataset, featureImportance, newFeatureImportance, rawPredData, predData, accuracy
        css = 'table table-bordered table-striped table-hover'
        return render_template(
            "prediction.html",
            encodedData=[dataset.head(30).to_html(index=False, classes=css)],
            featureImp=[featureImportance.to_html(index=False, classes=css)],
            newFeatureImp=[newFeatureImportance.to_html(index=False, classes=css)],
            pred=[rawPredData.head(50).to_html(index=False, classes=css)],
            roundedPred=[predData.head(50).to_html(index=False, classes=css)],
            result=accuracy)
def loadFile():
    """Warm-up helper run on the background executor.

    Importing `prediction` triggers its (slow) model-building code at import
    time; `accuracy` itself is unused here.
    """
    from prediction import accuracy
    # NOTE(review): purpose of the pause is unclear from this file — presumably
    # it keeps the single worker busy so repeated submits don't pile up; confirm.
    sleep(10)
# Generated by Django 2.2.5 on 2019-11-24 05:03
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds several columns to the `operations` model.
    # NOTE(review): `default=django.utils.timezone.now` on the Text/Char fields
    # is the throwaway one-off default Django records when a non-nullable field
    # is added interactively (hence preserve_default=False) — it is not kept on
    # the model after this migration runs.

    dependencies = [
        ('RSSG', '0010_auto_20191123_1132'),
    ]

    operations = [
        migrations.AddField(
            model_name='operations',
            name='explanation',
            field=models.TextField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='operations',
            name='image',
            field=models.ImageField(default='default.jpg', upload_to='ops'),
        ),
        migrations.AddField(
            model_name='operations',
            name='implementation_year',
            field=models.CharField(default=django.utils.timezone.now, max_length=5),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='operations',
            name='name',
            field=models.TextField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='operations',
            name='purpose_of_this_op',
            field=models.TextField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
# Nesting examples: dictionaries can live in a list, lists in a dictionary,
# and dictionaries inside other dictionaries.
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
    print(alien)
# Instead of writing each alien by hand, generate a whole fleet automatically.
aliens = [{'color': 'green', 'points': 5, 'speed': 'slow'} for _ in range(30)]
# Promote the first three aliens one tier (green -> yellow, yellow -> red).
for alien in aliens[0:3]:
    if alien['color'] == 'green':
        alien.update(color='yellow', speed='medium', points=10)
    elif alien['color'] == 'yellow':
        alien.update(color='red', speed='fast', points=15)
# Show the first five aliens, then the fleet size.
for alien in aliens[:5]:
    print(alien)
print("...")
print("Total number of aliens: " + str(len(aliens)))
# Copyright 2017 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base application-platform build configuration.
If applications wish to customize or extend build configuration information by
platform, they should add an <application-name>/configuration.py file to the
port directory of the platform they wish to customize
(e.g. linux/x64x11/cobalt/configuration.py for the `cobalt` application and the
`linux-x64x11` platform). This module should contain a class that extends the
class defined here.
"""
class ApplicationConfiguration(object):
  """Base build configuration for a Starboard application.

  Application-specific configurations derive from this class and override the
  hooks below; the defaults describe an application with no extra environment
  variables, filters, or targets.
  """

  def __init__(self, platform_configuration, application_name,
               application_directory):
    """Initialize ApplicationConfiguration.

    Args:
      platform_configuration: An instance of StarboardBuildConfiguration for
        the platform being built.
      application_name: The name of the application that is loading this
        configuration.
      application_directory: The absolute path of the directory containing the
        application configuration being loaded.
    """
    self._application_name = application_name
    self._application_directory = application_directory
    self._platform_configuration = platform_configuration

  def GetName(self):
    """Returns the application name."""
    return self._application_name

  def GetDirectory(self):
    """Returns the directory of the application configuration."""
    return self._application_directory

  def GetTestEnvVariables(self):
    """Returns a dict of environment variables needed by unit test binaries."""
    return {}

  def GetTestFilters(self):
    """Returns the tests to exclude from a unit test run.

    Returns:
      A list of initialized starboard.tools.testing.TestFilter objects.
    """
    return []

  def GetTestTargets(self):
    """Returns the tests to run in a unit test run.

    Returns:
      A list of strings of test target names.
    """
    return []

  def GetTestBlackBoxTargets(self):
    """Returns the tests to run in a black box test run.

    Returns:
      A list of strings of black box test target names.
    """
    return []
|
from collections import Counter
import math
from sympy import Symbol, factor, expand, Rational
import numpy as np
# Kelleher's accelAsc algorithm, from "Generating All Partitions: A Comparison
# Of Two Encodings", https://arxiv.org/abs/0909.2331
def accel_asc(n):
    """Yield every partition of n as an ascending list of parts."""
    parts = [0] * (n + 1)
    k = 1
    remainder = n - 1
    while k != 0:
        smallest = parts[k - 1] + 1
        k -= 1
        # Absorb pairs of equal small parts while two copies still fit
        while 2 * smallest <= remainder:
            parts[k] = smallest
            remainder -= smallest
            k += 1
        last = k + 1
        # Emit partitions ending in exactly two parts
        while smallest <= remainder:
            parts[k] = smallest
            parts[last] = remainder
            yield parts[:k + 2]
            smallest += 1
            remainder -= 1
        # Collapse the final two slots into one part and emit
        parts[k] = smallest + remainder
        remainder = smallest + remainder - 1
        yield parts[:k + 1]
# Quadratic Casimir value on a Young diagram; see Hugh Osborn, "Symmetries and
# Groups" Michaelmas 2008 notes, http://www.damtp.cam.ac.uk/user/ho/GNotes.pdf
def casimir(a):
    """Return sum over boxes (row i, column j) of 2*(j - i) for diagram `a`."""
    return sum(2 * (col - row) for row in range(len(a)) for col in range(a[row]))
# Print the Casimir value on every conjugacy class of the symmetric group S_n
def printAllCasimirs(n):
    """Print 'CasimirValue( partition ) = value' for each partition of n."""
    for partition in accel_asc(n):
        print('CasimirValue(', partition, ') = ', casimir(partition))
# This function returns the total number of boxes in a Young diagram
def NormOfYoungDiagram(a):
    """Return the total number of boxes in Young diagram `a` (sum of row lengths)."""
    # Built-in sum() replaces the manual index loop; the empty diagram gives 0.
    return sum(a)
def printAllNormOfYoungDiagram(n):
    """Print 'Norm( partition ) = boxes' for every partition of n."""
    for partition in accel_asc(n):
        print('Norm(', partition, ') = ', NormOfYoungDiagram(partition))
# Leading term of the colored HOMFLY polynomial; see "On the Hecke algebras and
# the colored HOMFLY polynomial", https://arxiv.org/pdf/math/0601267.pdf
def firstTermOfHOMPLY(k, r, setOfColours):
    """Print the leading t / nu term for colours `setOfColours` (Young diagrams)."""
    nu_power = k * (r - 1)/2
    # Sum of Casimir values over all colours, scaled by k*r/2
    t_power = sum(casimir(colour) for colour in setOfColours) * k * r/2
    print('t^{',t_power, '} *', '{/nu}^{',nu_power,'}')
def denominatorSizeOfConjClass(youngDiag):
    """Return z_lambda = prod_j j^(m_j) * m_j!, where m_j counts parts equal to j.

    `youngDiag` is a cycle type / partition; n! / z_lambda is the class size.
    """
    multiplicity = Counter(youngDiag)
    # sum(youngDiag) inlines NormOfYoungDiagram; j = 0 contributes 0**0 * 0! = 1
    product = 1
    for part in range(sum(youngDiag) + 1):
        product *= (part ** multiplicity[part]) * math.factorial(multiplicity[part])
    return product
def sizeOfConjClass(youngDiag):
    """Return the size of the S_n conjugacy class with cycle type `youngDiag` (n!/z)."""
    return math.factorial(NormOfYoungDiagram(youngDiag)) // denominatorSizeOfConjClass(youngDiag)
def printSizesOfConjClasses(number):
    """Print every conjugacy-class size of S_number and compare the total with number!."""
    total = 0
    for partition in accel_asc(number):
        size = sizeOfConjClass(partition)
        total += size
        print("ConjClassSize(", partition, ") =", size)
    # Sanity check: class sizes must add up to the group order
    print(total, " -vs- ", math.factorial(number))
def productTermInSchurLatex(youngDiag):
    """Return prod_i (q^{a_i/2}-q^{-a_i/2})/(t^{a_i/2}-t^{-a_i/2}) as a LaTeX string.

    Returns the sentinel 'Z' for an empty diagram (original behaviour kept).
    """
    if not youngDiag:
        return 'Z'

    def term(part):
        p = str(part)
        return '\\frac{q^{' + p + '/2}-q^{-' + p + '/2} }{t^{' + p + '/2}-t^{-' + p + '/2} }'

    return '\\cdot'.join(term(part) for part in youngDiag)
def getNumberBeforeSymbol(starting_index_in_file, symbol, charater_data):
    """Parse a decimal integer from `charater_data` starting at `starting_index_in_file`.

    Scans forward until `symbol` is found. Returns [parsed int, index of the
    terminating symbol]. Raises IndexError if `symbol` never occurs.
    """
    end = starting_index_in_file
    while charater_data[end] != symbol:
        end += 1
    return [int(charater_data[starting_index_in_file:end]), end]
def loadOfCharacterTableToMemory(number):
    """Load the character table of S_number from CharacterTable/characterTable<number>.txt.

    The parsing below implies the file holds brace-wrapped records of the form
    "i,j:value," with 1-based indices — TODO confirm against whatever generates
    these files. Returns a len(p) x len(p) int64 numpy array, where p is the
    list of partitions of `number`.
    """
    qqq = list(accel_asc(number))
    # NOTE(review): opened 'r+' although the file is only read — 'r' would do.
    f = open('CharacterTable/characterTable'+str(number)+'.txt', 'r+')
    charater_data = f.read()
    f.close()
    #a = [[0 for unused_variable in range(0, len(qqq))] for unused_variable in range(0, len(qqq))]
    a = np.zeros(shape=(len(qqq), len(qqq)), dtype=np.int64)
    # Skip the opening brace, then walk "i,j:value," records until the closing brace
    index_in_file = 1
    while charater_data[index_in_file] != '}':
        i_index_info = getNumberBeforeSymbol(index_in_file,',', charater_data)
        i_index = i_index_info[0]
        index_in_file = i_index_info[1] + 1
        j_index_info = getNumberBeforeSymbol(index_in_file,':', charater_data)
        j_index = j_index_info[0]
        index_in_file = j_index_info[1] + 1
        a_ij_info = getNumberBeforeSymbol(index_in_file, ',', charater_data)
        # Convert the file's 1-based indices to 0-based array indices
        i_index = i_index - 1
        j_index = j_index - 1
        a[i_index][j_index] = a_ij_info[0]
        index_in_file = a_ij_info[1]
        index_in_file += 1
    print('finally')
    return a
def productTermInSchur(youngDiag):
    """Return the factored symbolic product prod_i (q^{a_i/2}-q^{-a_i/2})/(t^{a_i/2}-t^{-a_i/2}).

    NOTE(review): the numerator symbol is created as Symbol('n') although the
    local variable is named q and the LaTeX twin of this function prints q —
    confirm whether Symbol('q') was intended. Behaviour kept as-is here.
    """
    t = Symbol('t')
    q = Symbol('n')
    product = 1
    for part in youngDiag:
        half = Rational(part, 2)
        product = product * (q**half - q**(-half)) / (t**half - t**(-half))
    return factor(product)
def quasiHadamarProduct(table, r):
    """Return a new list with every entry of `table` multiplied by `r`.

    The input list is left unmodified.
    """
    # List comprehension replaces the preallocate-then-overwrite index loop.
    return [entry * r for entry in table]
def specialSchurPolynomial(number, index):
    """Build a symbolic Schur-polynomial-style sum for S_number at row `index`.

    Frobenius-type expansion: sum over conjugacy classes of
    chi / z_lambda * product term, using the character table loaded from disk.
    NOTE(review): rows and columns of the character table are addressed in
    reverse order (len(qqq)-...-1) — confirm this matches the table file's
    ordering convention.
    """
    count = 0
    qqq = list(accel_asc(number))
    charaters = loadOfCharacterTableToMemory(number)
    for i in range(0,len(qqq)):
        count = count + Rational(charaters[len(qqq)-index-1][len(qqq)-i-1], denominatorSizeOfConjClass(qqq[i]))*productTermInSchur(qqq[i])
    return count
|
from rebase.common.database import DB, PermissionMixin
class TalentPool(DB.Model, PermissionMixin):
    """Talent pool model with completely open permissions.

    Every permission hook delegates to allowed_to_be_created_by, which grants
    access unconditionally, so any user may create, view, modify or delete any
    pool. Subclasses can restrict all four checks by overriding only
    allowed_to_be_created_by.
    """
    __pluralname__ = 'talent_pools'
    # Surrogate primary key
    id = DB.Column(DB.Integer, primary_key=True)
    @classmethod
    def query_by_user(cls, user):
        # No per-user filtering: every user sees every pool.
        return cls.query
    def allowed_to_be_created_by(self, user):
        # Single source of truth for the three delegating checks below.
        return True
    def allowed_to_be_modified_by(self, user):
        return self.allowed_to_be_created_by(user)
    def allowed_to_be_deleted_by(self, user):
        return self.allowed_to_be_created_by(user)
    def allowed_to_be_viewed_by(self, user):
        return self.allowed_to_be_created_by(user)
    def __repr__(self):
        return '<TalentPool[id:{}]>'.format(self.id)
|
import os
import unittest
import numpy as np
# by default, we don't run any actual s3 tests,
# because this will not work in CI due to missing credentials.
# set the environment variable "Z5PY_TEST_S3" to 1 in order to run tests
# NOTE(review): the code below actually reads "Z5PY_S3_RUN_TEST", not
# "Z5PY_TEST_S3" as the comment above says; also bool() of ANY non-empty
# string is True, so even "0" enables the tests.
TEST_S3 = bool(os.environ.get("Z5PY_S3_RUN_TEST", False))
# Bucket holding the pre-made zarr test data (see TestS3.make_test_data)
BUCKET_NAME = os.environ.get("Z5PY_S3_BUCKET_NAME", 'z5-test-data')
def get_test_data():
    """Return scikit-image's 'astronaut' RGB sample image as the test payload."""
    import skimage.data
    return skimage.data.astronaut()
@unittest.skipUnless(TEST_S3, "Disabled by default")
class TestS3(unittest.TestCase):
    """Smoke tests for reading zarr data from an s3 bucket through z5py.

    Skipped unless TEST_S3 is truthy, because CI has no s3 credentials.
    """
    bucket_name = BUCKET_NAME

    def setUp(self):
        # Reference image the datasets in the bucket were written from
        self.data = get_test_data()

    @staticmethod
    def make_test_data(bucket_name=None):
        """ Make zarr test data in an s3 bucket.
        The bucket `bucket_name` must already exist and
        access credentials must be stored in a way that can be accessed by
        s3fs.
        """
        import s3fs
        import zarr
        import numcodecs
        if bucket_name is None:
            bucket_name = TestS3.bucket_name
        # access the s3 filesystem
        fs = s3fs.S3FileSystem(anon=False)
        store = s3fs.S3Map(root=bucket_name, s3=fs)
        # test data image. Fetched directly here: the previous
        # `data = TestS3.data` raised AttributeError, because setUp only ever
        # sets `data` on instances, never on the class.
        data = get_test_data()
        # write remote zarr data
        f = zarr.group(store)
        f.attrs['Q'] = 42
        # create dataset
        ds = f.create_dataset('data', shape=data.shape, chunks=(256, 256, 3), dtype=data.dtype,
                              compressor=numcodecs.Zlib())
        ds[:] = data
        ds.attrs['x'] = 'y'
        # create a group with a nested dataset
        g = f.create_group('group')
        ds = g.create_dataset('data', shape=data.shape, chunks=(256, 256, 3), dtype=data.dtype,
                              compressor=numcodecs.Zlib())
        ds[:] = data
        ds.attrs['x'] = 'y'

    # this is just a dummy test that checks the
    # handle imports and constructors
    def test_dummy(self):
        from z5py import _z5py
        m = _z5py.FileMode(_z5py.FileMode.a)
        fhandle = _z5py.S3File(self.bucket_name, m)
        ghandle = _z5py.S3Group(fhandle, "test")
        _z5py.S3DatasetHandle(ghandle, "test")

    def test_s3_file(self):
        from z5py import S3File
        f = S3File(self.bucket_name)
        self.assertTrue('data' in f)
        self.assertTrue('group' in f)
        self.assertTrue('group/data' in f)
        keys = f.keys()
        # Root must contain exactly the top-level dataset and the group
        # (a dead duplicate `expected = {'data'}` assignment was removed here).
        expected = {'data', 'group'}
        self.assertEqual(set(keys), expected)
        attrs = f.attrs
        self.assertEqual(attrs['Q'], 42)

    def test_s3_group(self):
        from z5py import S3File
        f = S3File(self.bucket_name)
        g = f['group']
        self.assertTrue('data' in g)
        keys = g.keys()
        expected = {'data'}
        self.assertEqual(set(keys), expected)

    # currently fails with:
    # RuntimeError: Exception during zlib decompression: (-3)
    def test_s3_dataset(self):
        from z5py import S3File

        def check_ds(ds):
            # check the data
            data = ds[:]
            self.assertEqual(data.shape, self.data.shape)
            self.assertTrue(np.allclose(data, self.data))
            # check the attributes
            attrs = ds.attrs
            self.assertEqual(attrs['x'], 'y')

        f = S3File(self.bucket_name)
        ds = f['data']
        check_ds(ds)
        # g = f['group']
        # ds = g['data']
        # check_ds(ds)
if __name__ == '__main__':
    # Run TestS3.make_test_data() once, manually, to populate the bucket
    # before enabling the tests via the environment variable.
    # TestS3.make_test_data()
    unittest.main()
|
import sys
import requests
import PyPDF2
from glob import glob
import os
import shutil
# Crossref REST API root, used to resolve a DOI into title and year
crossref = 'http://api.crossref.org/'
if __name__ == '__main__':
    dr = os.getcwd()  ## Directory whose PDFs will be renamed
    nRenamedArticles = 0
    files = glob(os.path.join(dr, "*.pdf"))
    nFiles = len(files)  ## Total number of .pdf files found
    for pdf in files:
        pdfReader = PyPDF2.PdfFileReader(pdf, strict=False)
        # Encrypted files cannot be read; skip them
        if pdfReader.isEncrypted:
            # print (pdf)
            continue
        title = None
        year = None
        doi = None
        # Scan the PDF metadata fields for a DOI (or, failing that, a title)
        for k, v in pdfReader.documentInfo.items():
            if 'doi' in k.lower():  ## DOI stored in a properly named metadata field
                doi = v
            if not doi and 'doi:' in v.lower():  ## DOI crammed into another field as "doi:..."
                doi = v.replace('doi:','')
            if not doi and 'doi ' in v.lower():  ## DOI crammed into another field as "doi ..."
                doi = v.replace('doi ','')
            if not doi:  ## Last resort: search the text of the PDF itself
                ## Read the number of pages
                numberOfPages = pdfReader.getNumPages()
                ## Collect text from page 1
                text_in_file = pdfReader.getPage(0).extractText().lower()
                if numberOfPages > 1:  ## Add text from page 2
                    text_in_file = text_in_file + ' ' + pdfReader.getPage(1).extractText().lower()
                if numberOfPages > 5 and 'doi' not in text_in_file:  ## Add pages 3-6, aiming to catch the DOI of books
                    text_in_file = text_in_file + ' ' + pdfReader.getPage(2).extractText().lower() + ' ' + pdfReader.getPage(3).extractText().lower() + ' ' + pdfReader.getPage(4).extractText().lower()+ ' ' + pdfReader.getPage(5).extractText().lower()
                ## Clean the collected text so the DOI is delimited by spaces
                text_in_file = text_in_file.replace('\n', ' ').replace('correspondingauthor', ' ').replace('contentslistsavailable', ' ').replace(']', ' ').replace('[', ' ').replace('©', ' ')
                ## Extract the actual DOI from the cleaned text
                ## (local name "DIO" is a typo for DOI, kept as-is)
                if 'doi:' in text_in_file:
                    doi_index_start = text_in_file.find('doi:')
                    doi_index_end = text_in_file.find(' ', doi_index_start)
                    DIO = text_in_file[slice(doi_index_start+4,doi_index_end)]
                    ## Real DOIs always start with the "10." registrant prefix
                    if DIO.startswith('10'):
                        # print('DIOOO = ' + DIO)
                        doi = DIO
                if not doi and 'doi.org/' in text_in_file:
                    doi_index_start = text_in_file.find('doi.org/')
                    doi_index_end = text_in_file.find(' ', doi_index_start)
                    DIO = text_in_file[slice(doi_index_start+8,doi_index_end)]
                    if DIO.startswith('10'):
                        # print('DIOOO = ' + DIO)
                        doi = DIO
                if not doi and 'doi' in text_in_file:
                    doi_index_start = text_in_file.find('doi')
                    doi_index_end = text_in_file.find(' ', doi_index_start)
                    DIO = text_in_file[slice(doi_index_start+3,doi_index_end)]
                    if DIO.startswith('10'):
                        # print('DIOOO = ' + DIO)
                        doi = DIO
            if not doi and 'title' in k.lower():  ## Fall back on a properly filled title metadata field
                title = v
        if doi:  ## Resolve the DOI into title/year via the Crossref API
            try:
                url = '{}works/{}'.format(crossref, doi)
                r = requests.get(url)
                item = r.json()
                year = item['message']['created']['date-parts'][0][0]
                title = item['message']['title'][0]
            except ValueError: # (╯°□°)╯︵ ┻━┻ Give up on this file if the DOI is invalid or the API returns no useful data
                continue
        ## Name format: title, year.pdf
        name = None
        if title and not title.isspace() and len(title.split(' '))>=2:
            name = title.replace(':', " -").replace('/','-').replace('?','').replace('&','and')  ## Strip characters invalid in file names
            remaining_lenght = 240 - len(dr) - len('.pdf')  ## Crop the name to fit the path-length limit
            if len(name) > remaining_lenght:
                name = name[0:remaining_lenght]
            if year:
                name = name+', '+str(year)
        if name:  ## Rename file
            filename = os.path.join(dr, str(name)) + '.pdf'
            if os.path.exists(filename):
                filename = filename.replace('.pdf', '-RPT.pdf')  ## If the target already exists, add -RPT to the name
            shutil.move(pdf, filename)
            nRenamedArticles = nRenamedArticles + 1  ## Counter for the number of renamed files
    print('DONE!')
    print(f'{nRenamedArticles} out of {nFiles} .pdf files were renamed')
## by Adriano.s.p.p and Denise.a.g - 09/07/2020; inspired by NECESSITY and "Pu Du" from https://www.researchgate.net/post/What_is_the_best_way_to_rename_a_collection_of_academic_papers
|
import bs4
from requests import get
from urllib.request import urlopen as ureq
from urllib.request import urlretrieve as uret
from bs4 import BeautifulSoup as soup
from openpyxl import *
from tkinter import *
import time
import re
from selenium import webdriver
import os
import datetime
# Tkinter GUI scaffolding: a root window plus a frame for the URL entry.
# NOTE(review): the title says "EPL Player Stat Importer", but the scraping
# functions below parse shooting-sport event pages — confirm which is current.
root = Tk()
root.title('EPL Player Stat Importer')
# Frame with no explicit parent attaches to the default root window
urlframe = Frame()
urlframe.pack(side = TOP)
def shooting():
    """Scrape one shooting-event results page and append rows to shootingtest.xlsx.

    Reads the event URL from the GUI's `urlentry` widget (defined elsewhere in
    this module). Qualification pages are parsed rank-by-rank; Final pages are
    only printed for now.
    """
    myurl = urlentry.get()
    uclient = ureq(myurl)
    pagehtml = uclient.read()
    # parsing as html
    pagesoup = soup(pagehtml, "html.parser")
    workbook = 'shootingtest.xlsx'
    playerdb = load_workbook(workbook)
    sheet = playerdb.get_sheet_by_name('Data')
    # Event header block: name (row3), stage (row4) and date/time (row5)
    eventdetails = pagesoup.find("div", {"class", "whitebg"})
    event = eventdetails.find("p", {"class": "row3"}).text
    stage = eventdetails.find("p", {"class": "row4"}).text
    # NOTE: this local name shadows the imported `datetime` module in here
    datetime = eventdetails.find("p", {"class": "row5"}).text
    datetime = re.split(',', datetime)
    date = str(datetime[0])
    timeplayed = datetime[1]
    print(date)
    timeplayed = re.findall(r'[0-9]{2}:[0-9]{2}', timeplayed)
    print(timeplayed)
    print('Event: ' + event + ' ' + stage)
    if stage == 'Qualification':
        print('Qualification Stages')
        # One <tbody rank=...> element per shooter, ranks 1..37
        for x in range(1, 38):
            global maxrow
            maxrow = sheet.max_row
            print('Writing to row ' + str(maxrow) + '...')
            test = pagesoup.find("tbody", {"rank": x})
            name = test.find("td", {"colspan": "4"}).text
            base = pagesoup.find("tbody", {"rank": x}).text
            base = base.split()
            print(base)
            # Country code (3 capitals) and series scores come from the last
            # token; fall back to the second-to-last token when absent
            country = re.findall(r'[A-Z]{3}', base[-1])
            series = re.sub(r'[A-Za-z]', "", base[-1])
            series = re.findall(r'[0-9]+.[0-9]{1}', series)
            if not country:
                print('no country found')
                country = re.findall(r'[A-Z]{3}', base[-2])
                series = re.sub(r'[A-Za-z]', "", base[-2])
                series = re.findall(r'[0-9]+.[0-9]{1}', series)
                print(country)
                print(series)
            country = country[0]
            print(country)
            score = test.find("td", {"class": "totalcontent"}).text
            eventdetails = pagesoup.find("div", {"class", "whitebg"})
            event = eventdetails.find("p", {"class": "row3"}).text
            stage = eventdetails.find("p", {"class": "row4"}).text
            # test2 = test2.split()
            print(name, score, country, event, stage)
            sheet.cell(row=maxrow + 1, column=1).value = name
            sheet.cell(row=maxrow + 1, column=2).value = score
            sheet.cell(row=maxrow + 1, column=3).value = country
            # Spreadsheet rows are 1-based, hence rank x maps to row x + 1
            y = x + 1
            y = str(y)
            sheet.cell(row=maxrow + 1, column=4).value = '=VLOOKUP(C' + y + ',Sheet3!$B$2:$C$247,2,TRUE)'
            # Series scores fill columns 5 onwards
            for series, count in zip(series, range(1, len(series) + 1)):
                sheet.cell(row=maxrow + 1, column=count + 4).value = series
            time.sleep(2.5)
            playerdb.save(workbook)
            print('/////////////////')
    if stage == 'Final':
        print('Final Stages')
        # this gives us rank id and total score
        test = pagesoup.find_all("td", {"align": "right"})
        print(test)
    # for x in range(1, 38):
    #     maxrow = sheet.max_row
    #     print('Writing to row ' + str(maxrow) + '...')
    # myurl = urlentry.get()
    # uclient = ureq(myurl)
    # pagehtml = uclient.read()
    # # opens browser and scrapes
    # # browser = webdriver.Chrome("/Users/Qixiang/Dropbox/ICS/venv/chromedriver")
    # # browser.get(myurl)
    # # time.sleep(3)
    # # pagehtml = browser.page_source
    #
    # # parsing as html
    # pagesoup = soup(pagehtml, "html.parser")
    #
    # # uclient.close()
    # # player details
    # details = pagesoup.find("div", {"class": "personalLists"}).text
    # details = details.split()
    #
    # nationality = details[1]
    # age = details[3]
    # DOB = details[7]
    # height = details[9]
    # weight = details[11]
    # print('Nationality: ' + nationality + '\n' +
    #       'Age: ' + age + '\n' +
    #       'DOB: ' + DOB + '\n' +
    #       'Height: ' + height + '\n' +
    #       'Weight: ' + weight)
    #
def appearance():
    """Scrape England match pages listed in linklist.txt and record minutes played.

    For each match link: determine whether England were home or away, collect
    the starting XI and the substitutions, compute minutes per player (90, or
    120 with extra time) and append name/minutes/competition/date/link rows to
    testbook1.xlsx.
    """
    workbook = 'testbook1.xlsx'
    playerdb = load_workbook(workbook)
    sheet = playerdb.get_sheet_by_name('Sheet1')
    textfile = open("linklist.txt")
    print('File Opened')
    lines = textfile.read().split("\n")
    linkslist = []
    # Keep only http(s) links (note: `for lines in lines` rebinds the name)
    for lines in lines:
        if lines[:3] != 'htt':
            continue
        linkslist.append(lines)
    print(linkslist)
    #loop through all link
    for links in linkslist:
        print(str(linkslist.index(links)) + ' out of ' + str(len(linkslist)))
        myurl = links
        uclient = ureq(myurl)
        time.sleep(1.5)
        pagehtml = uclient.read()
        # parsing as html
        pagesoup = soup(pagehtml, "html.parser")
        # get match details: competition and date follow their header labels
        matchtype = pagesoup.find("div", {"class", "details clearfix"}).text
        matchtype = matchtype.split('\n')
        x = matchtype.index('Competition')
        y = matchtype.index('Date')
        competition = matchtype[x + 1]
        date = matchtype[y + 1]
        print(competition)
        print(date)
        # DETERMINE TEAMS (teamlist[1] is the score heading between the names)
        teams = pagesoup.find_all("h3", {"class": "thick"})
        teamlist = []
        for div in teams:
            x = div.text
            x = ''.join(x.split())
            teamlist.append(x)
        hometeam = teamlist[0]
        awayteam = teamlist[2]
        print('Home Team: ' + hometeam)
        print('Away Team: ' + awayteam)
        # Starting XI: left column for home, right column for away
        if hometeam == 'England':
            print('England Home')
            # MATCH DETAILS
            bigcolumn = pagesoup.find("div", {"class", "combined-lineups-container"})
            leftcolumn = bigcolumn.find("div", {"class", "container left"}).text
            starting = re.findall(r'[A-Z]. [A-Za-z]*', leftcolumn)
            starting.pop()
        if awayteam == 'England':
            print('England Away')
            bigcolumn = pagesoup.find("div", {"class", "combined-lineups-container"})
            leftcolumn = bigcolumn.find("div", {"class", "container right"}).text
            starting = re.findall(r'[A-Z]. [A-Za-z]*', leftcolumn)
            starting.pop()
        print(starting)
        # SUBSTITUTES: the second lineups container holds the bench
        allsubs = bigcolumn.find_next("div", {"class", "combined-lineups-container"})
        if hometeam == 'England':
            actualsubs = allsubs.find("div", {"class", "container left"})
            actualsubs = actualsubs.find("table", {"class", "playerstats lineups substitutions table"}).text
        if awayteam == 'England':
            actualsubs = allsubs.find("div", {"class", "container right"})
            actualsubs = actualsubs.find("table", {"class", "playerstats lineups substitutions table"}).text
        # dnpsubs starts as ALL bench players; used subs are removed below
        dnpsubs = re.findall(r'[A-Z][.]\s[A-Za-z-]+', actualsubs)
        subsmade = re.findall(r'[A-Z].\s[A-Za-z]+\s+[for]*\s[A-Z][.]\s[A-Za-z-]+\s[0-9]+', actualsubs)
        subs = []
        print(dnpsubs)
        for lines in subsmade:
            sub = re.sub("\n", "", lines)
            sub = re.sub(r"for ", ' for ', sub)
            subs.append(sub)
        print(subs)
        # Extra-time flag lives in the second match-details block
        matchdetails = pagesoup.find("div", {"class", "details clearfix"})
        matchdetails2 = matchdetails.find_next("div", {"class", "details clearfix"}).text
        extratimecheck = re.findall(r'\bExtra-time\b', matchdetails2)
        print(extratimecheck)
        if len(extratimecheck) > 0:
            extratime = '1'
            print('Total Playtime = 120 mins (Extra Time)')
        if len(extratimecheck) == 0:
            extratime = '0'
            print('Total Playtime = 90 mins (Normal Time)')
        # we eventually want a dictionary: player name -> minutes played
        playtime = {}
        # (note: `for subs in subs` rebinds the list name to each entry)
        for subs in subs:
            global timein
            global timeout
            both = re.findall(r'[A-Z][.] [A-Za-z]+', subs)
            timeplayed = re.findall(r'[0-9]+', subs)
            subin = both[0]
            subout = both[1]
            # Players involved in a substitution did play: drop from DNP list
            if subin in dnpsubs:
                number = dnpsubs.index(subin)
                dnpsubs.pop(number)
            if subout in dnpsubs:
                number = dnpsubs.index(subout)
                dnpsubs.pop(number)
            # Minutes for the incoming player = total match time - sub minute
            if extratime == '1':
                timein = 120 - int(timeplayed[0])
                timeout = timeplayed[0]
            if extratime == '0':
                timein = 90 - int(timeplayed[0])
                timeout = timeplayed[0]
            print(subin, timein)
            print(subout, timeout)
            # A substituted starter must not also get full-match minutes below
            if subout in starting:
                number = starting.index(subout)
                starting.pop(number)
            playtime[subin] = timein
            playtime[subout] = timeout
        # Remaining starters played the whole match
        for name in starting:
            if extratime == '1':
                timeplayed = '120'
            if extratime == '0':
                timeplayed = '90'
            print(name, timeplayed)
            playtime[name] = timeplayed
        # Unused substitutes get zero minutes
        for name in dnpsubs:
            timeplayed = '0'
            print(name, timeplayed)
            playtime[name] = timeplayed
        print(playtime)
        # Append one spreadsheet row per player
        for key, val in playtime.items():
            global maxrow
            maxrow = sheet.max_row
            sheet.cell(row = maxrow+1, column = 1).value = key
            sheet.cell(row = maxrow+1, column = 2).value = val
            sheet.cell(row=maxrow+1, column=3).value = competition
            sheet.cell(row=maxrow+1, column=4).value = date
            sheet.cell(row = maxrow+1, column=5).value = links
        playerdb.save(workbook)
def test():
workbook = 'testbook1.xlsx'
playerdb = load_workbook(workbook)
sheet = playerdb.get_sheet_by_name('Injury_Off')
os.system('say "LETS GET IT ON"')
#get links
textfile = open("linklist.txt")
print('File Opened')
lines = textfile.read().split("\n")
#List of Daylight Saving dates
ds2018 = datetime.datetime.strptime('Mar 25, 2018', "%b %d, %Y")
ds2017 = datetime.datetime.strptime('Mar 26, 2017', "%b %d, %Y")
ds2016 = datetime.datetime.strptime('Mar 27, 2016', "%b %d, %Y")
ds2015 = datetime.datetime.strptime('Mar 29, 2015', "%b %d, %Y")
ds2014 = datetime.datetime.strptime('Mar 30, 2014', "%b %d, %Y")
ds2013 = datetime.datetime.strptime('Mar 31, 2013', "%b %d, %Y")
ds2012 = datetime.datetime.strptime('Mar 25, 2012', "%b %d, %Y")
ds2011 = datetime.datetime.strptime('Mar 27, 2011', "%b %d, %Y")
ds2010 = datetime.datetime.strptime('Mar 28, 2010', "%b %d, %Y")
doff2018 = datetime.datetime.strptime('Oct 28, 2018', "%b %d, %Y")
doff2017 = datetime.datetime.strptime('Oct 29, 2017', "%b %d, %Y")
doff2016 = datetime.datetime.strptime('Oct 30, 2016', "%b %d, %Y")
doff2015 = datetime.datetime.strptime('Oct 25, 2015', "%b %d, %Y")
doff2014 = datetime.datetime.strptime('Oct 26, 2014', "%b %d, %Y")
doff2013 = datetime.datetime.strptime('Oct 27, 2013', "%b %d, %Y")
doff2012 = datetime.datetime.strptime('Oct 28, 2012', "%b %d, %Y")
doff2011 = datetime.datetime.strptime('Oct 30, 2011', "%b %d, %Y")
doff2010 = datetime.datetime.strptime('Oct 31, 2010', "%b %d, %Y")
linkslist = []
for lines in lines:
if lines[:3] != 'htt':
continue
linkslist.append(lines)
print(linkslist)
#loop through all link
for links in linkslist:
starttime = time.time()
print(str(linkslist.index(links)) + ' out of ' + str(len(linkslist)))
global maxrow
maxrow = sheet.max_row
noreport = 0
datecat = 1
myurl = links
browser = webdriver.Chrome("/Users/Qixiang/Dropbox/ICS/venv/chromedriver")
browser.get(myurl)
time.sleep(0.50)
pagehtml = browser.page_source
# parsing as html
pagesoup = soup(pagehtml, "html.parser")
# FOR TESTING
tabs = pagesoup.find_all("div", {"class", "subnavi_box"})
statistics = re.findall(r'Statistics..\n.*Statistics', str(tabs))
statistics = re.findall(r'/.*[0-9]', str(statistics))
statspagelink = 'https://www.transfermarkt.co.uk' + str(statistics[0])
print(statspagelink)
#MATCH DETAILS
gamescore = pagesoup.find("div", {"class", "sb-endstand"}).text
gamescore = re.findall(r'[0-9{1,}]:[0-9{1,}]', gamescore)
gamescore = gamescore[0]
gamescore = gamescore.split(":")
homescore = gamescore[0]
awayscore = gamescore[1]
totalconceded = int(homescore) + int(awayscore)
date = pagesoup.find("div", {"class", "sb-spieldaten"})
date = date.find("p", {"class", "sb-datum hide-for-small"})
date = re.sub("<.*?>", "", str(date))
dateplayed = re.findall(r'[a-zA-Z]{3} [0-9]{1,}, [0-9]{4}', date)
dateplayed = str(dateplayed[0])
# print(dateplayed)
dateplayed = datetime.datetime.strptime(dateplayed, "%b %d, %Y")
# print(dateplayed.year)
# For calculating days from Daylight Savings Day
def calculatedate():
global category
if datecat != 0:
if 8 <= int(dsONdiff[0]) <= 14:
print('Category A - Pre')
category = '0'
if -8 < int(dsONdiff[0]) < 8:
print('Category B - Week of ')
category = '1'
if -14 <= int(dsONdiff[0]) <= -8:
print('Category C - Post')
category = '2'
if int(dsONdiff[0]) > 14:
print('Category D - DNC')
category = '3'
if int(dsONdiff[0]) < -14:
print('Category D - DNC')
category = '3'
if datecat == 0:
category = '1'
# Calculating Offset data
if int(dateplayed.year) == int(doff2010.year):
print(doff2010 - dateplayed)
if doff2010 != dateplayed:
dsONdiff = doff2010 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2010 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2011.year):
print(doff2011 - dateplayed)
if doff2011 != dateplayed:
dsONdiff = doff2011 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2011 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2012.year):
if doff2012 != dateplayed:
dsONdiff = doff2012 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2012 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2013.year):
if doff2013 != dateplayed:
dsONdiff = doff2013 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2013 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2014.year):
if doff2014 != dateplayed:
dsONdiff = doff2014 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2014 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2015.year):
if doff2015 != dateplayed:
dsONdiff = doff2015 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2015 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2016.year):
if doff2016 != dateplayed:
dsONdiff = doff2016 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2016 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2017.year):
if doff2017 != dateplayed:
dsONdiff = doff2017 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2017 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
if int(dateplayed.year) == int(doff2018.year):
if doff2018 != dateplayed:
dsONdiff = doff2018 - dateplayed
dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
print(dsONdiff)
calculatedate()
if doff2018 == dateplayed:
dsONdiff = 0
datecat = 0
calculatedate()
# EXTRACTING DLS ONSET DATA
# if int(dateplayed.year) == int(ds2010.year):
# print(ds2010 - dateplayed)
#
# if ds2010 != dateplayed:
# dsONdiff = ds2010 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2010 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
# if int(dateplayed.year) == int(ds2011.year):
# print(ds2011 - dateplayed)
#
# if ds2011 != dateplayed:
# dsONdiff = ds2011 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2011 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
#
# if int(dateplayed.year) == int(ds2012.year):
# if ds2012 != dateplayed:
# dsONdiff = ds2012 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2012 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
#
# if int(dateplayed.year) == int(ds2013.year):
# if ds2013 != dateplayed:
# dsONdiff = ds2013 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2013 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
#
# if int(dateplayed.year) == int(ds2014.year):
# if ds2014 != dateplayed:
# dsONdiff = ds2014 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2014 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
#
# if int(dateplayed.year) == int(ds2015.year):
# if ds2015 != dateplayed:
# dsONdiff = ds2015 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2015 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
#
# if int(dateplayed.year) == int(ds2016.year):
# if ds2016 != dateplayed:
# dsONdiff = ds2016 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2016 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
#
# if int(dateplayed.year) == int(ds2017.year):
# if ds2017 != dateplayed:
# dsONdiff = ds2017 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2017 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
#
# if int(dateplayed.year) == int(ds2018.year):
# if ds2018 != dateplayed:
# dsONdiff = ds2018 - dateplayed
#
# dsONdiff = re.findall(r'[-0-9]{1,} day', str(dsONdiff))
# dsONdiff = re.findall(r'[-0-9]{1,}', str(dsONdiff))
# print(dsONdiff)
#
# calculatedate()
#
# if ds2018 == dateplayed:
# dsONdiff = 0
# datecat = 0
# calculatedate()
    # Extract the kick-off time ("H:MM") from the date line.
    timeplayed = re.findall(r'[0-9]+:[0-9]{2}', date)
    # NOTE(review): " PM" is appended unconditionally — assumes every
    # listed kick-off is an afternoon/evening time; confirm.
    timeplayed = timeplayed[0] + ' PM'
    # Competition name, newlines stripped.
    league = pagesoup.find("div", {"class", "spielername-profil"}).text
    league = re.sub("\n", "", league)
    league = str(league)
    print(league)
    # INJURY DETAILS: count events by scanning the tag-stripped HTML.
    matchdetails = pagesoup.find_all("div", {"class", "sb-ereignisse"})
    breakdown = re.sub("<.*?>", "", str(matchdetails))
    # for fouls
    yellowcards = re.findall('Yellow card', breakdown)
    redcards = re.findall('Red card', breakdown)
    yellowcards = str(len(yellowcards))
    redcards = str(len(redcards))
    # for injuries
    injuries = re.findall('Injury', breakdown)
    injuries = str(len(injuries))
    print(injuries)
    # "Not reported" anywhere in the report flags the row as missing data.
    notreported = re.findall('Not reported', breakdown)
    if len(notreported) > 0:
        noreport = 1
    print('Date Played: ' + str(dateplayed))
    print('Time Played: ' + timeplayed)
    print('Amount of Injuries: ' + injuries)
    print('Yellow Cards: ' + yellowcards)
    print('Red Cards: ' + redcards)
    print('Home Team Score: ' + homescore)
    print('Away Team Score: ' + awayscore)
    print('Total Goals Conceded: ' + str(totalconceded))
    ## This portion writes to excel, comment out when testing
    sheet.cell(row=maxrow + 1, column=1).value = dateplayed
    sheet.cell(row=maxrow + 1, column=2).value = timeplayed
    sheet.cell(row=maxrow + 1, column=3).value = injuries
    sheet.cell(row=maxrow + 1, column=4).value = league
    sheet.cell(row=maxrow + 1, column=5).value = yellowcards
    sheet.cell(row=maxrow + 1, column=6).value = redcards
    sheet.cell(row=maxrow + 1, column=7).value = links
    # Column 8 (missing-data flag) is written further down.
    sheet.cell(row=maxrow + 1, column=9).value = homescore
    sheet.cell(row=maxrow + 1, column=10).value = awayscore
    sheet.cell(row=maxrow + 1, column=11).value = str(totalconceded)
    sheet.cell(row=maxrow + 1, column=12).value = category
    # Column 13: day offset from the daylight-saving date ('0' when the
    # match fell exactly on it, i.e. datecat == 0).
    if datecat != 0:
        sheet.cell(row=maxrow + 1, column=13).value = str(dsONdiff[0])
    if datecat == 0:
        sheet.cell(row=maxrow + 1, column=13).value = '0'
        print('DAY OF DLS')
    # Column 8: '1' when the match report contained "Not reported".
    if noreport == 1:
        sheet.cell(row=maxrow + 1, column=8).value = '1'
        print('Missing Data: YES')
    if noreport != 1:
        sheet.cell(row=maxrow + 1, column=8).value = '0'
        print('Missing Data: NO')
    # Reset per-match flags for the next iteration.
    noreport = 0
    datecat = 1
    browser.close()
    # Open new browser to get match statistics
    browser = webdriver.Chrome("/Users/Qixiang/Dropbox/ICS/venv/chromedriver")
    myurl = statspagelink
    browser.get(myurl)
    time.sleep(0.50)
    pagehtml = browser.page_source
    # parsing as html
    pagesoup = soup(pagehtml, "html.parser")
    # Get match statistics: every integer rendered in the stats boxes.
    matchstatistics = pagesoup.find_all("div", {"class", "sb-statistik"})
    matchstatistics = re.findall(r'>[0-9]{1,}<', str(matchstatistics))
    matchstatsclean = []
    for i in matchstatistics:
        i = re.sub("[><]", "", i)
        print(i)
        matchstatsclean.append(i)
    print(matchstatsclean)
    print(len(matchstatistics))
    counting = 0
    # NOTE(review): 14 zero values appears to be treated as "no stats
    # available" for this match — confirm against the site layout.
    missingdata = matchstatsclean.count('0')
    for i in matchstatsclean:
        if len(matchstatsclean) == 0:
            break
        if missingdata == 14:
            break
        counting += 1
        # Stats land in columns 14 onward, one per value.
        sheet.cell(row = maxrow + 1, column = 13+counting).value = i
    browser.close()
    # Save the workbook after each match so a crash loses little work.
    playerdb.save(workbook)
    endtime = time.time()
    onelooptime = round(endtime-starttime,2)
    print('This loop took: ' + str(onelooptime) + ' secs.')
    print('//////////////////////////////////////////////')
def multiscrape():
def findlinks():
textfile = open("linklist.txt")
print('File Opened')
lines = textfile.read().split("\n")
global linkslist
global linkslist2
linkslist = []
linkslist2 = []
for lines in lines:
if lines[:3] != 'htt':
continue
# one list for overview (height, nationality, DOB, age stats)
linkslist2.append(lines)
#this list is is for stats page
lines = lines[:-9]
# if 17/18 data wanted, edit url
lines = lines + 'stats?co=1&se=21'
linkslist.append(lines)
print(linkslist)
print(linkslist2)
workbook = 'EPL 1213.xlsx'
findlinks()
playerdb = load_workbook(workbook)
# for links, count in zip(linkslist2, range(1,len(linkslist2)+1)):
# myurl = links
# uclient = ureq(myurl)
# pagehtml = uclient.read()
# # opens browser and scrapes
# # browser = webdriver.Chrome("/Users/Qixiang/Dropbox/ICS/venv/chromedriver")
# # browser.get(myurl)
# # time.sleep(3)
# # pagehtml = browser.page_source
#
# # parsing as html
# pagesoup = soup(pagehtml, "html.parser")
#
# # uclient.close()
# # player details
# details = pagesoup.find("div", {"class": "personalLists"}).text
# details = details.split()
# if len(details) < 7:
# nationality = details[1]
# age = '0'
# DOB = details[5]
# YOB = details[5][6:]
# height = '0'
# weight = '0'
# if len(details) > 7:
# nationality = details[1]
# age = details[3]
# DOB = details[7]
# YOB = details[7][6:]
# height = details[9]
# weight = details[11]
# print('Nationality: ' + nationality + '\n' +
# 'Age: ' + age + '\n' +
# 'DOB: ' + DOB + '\n' +
# 'Year Birth: ' + YOB + '\n' +
# 'Height: ' + height + '\n' +
# 'Weight: ' + weight + '\n' +
# '///////////////////////////////////')
#
#
# time.sleep(1.5)
for links, count in zip(linkslist, range(1,len(linkslist)+1)):
#for personal details
myurl1 = linkslist2[count-1]
uclient = ureq(myurl1)
pagehtml = uclient.read()
# opens browser and scrapes
# browser = webdriver.Chrome("/Users/Qixiang/Dropbox/ICS/venv/chromedriver")
# browser.get(myurl)
# time.sleep(3)
# pagehtml = browser.page_source
# parsing as html
pagesoup = soup(pagehtml, "html.parser")
# uclient.close()
# player details
details = pagesoup.find("div", {"class": "personalLists"}).text
details = details.split()
if len(details) < 7:
nationality = details[1]
age = '0'
DOB = details[5]
YOB = details[5][6:]
height = '0'
weight = '0'
if len(details) > 7:
nationality = details[1]
age = details[3]
DOB = details[7]
YOB = details[7][6:]
height = details[9]
weight = details[11]
print('Nationality: ' + nationality + '\n' +
'Age: ' + age + '\n' +
'DOB: ' + DOB + '\n' +
'Year Birth: ' + YOB + '\n' +
'Height: ' + height + '\n' +
'Weight: ' + weight + '\n' +
'///////////////////////////////////')
time.sleep(1.5)
myurl = links
print('Scrape No. ' + str(count) + ' out of ' + str(len(linkslist)))
# opens browser and scrapes
browser = webdriver.Chrome("/Users/Qixiang/Dropbox/ICS/venv/chromedriver")
browser.get(myurl)
time.sleep(3.50)
pagehtml = browser.page_source
# parsing as html
pagesoup = soup(pagehtml, "html.parser")
# uclient.close()
# player details
name = pagesoup.find("div", {"class": "name"}).text
try:
jerseyno = pagesoup.find("div", {"class": "number"}).text
except:
jerseyno = '0'
positionget = pagesoup.find_all("div", {"class": "info"})
global position
position = 'null'
for div in positionget:
x = div.text
x = ''.join(x.split())
if x == 'Goalkeeper':
position = 'Goalkeeper'
if x == 'Defender':
position = 'Defender'
if x == 'Midfielder':
position = 'Midfielder'
if x == 'Forward':
position = 'Forward'
print('Position: ' + position)
team = pagesoup.find("div", {"class": "info"}).text
if team == position:
team = '0'
if position == 'Defender':
# determine sheet
sheet = playerdb.get_sheet_by_name('Defender')
# determine number of rows existing, write to that row number plus one
global maxrow
maxrow = sheet.max_row
print('Number of rows is ' + str(maxrow))
# general stats
appearances = pagesoup.find("span", {"class": "allStatContainer statappearances"}).text
wins = pagesoup.find("span", {"class": "allStatContainer statwins"}).text
losses = pagesoup.find("span", {"class": "allStatContainer statlosses"}).text
# defence stats
try:
cleansheet = pagesoup.find("span", {"class": "allStatContainer statclean_sheet"}).text
except:
cleansheet = '0'
try:
goalconceded = pagesoup.find("span", {"class": "allStatContainer statgoals_conceded"}).text
except:
goalconceded = '0'
tackles = pagesoup.find("span", {"class": "allStatContainer stattotal_tackle"}).text
tacklessuccess = pagesoup.find("span", {"class": "allStatContainer stattackle_success"}).text
try:
lastmantackle = pagesoup.find("span", {"class": "allStatContainer statlast_man_tackle"}).text
except:
lastmantackle = '0'
blocks = pagesoup.find("span", {"class": "allStatContainer statblocked_scoring_att"}).text
interceptions = pagesoup.find("span", {"class": "allStatContainer statinterception"}).text
clearances = pagesoup.find("span", {"class": "allStatContainer stattotal_clearance"}).text
headedclearance = pagesoup.find("span", {"class": "allStatContainer stateffective_head_clearance"}).text
try:
clearanceoffline = pagesoup.find("span", {"class": "allStatContainer statclearance_off_line"}).text
except:
clearanceoffline = '0'
recovery = pagesoup.find("span", {"class": "allStatContainer statball_recovery"}).text
duelswon = pagesoup.find("span", {"class": "allStatContainer statduel_won"}).text
duelslost = pagesoup.find("span", {"class": "allStatContainer statduel_lost"}).text
fiftyfiftywon = pagesoup.find("span", {"class": "allStatContainer statwon_contest"}).text
aerialwon = pagesoup.find("span", {"class": "allStatContainer stataerial_won"}).text
aeriallost = pagesoup.find("span", {"class": "allStatContainer stataerial_lost"}).text
try:
owngoals = pagesoup.find("span", {"class": "allStatContainer statown_goals"}).text
except:
owngoals = '0'
errortogoal = pagesoup.find("span", {"class": "allStatContainer staterror_lead_to_goal"}).text
# discipline stats
yellowcard = pagesoup.find("span", {"class": "allStatContainer statyellow_card"}).text
redcard = pagesoup.find("span", {"class": "allStatContainer statred_card"}).text
fouls = pagesoup.find("span", {"class": "allStatContainer statfouls"}).text
offsides = pagesoup.find("span", {"class": "allStatContainer stattotal_offside"}).text
# teamplay stats
assists = pagesoup.find("span", {"class": "allStatContainer statgoal_assist"}).text
passes = pagesoup.find("span", {"class": "allStatContainer stattotal_pass"}).text
passespergame = pagesoup.find("span", {"class": "allStatContainer stattotal_pass_per_game"}).text
bigchancecreated = pagesoup.find("span", {"class": "allStatContainer statbig_chance_created"}).text
crosses = pagesoup.find("span", {"class": "allStatContainer stattotal_cross"}).text
crossacc = pagesoup.find("span", {"class": "allStatContainer statcross_accuracy"}).text
throughballs = pagesoup.find("span", {"class": "allStatContainer stattotal_through_ball"}).text
acclongballs = pagesoup.find("span", {"class": "allStatContainer stataccurate_long_balls"}).text
# attack stats
goals = pagesoup.find("span", {"class": "allStatContainer statgoals"}).text
headgoals = pagesoup.find("span", {"class": "allStatContainer statatt_hd_goal"}).text
goalsright = pagesoup.find("span", {"class": "allStatContainer statatt_rf_goal"}).text
goalsleft = pagesoup.find("span", {"class": "allStatContainer statatt_lf_goal"}).text
woodwork = pagesoup.find("span", {"class": "allStatContainer stathit_woodwork"}).text
# using lists to trim text and for writing to CSV later
playerdetails = [name, jerseyno, position, team]
generallist = [appearances, wins, losses]
defendinglist = [cleansheet, goalconceded, tackles, tacklessuccess, lastmantackle, blocks, interceptions,
clearances, headedclearance, clearanceoffline, recovery, duelswon, duelslost,
fiftyfiftywon, aerialwon, aeriallost, owngoals, errortogoal]
disciplinelist = [yellowcard, redcard, fouls, offsides]
teamplaylist = [assists, passes, passespergame, bigchancecreated, crosses, crossacc, throughballs,
acclongballs]
attacklist = [goals, headgoals, goalsright, goalsleft, woodwork]
detailslist = [nationality, age, DOB, YOB, height, weight]
for x, y in zip(playerdetails, range(0, len(playerdetails))):
if x != name:
x = ''.join(x.split())
x = x.replace(',', '')
playerdetails[y] = x
if x == name:
print('Player: ' + name + ' ' + position)
playerdetails[y] = x
for x, y in zip(generallist, range(0, len(generallist))):
x = ''.join(x.split())
x = x.replace(',', '')
generallist[y] = x
for x, y in zip(defendinglist, range(0, len(defendinglist))):
x = ''.join(x.split())
x = x.replace(',', '')
defendinglist[y] = x
for x, y in zip(attacklist, range(0, len(attacklist))):
x = ''.join(x.split())
x = x.replace(',', '')
attacklist[y] = x
for x, y in zip(disciplinelist, range(0, len(disciplinelist))):
x = ''.join(x.split())
x = x.replace(',', '')
disciplinelist[y] = x
for x, y in zip(teamplaylist, range(0, len(teamplaylist))):
x = ''.join(x.split())
x = x.replace(',', '')
teamplaylist[y] = x
print(playerdetails)
print(generallist)
print(defendinglist)
print(attacklist)
print(disciplinelist)
print(teamplaylist)
print(detailslist)
# to remove all whitespace
# saves = ''.join(saves.split())
# write to the csv file
for x, y in zip(playerdetails, range(1, len(playerdetails) + 1)):
sheet.cell(row=maxrow + 1, column=y).value = x
for x, y in zip(generallist, range(1, len(generallist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 4).value = x
for x, y in zip(defendinglist, range(1, len(defendinglist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 7).value = x
for x, y in zip(attacklist, range(1, len(attacklist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 25).value = x
for x, y in zip(disciplinelist, range(1, len(disciplinelist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 30).value = x
for x, y in zip(teamplaylist, range(1, len(teamplaylist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 34).value = x
for x, y in zip(detailslist, range(1, len(detailslist)+ 1)):
sheet.cell(row = maxrow + 1, column = y + 42).value = x
playerdb.save(workbook)
if position == 'Goalkeeper':
# determine sheet
sheet = playerdb.get_sheet_by_name('Goalkeeper')
# determine number of rows existing, write to that row number plus one
maxrow = sheet.max_row
print('number of rows is ' + str(maxrow))
# general stats
appearances = pagesoup.find("span", {"class": "allStatContainer statappearances"}).text
wins = pagesoup.find("span", {"class": "allStatContainer statwins"}).text
losses = pagesoup.find("span", {"class": "allStatContainer statlosses"}).text
# goalkeeping stats
saves = pagesoup.find("span", {"class": "allStatContainer statsaves"}).text
penaltysaves = pagesoup.find("span", {"class": "allStatContainer statpenalty_save"}).text
punches = pagesoup.find("span", {"class": "allStatContainer statpunches"}).text
highclaim = pagesoup.find("span", {"class": "allStatContainer statgood_high_claim"}).text
catches = pagesoup.find("span", {"class": "allStatContainer statcatches"}).text
sweepclearance = pagesoup.find("span", {"class": "allStatContainer stattotal_keeper_sweeper"}).text
throws = pagesoup.find("span", {"class": "allStatContainer statkeeper_throws"}).text
goalkicks = pagesoup.find("span", {"class": "allStatContainer statgoal_kicks"}).text
# defence stats
cleansheet = pagesoup.find("span", {"class": "allStatContainer statclean_sheet"}).text
goalconceded = pagesoup.find("span", {"class": "allStatContainer statgoals_conceded"}).text
errortogoal = pagesoup.find("span", {"class": "allStatContainer staterror_lead_to_goal"}).text
owngoal = pagesoup.find("span", {"class": "allStatContainer statown_goals"}).text
# discipline stats
yellowcard = pagesoup.find("span", {"class": "allStatContainer statyellow_card"}).text
redcard = pagesoup.find("span", {"class": "allStatContainer statred_card"}).text
fouls = pagesoup.find("span", {"class": "allStatContainer statfouls"}).text
# teamplay stats
goals = pagesoup.find("span", {"class": "allStatContainer statgoals"}).text
assists = pagesoup.find("span", {"class": "allStatContainer statgoal_assist"}).text
passes = pagesoup.find("span", {"class": "allStatContainer stattotal_pass"}).text
passespergame = pagesoup.find("span", {"class": "allStatContainer stattotal_pass_per_game"}).text
longballs = pagesoup.find("span", {"class": "allStatContainer stataccurate_long_balls"}).text
# using lists to trim text and for writing to CSV later
playerdetails = [name, jerseyno, position, team]
generallist = [appearances, wins, losses]
goalkeepinglist = [saves, penaltysaves, punches, highclaim, catches, sweepclearance, throws, goalkicks]
defencelist = [cleansheet, goalconceded, errortogoal, owngoal]
disciplinelist = [yellowcard, redcard, fouls]
teamplaylist = [goals, assists, passes, passespergame, longballs]
detailslist = [nationality, age, DOB, YOB, height, weight]
for x, y in zip(playerdetails, range(0, len(playerdetails))):
if x != name:
x = ''.join(x.split())
x = x.replace(',', '')
playerdetails[y] = x
if x == name:
print('Player: ' + name + ' ' + position)
playerdetails[y] = x
for x, y in zip(generallist, range(0, len(generallist))):
x = ''.join(x.split())
x = x.replace(',', '')
generallist[y] = x
for x, y in zip(goalkeepinglist, range(0, len(goalkeepinglist))):
x = ''.join(x.split())
x = x.replace(',', '')
goalkeepinglist[y] = x
for x, y in zip(defencelist, range(0, len(defencelist))):
x = ''.join(x.split())
x = x.replace(',', '')
defencelist[y] = x
for x, y in zip(disciplinelist, range(0, len(disciplinelist))):
x = ''.join(x.split())
x = x.replace(',', '')
disciplinelist[y] = x
for x, y in zip(teamplaylist, range(0, len(teamplaylist))):
x = ''.join(x.split())
x = x.replace(',', '')
teamplaylist[y] = x
print(playerdetails)
print(generallist)
print(goalkeepinglist)
print(defencelist)
print(disciplinelist)
print(teamplaylist)
# to remove all whitespace
# saves = ''.join(saves.split())
# write to the csv file
for x, y in zip(playerdetails, range(1, len(playerdetails) + 1)):
sheet.cell(row=maxrow + 1, column=y).value = x
for x, y in zip(generallist, range(1, len(generallist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 4).value = x
for x, y in zip(goalkeepinglist, range(1, len(goalkeepinglist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 7).value = x
for x, y in zip(defencelist, range(1, len(defencelist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 15).value = x
for x, y in zip(disciplinelist, range(1, len(disciplinelist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 19).value = x
for x, y in zip(teamplaylist, range(1, len(teamplaylist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 22).value = x
for x, y in zip(detailslist, range(1, len(detailslist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 27).value = x
playerdb.save(workbook)
if position == 'Midfielder':
# determine sheet
sheet = playerdb.get_sheet_by_name('Midfielder')
# determine number of rows existing, write to that row number plus one
maxrow = sheet.max_row
print('number of rows is ' + str(maxrow))
# general stats
appearances = pagesoup.find("span", {"class": "allStatContainer statappearances"}).text
wins = pagesoup.find("span", {"class": "allStatContainer statwins"}).text
losses = pagesoup.find("span", {"class": "allStatContainer statlosses"}).text
# defence stats
tackles = pagesoup.find("span", {"class": "allStatContainer stattotal_tackle"}).text
try:
tacklessuccess = pagesoup.find("span", {"class": "allStatContainer stattackle_success"}).text
except:
tacklessuccess = '0'
blocks = pagesoup.find("span", {"class": "allStatContainer statblocked_scoring_att"}).text
interceptions = pagesoup.find("span", {"class": "allStatContainer statinterception"}).text
clearances = pagesoup.find("span", {"class": "allStatContainer stattotal_clearance"}).text
headedclearance = pagesoup.find("span", {"class": "allStatContainer stateffective_head_clearance"}).text
try:
recovery = pagesoup.find("span", {"class": "allStatContainer statball_recovery"}).text
except:
recovery = '0'
try:
duelswon = pagesoup.find("span", {"class": "allStatContainer statduel_won"}).text
except:
duelswon = '0'
try:
duelslost = pagesoup.find("span", {"class": "allStatContainer statduel_lost"}).text
except:
duelslost = '0'
try:
fiftyfiftywon = pagesoup.find("span", {"class": "allStatContainer statwon_contest"}).text
except:
fiftyfiftywon = '0'
try:
aerialwon = pagesoup.find("span", {"class": "allStatContainer stataerial_won"}).text
except:
aerialwon = '0'
try:
aeriallost = pagesoup.find("span", {"class": "allStatContainer stataerial_lost"}).text
except:
aeriallost = '0'
try:
errortogoal = pagesoup.find("span", {"class": "allStatContainer staterror_lead_to_goal"}).text
except:
errortogoal = '0'
# discipline stats
yellowcard = pagesoup.find("span", {"class": "allStatContainer statyellow_card"}).text
redcard = pagesoup.find("span", {"class": "allStatContainer statred_card"}).text
fouls = pagesoup.find("span", {"class": "allStatContainer statfouls"}).text
offsides = pagesoup.find("span", {"class": "allStatContainer stattotal_offside"}).text
# teamplay stats
assists = pagesoup.find("span", {"class": "allStatContainer statgoal_assist"}).text
passes = pagesoup.find("span", {"class": "allStatContainer stattotal_pass"}).text
passespergame = pagesoup.find("span", {"class": "allStatContainer stattotal_pass_per_game"}).text
bigchancecreated = pagesoup.find("span", {"class": "allStatContainer statbig_chance_created"}).text
crosses = pagesoup.find("span", {"class": "allStatContainer stattotal_cross"}).text
try:
crossacc = pagesoup.find("span", {"class": "allStatContainer statcross_accuracy"}).text
except:
crossacc = '0'
try:
throughballs = pagesoup.find("span", {"class": "allStatContainer stattotal_through_ball"}).text
except:
throughballs = '0'
try:
acclongballs = pagesoup.find("span", {"class": "allStatContainer stataccurate_long_balls"}).text
except:
acclongballs = '0'
# attack stats
goals = pagesoup.find("span", {"class": "allStatContainer statgoals"}).text
goalspg = pagesoup.find("span", {"class": "allStatContainer statgoals_per_game"}).text
headgoals = pagesoup.find("span", {"class": "allStatContainer statatt_hd_goal"}).text
goalsright = pagesoup.find("span", {"class": "allStatContainer statatt_rf_goal"}).text
goalsleft = pagesoup.find("span", {"class": "allStatContainer statatt_lf_goal"}).text
penaltyscored = pagesoup.find("span", {"class": "allStatContainer statatt_pen_goal"}).text
freekickscored = pagesoup.find("span", {"class": "allStatContainer statatt_freekick_goal"}).text
shots = pagesoup.find("span", {"class": "allStatContainer stattotal_scoring_att"}).text
shotstarget = pagesoup.find("span", {"class": "allStatContainer statontarget_scoring_att"}).text
shotacc = pagesoup.find("span", {"class": "allStatContainer statshot_accuracy"}).text
woodwork = pagesoup.find("span", {"class": "allStatContainer stathit_woodwork"}).text
bigchancemiss = pagesoup.find("span", {"class": "allStatContainer statbig_chance_missed"}).text
# using lists to trim text and for writing to CSV later
playerdetails = [name, jerseyno, position, team]
generallist = [appearances, wins, losses]
defendinglist = [tackles, tacklessuccess, blocks, interceptions, clearances, headedclearance, recovery,
duelswon, duelslost, fiftyfiftywon, aerialwon, aeriallost, errortogoal]
disciplinelist = [yellowcard, redcard, fouls, offsides]
teamplaylist = [assists, passes, passespergame, bigchancecreated, crosses, crossacc, throughballs,
acclongballs]
attacklist = [goals, goalspg, headgoals, goalsright, goalsleft, penaltyscored, freekickscored, shots,
shotstarget, shotacc, woodwork, bigchancemiss]
detailslist = [nationality, age, DOB, YOB, height, weight]
print(len(defendinglist))
print(len(attacklist))
print(len(disciplinelist))
print(len(teamplaylist))
for x, y in zip(playerdetails, range(0, len(playerdetails))):
if x != name:
x = ''.join(x.split())
x = x.replace(',', '')
playerdetails[y] = x
if x == name:
print('Player: ' + name + ' ' + position)
playerdetails[y] = x
for x, y in zip(generallist, range(0, len(generallist))):
x = ''.join(x.split())
x = x.replace(',', '')
generallist[y] = x
for x, y in zip(defendinglist, range(0, len(defendinglist))):
x = ''.join(x.split())
x = x.replace(',', '')
defendinglist[y] = x
for x, y in zip(attacklist, range(0, len(attacklist))):
x = ''.join(x.split())
x = x.replace(',', '')
attacklist[y] = x
for x, y in zip(disciplinelist, range(0, len(disciplinelist))):
x = ''.join(x.split())
x = x.replace(',', '')
disciplinelist[y] = x
for x, y in zip(teamplaylist, range(0, len(teamplaylist))):
x = ''.join(x.split())
x = x.replace(',', '')
teamplaylist[y] = x
print(playerdetails)
print(generallist)
print(defendinglist)
print(attacklist)
print(disciplinelist)
print(teamplaylist)
# to remove all whitespace
# saves = ''.join(saves.split())
# write to the csv file
for x, y in zip(playerdetails, range(1, len(playerdetails) + 1)):
sheet.cell(row=maxrow + 1, column=y).value = x
for x, y in zip(generallist, range(1, len(generallist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 4).value = x
for x, y in zip(defendinglist, range(1, len(defendinglist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 7).value = x
for x, y in zip(disciplinelist, range(1, len(disciplinelist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 20).value = x
for x, y in zip(attacklist, range(1, len(attacklist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 24).value = x
for x, y in zip(teamplaylist, range(1, len(teamplaylist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 36).value = x
for x, y in zip(detailslist, range(1, len(detailslist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 44).value = x
playerdb.save(workbook)
if position == 'Forward':
# determine sheet
sheet = playerdb.get_sheet_by_name('Forward')
# determine number of rows existing, write to that row number plus one
maxrow = sheet.max_row
print('number of rows is ' + str(maxrow))
# general stats
appearances = pagesoup.find("span", {"class": "allStatContainer statappearances"}).text
wins = pagesoup.find("span", {"class": "allStatContainer statwins"}).text
losses = pagesoup.find("span", {"class": "allStatContainer statlosses"}).text
# defence stats 5
tackles = pagesoup.find("span", {"class": "allStatContainer stattotal_tackle"}).text
blocks = pagesoup.find("span", {"class": "allStatContainer statblocked_scoring_att"}).text
interceptions = pagesoup.find("span", {"class": "allStatContainer statinterception"}).text
clearances = pagesoup.find("span", {"class": "allStatContainer stattotal_clearance"}).text
headedclearance = pagesoup.find("span", {"class": "allStatContainer stateffective_head_clearance"}).text
# discipline stats 4
yellowcard = pagesoup.find("span", {"class": "allStatContainer statyellow_card"}).text
redcard = pagesoup.find("span", {"class": "allStatContainer statred_card"}).text
fouls = pagesoup.find("span", {"class": "allStatContainer statfouls"}).text
offsides = pagesoup.find("span", {"class": "allStatContainer stattotal_offside"}).text
# teamplay stats 5
assists = pagesoup.find("span", {"class": "allStatContainer statgoal_assist"}).text
passes = pagesoup.find("span", {"class": "allStatContainer stattotal_pass"}).text
passespergame = pagesoup.find("span", {"class": "allStatContainer stattotal_pass_per_game"}).text
bigchancecreated = pagesoup.find("span", {"class": "allStatContainer statbig_chance_created"}).text
crosses = pagesoup.find("span", {"class": "allStatContainer stattotal_cross"}).text
# attack stats 12
goals = pagesoup.find("span", {"class": "allStatContainer statgoals"}).text
goalspg = pagesoup.find("span", {"class": "allStatContainer statgoals_per_game"}).text
headgoals = pagesoup.find("span", {"class": "allStatContainer statatt_hd_goal"}).text
goalsright = pagesoup.find("span", {"class": "allStatContainer statatt_rf_goal"}).text
goalsleft = pagesoup.find("span", {"class": "allStatContainer statatt_lf_goal"}).text
penaltyscored = pagesoup.find("span", {"class": "allStatContainer statatt_pen_goal"}).text
freekickscored = pagesoup.find("span", {"class": "allStatContainer statatt_freekick_goal"}).text
shots = pagesoup.find("span", {"class": "allStatContainer stattotal_scoring_att"}).text
shotstarget = pagesoup.find("span", {"class": "allStatContainer statontarget_scoring_att"}).text
shotacc = pagesoup.find("span", {"class": "allStatContainer statshot_accuracy"}).text
woodwork = pagesoup.find("span", {"class": "allStatContainer stathit_woodwork"}).text
bigchancemiss = pagesoup.find("span", {"class": "allStatContainer statbig_chance_missed"}).text
# using lists to trim text and for writing to CSV later
playerdetails = [name, jerseyno, position, team]
generallist = [appearances, wins, losses]
defendinglist = [tackles, blocks, interceptions, clearances, headedclearance]
disciplinelist = [yellowcard, redcard, fouls, offsides]
teamplaylist = [assists, passes, passespergame, bigchancecreated, crosses]
attacklist = [goals, goalspg, headgoals, goalsright, goalsleft, penaltyscored, freekickscored, shots,
shotstarget, shotacc, woodwork, bigchancemiss]
detailslist = [nationality, age, DOB, YOB, height, weight]
for x, y in zip(playerdetails, range(0, len(playerdetails))):
if x != name:
x = ''.join(x.split())
x = x.replace(',', '')
playerdetails[y] = x
if x == name:
print('Player: ' + name + ' ' + position)
playerdetails[y] = x
for x, y in zip(generallist, range(0, len(generallist))):
x = ''.join(x.split())
x = x.replace(',', '')
generallist[y] = x
for x, y in zip(defendinglist, range(0, len(defendinglist))):
x = ''.join(x.split())
x = x.replace(',', '')
defendinglist[y] = x
for x, y in zip(attacklist, range(0, len(attacklist))):
x = ''.join(x.split())
x = x.replace(',', '')
attacklist[y] = x
for x, y in zip(disciplinelist, range(0, len(disciplinelist))):
x = ''.join(x.split())
x = x.replace(',', '')
disciplinelist[y] = x
for x, y in zip(teamplaylist, range(0, len(teamplaylist))):
x = ''.join(x.split())
x = x.replace(',', '')
teamplaylist[y] = x
print(playerdetails)
print(generallist)
print(defendinglist)
print(attacklist)
print(disciplinelist)
print(teamplaylist)
# to remove all whitespace
# saves = ''.join(saves.split())
# write to the csv file
for x, y in zip(playerdetails, range(1, len(playerdetails) + 1)):
sheet.cell(row=maxrow + 1, column=y).value = x
for x, y in zip(generallist, range(1, len(generallist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 4).value = x
for x, y in zip(defendinglist, range(1, len(defendinglist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 7).value = x
for x, y in zip(disciplinelist, range(1, len(disciplinelist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 12).value = x
for x, y in zip(attacklist, range(1, len(attacklist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 16).value = x
for x, y in zip(teamplaylist, range(1, len(teamplaylist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 28).value = x
for x, y in zip(detailslist, range(1, len(detailslist) + 1)):
sheet.cell(row=maxrow + 1, column=y + 33).value = x
playerdb.save(workbook)
browser.close()
# --- shared scraping helpers -------------------------------------------------

# Per-position sheet layout: ordered (column offset, stat-key list) pairs.
# Offsets are 0-based (values land in columns offset+1 .. offset+len(keys)).
# The offsets differ between sheets, so they are kept as data instead of the
# four near-identical copy/paste branches the original used.
_GENERAL_KEYS = ['appearances', 'wins', 'losses']
_SHEET_LAYOUT = {
    'Defender': [
        (4, _GENERAL_KEYS),
        (7, ['clean_sheet', 'goals_conceded', 'total_tackle', 'tackle_success',
             'last_man_tackle', 'blocked_scoring_att', 'interception',
             'total_clearance', 'effective_head_clearance', 'clearance_off_line',
             'ball_recovery', 'duel_won', 'duel_lost', 'won_contest',
             'aerial_won', 'aerial_lost', 'own_goals', 'error_lead_to_goal']),
        (25, ['goals', 'att_hd_goal', 'att_rf_goal', 'att_lf_goal',
              'hit_woodwork']),
        (30, ['yellow_card', 'red_card', 'fouls', 'total_offside']),
        (34, ['goal_assist', 'total_pass', 'total_pass_per_game',
              'big_chance_created', 'total_cross', 'cross_accuracy',
              'total_through_ball', 'accurate_long_balls']),
    ],
    'Goalkeeper': [
        (4, _GENERAL_KEYS),
        (7, ['saves', 'penalty_save', 'punches', 'good_high_claim', 'catches',
             'total_keeper_sweeper', 'keeper_throws', 'goal_kicks']),
        (15, ['clean_sheet', 'goals_conceded', 'error_lead_to_goal',
              'own_goals']),
        (19, ['yellow_card', 'red_card', 'fouls']),
        (22, ['goals', 'goal_assist', 'total_pass', 'total_pass_per_game',
              'accurate_long_balls']),
    ],
    'Midfielder': [
        (4, _GENERAL_KEYS),
        (7, ['total_tackle', 'tackle_success', 'blocked_scoring_att',
             'interception', 'total_clearance', 'effective_head_clearance',
             'ball_recovery', 'duel_won', 'duel_lost', 'won_contest',
             'aerial_won', 'aerial_lost', 'error_lead_to_goal']),
        (20, ['yellow_card', 'red_card', 'fouls', 'total_offside']),
        (24, ['goals', 'goals_per_game', 'att_hd_goal', 'att_rf_goal',
              'att_lf_goal', 'att_pen_goal', 'att_freekick_goal',
              'total_scoring_att', 'ontarget_scoring_att', 'shot_accuracy',
              'hit_woodwork', 'big_chance_missed']),
        (36, ['goal_assist', 'total_pass', 'total_pass_per_game',
              'big_chance_created', 'total_cross', 'cross_accuracy',
              'total_through_ball', 'accurate_long_balls']),
    ],
    'Forward': [
        (4, _GENERAL_KEYS),
        (7, ['total_tackle', 'blocked_scoring_att', 'interception',
             'total_clearance', 'effective_head_clearance']),
        (12, ['yellow_card', 'red_card', 'fouls', 'total_offside']),
        (16, ['goals', 'goals_per_game', 'att_hd_goal', 'att_rf_goal',
              'att_lf_goal', 'att_pen_goal', 'att_freekick_goal',
              'total_scoring_att', 'ontarget_scoring_att', 'shot_accuracy',
              'hit_woodwork', 'big_chance_missed']),
        (28, ['goal_assist', 'total_pass', 'total_pass_per_game',
              'big_chance_created', 'total_cross']),
    ],
}


def _clean_stat(text):
    """Strip all whitespace and thousands-separator commas from a scraped value."""
    return ''.join(text.split()).replace(',', '')


def _grab_stats(pagesoup, keys):
    """Scrape and clean one stat span per key.

    premierleague.com renders each stat in a span whose class is
    'allStatContainer stat<key>'.
    """
    return [_clean_stat(
                pagesoup.find("span", {"class": "allStatContainer stat" + key}).text)
            for key in keys]


def _write_group(sheet, row, offset, values):
    """Write values into `row`, columns offset+1 .. offset+len(values)."""
    for col, value in enumerate(values, start=1):
        sheet.cell(row=row, column=col + offset).value = value


def startscrape():
    """Scrape one premierleague.com player page and append a stats row.

    The URL is taken from the GUI entry widget; the row is appended to the
    sheet named after the player's position in 'EPL 1617.xlsx'.  Replaces the
    original four near-identical per-position branches with a single
    table-driven pass over _SHEET_LAYOUT; the per-sheet cell layout is
    unchanged.  `position` and `maxrow` remain globals because other parts of
    the script read them.
    """
    global position, maxrow
    playerdb = load_workbook('EPL 1617.xlsx')
    # url to be scraped
    myurl = urlentry.get()
    # Selenium is used because the stat spans are rendered client-side.
    browser = webdriver.Chrome("/Users/Qixiang/Dropbox/ICS/venv/chromedriver")
    browser.get(myurl)
    time.sleep(3)  # crude wait for the JS-rendered stats to appear
    # parse the rendered page as HTML
    pagesoup = soup(browser.page_source, "html.parser")
    # player details
    name = pagesoup.find("div", {"class": "name"}).text
    jerseyno = pagesoup.find("div", {"class": "number"}).text
    # the position is one of the 'info' divs; keep 'null' if none matches
    position = 'null'
    for div in pagesoup.find_all("div", {"class": "info"}):
        label = ''.join(div.text.split())
        if label in _SHEET_LAYOUT:
            position = label
    print('Position: ' + position)
    team = pagesoup.find("div", {"class": "info"}).text
    if position in _SHEET_LAYOUT:
        sheet = playerdb[position]  # one worksheet per position
        # determine number of existing rows; write to that row number plus one
        maxrow = sheet.max_row
        print('Number of Rows: ' + str(maxrow))
        print('Player: ' + name + ' ' + position)
        row = maxrow + 1
        # columns 1-4; the player name keeps its inner spaces, the other
        # details are whitespace/comma-cleaned exactly like the stats
        playerdetails = [name, _clean_stat(jerseyno), position, _clean_stat(team)]
        print(playerdetails)
        _write_group(sheet, row, 0, playerdetails)
        for offset, keys in _SHEET_LAYOUT[position]:
            values = _grab_stats(pagesoup, keys)
            print(values)
            _write_group(sheet, row, offset, values)
        playerdb.save('EPL 1617.xlsx')
    browser.close()
def _match_minute(raw):
    """Normalize one goal-time table cell to a minute string.

    Strips everything but digits.  Stoppage-time entries such as '45 +2'
    arrive as a 3+ digit run ('452'); fold them back onto the base minute
    (45 or 90 plus the third digit) exactly as the original home-goal code
    did.  Returns '' when the cell holds no digits.
    """
    digits = re.sub("[^0-9]", "", raw)
    if len(digits) > 2:
        if digits[0] == '4':
            digits = str(45 + int(digits[2]))
        elif digits[0] == '9':
            digits = str(90 + int(digits[2]))
    return digits


def startscrape2():
    """Scrape a match-report page and append one row to the MatchData sheet.

    The URL is taken from the GUI entry widget.  Writes teams, goal counts,
    goal minutes, the match outcome (1 = home win, 2 = away win, 3 = draw),
    goal difference and both coaches.
    """
    global maxrow
    # NOTE(review): workbook is loaded from 'testbook.xlsx' but saved as
    # 'EPL 1617.xlsx' at the end -- confirm this mismatch is intentional.
    playerdb = load_workbook('testbook.xlsx')
    sheet1 = playerdb.get_sheet_by_name('MatchData')
    # determine number of existing rows; write to that row number plus one
    maxrow = sheet1.max_row
    print('Number of Rows: ' + str(maxrow))
    # url to be scraped
    myurl = urlentry.get()
    uclient = ureq(myurl)
    pagehtml = uclient.read()
    uclient.close()
    # parsing as html
    pagesoup = soup(pagehtml, "html.parser")
    # BUG FIX: the original called .find_all() on the ResultSet returned by
    # find_all(), which raises AttributeError on every run.  Search inside
    # the first matching container instead.
    left = pagesoup.find("div", {"class": "container left"})
    if left is not None:
        print(left.find_all("table", {"class": "playerstats lineups table"}))
    # coaches: the text following 'Coach:' in each line-up table (home first)
    coachlist = []
    for table in pagesoup.find_all("table", {"class": "playerstats lineups table"}):
        squashed = ''.join(table.text.split())
        coach = squashed.split('Coach:', 1)[1]
        print('Home Coach: ' + coach)
        coachlist.append(coach)
    print(coachlist)
    teamlist = [''.join(h3.text.split())
                for h3 in pagesoup.find_all("h3", {"class": "thick"})]
    hometeam = teamlist[0]
    awayteam = teamlist[2]
    print('Home Team: ' + hometeam)
    print('Away Team: ' + awayteam)
    # goal minutes per side.  CONSISTENCY FIX: stoppage-time normalization is
    # now applied to the away side too (the original only normalized home).
    hometeamgoals = [m for m in
                     (_match_minute(td.text) for td in
                      pagesoup.find_all("td", {"class": "player player-a"}))
                     if m != '']
    awayteamgoals = [m for m in
                     (_match_minute(td.text) for td in
                      pagesoup.find_all("td", {"class": "player player-b"}))
                     if m != '']
    print(hometeamgoals)
    print(awayteamgoals)
    # determine match outcome
    if len(hometeamgoals) > len(awayteamgoals):
        print(hometeam + ' beat ' + awayteam)
        matchoutcome = 1
    elif len(hometeamgoals) < len(awayteamgoals):
        print(awayteam + ' beat ' + hometeam)
        matchoutcome = 2
    else:
        print(hometeam + ' drew with ' + awayteam)
        matchoutcome = 3
    # calculate goal difference
    goaldifference = len(hometeamgoals) - len(awayteamgoals)
    # write to the sheet
    newrow = maxrow + 1
    sheet1.cell(row=newrow, column=2).value = hometeam
    sheet1.cell(row=newrow, column=4).value = awayteam
    sheet1.cell(row=newrow, column=6).value = len(hometeamgoals)
    sheet1.cell(row=newrow, column=7).value = len(awayteamgoals)
    # BUG FIX: matchoutcome is an int but the original compared it against
    # the strings '1'/'2'/'3', so column 8 was never written.
    sheet1.cell(row=newrow, column=8).value = str(matchoutcome)
    sheet1.cell(row=newrow, column=9).value = goaldifference
    for offset, minute in enumerate(hometeamgoals, start=1):
        sheet1.cell(row=newrow, column=9 + offset).value = minute
    for offset, minute in enumerate(awayteamgoals, start=1):
        sheet1.cell(row=newrow, column=19 + offset).value = minute
    sheet1.cell(row=newrow, column=30).value = coachlist[0]
    sheet1.cell(row=newrow, column=31).value = coachlist[1]
    playerdb.save('EPL 1617.xlsx')
######
# GUI wiring: one URL entry plus one button per scraping action.
urlentry = Entry(urlframe, width=100)
urlentry.grid(row=1, column=1)
urlconfirm = Button(urlframe, text='Player Scrape', command=startscrape)
urlconfirm.grid(row=1, column=2)
urlconfirm2 = Button(urlframe, text='Match Scrape', command=startscrape2)
urlconfirm2.grid(row=2, column=2)
# BUG FIX: the original assigned the buttons to the names `test` and
# `appearance`, shadowing (and losing) the callback functions of the same
# names; the buttons now use distinct variable names.
testbutton = Button(urlframe, text='TEST', command=test)
testbutton.grid(row=3, column=2)
appearancebutton = Button(urlframe, text='APPEARANCE', command=appearance)
appearancebutton.grid(row=5, column=2)
multiscraping = Button(urlframe, text='Multi-Scraping', command=multiscrape)
multiscraping.grid(row=4, column=2)
##
root.mainloop() |
# Generated by Django 2.0.1 on 2018-02-04 21:22
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename model ThirdObject to OtherObject.

    Schema-only rename; builds on migration 0037_thirdobject_with_pk.
    """
    dependencies = [
        ('test_app', '0037_thirdobject_with_pk'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='ThirdObject',
            new_name='OtherObject',
        ),
    ]
|
from django.db import models
# Create your models here.
class BookInfoManager(models.Manager):
    """Custom manager for BookInfo.

    Overrides all() so soft-deleted books (isDelete=True) never appear in
    the default listing, and adds a factory method for creating books.
    """

    def all(self):
        """Return the base queryset with soft-deleted books filtered out."""
        return super().all().filter(isDelete=False)

    def create_book(self, btitle, bpub_date):
        """Create, save and return a new book with the given title and date.

        Uses self.model rather than naming BookInfo directly, so the manager
        stays reusable on any model it is attached to.
        """
        book = self.model()
        book.btitle = btitle
        book.bpub_date = bpub_date
        # persist to the database before handing it back
        book.save()
        return book
# The "one" side of the 1-N relation with HeroInfo.
# Default DB table name would be booktest2_bookinfo; overridden in Meta below.
class BookInfo(models.Model):
    '''Book model class'''
    # book title, stored in DB column 'title'
    btitle = models.CharField(max_length=20, db_column='title')
    # unique-title variant:
    # btitle = models.CharField(max_length=20, unique=True, db_index=True)
    # price: up to 10 digits total, 2 decimal places
    # bprice = models.DecimalField(max_digits=10, decimal_places=2)
    # publication date
    bpub_date = models.DateField()
    # bpub_date = models.DateField(auto_now_add=True) # creation time
    # bpub_date = models.DateField(auto_now=True) # last-update time
    # read count
    bread = models.IntegerField(default=0)
    # comment count
    bcomment = models.IntegerField(default=0)
    # soft-delete flag
    isDelete = models.BooleanField(default=False)
    # book = models.Manager() # a plain Manager instance
    objects = BookInfoManager()  # custom manager (hides soft-deleted rows)
    # @classmethod
    # def create_book(cls, btitle, bpub_date):
    #     '''Add a book'''
    #     # create an instance of cls
    #     obj = cls()
    #     obj.btitle = btitle
    #     obj.bpub_date = bpub_date
    #     # save to the database
    #     obj.save()
    #     # return obj
    #     return obj
    class Meta:
        db_table = 'bookinfo'  # explicit DB table name for this model
# "many" side of the one-to-many relation
class HeroInfo(models.Model):
    '''Hero (character) model.'''
    # hero name
    hname = models.CharField(max_length=20)
    # gender flag
    hgender = models.BooleanField(default=False)
    # remarks. NOTE(review): null=True with blank=False is unusual — the DB
    # accepts NULL but forms still require a value; confirm intent.
    hcomment = models.CharField(max_length=200, null=True, blank=False)
    # relation to the book this hero appears in
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    hbook = models.ForeignKey('BookInfo')
    # soft-delete flag
    isDelete = models.BooleanField(default=False)
'''
# 新闻类型类
class NewsType(models.Model):
# 类型名
type_name = models.CharField(max_length=20)
# 关系属性,代表类型下面的信息
type_news = models.ManyToManyField('NewsInfo')
# 新闻类
class NewsInfo(models.Model):
# 新闻标题
title = models.CharField(max_length=128)
# 发布时间
pub_date = models.DateTimeField(auto_now_add=True)
# 信息内容
content = models.TextField()
# 关系属性, 代表信息所属的类型
# news_type = models.ManyToManyField('NewsType')
# 员工基本信息类
class EmployeeBasicInfo(models.Model):
# 姓名
name = models.CharField(max_length=20)
# 性别
gender = models.BooleanField(default=False)
# 年龄
age = models.IntegerField()
# 关系属性,代表员工的详细信息
employee_detail = models.OneToOneField('EmployeeDetailInfo')
# 员工详细信息类
class EmployeeDetailInfo(models.Model):
# 联系地址
addr = models.CharField(max_length=256)
# 教育经历
# 关系属性,代表员工基本信息
# employee_basic = models.OneToOneField('EmployeeBasicInfo')
'''
class AreaInfo(models.Model):
    '''Region model: self-referential tree of areas.'''
    # region name
    atitle = models.CharField(max_length=20)
    # parent region; NULL/blank for top-level areas
    aParent = models.ForeignKey('self', null=True, blank=True)
    # class Meta:
    #     db_table = 'areas'
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
import os
import time
import RPi.GPIO as GPIO
from config.variables import luz_hora_encendido, luz_hora_apagado
#configurando GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(21, GPIO.OUT)
#GPIO.input(21)
def comienza_dia():
    # Drive GPIO 21 high (lights ON) and echo the resulting pin state.
    GPIO.output(21, True)
    print("Comenzando el día!, Luces ON!", GPIO.input(21))
def comienza_noche():
    # Drive GPIO 21 low (lights OFF) and echo the resulting pin state.
    GPIO.output(21, False)
    print("Terminando el día. Buenas noches", GPIO.input(21))
print("Encendiendo a las: ", luz_hora_encendido)
print("Apagando a las: ", luz_hora_apagado)
# Main polling loop: every 30 s compare wall-clock time against the
# configured on/off times and toggle GPIO 21 accordingly.
# The configured times are sliced as "HH:MM" strings — assumes that
# exact format; TODO confirm against config.variables.
while True:
    hora_actual = int(time.strftime("%H"))
    minuto_actual = int(time.strftime("%M"))
    hora_encendido = int(luz_hora_encendido[0:2])
    minuto_encendido = int(luz_hora_encendido[3:5])
    hora_apagado = int(luz_hora_apagado[0:2])
    minuto_apagado = int(luz_hora_apagado[3:5])
    print("Hora actual:", hora_actual)
    print("Minuto actual:", minuto_actual)
    print("Hora encendido:", hora_encendido)
    print("Minuto encendido:", minuto_encendido)
    print("Hora apagado:", hora_apagado)
    print("Minuto apagado:", minuto_apagado)
    # Switch on only during the exact on-minute while the pin is low.
    if (hora_actual == hora_encendido) and (minuto_actual == minuto_encendido) and (GPIO.input(21) == 0):
        comienza_dia()
    # Switch off during the exact off-minute while the pin is high.
    # NOTE(review): the else below belongs only to this second if, so
    # "Luces en estado correcto" also prints in the same pass that just
    # switched the lights ON — confirm that is intended.
    if (hora_actual == hora_apagado) and (minuto_actual == minuto_apagado) and (GPIO.input(21) == 1):
        comienza_noche()
    else:
        print("Luces en estado correcto:", GPIO.input(21))
    time.sleep(30)
|
from django import forms
from apps.post.models import PostModel, PostCommentModel
from apps.user.models import VisitorModel
class PostForm(forms.ModelForm):
    """ Post admin form """
    class Meta:
        model = PostModel
        # author and timestamps are set programmatically, not by the user
        exclude = ('author', "created_at", "updated_at")
class PostCommentForm(forms.ModelForm):
    """ Post comment admin form """
    class Meta:
        model = PostCommentModel
        fields = ('text',)
        widgets = {
            'text': forms.Textarea(attrs={'cols': 20, 'rows': 5}),
        }
    def save(self, commit=True, author: VisitorModel = None, post_id: int = None):
        """ save passing author and post with given comment """
        # Build the instance without hitting the DB so the author and
        # target post can be attached before the actual save.
        comment = super().save(commit=False)
        comment.author = author
        comment.post_id = post_id
        if commit:
            comment.save()
        return comment
|
# Generated by Django 2.0 on 2020-11-23 11:22
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add blog.readed_num and refresh verbose_name on existing fields."""
    dependencies = [
        ('blog', '0004_auto_20201121_2244'),
    ]
    operations = [
        # New field: per-post read counter, starting at zero.
        migrations.AddField(
            model_name='blog',
            name='readed_num',
            field=models.IntegerField(default=0, verbose_name='阅读计数'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='作者'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='blog_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='blog.BlogType', verbose_name='博客类型'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(verbose_name='正文'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='last_updated_time',
            field=models.DateTimeField(auto_now=True, verbose_name='最后修改'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='title',
            field=models.CharField(max_length=50, verbose_name='标题'),
        ),
        migrations.AlterField(
            model_name='blogtype',
            name='created_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='blogtype',
            name='type_name',
            field=models.CharField(max_length=20, verbose_name='博客类型'),
        ),
    ]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# __author__ = '__JonPan__'
from .app import Flask
from flask import current_app
from app.api.v1 import init_blueprint_v1
from app.libs.error import APIException, HTTPException
from app.libs.error_handle import ServerError
def register_blueprint(app):
    # Mount the v1 API blueprint under the /v1 URL prefix.
    app.register_blueprint(init_blueprint_v1(), url_prefix='/v1')
def framework_error(e):
    """Global error handler: map any uncaught exception to an API response.

    APIExceptions pass through unchanged; HTTPExceptions are wrapped with
    error_code 1007; anything else becomes a generic ServerError, unless
    DEBUG is on, in which case the original exception is re-raised.
    """
    if isinstance(e, APIException):
        return e
    if isinstance(e, HTTPException):
        # Preserve the framework's status code and message.
        return APIException(e.description, e.code, 1007)
    if current_app.config['DEBUG']:
        # Surface the full traceback during development.
        raise e
    return ServerError()
def register_plugin(app):
    # Bind SQLAlchemy to the app and create any missing tables up front.
    from app.models.base import db
    db.init_app(app)
    db.create_all(app=app)
    # with app.app_context():
    #     db.create_all()
def create_app():
    """Application factory: build and fully wire a Flask app instance."""
    app = Flask(__name__)
    # Load public settings first, then secrets (secure overrides setting).
    app.config.from_object('app.config.setting')
    app.config.from_object('app.config.secure')
    register_blueprint(app)
    # Route every uncaught exception through framework_error.
    app.errorhandler(Exception)(framework_error)
    register_plugin(app)
    return app
|
from .trader import (update_trade_account, quote_detail,
order, withdraw, transfer, trade_account_all,
list_offers, apply_status, apply_offer, withdraw_apply)
__all__ = ['update_trade_account', 'quote_detail',
'order', 'withdraw', 'transfer', 'trade_account_all',
'list_offers', 'apply_status', 'apply_offer', 'withdraw_apply']
|
# this is the base application file.
# We import all the needed libraries and prep the server to be exported into run.py
from flask import Flask, g
from flask_cors import CORS
from utils.config import config
from db import connect_db
server = Flask(__name__)
server.secret_key = config['flask_login_secret']
cors = CORS(server, supports_credentials=True)
@server.before_request
def before_request():
    """Open a postgres connection for this request and stash it on flask.g."""
    # open a connection to postgresql
    g.db = connect_db()
@server.teardown_request
def teardown_request(exception):
    """Close the per-request postgres connection, if one was opened."""
    # check the postgres connection and close it if it exists
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
# The routes to our application are located in the API.
import api
|
from model.sale_class import Sale
def test_delete_sale(app):
    """Log in, delete a known test sale record, and log out."""
    # NOTE(review): credentials are hard-coded; consider moving them to
    # test configuration or environment variables.
    app.session.login_to_feefo('testmailnd@gmail.com', 'Dima!qa2ws1')
    app.sale.delete_sale(Sale(name='delete test name', email='testemail@gmail.com',order_ref='or_001', mobile='11111111111'))
    app.session.log_out()
|
from django.urls import path
from .views import image_view, success
app_name='task'
# Routes for the task app: upload form at the root, success page after upload.
urlpatterns = [
    #path('', views.doc_list, name='doc_list'),
    path('', image_view, name = 'image_upload'),
    path('success', success, name = 'success'),
] |
# Read problem input: N cells with initial ranges A, and K simulation steps.
N, K = map(int, input().split())
A = list(map(int, input().split()))
B = [1 for a in A]
# Cap the number of simulated steps — the array stabilizes after a bounded
# number of iterations, so further steps would not change the output.
# NOTE(review): the cap of 500 looks heuristic; confirm it is large enough
# for the problem's constraints.
K = min(K, 500)
for k in range(K):
    for i in range(N):
        # Cell i adds +1 to every cell within distance A[i] (excluding
        # itself; the baseline 1 in B already counts the cell).
        for j in range(1, A[i]+1):
            if i - j >= 0:
                B[i-j] += 1
            if i + j < N:
                B[i+j] += 1
    # B becomes the next A; reset B to the baseline of ones.
    A = [b for b in B]
    B = [1 for a in A]
print(' '.join(map(str, A))) |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker, get_backend, rc
grey, gold, lightblue, green = '#808080', '#cab18c', '#0096d6', '#008367'
pink, yellow, orange, purple = '#ef7b9d', '#fbd349', '#ffa500', '#a35cff'
darkblue, brown, red = '#004065', '#731d1d', '#E31937'
g = np.load("sample.npy")  # Load CCD recordings
# Initialize arrays for storing statistical information
z, k, p, q = np.zeros((100, 600)), np.zeros((100, 600)), np.zeros((100, 600)), np.zeros((100, 600))
t_decay = 5300
# One pixel's intensity trace, truncated to the decay window.
y = g[34][113][:t_decay]
x = np.linspace(1, t_decay, t_decay)
y2 = np.sort(y)
# BUGFIX: keep an independent copy of the sorted raw trace. The original
# `y3 = y2` only aliased the array, so the in-place quantization below
# also mutated y3 and both curves plotted later were identical.
y3 = y2.copy()
# Quantize in place to the nearest multiple of 10.
for i in range(len(y2)):
    y2[i] = round(y2[i] / 10) * 10
# vbreaks: indices where the quantized value steps up (vertical guides);
# hbreaks: the distinct quantization levels (horizontal guides).
vbreaks = []
hbreaks = np.unique(y2)
for i in range(len(y2) - 1):
    if y2[i+1] > y2[i]:
        vbreaks.append(i)
quiver_params = {'angles': 'xy',
'scale_units': 'xy',
'scale': 0.1,
'width': 0.012}
grid_params = {'linewidth': 0.1,
'alpha': 0.1}
fontsize = 8
def set_rc(func):
    """Decorator: apply the shared matplotlib rc settings, then call func.

    Uses functools.wraps so the wrapped plotting function keeps its
    __name__/__doc__ (the original wrapper discarded them).
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rc('font', family='sans-serif', size=20)
        rc('figure', dpi=700)
        rc('axes', axisbelow=True, titlesize=20)
        rc('lines', linewidth=0.1)
        rc('legend', fontsize=20, loc='upper left')
        func(*args, **kwargs)
    return wrapper
@set_rc
def plot_time_series(x, y):
    """Plot the raw CCD intensity trace (line + points) and save as JPEG."""
    fig, ax1 = plt.subplots(1, figsize=(10, 5))
    # faint connecting line drawn beneath the sample markers
    ax1.plot(y, color=(183/255, 207/255, 246/255), linewidth=1, zorder=1)
    ax1.scatter(x=x, y=y, marker='o', edgecolor='k', color=(65/255, 105/255, 225/255), linewidth=0.2, s=20, zorder=2)
    ax1.set_title('Data from flow stop measurement')
    # ax1.axes.xaxis.set_ticklabels([])
    # ax1.axes.yaxis.set_ticklabels([])
    ax1.set_ylabel('$Intensity$ [raw]')
    ax1.set_xlabel('$N$ measurements')
    plt.subplots_adjust(left=0.15, bottom=0.2, right=0.95, top=0.85, wspace=None, hspace=None)
    plt.savefig("flow_stop_time_series.jpeg")
    # plt.show()
@set_rc
def plot_quantization(x, y2, y3):
    """Overlay the raw (y3) and quantized (y2) traces with level guides.

    Dashed vertical lines mark indices where the quantized value steps;
    dashed horizontal lines mark the distinct quantization levels.
    """
    fig, ax1 = plt.subplots(1, figsize=(10, 5))
    # ax1.plot(y2, color=(183/255, 207/255, 246/255), linewidth=0.2, zorder=2)
    ax1.plot(y3, color=red, linewidth=0.5, zorder=2, label='CCD')
    ax1.scatter(x=x, y=y2, marker='o', edgecolor='k', color=(65/255, 105/255, 225/255),
                linewidth=0.01, s=2.5, zorder=3, label='Quantized')
    for line in vbreaks:
        ax1.vlines(line, ymin=1900, ymax=2600, color=(86/255, 101/255, 105/255),
                   linestyles='dashed', linewidth=1, zorder=1)
    for line in hbreaks:
        ax1.hlines(line, xmin=0, xmax=5300, color=(86/255, 101/255, 105/255),
                   linestyles='dashed', linewidth=1, zorder=1)
    ax1.set_title('Quantization')
    ax1.set_ylabel('$I$ [raw intensity]')
    ax1.set_xlabel('$N$ measurements')
    plt.subplots_adjust(left=0.15, bottom=0.2, right=0.95, top=0.85, wspace=None, hspace=None)
    plt.legend()
    plt.savefig("flow_stop_quantization.jpeg")
    # plt.show()
plot_time_series(x, y)
plot_quantization(x, y2, y3)
|
# -*- coding: utf8 -*-
import telegram
import requests
import random
from transitions import State
from transitions.extensions import GraphMachine as mach
from bs4 import BeautifulSoup
bot = telegram.Bot(token='496063592:AAH9ux0XtCDTDQm2lANJE7Sg9F3SbQaiPFg')
actorURL = 'http://www.imdb.com/search/name?gender=male,female&ref_=nv_cel_m_3'
workURL = 'http://www.imdb.com/movies-in-theaters/?ref_=nv_tp_inth_1'
def actorIMDB():
    """Scrape IMDb's celebrity listing.

    Returns three parallel lists: profile-page hrefs, names, and photo URLs.
    NOTE(review): depends on IMDb's current page markup; selectors may rot.
    """
    web = requests.get(actorURL)
    soup = BeautifulSoup(web.text, 'lxml')
    headerList = soup.find_all("h3", {"class": "lister-item-header"})
    picHeaderList = soup.find_all("div", {"class": "lister-item-image"})
    starlist = []
    nameList = []
    picList = []
    for item in headerList:
        starlist.append(item.find('a')['href'])
        temp = item.find('a').string
        # temp[1:] — presumably strips a leading whitespace/newline
        # character from the anchor text; verify against the live page.
        nameList.append(temp[1:])
    for item in picHeaderList:
        picList.append(item.a.img['src'])
    return starlist, nameList, picList
def workIMDB():
    """Scrape IMDb's movies-in-theaters listing.

    Returns three parallel lists: title-page hrefs, titles (with any
    trailing "(...)" suffix removed), and poster image URLs.
    """
    web = requests.get(workURL)
    soup = BeautifulSoup(web.text, 'lxml')
    headerList = soup.find_all("h4", {"itemprop": "name"})
    picHeaderList = soup.find_all("div", {"class": "hover-over-image zero-z-index"})
    worklist = []
    nameList = []
    picList = []
    for item in headerList:
        worklist.append(item.find('a')['href'])
        temp = item.find('a')['title']
        # drop the "(year)" suffix from titles like "Movie (2018)"
        tempcut = temp.split('(')
        nameList.append(tempcut[0])
    for item in picHeaderList:
        picList.append(item.img['src'])
    return worklist, nameList, picList
class Engine(object):
    """State-entry callbacks for the Telegram movie/actor search bot.

    Each into* method is invoked when the conversation state machine
    enters the corresponding state; it sends text, inline keyboards,
    photos or links to the chat identified by replyid.
    """
    def intoInfo(self, replyid):
        # Entry state: greet the user and explain the available commands.
        infoText = 'hello! this is work_search bot. I can get you the information you want to know. You can find the information you\'re interested in, or i\'ll introduce you an actor or a movie.\nif you want to search by yourself, please use "search"\nif you want me to recommend, please use "recommend"'
        bot.sendMessage(replyid, infoText)
    def intoInput(self, replyid):
        # Prompt the user for a free-text search term.
        text = 'please enter the one you want to search'
        bot.sendMessage(replyid, text)
    def intoLangu(self, replyid, name):
        # Build a Wikipedia URL for the query. If the first character is
        # an ASCII letter, target English Wikipedia, else Chinese.
        if (name[0] >= u'\u0041' and name[0] <=u'\u005a') or (name[0] >= u'\u0061' and name[0] <=u'\u007a'):
            name = name.replace(' ', '_')
            searchResult = 'en.wikipedia.org/wiki/'+name
            print('!!!!english!!!!')
        else:
            searchResult = 'zh.wikipedia.org/wiki/'+name
            print('!!!!chinese!!!!')
        text = 'Good! Now choose a language'
        # Inline keyboard; callback data is "<lang>+<url>".
        markup = [
            [
                {'text': 'chinese', 'callback_data': 'zh'+'+'+searchResult},
                {'text': 'english', 'callback_data': 'en'+'+'+searchResult}
            ]
        ]
        keyboard = {'inline_keyboard':markup}
        bot.sendMessage(replyid, text, reply_markup = keyboard)
    def intoResultZH(self, replyid, data):
        # Send the Chinese wiki page. If the stored URL is an English
        # page, follow its interlanguage link to the zh version first.
        if(data[0:2] == 'en'):
            url = 'https://'+data;
            web = requests.get(url)
            soup = BeautifulSoup(web.text, 'lxml')
            temp = soup.find('li', {"class":"interlanguage-link interwiki-zh"})
            newURL = temp.a['href']
        else:
            newURL = 'https://'+data
        html = '<a href="'+newURL+'">'+'there you go</a>'
        bot.sendMessage(replyid, html, parse_mode=telegram.ParseMode.HTML)
    def intoResultEN(self, replyid, data):
        # Mirror of intoResultZH: resolve to the English wiki page.
        if(data[0:2] == 'zh'):
            url = 'https://'+data;
            web = requests.get(url)
            soup = BeautifulSoup(web.text, 'lxml')
            temp = soup.find('li', {"class":"interlanguage-link interwiki-en"})
            newURL = temp.a['href']
            print(temp)
        else:
            newURL = 'https://'+data
        html = '<a href="'+newURL+'">'+'there you go</a>'
        bot.sendMessage(replyid, html, parse_mode=telegram.ParseMode.HTML)
    def intoRecommendCat(self, replyid):
        # Ask whether to recommend an actor or a movie.
        text = 'What do you want me to recommend?'
        markup = [
            [
                {'text': 'actor', 'callback_data': 'actor'},
                {'text': 'work', 'callback_data': 'work'}
            ]
        ]
        keyboard = {'inline_keyboard':markup}
        bot.sendMessage(replyid, text, reply_markup = keyboard)
    def intoActorResult(self, replyid):
        # Recommend a random actor scraped from IMDb: photo + link,
        # then offer to show the actor's wiki page.
        myUrlList, actorNameList, picList = actorIMDB()
        length = len(myUrlList)
        ran = random.randint(0,length-1)
        picUrl = picList[ran]
        newURL = 'http://www.imdb.com/' + myUrlList[ran]
        html = '<a href="'+newURL+'">'+'there you go</a>'
        bot.sendPhoto(replyid, picUrl)
        bot.sendMessage(replyid, html, parse_mode=telegram.ParseMode.HTML)
        text = 'would you like to take a look at his/hers wiki page?'
        yesCallback = 'yes' + '+' + actorNameList[ran]
        markup = [
            [
                {'text': 'Yes' , 'callback_data': yesCallback},
                {'text': 'No', 'callback_data': 'no'}
            ]
        ]
        keyboard = {'inline_keyboard':markup}
        bot.sendMessage(replyid, text, reply_markup = keyboard)
    def intoWorkResult(self, replyid):
        # Recommend a random movie currently in theaters: poster + link,
        # then offer to show the movie's wiki page.
        myUrlList, workNameList, picList = workIMDB()
        length = len(myUrlList)
        ran = random.randint(0,length-1)
        picUrl = picList[ran]
        newURL = 'http://www.imdb.com/' + myUrlList[ran]
        html = '<a href="'+newURL+'">'+'there you go</a>'
        bot.sendPhoto(replyid, picUrl)
        bot.sendMessage(replyid, html, parse_mode=telegram.ParseMode.HTML)
        text = 'would you like to take a look at its wiki page?'
        yesCallback = 'yes' + '+' + workNameList[ran]
        markup = [
            [
                {'text': 'Yes' , 'callback_data': yesCallback},
                {'text': 'No', 'callback_data': 'no'}
            ]
        ]
        keyboard = {'inline_keyboard':markup}
bot.sendMessage(replyid, text, reply_markup = keyboard) |
def remove_duplicates(l):
    """Return a new list with duplicates removed, preserving first-seen order.

    Assumes the items are hashable (true for the numeric lists this
    script processes). Runs in O(n), replacing the original
    comprehension-for-side-effects whose `i not in list` test was O(n**2).
    """
    seen = set()
    result = []
    for item in l:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
if __name__ == "__main__":
    # Demo: deduplicate a sample list and show the result.
    l = [1,2,3,4,1,2,5,6,7,4,6,7,8,8,9,5,4,3,2,6,7,33,44,22,3,33,44]
    result = remove_duplicates(l)
    # BUGFIX: use print as a function — the bare `print result` statement
    # is a SyntaxError on Python 3; the call form works on 2 and 3 alike.
    print(result)
|
import spruceData
import Editor.UI.NounUI as NounUI
from Editor.AbstractModel import abstractModel
from PyQt4 import QtCore
class NounModel(NounUI.Ui_Form,abstractModel):
    """Editor panel for nouns: wires the Qt form widgets to the data model."""
    def setupUi(self):
        # Initialize both parent UIs, then connect form signals to handlers.
        NounUI.Ui_Form.setupUi(self, self)
        abstractModel.setupUi(self)
        QtCore.QObject.connect(self.tSingular, QtCore.SIGNAL("textChanged(QString)"), self.singular)
        QtCore.QObject.connect(self.tPlural, QtCore.SIGNAL("textChanged(QString)"), self.plural)
        QtCore.QObject.connect(self.cPlural, QtCore.SIGNAL("toggled(bool)"), self.noPlural)
        QtCore.QObject.connect(self.bRelative, QtCore.SIGNAL("clicked()"), self.openRelative)
        # Defaults consumed by the abstractModel machinery.
        self.defText=self.tSingular
        self.defSyl=self.singularSyl
        self.defClass=spruceData.Noun
    def ladd(self):
        # Create the new noun via the parent, then apply the form's flags.
        new1=abstractModel.ladd(self)
        if self.cMass.isChecked():
            new1.uncount=True
        if self.cFeminine.isChecked():
            new1.gender=spruceData.FEM
        elif self.cMasculine.isChecked():
            new1.gender=spruceData.MASC
        if self.cPlural.isChecked():
            new1.pluralizable=False
        else:
            # Store explicit plural / syllable count only when they differ
            # from the derived defaults.
            if new1.getPlural()!=self.tPlural.text():
                new1.plural=str(self.tPlural.text())
            if spruceData.determineSyllables(new1.getPlural())!=self.pluralSyl.value():
                new1.pSyllables=self.pluralSyl.value()
        # NOTE(review): unlike abstractModel.ladd, new1 is not returned
        # here — confirm no caller relies on the return value.
    def populate(self, dic):
        # Display the dictionary's noun collection.
        abstractModel.populate(self, dic.nouns)
    def singular(self, str):
        # When the singular text changes, refresh the plural field.
        word=self.process(str)
        if word != None:
            self.tPlural.setText(word.plural)
    def plural(self,str):
        # Recompute the syllable count for the edited plural form.
        str=str.__str__()
        self.pluralSyl.setValue(spruceData.determineSyllables(str))
    def noPlural(self, bool):
        # The "no plural" checkbox disables the plural input widgets.
        self.tPlural.setEnabled(not bool)
        self.pluralSyl.setEnabled(not bool)
|
import pandas as pd
import mlflow
import mlflow.keras
import flask
import tensorflow as tf
import keras as k
def auc(y_true, y_pred):
    """Streaming AUC metric for Keras (TF1-style tf.metrics API)."""
    auc = tf.metrics.auc(y_true, y_pred)[1]
    # tf.metrics.auc creates local variables that must be initialized
    # before the metric can be evaluated.
    k.backend.get_session().run(tf.local_variables_initializer())
    return auc
global graph
graph = tf.get_default_graph()
model_path = 'models/keras_games_v1'
model = mlflow.keras.load_model(model_path, custom_objects={'auc': auc})
app = flask.Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def predict():
    """Score one sample passed as query parameters G1..G10; returns JSON.

    Response: {"sucess": bool, "response": str-probability}. NOTE(review):
    "sucess" is a typo, but it is part of the public response schema —
    only fix it together with all API consumers.
    """
    data = {"sucess": False}
    params = flask.request.args
    if "G1" in params.keys():
        # Assemble a single-row DataFrame in the column order the model
        # was trained on.
        new_row = {"G1": params.get("G1"), "G2": params.get("G2"),
                   "G3": params.get("G3"), "G4": params.get("G4"),
                   "G5": params.get("G5"), "G6": params.get("G6"),
                   "G7": params.get("G7"), "G8": params.get("G8"),
                   "G9": params.get("G9"), "G10": params.get("G10")}
        new_x = pd.DataFrame.from_dict(new_row, orient='index').transpose()
        # Run inference inside the graph captured at load time (TF1 style).
        with graph.as_default():
            data['response'] = str(model.predict(new_x)[0][0])
            data['sucess'] = True
    return flask.jsonify(data)
if __name__ == '__main__':
app.run(host='0.0.0.0') |
from __future__ import division
from DTLZ import *
from Problem import *
from hypervolume import *
import random
import math
import sys
def random_value(low, high, decimals=2):
    """
    Draw a uniform random number in [low, high], rounded to the
    requested number of decimal places.
    """
    sample = random.uniform(low, high)
    return round(sample, decimals)
def bdom(problem, one, two):
    """
    Binary (Pareto) dominance: return True iff `one` dominates `two`,
    i.e. `one` is no worse on every objective and strictly better on at
    least one (minimization assumed).
    """
    objs_one = problem.evaluate(problem, one)
    objs_two = problem.evaluate(problem, two)
    dominates = False
    # range() instead of the Python-2-only xrange(): identical iteration
    # here, and keeps the module importable on Python 3.
    for i in range(len(objs_two)):
        if objs_one[i] > objs_two[i]:
            # worse on any objective -> cannot dominate
            return False
        if objs_one[i] < objs_two[i]:
            dominates = True
    return dominates
def cdom(problem, one, two):
    """Continuous-domination indicator between two points.

    Returns the integer-truncated mean loss of `one` relative to `two`
    over their shared objectives; used as a drop-in alternative to bdom.
    NOTE(review): despite the name expLoss, the helper computes a linear
    weighted difference — no exponential is applied; confirm intent.
    """
    def expLoss(w, x1, y1, n):
        # loss contribution of a single objective pair
        return -1 * (w * (x1 - y1) / n)
    def loss(x, y):
        losses = []
        n = min(len(x), len(y))
        for obj in range(n):
            x1, y1 = x[obj], y[obj]
            losses += [expLoss(-1, x1, y1, n)]
        return sum(losses) / n
    x = problem.evaluate(problem,one)
    y = problem.evaluate(problem,two)
    l1 = loss(x, y)
    return int(l1)
def fast_non_dominated_sort(problem,population,dom_func=bdom):
    """NSGA-II fast non-dominated sort.

    Partitions `population` into fronts: front 0 holds the non-dominated
    points; front k holds points dominated only by members of earlier
    fronts. Sets p.dom_set, p.dom_count and p.rank on every point and
    returns the list of fronts.
    """
    fronts = []
    first_front = []
    for p in population:
        p.dom_set = []
        p.dom_count = 0
        for q in population:
            if dom_func(problem, p, q):
                p.dom_set.append(q)
            elif dom_func(problem, q, p):
                # BUGFIX: the original re-tested bdom(problem, p, q) here,
                # which can never be true inside this elif, so dom_count
                # stayed 0 and every point landed in the first front.
                # dom_count must count the points that dominate p.
                p.dom_count += 1
        if p.dom_count == 0:
            p.rank = 0
            first_front.append(p)
    fronts.append(first_front)
    curr = 0
    while curr < len(fronts):
        next_front = []
        for p1 in fronts[curr]:
            for p2 in p1.dom_set:
                # p1 is accounted for; p2 moves up once all its
                # dominators have been placed.
                p2.dom_count -= 1
                if p2.dom_count == 0:
                    p2.rank = curr + 1
                    next_front.append(p2)
        curr += 1
        if len(next_front) > 0:
            fronts.append(next_front)
    return fronts
def reproduce(problem,population,pop_size,mutation,crossover_rate):
    """Breed up to pop_size children via crossover followed by mutation.

    Parents are drawn at random (mom != dad); a child is kept only if it
    is valid for the problem and not already present in the population
    or the brood built so far.
    """
    children = []
    # range() replaces the Python-2-only xrange(); same iteration.
    for _ in range(pop_size):
        mom = random.choice(population)
        dad = random.choice(population)
        while mom == dad:
            dad = random.choice(population)
        child = mutate(problem, crossover(mom, dad, crossover_rate), mutation)
        if problem.is_valid(problem, child) and child not in population + children:
            children.append(child)
    return children
def calculate_crowding_distance(problem, population):
    """Assign the NSGA-II crowding distance to each point in `population`.

    For each objective, boundary points get infinite distance; interior
    points accumulate the normalized gap between their two neighbours.
    Sorts `population` in place per objective (side effect of the
    original implementation, preserved).
    """
    for point in population:
        point.dist = 0.0
    # range() replaces the Python-2-only xrange(); same iteration.
    for i in range(len(problem.objectives)):
        population.sort(key=lambda point: point.objectives[i])
        rge = population[-1].objectives[i] - population[0].objectives[i]
        population[0].dist = float("inf")
        population[-1].dist = float("inf")
        if rge == 0:
            # all points identical on this objective: nothing to add
            continue
        for j in range(1, len(population) - 1):
            population[j].dist += (population[j+1].objectives[i] - population[j-1].objectives[i]) / rge
def compare(a, b):
    """Three-way comparison: -1, 0 or 1 as a is less than, equal to, or
    greater than b (Python-3 replacement for the builtin cmp)."""
    if a > b:
        return 1
    if a < b:
        return -1
    return 0
def crowded_comp_operator(x, y):
    """NSGA-II crowded-comparison operator (cmp-style).

    Lower rank wins; within the same rank, the larger crowding distance
    wins (hence the swapped arguments in the dist comparison).
    """
    if x.rank != y.rank:
        return compare(x.rank, y.rank)
    return compare(y.dist, x.dist)
def select_parents(problem,fronts,pop_size):
    """NSGA-II survivor selection: pick pop_size points front-by-front.

    Whole fronts are taken while they still fit; the first front that
    does not fit is sorted with the crowded-comparison operator and
    truncated to fill the remaining slots.
    """
    from functools import cmp_to_key  # stdlib; for the py3-safe sort below

    [calculate_crowding_distance(problem, front) for front in fronts]
    offspring = []
    last_front = 0
    for front in fronts:
        if (len(offspring) + len(front)) > pop_size:
            break
        for point in front:
            offspring.append(point)
        if fronts.index(front) < len(fronts) - 1:
            last_front += 1
    remaining = pop_size - len(offspring)
    if remaining > 0:
        # BUGFIX/compat: list.sort(cmp=...) exists only on Python 2;
        # functools.cmp_to_key yields the identical ordering on 2.7 and 3.x.
        fronts[last_front].sort(key=cmp_to_key(crowded_comp_operator))
        offspring += fronts[last_front][0:remaining]
    return offspring
def populate(problem, size):
    """Return a list of `size` fresh random points from problem.any()."""
    population = []
    # range() replaces the Python-2-only xrange(); same iteration.
    for _ in range(size):
        population.append(problem.any())
    return population
def crossover(mom, dad, crossover_rate=1):
    """
    Single-point crossover: the child takes the first half of mom's
    decisions and the second half of dad's. With probability
    (1 - crossover_rate) no crossover happens and mom is returned as-is.
    """
    if random.random() > crossover_rate:
        return mom
    half = len(mom.decisions) // 2
    return Point(mom.decisions[:half] + dad.decisions[half:])
def mutate(problem, point, mutation_rate=0.05):
    """Mutate each decision independently with probability mutation_rate,
    replacing it by a fresh random value within that decision's bounds.
    Mutates `point` in place and returns it."""
    for index, decision in enumerate(problem.decisions):
        if random.random() < mutation_rate:
            point.decisions[index] = random_value(decision.low, decision.high)
    return point
def fitness(problem, population, point):
    """Count how many members of `population` are dominated by `point`."""
    return sum(1 for other in population if bdom(problem, point, other))
def elitism(problem, population, retain_size):
    """Keep the retain_size fittest points, where fitness is the number
    of other population members a point dominates.

    BUGFIX: the original sorted ascending and therefore retained the
    LEAST dominating (weakest) points; elitism must keep the strongest,
    so the sort is now descending by fitness.
    """
    fitnesses = []
    for n in population:
        fitnesses.append((n, fitness(problem, population, n)))
    fitnesses = sorted(fitnesses, key=lambda x: x[1], reverse=True)
    # strip the fitness scores, keeping only the points
    return [pair[0] for pair in fitnesses[:retain_size]]
def hv(population, num_objectives):
    """Hypervolume of `population` w.r.t. the reference point (11,...,11)."""
    referencePoint = [11 for _ in range(num_objectives)]
    hv = InnerHyperVolume(referencePoint)
    volume = hv.compute(individual.objectives for individual in population)
    return volume
def norm_hypervol(hv, obj):
    """Normalize a raw hypervolume by the reference-box volume 122**exp."""
    exponent = 1 if obj == 2 else obj / 2
    return hv / (122 ** exponent)
def nsga2(problem = DTLZ1(), pop_size=20, gens=3, mutation=0.05, crossover_rate=1, dom_func=cdom):
    """Run NSGA-II for `gens` generations and return the normalized
    hypervolume of the final parent set.

    NOTE(review): the default problem is a mutable default argument —
    DTLZ1() is instantiated once at import time and shared by all calls.
    """
    population = populate(problem, pop_size)
    [problem.evaluate(problem,point) for point in population]
    # NOTE(review): `point.clone` is not called (no parentheses), so this
    # list holds attribute references, and initial_population is never
    # used afterwards — confirm whether a deep copy was intended.
    initial_population = [point.clone for point in population]
    fast_non_dominated_sort(problem,population,dom_func)
    children = reproduce(problem,population,pop_size,mutation,crossover_rate)
    gen = 0
    # Main generational loop: merge parents+children, re-rank, select,
    # then breed the next brood.
    while gen < gens:
        union = population + children
        fronts = fast_non_dominated_sort(problem,union,dom_func)
        parents = select_parents(problem,fronts,pop_size)
        population = children
        children = reproduce(problem,parents,pop_size,mutation,crossover_rate)
        gen = gen+1
    # Final selection and hypervolume scoring of the surviving parents.
    union = population + children
    fronts = fast_non_dominated_sort(problem,union,dom_func)
    parents = select_parents(problem,fronts,pop_size)
    hypervolume = norm_hypervol(hv(parents, len(problem.objectives)), len(problem.objectives))
    return hypervolume
|
from flask import Flask
__version__ = '1.0'
app = Flask('satellite')
app.config.from_object('config')
# NOTE(review): debug is forced on here, overriding anything in config —
# make sure this is disabled for production deployments.
app.debug = True
from satellite.controllers import *
|
from __future__ import annotations
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import Iterable, Optional, Tuple
from drsloader import DrsLoader
from trigger.baseinterface.drstrigger import IDrsTrigger
from .apibridge import ApiBridge
from .localdb import DataCache
from .manager import RealtimeStateCache, start_realtime
from .process import CalibrationStateCache, RealtimeProcessor, init_realtime_process, process_from_queues
def load_and_start_realtime(num_processes: int, file_queue: Queue[Path],
                            config_subdir: Optional[str], steps: Optional[Iterable[str]], trace: Optional[bool]):
    """Load the realtime trigger and launch the realtime processing pool.

    Builds the API bridge over file_queue, opens the realtime state cache
    next to the loader's config, and hands everything to start_realtime
    with num_processes workers (poll/retry intervals 10, 1, 1).
    """
    loader, trigger = __load_realtime_trigger(config_subdir, steps, trace)
    remote_api = ApiBridge(file_queue, trigger)
    realtime_cache: RealtimeStateCache = DataCache(loader.config_path.joinpath('.drstrigger-realtime.cache'))
    # Bind the trigger configuration into the per-process worker entry point.
    process_from_queues_part = partial(__process_from_queues, config_subdir, steps, trace)
    start_realtime(trigger.find_sequences, remote_api, realtime_cache, init_realtime_process, process_from_queues_part,
                   num_processes, 10, 1, 1)
def __load_realtime_trigger(config_subdir: Optional[str], steps: Optional[Iterable[str]],
                            trace: Optional[bool]) -> Tuple[DrsLoader, IDrsTrigger]:
    """Construct the CFHT realtime trigger from the given config subdir.

    When steps is None, all CFHT DRS steps are enabled; otherwise only
    the named steps are loaded.
    """
    loader = DrsLoader(config_subdir)
    cfht = loader.get_loaded_trigger_module()
    steps = cfht.CfhtDrsSteps.all() if steps is None else cfht.CfhtDrsSteps.from_keys(steps)
    trigger = cfht.CfhtRealtimeTrigger(steps, trace)
    return loader, trigger
def __process_from_queues(config_subdir: Optional[str], steps: Optional[Iterable[str]], trace: Optional[bool]):
    """Worker-process entry point: rebuild the trigger and drain the queues.

    Re-loads the trigger in the child process (it is not picklable across
    process boundaries) and opens the calibration state cache in shared
    mode before delegating to process_from_queues.
    """
    loader, trigger = __load_realtime_trigger(config_subdir, steps, trace)
    calibration_cache: CalibrationStateCache = DataCache(loader.config_path.joinpath('.drstrigger-calib.cache'), True)
    processor = RealtimeProcessor(trigger, calibration_cache)
    return process_from_queues(processor)
|
#!/usr/bin/env python
"""
model tests
"""
# import model specific functions and variables
from model import model_train
from model import model_load
from model import model_predict
from pathlib import Path
import os
import unittest
import warnings
warnings.filterwarnings("ignore")
# data_dir = os.path.join("..", "data", "cs-train")
base_dir = Path(__file__).parent
data_dir = Path(base_dir / ".." / "data" / "cs-train").resolve()
model_dir = Path(base_dir / ".." / "models").resolve()
class ModelTest(unittest.TestCase):
    """
    test the essential functionality
    """
    # Tests are numbered because they must run in order: train first,
    # then load, then predict (unittest runs methods alphabetically).
    def test_01_train(self):
        """
        test the train functionality
        """
        # train the model
        model_train(data_dir, prefix='test', test=True)
        self.assertTrue(os.path.exists(os.path.join(model_dir, "test-all-0_1.joblib")))
    def test_02_load(self):
        """
        test the load functionality
        """
        # Load the model
        model_data, models = model_load(country='united_kingdom', prefix='test', data_dir=data_dir, training=False)
        model = list(models.values())[0]
        # a usable model exposes the sklearn-style predict/fit interface
        self.assertTrue('predict' in dir(model))
        self.assertTrue('fit' in dir(model))
    def test_03_predict(self):
        """
        test the predict function input
        """
        # load model first
        prefix = 'test'
        country = 'united_kingdom'
        year = '2018'
        month = '01'
        day = '05'
        test = True
        result = model_predict(prefix, country, year, month, day, test=test)
        y_pred = result['y_pred']
        self.assertTrue(result is not None)
### Run the tests
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# Conversion Fahrenheit to Kelvin
# Formula: K = (F - 32)/ 1.8 + 273.15
def fahrenheit_to_kelvin(fahrenheit):
    """Convert a temperature from degrees Fahrenheit to Kelvin.

    K = (F - 32) * 5/9 + 273.15
    """
    # BUGFIX: use float literals so the conversion also works on
    # Python 2 (this file uses the Py2 print statement), where the
    # original integer expression 5 / 9 evaluates to 0 and the function
    # returned 273.15 for every input.
    celsius = 5.0 / 9.0 * (fahrenheit - 32)
    return celsius + 273.15
print fahrenheit_to_kelvin(1)
|
#TRENTON MORGAN
#CS 2200
#HW 1
#09/05/19
#This program performs the addition and subtraction of values represented with
#Roman numerals. It takes in the specified number of inputs (between 2 and 10
#inclusive)in the form of valid Roman numerals, then directly calculates the
#result and outputs it in the correct form.
#UTILITIES#
#---------#
#List of valid Roman numerals, sorted in order highest to lowest value.
romanNumerals = ['M','D','C','L','X','V','I']
#Dictionary of equivalences, used in expansion and compaction of values.
expansionTable = {"V" : "IIIII",
"X" : "VV",
"L" : "XXXXX",
"C" : "LL",
"D" : "CCCCC",
"M" : "DD"}
#Dictionary of subtractives, used in expansion and compaction of values.
subtractiveTable = {"IV" : "IIII",
"IX" : "VIIII",
"XL" : "XXXX",
"XC" : "LXXXX",
"CD" : "CCCC",
"CM" : "DCCCC"}
#Using regex, check that the input is a properly formatted Roman numeral.
import re #import regex module
def checkFormatting(num):
    """Return True iff num is a correctly ordered Roman numeral string.

    Matches the canonical subtractive notation (e.g. IV, not IIII) for
    values the table supports; the empty string also matches the pattern,
    so callers must reject empties separately (checkValid does).
    """
    # The expression below matches any valid Roman numeral: thousands,
    # hundreds, tens, and units groups in order.
    validExpression = "^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$"
    # Return a real bool instead of the original Match-or-None — callers
    # that only test truthiness are unaffected.
    return re.match(validExpression, num) is not None
#Valid inputs consist of only Roman numerals, verified using the list
#romanNumerals defined above. They also must be formatted correctly, as checked
#by the checkFormatting() function. Returns the validity as a boolean value.
def checkValid(num):
    """Return True iff num is a non-empty, well-formed Roman numeral.

    A valid input consists only of the symbols in romanNumerals and must
    pass the ordering check performed by checkFormatting().
    """
    # BUGFIX: reject empty input explicitly. In the original
    # while/for structure an empty string never set validInput, so the
    # while loop spun forever.
    if not num:
        return False
    # every character must be a Roman numeral symbol
    for char in num:
        if char not in romanNumerals:
            return False
    # and the symbols must be arranged in a valid order
    return bool(checkFormatting(num))
#Directly adds two Roman numeral values and returns the result in the correct
#format.
def romanAdd(first, second):
    """Add two Roman-numeral strings and return the result in canonical
    (subtractive) form, computed entirely by symbol manipulation."""
    #First we must replace any subtractives with their expansions
    for subtractive in subtractiveTable.keys():
        first = first.replace(subtractive, subtractiveTable[subtractive])
        second = second.replace(subtractive, subtractiveTable[subtractive])
    #Next, concatenate the two strings of numerals
    combined = first + second
    #Next, we sort the numerals in descending value order (defined in the list
    #romanNumerals above)
    result = ""
    for numeral in romanNumerals:
        for char in combined:
            if char == numeral:
                result += char
    #Last, we compact any values that can be using the expansionTable and insert
    #subtractives where needed using the subtractiveTable
    # NOTE: this relies on dict insertion order (guaranteed on CPython
    # 3.7+) so compaction runs smallest symbol to largest (V before X...).
    for compactor in expansionTable.keys():
        result = result.replace(expansionTable[compactor], compactor)
    for subtractive in subtractiveTable.keys():
        result = result.replace(subtractiveTable[subtractive], subtractive)
    return result
#Directly subtracts second from first and returns the result.
def romanSubtract(first, second):
    """Subtract the Roman numeral `second` from `first` symbolically.

    Assumes first >= second (no representation for negative results) —
    TODO confirm the driver guarantees this.
    """
    #First we replace subtractives with their expanded value
    for subtractive in subtractiveTable.keys():
        first = first.replace(subtractive, subtractiveTable[subtractive])
        second = second.replace(subtractive, subtractiveTable[subtractive])
    #Next, eliminate any common symbols
    # NOTE: the loop iterates the string object `second` was bound to at
    # loop entry; the replace() calls below rebind the names to new
    # strings but do not affect the sequence being iterated.
    for char in second:
        if char in first:
            #Replace one instance of the symbol with an empty string
            first = first.replace(char, '', 1)
            #Do the same for the second input
            second = second.replace(char, '', 1)
    #If there are characters left over, we borrow to complete the difference
    if len(second) == 0:
        result = first
        return result
    else:
        #Replace all numerals in the inputs with I
        # (romanNumerals[:6] covers M..V; I needs no expansion)
        for numeral in romanNumerals[:6]:
            first = first.replace(numeral, expansionTable[numeral])
            second = second.replace(numeral, expansionTable[numeral])
        #Eliminate common symbols
        for char in second:
            if char in first:
                first = first.replace(char, '', 1)
                second = second.replace(char, '', 1)
        #The remainder of the first input is the result
        result = first
    #Lastly, compact the result
    for compactor in expansionTable.keys():
        result = result.replace(expansionTable[compactor], compactor)
    for subtractive in subtractiveTable.keys():
        result = result.replace(subtractiveTable[subtractive], subtractive)
    return result
#DRIVER#
#------#
#The number of inputs is specified by the user (2-10 terms).
numAllowed = False
while (not numAllowed):
    numInputs = input("Enter the number of terms: ")
    #Ensure numInputs consists of only integer digits
    if numInputs.isdigit():
        numInputs = int(numInputs)
    else:
        # Non-numeric input falls through to the range check and re-prompts.
        numInputs = 0
    #Check numInputs between 2 and 10
    if numInputs >= 2 and numInputs <= 10:
        numAllowed = True
    else:
        print("Invalid number of terms. Must be between 2 and 10 inclusive.")
terms = [] #List of terms
operators = [] #List of operators, + or - only
#Get an input for each term, as well as an operator to go between each term. All
#operations are performed on the terms in the order they are given.
for i in range(numInputs):
    validInput = False
    while (not validInput):
        num = input("Enter the term: ")
        # checkValid is defined earlier in this file (not visible here);
        # presumably it validates Roman-numeral syntax — verify.
        if not checkValid(num):
            print("Invalid input. Please try again.")
        else:
            validInput = True
            terms.append(num)
    #After all inputs except the last, a valid operator (+ or -) is required
    if i < numInputs - 1:
        validOperator = False
        while (not validOperator):
            operator = input("Enter operation type: ")
            if operator == '+' or operator == '-':
                validOperator = True
                operators.append(operator)
            else:
                print("Invalid operator. Please try again.")
#With valid inputs, perform the operations designated by their list, going
#first to last.
print("Performing calculation...")
numTerms = len(terms)
# Left-fold over the term list: pop two terms and one operator, apply it, and
# push the result back so the next iteration consumes it.
for i in range(numTerms - 1):
    #Get first two terms and an operator
    first = terms.pop(0)
    second = terms.pop(0)
    operator = operators.pop(0)
    #Add if +, subtract if -
    if operator == '+':
        result = romanAdd(first, second)
    else:
        result = romanSubtract(first, second)
    #Re-insert the result in the list so it will be used in the next operation
    terms.insert(0, result)
finalResult = terms[0]
print("Final result:")
print(finalResult)
|
from django.urls import path, include
from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view

from .viewsets import TodoListViewSet, ListItemViewSet

app_name = "api"

# Swagger UI served at the API root.
schema_view = get_swagger_view(title="Todovoodoo API")

router = routers.DefaultRouter(trailing_slash=True)
# BUG FIX: the two registrations had their viewsets swapped — "lists" was
# served by ListItemViewSet and "items" by TodoListViewSet, contradicting the
# basenames ("TodoList" / "ListItem"). Route, viewset and basename now agree;
# the registered basenames (and therefore reverse() names) are unchanged.
router.register(r"lists", TodoListViewSet, basename="TodoList")
router.register(r"items", ListItemViewSet, basename="ListItem")

urlpatterns = [
    path("", schema_view),
    path("rest-auth/", include("rest_auth.urls")),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
] + router.urls
|
# Read one line from stdin and echo back only its ASCII letters,
# preserving their original order.
TempStr = input()
baseStr = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
result = "".join(ch for ch in TempStr if ch in baseStr)
print(result)
from django.urls import path
from appLivraria.views import index
from . import views

# URL table for the Empresa CRUD views plus the questionnaire form.
# IDIOM FIX: urlpatterns was a tuple; Django's documented convention is a
# *list* of path() instances (a tuple only works because the resolver merely
# iterates it). Behavior of every route is unchanged.
urlpatterns = [
    path('index/', index.as_view(), name='index'),
    path('empresa/', views.EmpresaListView.as_view(), name='app_name_empresa_list'),
    # Function-based view for the questionnaire form.
    path('empresa/formulario', views.questionario, name='questionario'),
    path('empresa/create/', views.EmpresaCreateView.as_view(), name='app_name_empresa_create'),
    path('empresa/detail/<int:pk>/', views.EmpresaDetailView.as_view(), name='app_name_empresa_detail'),
    path('empresa/update/<int:pk>/', views.EmpresaUpdateView.as_view(), name='app_name_empresa_update'),
]
|
'''
Script to train the neural network for road labeling.
Author: John ADAS Doe
Email: john.adas.doe@gmail.com
License: Apache-2.0
'''
import numpy as np
from PIL import Image
from data_loader import CityscapeDataset
from network import Network
from common import *
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import csv
def plot_stats(v, t, fn, label):
    """Plot two series against epochs (v in red, t in blue) and save the
    figure to checkpoints_dir + fn. `label` becomes the y-axis label."""
    figure, axes = plt.subplots()
    for series, colour in ((v, 'r'), (t, 'b')):
        axes.plot(series, colour)
    axes.set(xlabel='Epochs', ylabel=label, title='Loss function.')
    axes.grid()
    figure.savefig(checkpoints_dir + fn)
def write_net(file_path, model, in_shape):
    """Dump a model description plus conv weights/biases as text files.

    Works only for Conv2d, ConvTranspose2d and ReLU layers, and only for the
    *direct* children of ``model`` (nested containers are not traversed).

    Args:
        file_path: target directory; writes ``model.txt`` plus per-layer
            ``<id>.txt`` (weights) and ``<id>_bias.txt`` (bias) files there.
        model: network whose direct children are serialized.
        in_shape: input tensor shape (N, C, H, W); C/H/W are written to the
            Image header block.
    """
    file_name = file_path + "/model.txt"
    # BUG FIX: the original iterated the *global* `net` instead of the
    # `model` argument, silently ignoring the parameter. Also: the file is
    # now closed via a context manager, and `id` no longer shadows the
    # builtin.
    with open(file_name, 'w', newline='') as f:
        layer_id = 0
        f.write("-------------------------------------------\n")
        f.write('Layer_Type = Image\n')
        f.write('Channels = %d\n' % in_shape[1])
        f.write('Height = %d\n' % in_shape[2])
        f.write('Width = %d\n' % in_shape[3])
        for layer in model.children():  # write just the convolution and relu
            if layer._get_name() == "Conv2d" or layer._get_name() == "ConvTranspose2d":
                f.write("-------------------------------------------\n")
                f.write('Layer_Type = %s\n' % layer._get_name())
                f.write('ID = %d\n' % layer_id)
                f.write('in_channels = %d\n' % layer.in_channels)
                f.write('out_channels = %d\n' % layer.out_channels)
                f.write('kernel_size = (%d,%d)\n' % (layer.kernel_size[0], layer.kernel_size[1]))
                f.write('padding = (%d,%d)\n' % (layer.padding[0], layer.padding[1]))
                f.write('stride = (%d,%d)\n' % (layer.stride[0], layer.stride[1]))
                # Weights are flattened to (out_channels, -1) for np.savetxt.
                fl_name = file_path + "/" + str(layer_id) + ".txt"
                weights = layer.weight.data.numpy()
                weights = weights.reshape(weights.shape[0], -1)
                np.savetxt(fl_name, weights)
                fl_name = file_path + "/" + str(layer_id) + "_bias.txt"
                bias = layer.bias.data.numpy()
                np.savetxt(fl_name, bias)
                layer_id += 1
            if layer._get_name() == "ReLU":
                f.write("-------------------------------------------\n")
                f.write('Layer_Type = %s\n' % layer._get_name())
                f.write('ID = %d\n' % layer_id)
                layer_id += 1
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # NOTE(review): train and validation datasets read the same train.csv —
    # presumably validation should use a separate split; confirm.
    train_dataset = CityscapeDataset(csv_file='/mnt/nvme/work/Cityscape/leftImg8bit/train.csv',
                                     root_dir='/mnt/nvme/work/Cityscape/leftImg8bit')
    valid_dataset = CityscapeDataset(csv_file='/mnt/nvme/work/Cityscape/leftImg8bit/train.csv',
                                     root_dir='/mnt/nvme/work/Cityscape/leftImg8bit')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=True, num_workers=2,
                                               collate_fn=collate_func)
    # net = Network()
    # Resume from checkpoint 74 instead of training from scratch.
    tool = NetworkTool(path=checkpoints_dir)
    net, opt, ep, l = tool.load_checkpoint(74)
    #write_net('/mnt/nvme', net,[1, 3, 256, 512])
    criterion = nn.CrossEntropyLoss()
    # NOTE(review): the optimizer state loaded from the checkpoint (`opt`)
    # is discarded and a fresh Adam optimizer is created — confirm intended.
    optimizer = optim.Adam(net.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False)
    '''
    #betas - this are the values provided in the Adam paper
    #eps - 1e-4 to 1e-8 is suggested in the paper
    #weight decay - it cannot be too much as then we prioratize small weights to the goal, fastai puts 0.01
    '''
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # print(device)
    device = torch.device("cpu")
    net.train()
    # Quantization-aware training setup: select the qnnpack backend, fuse
    # each conv+relu pair, then insert fake-quant observers.
    torch.backends.quantized.engine = 'qnnpack'
    net.qconfig = torch.quantization.get_default_qat_qconfig('qnnpack')
    net_fused = torch.quantization.fuse_modules(net, [['conv1', 'relu1'], ['conv2', 'relu2'], ['conv3', 'relu3'],
                                                      ['conv4', 'relu4'], ['conv5', 'relu5']])
    net_prepared = torch.quantization.prepare_qat(net_fused)
    train_loss = []
    valid_loss = []
    accuracy = []
    IU = []
    # Loop over epochs
    torch.manual_seed(0)
    for epoch in range(80):
        # Training
        running_loss = 0.0
        net_prepared.train()
        for i, data in enumerate(train_loader, 0):
            local_labels = data['labels'].long()
            local_batch = data['image'].float()
            # NOTE(review): this forward pass through the unprepared `net` is
            # never used and costs a full inference per step — looks like
            # leftover debug code; verify before removing.
            x = net(local_batch)
            local_batch, local_labels = local_batch.to(device), local_labels.to(device)
            net_prepared.to(device)
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net_prepared(local_batch)
            loss = criterion(outputs, local_labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 1000 == 0:
                print('TRAINING: epoch: %d loss: %.3f' % (epoch + 1, running_loss / (i + 1)))
        train_loss.append(running_loss / float(i + 1))
        # Checkpoint every second epoch.
        if epoch % 2 == 0:
            tool.save_checkpoint(net_prepared, epoch, loss, optimizer)
        # validation
        running_loss = 0.0
        net_prepared.eval()
        # Convert the QAT model to a real int8 model for validation.
        net_int8 = torch.quantization.convert(net_prepared)
        m = Metric()
        for i, data in enumerate(valid_loader, 0):
            local_labels = data['labels'].long()
            local_batch = data['image'].float()
            local_batch, local_labels = local_batch.to(device), local_labels.to(device)
            net_int8.to(device)
            outputs = net_int8(local_batch)
            loss = criterion(outputs, local_labels)
            running_loss += loss.item()
            pred = torch.argmax(outputs, dim=1)  # we get the predictions
            pred = np.array(pred.cpu().numpy(), dtype='uint8')  # take things back in cpu space and make them uint8
            ref = np.array(data['labels'], dtype='uint8')
            m.get_metrics(pred, ref)
        valid_loss.append(running_loss / float(i + 1))
        accuracy.append(m.accuracy)
        IU.append(m.IU)
        print('VALIDATION: epoch: %d loss: %.3f accuracy: %f mIU: %f' % (
            epoch + 1, running_loss / (i + 1), m.accuracy, m.IU))
    plot_stats(valid_loss, train_loss, 'loss.png', 'r- validation loss/b - training loss')
    plot_stats(accuracy, IU, 'acc_iu.png', 'r- accuracy/b - IU')
|
#!/usr/bin/python
import xmlrpclib
import sys
from datetime import datetime
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from ConfigParser import SafeConfigParser as ConfigParser
class NumbexDaemonController(object):
    """Command-line client for the Numbex daemon's XML-RPC control API.

    Each public method wraps exactly one RPC call, returns its result
    (truthy on success) and prints a diagnostic to stderr on failure,
    matching the exit-code convention used by main(). (Python 2 code.)
    """
    def __init__(self, address):
        # address: URL of the daemon's XML-RPC control endpoint.
        self.url = address
        self.rpc = xmlrpclib.ServerProxy(address)
    def export_to_p2p(self, options, args):
        # Push local database records into the p2p system.
        r = self.rpc.export_to_p2p()
        if not r:
            sys.stderr.write('p2p-export failed, check signatures and logs\n')
        return r
    def import_from_p2p(self, options, args):
        # Pull records from the p2p system into the local database; the RPC
        # returns a (success, message) pair.
        r, msg = self.rpc.import_from_p2p()
        if not r:
            sys.stderr.write('''p2p-import failed: %s
check signatures and logs\n'''%msg)
        return r
    def p2p_start(self, options, args):
        # Start the p2p subsystem and connect to trackers.
        r = self.rpc.p2p_start()
        if not r:
            sys.stderr.write('p2p-start failed, check logs\n')
        return r
    def p2p_stop(self, options, args):
        # Disconnect the p2p subsystem from trackers.
        r = self.rpc.p2p_stop()
        if not r:
            sys.stderr.write('p2p-stop failed, check logs\n')
        return r
    def updater_stop(self, options, args):
        # Stop the background updater thread.
        r = self.rpc.updater_stop()
        if not r:
            sys.stderr.write('updater-stop failed, check logs\n')
        return r
    def updater_start(self, options, args):
        # Start the background updater thread.
        r = self.rpc.updater_start()
        if not r:
            sys.stderr.write('updater-start failed, check logs\n')
        return r
    def status(self, options, args):
        # Fetch the daemon's status dictionary and pretty-print it.
        r = self.rpc.status()
        if not r:
            sys.stderr.write('status failed, check logs\n')
            return False
        print 'server status for %s:'%self.url
        flags = r['flags']
        for k in flags:
            print '%-20s: %s'%(k, flags[k])
        # last_update is a unix timestamp, or a false value meaning "never".
        lastupdate = 'never' if not r['updater']['last_update'] else \
            datetime.fromtimestamp(r['updater']['last_update'])
        print '%-20s: %s'%('last git update', lastupdate)
        print '%-20s: %s'%('db has changed data',
            r['database']['has_changed_data'])
        print
        print 'trackers:'
        for t in r['p2p']['trackers']:
            print '\t%s'%t
        print 'peers:'
        for p in r['p2p']['peers']:
            print '\t%s'%p
        print
        return True
    def clear_errors(self, options, args):
        # Reset the daemon's error/status flags; always reports success.
        self.rpc.clear_status()
        return True
def main():
    """Parse the command line and dispatch one control command to the daemon.

    Exits with status 0 when the command handler reports success, non-zero
    otherwise (``sys.exit(not result)``).
    """
    from optparse import OptionParser
    op = OptionParser(usage=""""%prog [options] <command>
(use '%prog help' for available commands)""")
    # NOTE(review): metavar LOG_CONF_FILE looks like a copy/paste leftover
    # for an option that takes a URL — confirm.
    op.add_option("-u", "--control-url", help="control url",
        metavar="LOG_CONF_FILE", default="http://localhost:44880")
    options, args = op.parse_args()
    ctl = NumbexDaemonController(options.control_url)
    def help(options, args):
        # Local helper so 'help' dispatches like any other command.
        print """Available commands:
p2p-export \texport data to the p2p system from local database
p2p-import \timport data from the p2p system to the local database
p2p-start \tstart the p2p system, connect to trackers
p2p-stop \tdisconnect from trackers
status \tprint daemon status
updater-start\tstarts the updater thread
updater-stop \tstops the updater thread
"""
    # Command name -> handler; all handlers share the (options, args)
    # signature.
    dispatch = {
        'help': help,
        'p2p-export': ctl.export_to_p2p,
        'p2p-import': ctl.import_from_p2p,
        'p2p-start': ctl.p2p_start,
        'p2p-stop': ctl.p2p_stop,
        'updater-stop': ctl.updater_stop,
        'updater-start': ctl.updater_start,
        'status': ctl.status,
        'clearerrors': ctl.clear_errors,
    }
    if len(args) >= 1 and args[0] in dispatch:
        sys.exit(not dispatch[args[0]](options, args))
    else:
        op.error('invalid command')

if __name__ == '__main__':
    main()
|
import unittest
from model import prediction_with_model
import pandas as pd
import numpy as np
class PredictionWithModel(unittest.TestCase):
    """Smoke test for model.prediction_with_model (prints, asserts nothing)."""
    def test_prediction(self):
        # NOTE(review): hardcoded absolute Windows path makes this test
        # machine-specific — consider a fixture path relative to the repo.
        d = pd.read_csv(r"C:\Users\Toan\Documents\GitHub\colossi\static\temp\cc7deed8140745d89f2f42f716f6fd1b\out_imac_atlas_expression_v7.1.tsv", " ")
        # Appends two extra feature values (0, 1800) after the expression
        # frequencies; their meaning is not visible here — confirm against
        # the model's expected input layout.
        result = np.array([d['Freq'].to_list() + [0, 1800]])
        print(prediction_with_model(result))

if __name__ == '__main__':
    unittest.main()
|
def myfunc():
    """Return a sample employee record as a list: [id, name, salary]."""
    return [101, 'siva', 38000]

# Unpack the three-element record straight into separate variables.
eid, ename, sal = myfunc()
print(eid, ename, sal)
|
import os
import sys
import csv
import json
import time
import boto3
import numpy as np
from keras.models import Sequential,load_model
from keras.layers import Dense,Dropout,BatchNormalization
from keras.callbacks import EarlyStopping
def get_bucket():
    """Return the boto3 S3 bucket handle for 'workspace.scitodate.com'.

    Assumes AWS credentials are already configured in the environment.
    """
    s3 = boto3.resource("s3")
    myBucket=s3.Bucket('workspace.scitodate.com')
    return myBucket
def load_sups():
    """Load the two ontology lookup maps from $HOME/results/ontology.

    Returns:
        tuple: (concept-code -> vector-id map from ConCode2Vid.json,
        disease concept-code -> vector-id map from cc2vid_disease.json),
        both parsed from JSON.
    """
    base = os.environ['HOME'] + "/results/ontology/"
    with open(base + "ConCode2Vid.json", 'r') as fh:
        concept_map = json.load(fh)
    with open(base + "cc2vid_disease.json", 'r') as fh:
        disease_map = json.load(fh)
    return concept_map, disease_map
def build_model(_input_dim=133609,_output_dim=5227,_hidden_dim=512,_drate=0.5):
    """Build a one-hidden-layer MLP for multi-label concept prediction.

    Args:
        _input_dim: size of the input bag-of-concepts frequency vector.
        _output_dim: number of output labels (disease concepts).
        _hidden_dim: hidden-layer width.
        _drate: dropout rate applied after the hidden layer.

    Returns:
        A compiled keras Sequential model (nadam / binary_crossentropy).
    """
    model=Sequential()
    model.add(Dense(_hidden_dim,input_shape=(_input_dim,),activation='relu'))
    model.add(Dropout(_drate))
    model.add(BatchNormalization())
    # BUG FIX: the output layer used activation='relu', but binary
    # cross-entropy expects per-label probabilities in [0, 1]; an unbounded
    # relu output makes the loss ill-defined (log of values > 1). 'sigmoid'
    # is the standard output activation for multi-label binary_crossentropy.
    model.add(Dense(_output_dim,activation='sigmoid'))
    model.compile(optimizer='nadam',loss='binary_crossentropy')
    return model
def train_on_batch_S3(_model,_source,_volume,_bcount,_batch,_mbatch,_epochs=5):
    """Stream annotated-paper CSVs from S3 and train _model in large batches.

    For each document index i in [0, _volume) this downloads the abstract
    mentions file ("abs<i>.csv") and the body mentions file ("body<i>.csv"),
    converts each into a normalized bag-of-concepts frequency vector
    (abstract via cc2vid, body via cc2vid_d), and concatenates them into one
    sample. Every _batch samples the model is fitted, saved locally, uploaded
    to S3 and the checkpoint counter advanced; a final partial batch is
    handled the same way after the loop.

    Args:
        _model: compiled keras model, trained in place.
        _source: corpus name — S3 key prefix under "yalun/".
        _volume: number of document indices to attempt.
        _bcount: starting checkpoint counter (lets callers resume numbering
            across corpora).
        _batch: samples to accumulate before each fit.
        _mbatch: keras mini-batch size passed to fit().
        _epochs: epochs per fit call.

    Returns:
        tuple: (trained model, next checkpoint counter).
    """
    early_stopping=EarlyStopping(monitor='loss',patience=2)
    early_stopping_val=EarlyStopping(monitor='val_loss',patience=2)
    homedir=os.environ['HOME']
    bucket=get_bucket()
    cc2vid,cc2vid_d=load_sups()
    sample_list=[]
    batch_count=_bcount
    for i in range(0,_volume):
        abs_vec=[0.0 for i in range(0,len(cc2vid))]
        abs_count=0.0
        try:
            bucket.download_file("yalun/"+_source+"/abs"+str(i)+".csv",homedir+"/temp/tmpsem.csv")
        except:
            # Missing/unreadable abstract file: skip this document entirely.
            continue
        with open(homedir+"/temp/tmpsem.csv",'r',encoding='utf-8') as cf:
            rd=csv.reader(cf)
            for item in rd:
                if item[0]=="Mention":
                    # Header row.
                    continue
                try:
                    abs_count+=1.0
                    abs_vec[cc2vid[item[1]]]+=1.0
                except:
                    # Unknown concept code — note abs_count was already
                    # incremented, so unknown codes still dilute the
                    # normalization below.
                    pass
        if not abs_count:
            continue
        # Normalize mention counts into frequencies.
        abs_vec=list(np.array(abs_vec)/abs_count)
        body_vec=[0.0 for i in range(0,len(cc2vid_d))]
        body_count=0.0
        try:
            bucket.download_file("yalun/"+_source+"/body"+str(i)+".csv",homedir+"/temp/tmpsem.csv")
        except:
            continue
        with open(homedir+"/temp/tmpsem.csv",'r',encoding='utf-8') as cf:
            rd=csv.reader(cf)
            for item in rd:
                if item[0]=="Mention":
                    continue
                try:
                    body_count+=1.0
                    body_vec[cc2vid_d[item[1]]]+=1.0
                except:
                    pass
        if not body_count:
            continue
        body_vec=list(np.array(body_vec)/body_count)
        # One sample = abstract features followed by body (target) features.
        sample_list.append(abs_vec+body_vec)
        if len(sample_list)>=_batch:
            # Full batch: fit, checkpoint to S3, log, reset the buffer.
            N_all=np.array(sample_list)
            X_train=N_all[:,:len(cc2vid)]
            # np.ceil turns target frequencies into binary presence labels.
            Y_train=np.ceil(N_all[:,len(cc2vid):])
            _model.fit(X_train,Y_train,shuffle=True,batch_size=_mbatch,verbose=0,epochs=_epochs,validation_split=1.0/17.0,callbacks=[early_stopping,early_stopping_val])
            try:
                os.remove(homedir+"/temp/tmpsem_model.h5")
            except:
                pass
            _model.save(homedir+"/temp/tmpsem_model.h5")
            s3f=open(homedir+"/temp/tmpsem_model.h5",'rb')
            updata=s3f.read()
            bucket.put_object(Body=updata,Key="yalun/results/models/MLPsparse_1hidden_sem_"+str(batch_count)+".h5")
            s3f.close()
            logf=open(homedir+"/results/logs/bow_training_log_sem.txt",'a')
            logf.write("%s,%d\n"%(_source,batch_count))
            logf.close()
            batch_count+=1
            sample_list=[]
    if len(sample_list):
        # Train on the final partial batch the same way.
        N_all=np.array(sample_list)
        X_train=N_all[:,:len(cc2vid)]
        Y_train=np.ceil(N_all[:,len(cc2vid):])
        _model.fit(X_train,Y_train,shuffle=True,batch_size=_mbatch,verbose=0,epochs=_epochs,validation_split=1.0/17.0,callbacks=[early_stopping,early_stopping_val])
        try:
            os.remove(homedir+"/temp/tmpsem_model.h5")
        except:
            pass
        _model.save(homedir+"/temp/tmpsem_model.h5")
        s3f=open(homedir+"/temp/tmpsem_model.h5",'rb')
        updata=s3f.read()
        bucket.put_object(Body=updata,Key="yalun/results/models/MLPsparse_1hidden_sem_"+str(batch_count)+".h5")
        s3f.close()
        logf=open(homedir+"/results/logs/bow_training_log_sem.txt",'a')
        logf.write("%s,%d\n"%(_source,batch_count))
        logf.close()
        batch_count+=1
    return _model,batch_count
if __name__=="__main__":
model=build_model()
source_key="kdata"
model,bcount=train_on_batch_S3(model,source_key,12000,0,1088,1024)
source_key="annotated_papers"
model,bcount=train_on_batch_S3(model,source_key,14000,bcount,1088,1024)
source_key="annotated_papers_with_txt"
model,bcount=train_on_batch_S3(model,source_key,13000,bcount,1088,1024)
source_key="annotated_papers_with_txt_new"
model,bcount=train_on_batch_S3(model,source_key,15000,bcount,1088,1024)
source_key="annotated_papers_with_txt_new2"
model,bcount=train_on_batch_S3(model,source_key,95000,bcount,1088,1024) |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class PathItem(scrapy.Item):
    """One scraped travel-path/itinerary record.

    Field semantics are inferred from names only (the spider populating them
    is not visible here) — verify against the spider before relying on them.
    """
    title = scrapy.Field()
    total_days = scrapy.Field()
    views = scrapy.Field()
    startTime = scrapy.Field()
    endTime = scrapy.Field()
    start_city = scrapy.Field()
    transit_city = scrapy.Field()
    plan = scrapy.Field()
class TriphoboItem(scrapy.Item):
    """Placeholder item generated by the scrapy project template; unused."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
|
# Copyright (c) 2020-2022, Manfred Moitzi
# License: MIT License
from __future__ import annotations
from typing import (
Iterable,
Iterator,
cast,
BinaryIO,
Optional,
Union,
Any,
)
from io import StringIO
from pathlib import Path
from ezdxf.lldxf.const import DXFStructureError
from ezdxf.lldxf.extendedtags import ExtendedTags, DXFTag
from ezdxf.lldxf.tagwriter import TagWriter
from ezdxf.lldxf.tagger import tag_compiler, ascii_tags_loader
from ezdxf.filemanagement import dxf_file_info
from ezdxf.lldxf import fileindex
from ezdxf.entities import DXFGraphic, DXFEntity, Polyline, Insert
from ezdxf.entities import factory
from ezdxf.entities.subentity import entity_linker
from ezdxf.tools.codepage import toencoding
__all__ = ["opendxf", "single_pass_modelspace", "modelspace"]
# DXF entity types that the iteration functions in this module can load as
# DXFGraphic objects; entities of any other type are skipped.
SUPPORTED_TYPES = {
    "ARC",
    "LINE",
    "CIRCLE",
    "ELLIPSE",
    "POINT",
    "LWPOLYLINE",
    "SPLINE",
    "3DFACE",
    "SOLID",
    "TRACE",
    "SHAPE",
    "POLYLINE",
    "VERTEX",
    "SEQEND",
    "MESH",
    "TEXT",
    "MTEXT",
    "HATCH",
    "INSERT",
    "ATTRIB",
    "ATTDEF",
    "RAY",
    "XLINE",
    "DIMENSION",
    "LEADER",
    "IMAGE",
    "WIPEOUT",
    "HELIX",
    "MLINE",
    "MLEADER",
}
# Accepted filename argument type for the public functions of this module.
Filename = Union[Path, str]
class IterDXF:
    """Iterator for DXF entities stored in the modelspace.

    Args:
        name: filename, has to be a seekable file.
        errors: specify decoding error handler

            - "surrogateescape" to preserve possible binary data (default)
            - "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
            - "strict" to raise an :class:`UnicodeDecodeError`exception for invalid data

    Raises:
        DXFStructureError: invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs

    """
    def __init__(self, name: Filename, errors: str = "surrogateescape"):
        self.structure, self.sections = self._load_index(str(name))
        self.errors = errors
        # Keep the source file open in binary mode; entities are read lazily
        # by seeking to byte offsets recorded in the file index.
        self.file: BinaryIO = open(name, mode="rb")
        if "ENTITIES" not in self.sections:
            raise DXFStructureError("ENTITIES section not found.")
        if self.structure.version > "AC1009" and "OBJECTS" not in self.sections:
            # DXF R13+ requires an OBJECTS section.
            raise DXFStructureError("OBJECTS section not found.")
    def _load_index(
        self, name: str
    ) -> tuple[fileindex.FileStructure, dict[str, int]]:
        # Build a slim index: keep only (0, <type>) entries and record the
        # index position of each section name (code 2).
        structure = fileindex.load(name)
        sections: dict[str, int] = dict()
        new_index = []
        for e in structure.index:
            if e.code == 0:
                new_index.append(e)
            elif e.code == 2:
                # Section name tag follows its (0, SECTION) entry.
                sections[e.value] = len(new_index) - 1
        # remove all other tags like handles (code == 5)
        structure.index = new_index
        return structure, sections
    @property
    def encoding(self):
        # Text encoding detected from the DXF header.
        return self.structure.encoding
    @property
    def dxfversion(self):
        # DXF version string, e.g. "AC1009" for R12.
        return self.structure.version
    def export(self, name: Filename) -> IterDXFWriter:
        """Returns a companion object to export parts from the source DXF file
        into another DXF file, the new file will have the same HEADER, CLASSES,
        TABLES, BLOCKS and OBJECTS sections, which guarantees all necessary
        dependencies are present in the new file.

        Args:
            name: filename, no special requirements

        """
        doc = IterDXFWriter(name, self)
        # Copy everything from start of source DXF until the first entity
        # of the ENTITIES section to the new DXF.
        location = self.structure.index[self.sections["ENTITIES"] + 1].location
        self.file.seek(0)
        data = self.file.read(location)
        doc.write_data(data)
        return doc
    def copy_objects_section(self, f: BinaryIO) -> None:
        # Copy the raw byte range of the OBJECTS section (including its
        # ENDSEC) from the source file into `f`.
        start_index = self.sections["OBJECTS"]
        try:
            end_index = self.structure.get(0, "ENDSEC", start_index)
        except ValueError:
            raise DXFStructureError(f"ENDSEC of OBJECTS section not found.")
        start_location = self.structure.index[start_index].location
        end_location = self.structure.index[end_index + 1].location
        count = end_location - start_location
        self.file.seek(start_location)
        data = self.file.read(count)
        f.write(data)
    def modelspace(
        self, types: Optional[Iterable[str]] = None
    ) -> Iterable[DXFGraphic]:
        """Returns an iterator for all supported DXF entities in the
        modelspace. These entities are regular :class:`~ezdxf.entities.DXFGraphic`
        objects but without a valid document assigned. It is **not**
        possible to add these entities to other `ezdxf` documents.

        It is only possible to recreate the objects by factory functions base
        on attributes of the source entity.
        For MESH, POLYMESH and POLYFACE it is possible to use the
        :class:`~ezdxf.render.MeshTransformer` class to render (recreate) this
        objects as new entities in another document.

        Args:
            types: DXF types like ``['LINE', '3DFACE']`` which should be
                returned, ``None`` returns all supported types.

        """
        linked_entity = entity_linker()
        # An entity is queued (not yielded immediately) so that its linked
        # sub-entities (VERTEX, ATTRIB, SEQEND) can be attached to it first.
        queued = None
        requested_types = _requested_types(types)
        for entity in self.load_entities(
            self.sections["ENTITIES"] + 1, requested_types
        ):
            if not linked_entity(entity) and entity.dxf.paperspace == 0:
                # queue one entity for collecting linked entities:
                # VERTEX, ATTRIB
                if queued:
                    yield queued
                queued = entity
        if queued:
            yield queued
    def load_entities(
        self, start: int, requested_types: set[str]
    ) -> Iterable[DXFGraphic]:
        def to_str(data: bytes) -> str:
            # Normalize line endings while decoding with the file's encoding.
            return data.decode(self.encoding, errors=self.errors).replace(
                "\r\n", "\n"
            )
        index = start
        entry = self.structure.index[index]
        self.file.seek(entry.location)
        # Walk the index sequentially; each entity's byte size is the gap to
        # the next index entry, so the file is read in one forward pass.
        while entry.value != "ENDSEC":
            index += 1
            next_entry = self.structure.index[index]
            size = next_entry.location - entry.location
            data = self.file.read(size)
            if entry.value in requested_types:
                xtags = ExtendedTags.from_text(to_str(data))
                yield factory.load(xtags)  # type: ignore
            entry = next_entry
    def close(self):
        """Safe closing source DXF file."""
        self.file.close()
class IterDXFWriter:
    """Companion writer created by :meth:`IterDXF.export`: copies selected
    entities from the source DXF file into a new DXF file."""
    def __init__(self, name: Filename, loader: IterDXF):
        self.name = str(name)
        self.file: BinaryIO = open(name, mode="wb")
        # Entities are serialized into a text buffer first, then encoded
        # with the source file's encoding and written as bytes.
        self.text = StringIO()
        self.entity_writer = TagWriter(self.text, loader.dxfversion)
        self.loader = loader
    def write_data(self, data: bytes):
        # Raw pass-through used to copy whole byte ranges of the source file.
        self.file.write(data)
    def write(self, entity: DXFGraphic):
        """Write a DXF entity from the source DXF file to the export file.

        Don't write entities from different documents than the source DXF file,
        dependencies and resources will not match, maybe it will work once, but
        not in a reliable way for different DXF documents.

        """
        # Not necessary to remove this dependencies by copying
        # them into the same document frame
        # ---------------------------------
        # remove all possible dependencies
        # entity.xdata = None
        # entity.appdata = None
        # entity.extension_dict = None
        # entity.reactors = None
        # reset text stream
        self.text.seek(0)
        self.text.truncate()
        if entity.dxf.handle is None:  # DXF R12 without handles
            self.entity_writer.write_handles = False
        entity.export_dxf(self.entity_writer)
        # POLYLINE and INSERT own linked sub-entities that must follow them
        # in the output, terminated by their SEQEND entity.
        if entity.dxftype() == "POLYLINE":
            polyline = cast(Polyline, entity)
            for vertex in polyline.vertices:
                vertex.export_dxf(self.entity_writer)
            polyline.seqend.export_dxf(self.entity_writer)  # type: ignore
        elif entity.dxftype() == "INSERT":
            insert = cast(Insert, entity)
            if insert.attribs_follow:
                for attrib in insert.attribs:
                    attrib.export_dxf(self.entity_writer)
                insert.seqend.export_dxf(self.entity_writer)  # type: ignore
        data = self.text.getvalue().encode(self.loader.encoding)
        self.file.write(data)
    def close(self):
        """Safe closing of exported DXF file. Copying of OBJECTS section
        happens only at closing the file, without closing the new DXF file is
        invalid.

        """
        self.file.write(b"  0\r\nENDSEC\r\n")  # for ENTITIES section
        if self.loader.dxfversion > "AC1009":
            self.loader.copy_objects_section(self.file)
        self.file.write(b"  0\r\nEOF\r\n")
        self.file.close()
def opendxf(filename: Filename, errors: str = "surrogateescape") -> IterDXF:
    """Open a seekable DXF file for entity iteration.

    Be sure to open only valid DXF files — no DXF structure checks beyond
    those done by :class:`IterDXF` itself are applied. Use this function to
    split up big DXF files.

    Args:
        filename: DXF filename of a seekable DXF file.
        errors: decoding error handler —

            - "surrogateescape" to preserve possible binary data (default)
            - "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
            - "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data

    Raises:
        DXFStructureError: invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs

    """
    loader = IterDXF(filename, errors=errors)
    return loader
def modelspace(
    filename: Filename,
    types: Optional[Iterable[str]] = None,
    errors: str = "surrogateescape",
) -> Iterable[DXFGraphic]:
    """Iterate over all modelspace entities as :class:`DXFGraphic` objects of
    a seekable file.

    Use this function to iterate "quick" over modelspace entities of a DXF file,
    filtering DXF types may speed up things if many entity types will be skipped.

    Args:
        filename: filename of a seekable DXF file
        types: DXF types like ``['LINE', '3DFACE']`` which should be returned,
            ``None`` returns all supported types.
        errors: specify decoding error handler

            - "surrogateescape" to preserve possible binary data (default)
            - "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
            - "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data

    Raises:
        DXFStructureError: invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs

    """
    info = dxf_file_info(str(filename))
    # prev_code/prev_value implement a tiny state machine that detects the
    # (0, SECTION) + (2, ENTITIES) tag pair marking the ENTITIES section.
    prev_code: int = -1
    prev_value: Any = ""
    entities = False
    requested_types = _requested_types(types)
    with open(filename, mode="rt", encoding=info.encoding, errors=errors) as fp:
        tagger = ascii_tags_loader(fp)
        # One entity is queued (not yielded immediately) so that linked
        # sub-entities (VERTEX, ATTRIB) can be attached to it first.
        queued: Optional[DXFEntity] = None
        tags: list[DXFTag] = []
        linked_entity = entity_linker()
        for tag in tag_compiler(tagger):
            code = tag.code
            value = tag.value
            if entities:
                if code == 0:
                    # A new (0, <type>) tag closes the current entity's tag
                    # list; load it if its type was requested.
                    if len(tags) and tags[0].value in requested_types:
                        entity = factory.load(ExtendedTags(tags))
                        if (
                            not linked_entity(entity)
                            and entity.dxf.paperspace == 0
                        ):
                            # queue one entity for collecting linked entities:
                            # VERTEX, ATTRIB
                            if queued:
                                yield queued  # type: ignore
                            queued = entity
                    tags = [tag]
                else:
                    tags.append(tag)
                if code == 0 and value == "ENDSEC":
                    # End of the ENTITIES section: flush the queued entity.
                    if queued:
                        yield queued  # type: ignore
                    return
                continue  # if entities - nothing else matters
            elif code == 2 and prev_code == 0 and prev_value == "SECTION":
                entities = value == "ENTITIES"
            prev_code = code
            prev_value = value
def single_pass_modelspace(
    stream: BinaryIO,
    types: Optional[Iterable[str]] = None,
    errors: str = "surrogateescape",
) -> Iterable[DXFGraphic]:
    """Iterate over all modelspace entities as :class:`DXFGraphic` objects in
    a single pass.

    Use this function to 'quick' iterate over modelspace entities of a **not**
    seekable binary DXF stream, filtering DXF types may speed up things if many
    entity types will be skipped.

    Args:
        stream: (not seekable) binary DXF stream
        types: DXF types like ``['LINE', '3DFACE']`` which should be returned,
            ``None`` returns all supported types.
        errors: specify decoding error handler

            - "surrogateescape" to preserve possible binary data (default)
            - "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
            - "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data

    Raises:
        DXFStructureError: Invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs

    """
    # Phase 1: scan the HEADER section (if present) for $DWGCODEPAGE and
    # $ACADVER to determine text encoding and DXF version, stopping at the
    # first ENDSEC or at the first non-HEADER section.
    fetch_header_var: Optional[str] = None
    encoding = "cp1252"
    version = "AC1009"
    prev_code: int = -1
    prev_value: str = ""
    entities = False
    requested_types = _requested_types(types)
    for code, value in binary_tagger(stream):
        if code == 0 and value == b"ENDSEC":
            break
        elif code == 2 and prev_code == 0 and value != b"HEADER":
            # (0, SECTION), (2, name)
            # First section is not the HEADER section
            entities = value == b"ENTITIES"
            break
        elif code == 9 and value == b"$DWGCODEPAGE":
            fetch_header_var = "ENCODING"
        elif code == 9 and value == b"$ACADVER":
            fetch_header_var = "VERSION"
        elif fetch_header_var == "ENCODING":
            encoding = toencoding(value.decode())
            fetch_header_var = None
        elif fetch_header_var == "VERSION":
            version = value.decode()
            fetch_header_var = None
        prev_code = code
    # DXF R2007+ files are always UTF-8 encoded.
    if version >= "AC1021":
        encoding = "utf-8"
    # Phase 2: continue reading the same stream as decoded tags and yield
    # requested modelspace entities; one entity is queued so that linked
    # sub-entities (VERTEX, ATTRIB) can be attached to it first.
    queued: Optional[DXFGraphic] = None
    tags: list[DXFTag] = []
    linked_entity = entity_linker()
    for tag in tag_compiler(binary_tagger(stream, encoding, errors)):
        code = tag.code
        value = tag.value
        if entities:
            if code == 0 and value == "ENDSEC":
                if queued:
                    yield queued
                return
            if code == 0:
                # A new (0, <type>) tag closes the current entity's tags.
                if len(tags) and tags[0].value in requested_types:
                    entity = cast(DXFGraphic, factory.load(ExtendedTags(tags)))
                    if not linked_entity(entity) and entity.dxf.paperspace == 0:
                        # queue one entity for collecting linked entities:
                        # VERTEX, ATTRIB
                        if queued:
                            yield queued
                        queued = entity
                tags = [tag]
            else:
                tags.append(tag)
            continue  # if entities - nothing else matters
        elif code == 2 and prev_code == 0 and prev_value == "SECTION":
            entities = value == "ENTITIES"
        prev_code = code
        prev_value = value
def binary_tagger(
    file: BinaryIO,
    encoding: Optional[str] = None,
    errors: str = "surrogateescape",
) -> Iterator[DXFTag]:
    """Yield DXFTag items read pairwise (group code line, value line) from a
    binary stream.

    Values are decoded to str when an encoding is given, otherwise the raw
    bytes are yielded. A non-integer group-code line raises
    DXFStructureError; an IOError from the stream ends the generator.
    """
    while True:
        try:
            raw_code = file.readline()
            try:
                group_code = int(raw_code)
            except ValueError:
                raise DXFStructureError("Invalid group code")
            raw_value = file.readline().rstrip(b"\r\n")
            if encoding:
                yield DXFTag(group_code, raw_value.decode(encoding, errors=errors))
            else:
                yield DXFTag(group_code, raw_value)
        except IOError:
            return
def _requested_types(types: Optional[Iterable[str]]) -> set[str]:
    """Map user-requested DXF type names onto the supported set.

    A falsy `types` argument selects all SUPPORTED_TYPES. When POLYLINE or
    INSERT is requested, their linked sub-entity types (VERTEX/ATTRIB and
    the closing SEQEND) are added implicitly.
    """
    if not types:
        return SUPPORTED_TYPES
    requested = SUPPORTED_TYPES.intersection(set(types))
    if "POLYLINE" in requested:
        requested.update(("SEQEND", "VERTEX"))
    if "INSERT" in requested:
        requested.update(("SEQEND", "ATTRIB"))
    return requested
|
import mock_catalyst
from mock_catalyst import EndOfApplication
from vocollect_lut_odr_test.mock_server import MockServer, BOTH_SERVERS
from main import main
#create a simulated host server
# MockServer simulates the LUT/ODR host on localhost ports 15004/15005 so
# main() can run without real infrastructure.
ms = MockServer(True,15004, 15005)
ms.start_server(BOTH_SERVERS)
#ms.set_pass_through_host('127.0.0.1', 15004, 15005)
# Load the canned host responses for this test scenario.
ms.load_server_responses("Test/Data/test1.xml")
ms.set_server_response('Y', 'prTaskODR')
#Post responses
#mock_catalyst.post_dialog_responses('ready',
#                                    '3!',
#                                    'yes',
#                                    '1!',
#                                    'yes',
#                                    'ready',
#                                    'no',
#                                    'ready',
#                                    'ready',
#                                    'ready',
#                                    '00!',
#                                    '12!',
#                                    '5!')
#exam 2
#mock_catalyst.post_dialog_responses('ready',
#                                    '2!',
#                                    'yes',
#                                    '1234!',
#                                    'yes',
#                                    '3!',
#                                    'yes',
#                                    '1!',
#                                    'yes',
#                                    'ready',
#                                    'no',
#                                    'ready',
#                                    'ready',
#                                    'ready',
#                                    '00!',
#                                    'skip slot',
#                                    'no')
try:
    main()
except EndOfApplication as err:
    # mock_catalyst signals normal application termination via this
    # exception, so this is the expected exit path.
    print('Application ended')
# Always shut the simulated host down afterwards.
ms.stop_server(BOTH_SERVERS)
#Sample test case creation
#from CreateTestFile import CreateTestFile
#test = CreateTestFile('Sample', ms)
#path = '' #should end with slash if specified (i.e. test\functional_tests\Selection_tests\)
#test.write_test_to_file(path)
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 14:18:44 2019
@author: 张鹏举
"""
import re
import glob
# Resolve where the trait modules live: alongside this script when it is run
# directly, under the trait\ package directory when imported.
# NOTE(review): the backslash separator is Windows-specific.
if __name__ == '__main__':
    path = ''
else:
    path = 'trait\\'
def trait_info():
    """Scan the trait package's *.py files and map each file path to the
    first class name defined in it.

    Returns:
        dict: {file path (str): class name (str)}; files that define no
        class are skipped.
    """
    file_list = glob.glob(path + '*.py')
    #print(file_list)
    # ROBUSTNESS FIX: guard the exclusions — list.remove raises ValueError
    # when the file is absent from the glob result.
    for excluded in (path + '__init__.py', path + 'spider.py',
                     path + 'script_trait.py'):
        if excluded in file_list:
            file_list.remove(excluded)
    info_dict = {}
    regex = 'class (.*?):'
    for file_name in file_list:
        with open(file_name, encoding = 'utf-8') as f:
            content = f.read()
        matches = re.findall(regex, content)
        # BUG FIX: the original indexed [0] unconditionally and crashed with
        # IndexError on any module that defines no class.
        if matches:
            info_dict[file_name] = matches[0]
    return info_dict
if __name__ == '__main__':
info = trait_info()
|
import numpy as np
from numpy.random.mtrand import RandomState
import matplotlib.pyplot as plt
import math
from node import Sum, Product, NDProduct, Leaf, Bernoulli, Categorical, Gaussian
from utils import get_nodes, get_topological_order_layers, sample, mpe, gradient_backward, sgd, add_ids_to_spn
from learning import generate_dense_spn, random_region_graph, initialize_weights, learn_spn
from mcmc import *
from scipy.stats import *
# SPN training gaussian
def run_test():
    """Train a two-component Gaussian SPN on synthetic 2-D data and print
    its training-set classification accuracy."""
    def test_gaussian(spn, data):
        # Classify each sample by hiding its label (last column) and
        # picking the root child with the highest score.
        cor_count = 0
        tot_count = 0
        for d in data:
            evidence = d.copy()
            label = evidence[-1]
            # np.nan marks the label as unobserved during evaluation.
            evidence[-1] = np.nan
            spn.value(evidence=evidence, ll=True)
            # Score per root child: its log-likelihood plus log mixture
            # weight, normalized by the root log-likelihood (presumably a
            # log posterior over the two per-class components).
            lls = [c._ll + np.log(w) - spn._ll for c, w in zip(spn.children, spn.weights)]
            guess = np.argmax(lls)
            # s = mpe(spn, evidence, rand_gen=RandomState())
            # guess = s[-1]
            if int(guess) == int(label):
                cor_count += 1
            tot_count += 1
        print(cor_count, tot_count, cor_count / tot_count)
    np.random.seed(123)
    # Two 2-D Gaussian clusters (means 6 and 9) with 0/1 labels appended
    # as a third column.
    data = np.c_[np.r_[np.random.normal(6, 1, (500, 2)), np.random.normal(9, 1, (500, 2))],
                 np.r_[np.zeros((500, 1)), np.ones((500, 1))]]
    # Random variables 0,1 are the features, 2 is the label.
    rvs = [0,1,2]
    epochs = 1
    lr = 0.8
    # One SPN learned per class, mixed 50/50 at the root.
    gaussian_spn_l = learn_spn(data[:500], rvs, epochs=epochs, lr=lr, leaf_types=[Gaussian, Gaussian, Bernoulli])
    gaussian_spn_r = learn_spn(data[500:], rvs, epochs=epochs, lr=lr, leaf_types=[Gaussian, Gaussian, Bernoulli])
    gaussian_spn = Sum(weights=[0.5, 0.5], children=[gaussian_spn_l, gaussian_spn_r])
    print()
    print('Gaussian SPN:')
    test_gaussian(gaussian_spn, data)
if __name__ == '__main__':
    run_test()
|
from classCreation import main


def _menu_round(option_line):
    """Show one menu, read the user's numeric choice, and act on it.

    Choice 1 launches class creation via main(); choice 2 exits the
    program; any other number falls through to the next menu.
    NOTE(review): non-numeric input raises ValueError, exactly as the
    original code did.
    """
    print("")
    print("What would you like to do? ")
    print("")
    print(option_line)
    action = int(input())
    print("")
    if action == 1:
        main()
    if action == 2:
        exit()


# Greeting, one "Create Class" round, then four identical "Make A New
# Class" rounds -- replaces five copy-pasted menu blocks with one helper.
print("")
print("")
print("Welcome to our contact tracing program!")
_menu_round("Create Class (1) | Exit (2)")
for _ in range(4):
    print("")
    _menu_round("Make A New Class (1) | Exit (2)")

# Farewell prompt: only option 2 (quit) is honored here.
print("")
print("")
print("Thank You For Using Our Program")
print("Please Press (2) To Quit...")
action = int(input())
print("")
if action == 2:
    exit()
#
#Copyright (c) 2018 Jie Zheng
#
from e3net.db.db_base import init_database
from e3net.common.e3config import get_config
from e3net.db.db_base import create_database_entries
from e3net.common.e3log import get_e3loger
from e3net.e3neta.db import invt_e3neta_database
# Name of the local database used by the e3net agent.
DB_NAME = 'e3net_agent'
# Module-level logger for the e3net agent ('e3neta') component.
e3loger = get_e3loger('e3neta')
def e3neta_db_init():
    """Initialize the agent-local database and create its initial entries."""
    conn_str = get_config(None, 'database', 'connection')
    # Log where the local database lives before touching it.
    e3loger.info('local database connection: %s' % (conn_str))
    # NOTE(review): the third argument False presumably skips schema
    # recreation -- confirm against db_base.init_database.
    init_database(DB_NAME, conn_str, False)
    create_database_entries(DB_NAME)
|
import tqdm
from multiprocessing import Pool
import logging
from dsrt.config.defaults import DataConfig
class Filter:
    """Filters a dialogue collection in parallel according to config flags.

    Two passes are supported: dropping dialogues with too many utterances,
    and dropping dialogues that contain an over-long utterance.
    """
    def __init__(self, properties, parallel=True, config=None):
        # NOTE: the original used the mutable default ``config=DataConfig()``,
        # which is evaluated once at import time and shared between all
        # instances; a fresh DataConfig is now built per instance instead.
        self.properties = properties
        self.config = DataConfig() if config is None else config
        self.parallel = parallel
        self.init_logger()

    def init_logger(self):
        """Grab the root logger and apply the configured logging level."""
        self.logger = logging.getLogger()
        self.logger.setLevel(self.config['logging-level'])

    def transform(self, dialogues):
        """Run the configured filter passes over *dialogues*.

        Returns the list of dialogues that survive every enabled pass.
        """
        chunksize = self.config['chunksize']
        pool = Pool() if self.parallel else Pool(1)
        try:
            if self.config['filter-long-dialogues']:
                self.max_dl = self.config['max-dialogue-length']
                self.log('info', 'Filtering long dialogues (> {} utterances) ...'.format(self.max_dl))
                dialogues = self._run_filter(pool, self.filter_long_dialogues, dialogues, chunksize)
            if self.config['filter-dialogues-with-long-utterances']:
                self.max_ul = self.config['max-utterance-length']
                self.log('info', 'Filtering dialogues with long utterances (> {} tokens) ...'.format(self.max_ul))
                dialogues = self._run_filter(pool, self.filter_dialogues_with_long_utterances, dialogues, chunksize)
        finally:
            # Always release the worker processes, even when a pass fails
            # (the original leaked them on error).
            pool.close()
            pool.join()
        return dialogues

    def _run_filter(self, pool, filter_fn, dialogues, chunksize):
        """Map *filter_fn* over *dialogues* on *pool*, dropping rejected (None) entries."""
        self.log('info', '[filter running on {} cores]'.format(pool._processes))
        res = []
        for d in tqdm.tqdm(pool.imap(filter_fn, dialogues, chunksize=chunksize), total=len(dialogues)):
            res.append(d)
        return list(filter(None, res))

    def filter_long_dialogues(self, dialogue):
        """Return *dialogue*, or None when it exceeds ``self.max_dl`` utterances."""
        if len(dialogue) > self.max_dl:
            return None
        # Bug fix: the original fell off the end and returned None for
        # EVERY dialogue, so this pass silently emptied the dataset.
        return dialogue

    def filter_dialogues_with_long_utterances(self, dialogue):
        """Return *dialogue*, or None when any utterance exceeds ``self.max_ul`` tokens."""
        for utterance in dialogue:
            if len(utterance) > self.max_ul:
                return None
        return dialogue

    ####################
    #    UTILITIES     #
    ####################

    def log(self, priority, msg):
        """
        Just a wrapper, for convenience.

        *priority* is a level name (case-insensitive): CRITICAL [50],
        ERROR [40], WARNING [30], INFO [20], DEBUG [10] or NOTSET [0].
        Anything unrecognized defaults to INFO [20].

        Bug fix: the original ignored *priority* and logged every message
        at CRITICAL, contrary to this contract.
        """
        level = getattr(logging, str(priority).upper(), None)
        if not isinstance(level, int):
            level = logging.INFO
        self.logger.log(level, msg)
|
# code to calculate Rouge precision and recall for various texts, by taking the two text files
# that have the original summaries and the new ones.
# Inputs:
# 1) File containing original summaries
# 2) File containing new summaries
# 3) n-gram model to use (1, 2, 3 ...) (Should be less than the total number of words in any paragraph)
# Output
# Baseline_n_gram.txt containing tab delimited Precision and Recall values for each pair.
import numpy as np
def rouge_metrics(system_list, reference_list):
    """Return (recall, precision) for one system/reference pair.

    Recall is |overlap| / |reference| and precision is |overlap| / |system|,
    where the overlap keeps every system item that also occurs in the
    reference (duplicates included). Either side being empty scores (0, 0).
    """
    n_ref = len(reference_list)
    n_sys = len(system_list)
    if n_sys == 0 or n_ref == 0:
        return 0, 0
    overlap = len(intersection(system_list, reference_list))
    return overlap * 1.0 / n_ref, overlap * 1.0 / n_sys
def intersection(system_lst, ref_lst):
    """Return the elements of *system_lst* that also occur in *ref_lst*,
    preserving order and duplicates from *system_lst*."""
    common = []
    for value in system_lst:
        if value in ref_lst:
            common.append(value)
    return common
def create_ngrams(text_list, n=2):
    """Return all length-*n* sliding windows of *text_list* as sublists.

    An empty list is returned when *text_list* is shorter than *n*.
    """
    return [text_list[i:i + n] for i in range(len(text_list) - n + 1)]
# Input files: one summary per line, aligned line-by-line.
original_summaries_file_path = "y_data_train.txt"
new_summaries_file_path = "baseline.txt"
ngram = 2  # n-gram order for ROUGE-n

with open(original_summaries_file_path, "r") as f:
    original = f.read()
with open(new_summaries_file_path, "r") as f:
    new = f.read()

rrecall = []
rprecision = []
# Open the report once instead of re-opening it for every pair (the
# original re-opened the file in append mode on each loop iteration).
# NOTE: append mode is kept, so repeated runs keep extending the report.
with open("baseline_" + str(ngram) + "_gram.txt", "a") as f:
    # zip() silently stops at the shorter file -- assumes both files have
    # the same number of lines.
    for row_original, row_new in zip(original.split("\n"), new.split("\n")):
        system_list = row_original.split(" ")
        reference_list = row_new.split(" ")
        system_2grams = create_ngrams(system_list, ngram)
        reference_2grams = create_ngrams(reference_list, ngram)
        rouge_2_recall, rouge_2_precision = rouge_metrics(system_2grams, reference_2grams)
        rrecall.append(rouge_2_recall)
        rprecision.append(rouge_2_precision)
        f.write(str(rouge_2_recall) + "\t" + str(rouge_2_precision) + "\n")

rrecall = np.mean(rrecall)
rprecision = np.mean(rprecision)
# Guard against 0/0 when every pair scored zero (the original crashed).
if (rprecision + rrecall) > 0:
    f_score = 2 * rprecision * rrecall / (rprecision + rrecall)
else:
    f_score = 0.0
print ("Recall:", rrecall)
print ("Precision:", rprecision)
print ("FScore:", f_score)
|
# SCons build script for the CBI "bounds" sampler-scheme test.
Import('env')
env = env.Clone()
# NOTE(review): these flags drive the CBI-instrumented compiler;
# '-fsampler-scheme=bounds' presumably selects the bounds sampler --
# confirm against the instrumentation toolchain docs.
env.AppendUnique(CC=['-fsampler-scheme=bounds'], CCFLAGS=['-fassign-across-pointer'])
enum = env.CBIProgram('enum.c')
# Build the instrumented program, resolve its sampled reports, and compare
# against the checked-in *.expected outputs under the 'test:bounds' alias.
Alias('test:bounds', env.Expect([
    env.CBIResolvedSamples(env.CBIReports(enum), objects=enum),
]))
Alias('test', 'test:bounds')
File(Glob('*.expected'))
|
# -*- coding: utf-8 -*-
import hashlib
import hmac
import requests
VERSION_KHIPU_SERVICE = '1.3'


class KhipuService(object):
    """
    A client for the Khipu API.
    """
    # Base URL of the service, versioned.
    api_url = 'https://khipu.com/api/%s/' % VERSION_KHIPU_SERVICE
    # Dictionary of parameters sent to the service. Concrete services are
    # expected to pre-declare the accepted keys before set_parameter() is
    # used (only declared keys are stored -- see set_parameter).
    data = None
    # Message describing an error or other event.
    message = None
    # Kept as a class attribute so tests can swap in a fake HTTP client.
    requests = requests

    def __init__(self, receiver_id, secret, service_name, **kwargs):
        """
        Identify the collector (receiver) that will use the service.
        """
        # Collector id
        self.receiver_id = receiver_id
        # Collector secret key
        self.secret = secret
        # Name of the service endpoint to call
        self.service_name = service_name

    def set_parameter(self, name, value):
        """
        Attach *value* to one of the entries of ``self.data``. Only
        parameters already declared in the dictionary are stored.
        """
        # Bug fix: the original tested the literal string 'name' instead of
        # the ``name`` argument, so values were only ever stored when the
        # data dictionary happened to declare a key called 'name'.
        if name in self.data:
            self.data[name] = value

    def set_parameters(self, values):
        """
        Store every (name, value) pair from the *values* dictionary via
        set_parameter().
        """
        for name, value in values.items():
            self.set_parameter(name, value)

    def do_hash(self):
        """
        Generate the HMAC-SHA256 signature that Khipu requires.
        """
        # hmac.new() requires a bytes key on Python 3; accept str secrets
        # by encoding them the same way the signed message is encoded.
        key = self.secret.encode('UTF-8') if isinstance(self.secret, str) else self.secret
        return hmac.new(key, self.concatenated(), hashlib.sha256).hexdigest()

    def concatenated(self):
        """
        Return 'k1=v1&k2=v2&...' built from self.data in dictionary
        iteration order, UTF-8 encoded -- the message that gets signed.
        """
        return '&'.join(
            '{0}={1}'.format(key, value) for key, value in self.data.items()
        ).encode('UTF-8')

    def get_url_service(self):
        """Return the full endpoint URL for this service."""
        return self.api_url + self.service_name

    def request(self):
        """
        Sign self.data, POST it to the service, and return the decoded
        JSON response.
        """
        self.data['hash'] = self.do_hash()
        return self.requests.post(self.get_url_service(), self.data).json()
|
from django.db import models
# Create your models here.
class Attendance(models.Model):
    """One attendance record: which student was seen, and when.

    NOTE(review): ``timestamp`` is nullable with default None -- presumably
    filled in when the student is recognized; confirm against the view code.
    """
    # When the attendance was recorded (indexed for range queries).
    timestamp = models.DateTimeField(db_index=True,null=True,default=None)
    # Student's display name.
    name = models.CharField(max_length=250)
    student_id = models.CharField(max_length=64)
    university_id = models.CharField(max_length=64)
    def __str__(self):
        # Human-readable label used in the Django admin and shell.
        return self.name
        #return self.name + ' - ' + self.student_id
|
import numpy as np
import torch
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if isinstance(pic, np.ndarray):
        # HWC uint8 array -> CHW float tensor scaled into [0, 1]
        # (backward compatibility: always divides by 255).
        return torch.from_numpy(pic.transpose((2, 0, 1))).float().div(255)

    # Handle PIL Image: choose an integer dtype matching the mode.
    if pic.mode == 'I':
        tensor = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        tensor = torch.from_numpy(np.array(pic, np.int16, copy=False))
    else:
        tensor = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))

    # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
    if pic.mode == 'YCbCr':
        channels = 3
    elif pic.mode == 'I;16':
        channels = 1
    else:
        channels = len(pic.mode)
    tensor = tensor.view(pic.size[1], pic.size[0], channels)

    # Put it from HWC to CHW format
    # (yikes, this transpose takes 80% of the loading time/CPU).
    tensor = tensor.transpose(0, 1).transpose(0, 2).contiguous()
    if isinstance(tensor, torch.ByteTensor):
        return tensor.float().div(255)
    return tensor
from math import isnan
import pytest
import numpy as np
from confseq.betting import *
from confseq.misc import superMG_crossing_fraction, expand_grid
from scipy.stats import binomtest
@pytest.mark.random
@pytest.mark.parametrize("theta", [0, 0.5, 1])
def test_betting_mart_crossing_probabilities(theta):
    """Empirically check that betting_mart crosses 1/alpha with probability
    at most alpha (and strictly more than zero) under the true mean."""
    # Note that these tests are random and will each individually
    # fail at most 5% of the time.
    for m in [0.2, 0.5, 0.8]:
        repeats = 500
        alpha = 0.1
        dist_fn = lambda: np.random.binomial(1, m, 10000)
        mart_fn = lambda x: betting_mart(x, m, alpha=alpha, theta=theta)
        crossing_frac = superMG_crossing_fraction(
            mart_fn, dist_fn, alpha=alpha, repeats=repeats
        )
        # One-sided binomial test giving a lower confidence bound on the
        # true crossing probability from the observed fraction.
        crossing_test = binomtest(
            int(crossing_frac * repeats), n=repeats, p=alpha, alternative="greater"
        )
        lower_ci = crossing_test.proportion_ci(confidence_level=0.95)[0]
        assert lower_ci > 0
        assert lower_ci <= alpha
@pytest.mark.random
def test_betting_mart_power():
    """Check the directional power of betting_mart for each theta setting."""
    # Make sure theta=0,1 have one-sided power, while theta=1/2 has two-sided power
    theta = 1 / 2
    x = np.random.binomial(1, 0.5, 10000)
    mart1 = betting_mart(x, 0.25, theta=theta)
    mart2 = betting_mart(x, 0.75, theta=theta)
    # Should have two-sided power
    # This will fail with some small probability
    assert any(mart1 > 20)
    assert any(mart2 > 20)
    theta = 1
    mart1 = betting_mart(x, 0.4, theta=theta)
    mart2 = betting_mart(x, 0.6, theta=theta)
    # Should only have power against 0.4, but not 0.6
    assert any(mart1 > 20)
    assert not any(mart2 > 20)
    theta = 0
    mart1 = betting_mart(x, 0.4, theta=theta)
    mart2 = betting_mart(x, 0.6, theta=theta)
    # Should only have power against 0.6, but not 0.4
    assert not any(mart1 > 20)
    assert any(mart2 > 20)
@pytest.mark.parametrize("m", [0.1, 0.4, 0.5, 0.6, 0.9])
def test_betting_mart_convex_comb(m):
    """The convex-combination martingale should dominate the max-style
    combination pointwise for the same data and candidate mean."""
    # Convex combination should always be larger than maximum
    x = np.random.beta(1, 1, 10000)
    mart1 = betting_mart(x, m, theta=1 / 2, convex_comb=True)
    mart2 = betting_mart(x, m, theta=1 / 2, convex_comb=False)
    assert all(mart1 >= mart2)
@pytest.mark.random
def test_betting_mart_WoR():
    """With a finite population (N given), the martingale should stay below
    1/alpha at the true mean and blow up at nearby false means."""
    N = 1000
    x = np.random.binomial(1, 0.5, N)
    alpha = 0.05
    assert betting_mart(x, m=np.mean(x), N=N)[-1] < 1 / alpha
    # Martingale should be large for all m_null not equal to m.
    # This may fail with small probability
    assert betting_mart(x, m=np.mean(x) + 0.01, N=N)[-1] > 1 / alpha
    assert betting_mart(x, m=np.mean(x) - 0.01, N=N)[-1] > 1 / alpha
@pytest.mark.parametrize("m", [0.4, 0.5, 0.6])
def test_diversified_betting_mart(m):
    """diversified_betting_mart with constant bets must equal the explicit
    average of the corresponding +/- betting martingales computed by hand."""
    # Betting mart should be the same as averaging a bunch of betting_marts
    n = 1000
    x = np.random.beta(1, 1, n)
    # K = 1: a single constant betting fraction of 1/2.
    K = 1
    div_mart1 = diversified_betting_mart(
        x,
        m=m,
        lambdas_fns_positive=[lambda x, m, i=i: (i + 1) / (K + 1) for i in range(K)],
        convex_comb=True,
        trunc_scale=1,
    )
    # Recreate the same martingales by hand as a (K, n) matrix of bets.
    lambdas_matrix = np.tile(
        np.array([(i + 1) / (K + 1) for i in range(K)])[:, None], n
    )
    x_matrix = np.tile(x, (K, 1))
    div_mart2 = np.mean(
        0.5 * np.cumprod(1 + lambdas_matrix * (x_matrix - m), axis=1)
        + 0.5 * np.cumprod(1 - lambdas_matrix * (x_matrix - m), axis=1),
        axis=0,
    )
    assert all(div_mart1 == div_mart2)
@pytest.mark.parametrize("m,alpha", expand_grid([0.45, 0.5, 0.55], [0.05, 0.1]))
def test_cs_from_martingale(m, alpha):
    """The CS induced by a martingale should exclude exactly those m where
    the martingale exceeds 1/alpha, up to the 1/breaks grid resolution."""
    # At each time, find the boundary of the CS.
    # Check to make sure it's exactly where the martingale exceeds 1/alpha
    n = 1000
    breaks = 1000
    mart_fn = lambda x, m: betting_mart(x, m)
    x = np.random.beta(1, 1, n)
    l, u = cs_from_martingale(x, mart_fn, alpha=alpha, breaks=breaks)
    mart = mart_fn(x, m)
    assert all(mart[np.logical_or(m < l, u < m)] > 1 / alpha)
    # Here, we are checking whether m is in [l, u] up to some 1/breaks deviation. This
    # is due to the fact that grid-based CSs are conservative up to such a deviation.
    assert all(
        mart[np.logical_and(l + 1 / breaks <= m, m <= u - 1 / breaks)] <= 1 / alpha
    )
def test_mu_t():
    """mu_t should always equal the mean of the not-yet-observed portion of
    the finite population."""
    # Check that mu_t is always the mean of the remaining population
    N = 10000
    x = np.random.binomial(1, 0.5, N)
    mu_t_list = mu_t(x, m=np.mean(x), N=N)
    mean_remaining_pop_list = np.array([np.mean(x[i:N]) for i in range(N)])
    assert all(abs(mu_t_list - mean_remaining_pop_list) < 10e-12)
def test_logical_cs():
    """logical_cs bounds must match the deterministic limits implied by
    all-ones and all-zeros observations."""
    # If just receiving 1s, lower cs should be 1/N, 2/N, 3/N, ...
    # and upper cs should be 1, 1, 1, ...
    N = 100
    x = np.ones(N)
    l, u = logical_cs(x, N=N)
    assert all(l == [i / N for i in np.arange(1, N + 1)])
    assert all(u == np.ones(N))
    # The opposite phenomenon should be observed if all observations are 0
    x = np.zeros(N)
    l, u = logical_cs(x, N=N)
    assert all(l == np.zeros(N))
    assert all(u == [1 - i / N for i in np.arange(1, N + 1)])
def test_get_ci_seq():
    """get_ci_seq at selected times must equal the CI computed directly on
    each corresponding data prefix."""
    # Just test the CI at a given time
    ci_fn = lambda x: betting_ci(x, alpha=0.05)
    times = [10, 50, 100]
    x = np.random.beta(1, 1, 100)
    l_seq, u_seq = get_ci_seq(x, ci_fn=ci_fn, times=times)
    for i in range(len(times)):
        l, u = ci_fn(x[0 : times[i]])
        assert l_seq[i] == l
        assert u_seq[i] == u
def test_onesided_cs():
    """A two-sided betting CS should decompose into the two one-sided CSs
    with alpha split by theta between the lower and upper sides."""
    # Ensure that the two-sided CS recovers the lower CS with appropriate alpha.
    n = 10000
    x = np.random.beta(1, 1, n)
    alpha = 0.5
    theta = 1 / 2
    lower_twosided, upper_twosided = betting_cs(
        x,
        alpha=alpha,
        running_intersection=False,
        theta=theta,
        convex_comb=False,
    )
    lower_onesided = betting_lower_cs(
        x,
        alpha=theta * alpha,
        running_intersection=False,
    )
    # The upper CS is the reflected lower CS on 1 - x.
    upper_onesided = 1 - betting_lower_cs(
        1 - x, alpha=(1 - theta) * alpha, running_intersection=False
    )
    assert all(np.isclose(lower_twosided, lower_onesided).astype(bool))
    assert all(np.isclose(upper_twosided, upper_onesided).astype(bool))
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
from mock import MagicMock
@pytest.fixture()
def base_config_with_schema():
    """Minimal config dict containing only the schema version."""
    return {"SchemaVersion": "1.0"}
@pytest.fixture()
def valid_vpc_config():
    """VPC config with one security group and one subnet."""
    return {"SecurityGroupIds": ["sg123"], "Subnets": ["subnet-1234"]}
@pytest.fixture()
def valid_iam_role_arn():
    """A well-formed IAM role ARN."""
    return "arn:aws:iam::555555555555:role/IMRole"
@pytest.fixture()
def valid_tags():
    """A single-element tag list in the AWS Key/Value shape."""
    return [{"Key": "tag1", "Value": "tagValue1"}]
@pytest.fixture()
def valid_session_config():
    """Session defaults: S3 bucket and object key prefix."""
    return {
        "DefaultS3Bucket": "sagemaker-python-sdk-test-bucket",
        "DefaultS3ObjectKeyPrefix": "test-prefix",
    }
@pytest.fixture()
def valid_estimator_config():
    """Estimator defaults with the debugger hook disabled."""
    return {
        "DebugHookConfig": False,
    }
@pytest.fixture()
def valid_environment_config():
    """Two generic environment variables."""
    return {
        "var1": "value1",
        "var2": "value2",
    }
@pytest.fixture()
def valid_containers_config(valid_environment_config):
    """A single container definition carrying the environment variables."""
    return [{"Environment": valid_environment_config}]
@pytest.fixture()
def valid_feature_group_config(valid_iam_role_arn):
    """Feature group defaults with KMS keys for online and offline stores."""
    security_storage_config = {"KmsKeyId": "kmskeyid1"}
    s3_storage_config = {"KmsKeyId": "kmskeyid2"}
    online_store_config = {"SecurityConfig": security_storage_config}
    offline_store_config = {"S3StorageConfig": s3_storage_config}
    return {
        "OnlineStoreConfig": online_store_config,
        "OfflineStoreConfig": offline_store_config,
        "RoleArn": valid_iam_role_arn,
    }
@pytest.fixture()
def valid_edge_packaging_config(valid_iam_role_arn):
    """Edge packaging job defaults: output KMS key, role, and resource key."""
    return {
        "OutputConfig": {"KmsKeyId": "kmskeyid1"},
        "RoleArn": valid_iam_role_arn,
        "ResourceKey": "kmskeyid1",
    }
@pytest.fixture()
def valid_model_config(
    valid_iam_role_arn, valid_vpc_config, valid_environment_config, valid_containers_config
):
    """Model defaults covering containers, isolation, role, and VPC."""
    return {
        "Containers": valid_containers_config,
        "EnableNetworkIsolation": True,
        "ExecutionRoleArn": valid_iam_role_arn,
        "PrimaryContainer": {"Environment": valid_environment_config},
        "VpcConfig": valid_vpc_config,
    }
@pytest.fixture()
def valid_model_package_config(
    valid_iam_role_arn, valid_environment_config, valid_containers_config
):
    """Model package defaults with inference and validation specifications."""
    inference_specification = {
        "Containers": valid_containers_config,
    }
    transform_job_definition = {
        "Environment": valid_environment_config,
        "TransformOutput": {"KmsKeyId": "kmskeyid1"},
        "TransformResources": {"VolumeKmsKeyId": "volumekmskeyid1"},
    }
    validation_specification = {
        "ValidationProfiles": [{"TransformJobDefinition": transform_job_definition}],
        "ValidationRole": valid_iam_role_arn,
    }
    return {
        "InferenceSpecification": inference_specification,
        "ValidationSpecification": validation_specification,
    }
@pytest.fixture()
def valid_processing_job_config(valid_iam_role_arn, valid_vpc_config, valid_environment_config):
    """Processing job defaults: network isolation, dataset KMS keys, roles."""
    network_config = {"EnableNetworkIsolation": True, "VpcConfig": valid_vpc_config}
    dataset_definition = {
        "AthenaDatasetDefinition": {"KmsKeyId": "kmskeyid1"},
        "RedshiftDatasetDefinition": {
            "KmsKeyId": "kmskeyid2",
            "ClusterRoleArn": valid_iam_role_arn,
        },
    }
    return {
        "Environment": valid_environment_config,
        "NetworkConfig": network_config,
        "ProcessingInputs": [{"DatasetDefinition": dataset_definition}],
        "ProcessingOutputConfig": {"KmsKeyId": "kmskeyid3"},
        "ProcessingResources": {"ClusterConfig": {"VolumeKmsKeyId": "volumekmskeyid1"}},
        "RoleArn": valid_iam_role_arn,
    }
@pytest.fixture()
def valid_training_job_config(valid_iam_role_arn, valid_vpc_config, valid_environment_config):
    """Training job defaults: isolation, KMS keys, profiler, role, VPC."""
    return {
        "EnableNetworkIsolation": True,
        "Environment": valid_environment_config,
        "OutputDataConfig": {"KmsKeyId": "kmskeyid1"},
        "ResourceConfig": {"VolumeKmsKeyId": "volumekmskeyid1"},
        "ProfilerConfig": {"DisableProfiler": False},
        "RoleArn": valid_iam_role_arn,
        "VpcConfig": valid_vpc_config,
    }
@pytest.fixture()
def valid_pipeline_config(valid_iam_role_arn):
    """Pipeline defaults: just the execution role."""
    return {"RoleArn": valid_iam_role_arn}
@pytest.fixture()
def valid_compilation_job_config(valid_iam_role_arn, valid_vpc_config):
    """Compilation job defaults: output KMS key, role, and VPC."""
    return {
        "OutputConfig": {"KmsKeyId": "kmskeyid1"},
        "RoleArn": valid_iam_role_arn,
        "VpcConfig": valid_vpc_config,
    }
@pytest.fixture()
def valid_transform_job_config(valid_environment_config):
    """Transform job defaults: data capture, environment, and KMS keys."""
    return {
        "DataCaptureConfig": {"KmsKeyId": "kmskeyid1"},
        "Environment": valid_environment_config,
        "TransformOutput": {"KmsKeyId": "kmskeyid2"},
        "TransformResources": {"VolumeKmsKeyId": "volumekmskeyid1"},
    }
@pytest.fixture()
def valid_automl_config(valid_iam_role_arn, valid_vpc_config):
    """AutoML job defaults: security config, output KMS key, and role."""
    return {
        "AutoMLJobConfig": {
            "SecurityConfig": {"VolumeKmsKeyId": "volumekmskeyid1", "VpcConfig": valid_vpc_config}
        },
        "OutputDataConfig": {"KmsKeyId": "kmskeyid1"},
        "RoleArn": valid_iam_role_arn,
    }
@pytest.fixture()
def valid_endpointconfig_config():
    """Endpoint config defaults with KMS keys at every supported location."""
    return {
        "AsyncInferenceConfig": {"OutputConfig": {"KmsKeyId": "kmskeyid1"}},
        "DataCaptureConfig": {"KmsKeyId": "kmskeyid2"},
        "KmsKeyId": "kmskeyid3",
        "ProductionVariants": [{"CoreDumpConfig": {"KmsKeyId": "kmskeyid4"}}],
    }
@pytest.fixture()
def valid_endpoint_config(valid_tags):
    """Endpoint defaults: just tags."""
    return {"Tags": valid_tags}
@pytest.fixture()
def valid_monitoring_schedule_config(
    valid_iam_role_arn, valid_vpc_config, valid_environment_config
):
    """Monitoring schedule defaults nested down to the job definition."""
    network_config = {"EnableNetworkIsolation": True, "VpcConfig": valid_vpc_config}
    return {
        "MonitoringScheduleConfig": {
            "MonitoringJobDefinition": {
                "Environment": valid_environment_config,
                "MonitoringOutputConfig": {"KmsKeyId": "kmskeyid1"},
                "MonitoringResources": {"ClusterConfig": {"VolumeKmsKeyId": "volumekmskeyid1"}},
                "NetworkConfig": network_config,
                "RoleArn": valid_iam_role_arn,
            }
        }
    }
@pytest.fixture()
def valid_remote_function_config(valid_iam_role_arn, valid_tags, valid_vpc_config):
    """Remote function defaults covering every supported override."""
    return {
        "Dependencies": "./requirements.txt",
        "EnvironmentVariables": {"var1": "value1", "var2": "value2"},
        "ImageUri": "123456789012.dkr.ecr.us-west-2.amazonaws.com/myimage:latest",
        "IncludeLocalWorkDir": True,
        "InstanceType": "ml.m5.xlarge",
        "JobCondaEnvironment": "some_conda_env",
        "RoleArn": valid_iam_role_arn,
        "S3KmsKeyId": "kmskeyid1",
        "S3RootUri": "s3://my-bucket/key",
        "Tags": valid_tags,
        "VolumeKmsKeyId": "kmskeyid2",
        "VpcConfig": valid_vpc_config,
    }
@pytest.fixture()
def valid_config_with_all_the_scopes(
    valid_session_config,
    valid_feature_group_config,
    valid_monitoring_schedule_config,
    valid_endpoint_config,
    valid_endpointconfig_config,
    valid_automl_config,
    valid_transform_job_config,
    valid_compilation_job_config,
    valid_pipeline_config,
    valid_model_config,
    valid_model_package_config,
    valid_processing_job_config,
    valid_training_job_config,
    valid_edge_packaging_config,
    valid_remote_function_config,
    valid_estimator_config,
):
    """Full config combining every scope-level fixture above."""
    return {
        "PythonSDK": {
            "Modules": {
                "Estimator": valid_estimator_config,
                "RemoteFunction": valid_remote_function_config,
                "Session": valid_session_config,
            }
        },
        "FeatureGroup": valid_feature_group_config,
        "MonitoringSchedule": valid_monitoring_schedule_config,
        "Endpoint": valid_endpoint_config,
        "EndpointConfig": valid_endpointconfig_config,
        "AutoMLJob": valid_automl_config,
        "TransformJob": valid_transform_job_config,
        "CompilationJob": valid_compilation_job_config,
        "Pipeline": valid_pipeline_config,
        "Model": valid_model_config,
        "ModelPackage": valid_model_package_config,
        "ProcessingJob": valid_processing_job_config,
        "TrainingJob": valid_training_job_config,
        "EdgePackagingJob": valid_edge_packaging_config,
    }
@pytest.fixture()
def s3_resource_mock():
    """A MagicMock standing in for a boto3 S3 resource."""
    return MagicMock(name="s3")
@pytest.fixture()
def get_data_dir():
    """Absolute path to the shared config test-data directory."""
    return os.path.join(os.path.dirname(__file__), "..", "..", "..", "data", "config")
|
# -*- coding: utf-8 -*-
import simple_draw as sd
# Часть 1.
# Написать функции рисования равносторонних геометрических фигур:
# - треугольника
# - квадрата
# - пятиугольника
# - шестиугольника
# Все функции должны принимать 3 параметра:
# - точка начала рисования
# - угол наклона
# - длина стороны
#
# Использование копи-пасты - обязательно! Даже тем кто уже знает про её пагубность. Для тренировки.
# Как работает копипаста:
# - одну функцию написали,
# - копипастим её, меняем название, чуть подправляем код,
# - копипастим её, меняем название, чуть подправляем код,
# - и так далее.
# В итоге должен получиться ПОЧТИ одинаковый код в каждой функции
# Пригодятся функции
# sd.get_point()
# sd.get_vector()
# sd.line()
# Результат решения см lesson_004/results/exercise_01_shapes.jpg
# def triangle(point, angle, length):
# t1 = sd.get_vector(point, angle, length, 5)
# sd.line(start_point=point, end_point=t1.end_point, color=sd.COLOR_YELLOW, width=1)
# t2 = sd.get_vector(t1.end_point, angle + 120, length, 5)
# sd.line(start_point=t1.end_point, end_point=t2.end_point, color=sd.COLOR_YELLOW, width=1)
# # t3 = sd.get_vector(t2.end_point, angle + 240, length, 5)
# sd.line(start_point=t2.end_point, end_point=point, color=sd.COLOR_YELLOW, width=1)
#
#
# def square(point, angle, length):
# s1 = sd.get_vector(point, angle, length, 5)
# sd.line(start_point=point, end_point=s1.end_point, color=sd.COLOR_YELLOW, width=1)
# s2 = sd.get_vector(s1.end_point, angle + 90, length, 5)
# sd.line(start_point=s1.end_point, end_point=s2.end_point, color=sd.COLOR_YELLOW, width=1)
# s3 = sd.get_vector(s2.end_point, angle + 180, length, 5)
# sd.line(start_point=s2.end_point, end_point=s3.end_point, color=sd.COLOR_YELLOW, width=1)
# # s4 = sd.get_vector(s3.end_point, angle + 270, length, 5)
# sd.line(start_point=s3.end_point, end_point=point, color=sd.COLOR_YELLOW, width=1)
#
#
# def pentagon(point, angle, length):
# p1 = sd.get_vector(point, angle, length, 5)
# sd.line(start_point=point, end_point=p1.end_point, color=sd.COLOR_YELLOW, width=1)
# p2 = sd.get_vector(p1.end_point, angle + 72, length, 5)
# sd.line(start_point=p1.end_point, end_point=p2.end_point, color=sd.COLOR_YELLOW, width=1)
# p3 = sd.get_vector(p2.end_point, angle + 144, length, 5)
# sd.line(start_point=p2.end_point, end_point=p3.end_point, color=sd.COLOR_YELLOW, width=1)
# p4 = sd.get_vector(p3.end_point, angle + 216, length, 5)
# sd.line(start_point=p3.end_point, end_point=p4.end_point, color=sd.COLOR_YELLOW, width=1)
# # p5 = sd.get_vector(p4.end_point, angle + 288, length, 5)
# sd.line(start_point=p4.end_point, end_point=point, color=sd.COLOR_YELLOW, width=1)
#
#
# def hexagon(point, angle, length):
# h1 = sd.get_vector(point, angle, length, 5)
# sd.line(start_point=point, end_point=h1.end_point, color=sd.COLOR_YELLOW, width=1)
# h2 = sd.get_vector(h1.end_point, angle + 60, length, 5)
# sd.line(start_point=h1.end_point, end_point=h2.end_point, color=sd.COLOR_YELLOW, width=1)
# h3 = sd.get_vector(h2.end_point, angle + 120, length, 5)
# sd.line(start_point=h2.end_point, end_point=h3.end_point, color=sd.COLOR_YELLOW, width=1)
# h4 = sd.get_vector(h3.end_point, angle + 180, length, 5)
# sd.line(start_point=h3.end_point, end_point=h4.end_point, color=sd.COLOR_YELLOW, width=1)
# h5 = sd.get_vector(h4.end_point, angle + 240, length, 5)
# sd.line(start_point=h4.end_point, end_point=h5.end_point, color=sd.COLOR_YELLOW, width=1)
# # h6 = sd.get_vector(h5.end_point, angle + 300, length, 5)
# sd.line(start_point=h5.end_point, end_point=point, color=sd.COLOR_YELLOW, width=1)
#
#
# point_work = sd.get_point(100, 100)
# triangle(point_work, 15, 150)
#
# point_work = sd.get_point(350, 100)
# square(point_work, 15, 150)
#
# point_work = sd.get_point(100, 350)
# pentagon(point_work, 15, 100)
#
# point_work = sd.get_point(350, 350)
# hexagon(point_work, 15, 100)
# Часть 1-бис.
# Попробуйте прикинуть обьем работы, если нужно будет внести изменения в этот код.
# Скажем, связывать точки не линиями, а дугами. Или двойными линиями. Или рисовать круги в угловых точках. Или...
# А если таких функций не 4, а 44?
# ___ Если использовать принцип "Copy-Paste" получится просто невероятное количество строк кода,
# ___ равно как и ошибок в них.
# Часть 2 (делается после зачета первой части)
#
# Надо сформировать функцию, параметризированную в местах где была "небольшая правка".
# Это называется "Выделить общую часть алгоритма в отдельную функцию"
# Потом надо изменить функции рисования конкретных фигур - вызывать общую функцию вместо "почти" одинакового кода.
#
# В итоге должно получиться:
# - одна общая функция со множеством параметров,
# - все функции отрисовки треугольника/квадрата/етс берут 3 параметра и внутри себя ВЫЗЫВАЮТ общую функцию.
#
# Не забудте в этой общей функции придумать, как устранить разрыв
# в начальной/конечной точках рисуемой фигуры (если он есть)
def vector(vector_start, length, angle):
    """Return the end point of a vector of the given length and angle
    (degrees) anchored at *vector_start*."""
    # sd.get_vector takes (start, angle, length); only its end point is needed.
    return sd.get_vector(vector_start, angle, length).end_point
def polygon(point, heads, length, angle_start=15):
    """Draw a regular polygon with *heads* vertices and side *length*.

    Drawing starts at *point* with the first side inclined at
    *angle_start* degrees (default 15, matching the original exercises, so
    existing callers are unaffected). The last side is drawn back to the
    starting point itself, so the figure always closes even if rounding in
    the vector math would otherwise leave a small gap.
    """
    exterior_angle = 360 / heads
    start = point
    for side in range(heads):
        # Original code accumulated the angle across iterations; computing
        # it directly from the side index is equivalent and clearer.
        angle = angle_start + side * exterior_angle
        if side < heads - 1:
            end_point = vector(point, length, angle)
        else:
            # Close the figure exactly at the starting point.
            end_point = start
        sd.line(start_point=point, end_point=end_point, color=sd.COLOR_YELLOW, width=1)
        point = end_point
# (point_start_x, point_start_y, length_start, type_of_polygon)
# One tuple per figure: where to draw it, the side length, and the number
# of vertices (3=triangle, 4=square, 5=pentagon, 6=hexagon).
start_point = [(100, 100, 150, 3), (350, 100, 150, 4), (100, 350, 100, 5), (350, 350, 100, 6)]
for _ in start_point:
    point_start = sd.get_point(_[0], _[1])
    length_start = _[2]
    heads_start = _[3]
    polygon(point_start, heads_start, length_start)
# Part 2-bis.
# And now -- how much work would a change take? The benefit is obvious :)
# That is why programmers follow the D.R.Y. principle https://clck.ru/GEsA9
# Be lazy, don't use copy-paste!
# ___ Exactly: the loop reads each polygon's parameters from the list and
# ___ calls the single shared drawing function.
sd.pause()
|
from app import db
class Cities(db.Model):
    """A city: the root of the city -> region -> shop hierarchy."""
    id = db.Column(db.Integer, primary_key=True)
    # City name, unique across the table.
    city = db.Column(db.String(32), unique=True, nullable=False)
    # Deleting a city also deletes its regions (cascade="delete").
    regions = db.relationship("Regions", backref="city", cascade="delete")
class Regions(db.Model):
    """A region belonging to one city."""
    id = db.Column(db.Integer, primary_key=True)
    city_id = db.Column(db.Integer, db.ForeignKey("cities.id"), nullable=False)
    # NOTE(review): unique=True makes region names globally unique, not
    # merely unique within a city -- confirm this is intended.
    region = db.Column(db.String(64), nullable=False, unique=True)
    # Deleting a region also deletes its shops (cascade="delete").
    shops = db.relationship("Shops", backref="region", cascade="delete")
class Shops(db.Model):
    """A shop located in one region; the URL is optional."""
    id = db.Column(db.Integer, primary_key=True)
    region_id = db.Column(db.Integer, db.ForeignKey("regions.id"), nullable=False)
    name = db.Column(db.String(64))
    shop_url = db.Column(db.String(128), nullable=True)
|
#!/usr/bin/python
import argparse
import configparser
import time
from distutils.util import strtobool
import os.path
import sys
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
class SetCamApp(object):
    """Command-line application that applies boolean settings to one or
    more Samsung IP cameras, using credentials from an INI config file."""

    RC_SUCCESS = 0
    RC_FAILURE = 1

    @staticmethod
    def _strtobool(value):
        """Parse a truthy/falsy string exactly like the removed (Python
        3.12) distutils.util.strtobool: 1 for y/yes/t/true/on/1, 0 for
        n/no/f/false/off/0; raise ValueError otherwise."""
        value = value.lower()
        if value in ('y', 'yes', 't', 'true', 'on', '1'):
            return 1
        if value in ('n', 'no', 'f', 'false', 'off', '0'):
            return 0
        raise ValueError('invalid truth value {!r}'.format(value))

    def __init__(self):
        self.configure_argparse()

    def configure_argparse(self):
        """Build self.argparser with the supported command-line options."""
        a = argparse.ArgumentParser()
        boolhelp = '''If set, enables or disables {0}. By default, the setting
        for {0} will not be changed.'''
        boolmetavar = 'on/off'
        # NOTE: default must be None, not ['*']. argparse *appends* to a
        # non-empty default list, so '--camera foo' used to produce
        # ['*', 'foo'] and therefore always operated on ALL cameras.
        # run() substitutes ['*'] when nothing was given.
        a.add_argument(
            '--camera', dest='camera_hostnames', action='append', default=None,
            required=False,
            help='''The hostname or IP address of the camera. May be specified
            multiple times to operate on multiple cameras. Specify * for all
            cameras. Defaults to *.'''
        )
        a.add_argument(
            '-c', '--config-path', dest='config_path', required=True,
            help='The path to a file containing credential configuration.'
        )
        a.add_argument(
            '--motion-events', dest='motion_events', default=None,
            type=self._strtobool, metavar=boolmetavar,
            help=boolhelp.format('motion events')
        )
        a.add_argument(
            '--continuous-recording', dest='continuous_recording', default=None,
            type=self._strtobool, metavar=boolmetavar,
            help=boolhelp.format('continuous recording')
        )
        self.argparser = a

    def get_camera_list(self):
        """All camera hostnames defined in the credential config file."""
        return self.config.sections()

    def get_password(self, hostname):
        """Password for *hostname* from the credential config file."""
        return self.config[hostname]['password']

    def run(self, argv):
        """Parse *argv* and apply the requested settings to each camera.

        Returns RC_SUCCESS, or RC_FAILURE when any camera failed (errors
        are collected and printed to stderr)."""
        args = self.argparser.parse_args(argv)
        self.config = configparser.ConfigParser()
        self.config.read(args.config_path)
        camera_list = args.camera_hostnames or ['*']
        rc = self.RC_SUCCESS
        errmsg = ''
        if '*' in camera_list:
            camera_list = self.get_camera_list()
        for hostname in camera_list:
            camera = None
            try:
                password = self.get_password(hostname)
                camera = Samsung_SNHP6410BN(hostname)
                camera.login(password)
                camera.set(
                    motion_events=args.motion_events,
                    continuous_recording=args.continuous_recording
                )
            except ConfigurationFailedException:
                errmsg += ('Configuration did not change on camera {0}? '
                           'Maybe a camera glitch.\n').format(hostname)
                rc = self.RC_FAILURE
            except Exception as e:
                errmsg += 'Failed while setting camera options on {0}: {1}\n'\
                    .format(hostname, str(e))
                rc = self.RC_FAILURE
            finally:
                # Only log out of cameras that were actually constructed;
                # the original called camera.logout() unconditionally and
                # crashed (or reused a stale driver) when get_password()
                # or the constructor raised.
                if camera is not None:
                    camera.logout()
        if rc != self.RC_SUCCESS:
            print(errmsg.rstrip(), file=sys.stderr)
        return rc
class Samsung_SNHP6410BN(object):
    """Drives the web UI of a Samsung SNH-P6410BN camera through Selenium
    to read and toggle boolean settings (motion events, recording).

    NOTE(review): webdriver.PhantomJS and switch_to_frame are deprecated
    in modern Selenium releases; this code targets an older Selenium.
    """
    def __init__(self, hostname, timeout=5):
        # timeout: seconds handed to WebDriverWait for page loads.
        self.hostname = hostname
        self._driver = webdriver.PhantomJS(service_log_path=os.path.devnull)
        # For boolean controls, the value is a tuple containing the element
        # names to click for (on, off).
        self._bool_element_ids = {
            'motion_events': ('ea1', 'ea2'),
            # These elements actually control sdcard mounting since there isn't
            # an option directly on the web interface to control continuous
            # recording
            'continuous_recording': ('sdcard_mnt1', 'sdcard_mnt2')
        }
        self.timeout = timeout
    def _get_setup_page(self):
        """Navigate the driver to the camera's video setup page."""
        self._driver.get(
            'http://{}/pages/page_setup_video.php?login=true'
            .format(self.hostname)
        )
        # NOTE(review): presence_of_all_elements_located is passed as a
        # class, not an instance built from a locator, so until() likely
        # gets a truthy object on the first poll — effectively no wait.
        # Confirm whether a real locator was intended.
        WebDriverWait(self._driver, self.timeout,).until(
            expected_conditions.presence_of_all_elements_located
        )
    def getbool(self, element_ids):
        """Return True when the 'on' element of *element_ids* is selected."""
        self._get_setup_page()
        on_element = element_ids[0]
        return self._driver.find_element_by_id(on_element).is_selected()
    def get(self):
        """Read every known boolean setting; returns {option_name: bool}."""
        values = dict()
        for k, v in self._bool_element_ids.items():
            values[k] = self.getbool(v)
        return values
    def login(self, password):
        """Open the camera's start page and submit the login form."""
        self._driver.get('http://{}'.format(self.hostname))
        WebDriverWait(self._driver, self.timeout,).until(
            expected_conditions.presence_of_all_elements_located
        )
        # Frames?! Who wrote this trash?
        self._driver.switch_to_frame('mainFrame')
        password_field = self._driver.find_element(
            by=By.ID, value='private_key'
        )
        password_field.send_keys(password)
        # The submit button doesn't have an ID. :-/
        submit_button = self._driver.find_element(
            by=By.CLASS_NAME, value='btn_ty2'
        )
        submit_button.click()
    def logout(self):
        """Close the browser window, ending the session."""
        self._driver.close()
    def set(self, motion_events=None, continuous_recording=None, verify=True):
        """Apply every non-None boolean option, then save the form.

        When *verify* is True, re-read the settings afterwards and raise
        ConfigurationFailedException if a requested value did not stick.
        """
        # Snapshot the keyword arguments by name. 'args' itself is not yet
        # bound when locals() is taken, so excluding it is belt-and-braces.
        args = locals()
        options = {
            o: args[o] for o in args
            if o not in ('self', 'args', 'verify')
        }
        self._get_setup_page()
        for option in options:
            if options[option] is not None:
                self.setbool(self._bool_element_ids[option], options[option])
        savebutton = self._driver.find_element(by=By.CLASS_NAME, value='bType1')
        savebutton.click()
        if verify:
            # HACK: We need to wait a little bit for the settings to be saved.
            #
            # Can we use WebDriverWait to wait until the form is submitted?
            time.sleep(1)
            values = self.get()
            for option in [o for o in options]:
                # Skip checking options that we did not set
                if options[option] is None:
                    continue
                if options[option] != values[option]:
                    raise ConfigurationFailedException(
                        'Option {0} not set'.format(option)
                    )
    def setbool(self, element_ids, setting):
        """Click the (on, off) element matching *setting*.

        NOTE(review): this clicks unconditionally; presumably clicking an
        already-selected radio is a no-op on this UI — confirm.
        """
        if setting:
            element_id = element_ids[0]
        else:
            element_id = element_ids[1]
        element = self._driver.find_element(by=By.ID, value=element_id)
        element.click()
class ConfigurationFailedException(Exception):
    """Raised when a camera setting could not be verified after saving."""
if __name__ == '__main__':
    # sys.exit is always available; the bare `exit` builtin is injected by
    # the site module for interactive use and may be absent (python -S).
    sys.exit(SetCamApp().run(sys.argv[1:]))
|
from flask import Flask
# Module-level Flask application; the route handlers below attach to it.
app = Flask(__name__)
# app.config['DEBUG'] = True
from flask import render_template
# from app import app
import os
import json
import re
from flask import request
from whoosh.lang.morph_en import variations
from flask import jsonify
@app.route('/')
def my_form():
    """Serve the keyword-search landing page."""
    home_page = render_template("index.html")
    return home_page
@app.route('/jsondata',methods=['GET','POST'])
def jsondata():
    """Build a d3-friendly nested tree of per-file frequency counts for
    every morphological variation of the submitted keyword.

    Tree shape: keyword -> variation -> file -> per-line running count.
    """
    keyword = request.form['text']
    # Materialize as a list: the original passed a map() object to len(),
    # which raises TypeError on Python 3 (map returns an iterator there).
    vari = [v.lower() for v in variations(keyword)]
    key_list = []
    stem_list = []
    path = os.getcwd()+"/templates/file_storage"
    for variant in vari:
        file_list = []
        for file in os.listdir(path):
            if file.endswith(".txt"):
                path_file = path+'/'+file
                # with-statement closes the handle; the original leaked it.
                with open(path_file, 'r') as text_fh:
                    line = text_fh.read().lower()
                line = re.sub('[!@#$".(),\']', '', line)
                freq = []
                if variant in line:
                    lines = line.splitlines()
                    word_count = 0
                    for text_line in lines:
                        select_line = text_line.split()
                        for token in select_line:
                            if token == variant:
                                word_count = word_count+1
                        # Running total recorded once per line.
                        freq.append(dict({"name":word_count,"parent":file}))
                    file_list.append(dict({"name":file,"parent":file,"children":freq}))
        # NOTE: "parent":file relies on the last filename leaking out of the
        # inner loop, exactly as the original did.
        stem_list.append(dict({"name":variant,"parent":file,"children":file_list}))
    key_list.append(dict({"name":keyword,"parent":"null","children":stem_list}))
    return render_template('d3.html',result = json.dumps(key_list))
def _render_story(story_filename):
    # Shared helper: every /fileN route just renders one stored text file.
    return render_template("file_storage/" + story_filename)

@app.route('/file1')
def file_one():
    """Serve the text of "Dear John"."""
    return _render_story("Dear_John.txt")

@app.route('/file2')
def file_two():
    """Serve the text of "A Walk to Remember"."""
    return _render_story("a_walk_to_remember.txt")

@app.route('/file3')
def file_three():
    """Serve the text of "The Last Song"."""
    return _render_story("the_last_song.txt")

@app.route('/file4')
def file_four():
    """Serve the text of "The Notebook"."""
    return _render_story("the_notebook.txt")
|
# -*- coding: utf-8 -*-
"""
Cleaner modules, it cleans raw text data
"""
from os import listdir
from os.path import isfile, join
import os, io
class Cleaner:
    """Strips wiki-style markup ([[links]], '''bold''', ''italic'',
    ==headings== and {{templates}}) from raw text, one line at a time."""
    def findDoubleSign(self, line, simbleStart, simbleEnd):
        """Find the first span delimited by a doubled start symbol and a
        doubled end symbol (e.g. "{{ ... }}").

        Returns (start, end) with *end* exclusive, or (-1, -1) when no
        complete span exists in *line*.
        """
        i = 0
        start = -1
        end = -1
        char = simbleStart
        flag = False
        startFind = False
        while(i + 1 < len(line) and flag == False):
            if(line[i] == char and line[i + 1] == char):
                if char == simbleStart and startFind == False:
                    start = i
                    # From here on, scan for the doubled closing symbol.
                    char = simbleEnd
                    startFind = True
                elif char == simbleEnd and startFind == True:
                    end = i + 2
                    flag = True
            i += 1
        return start, end
    def findSquareBrackets(self, line, simbleStart, simbleEnd):
        """Like findDoubleSign, but for [[ ]] with two extras: lines that
        start a File/Image/Category link return (-2, -2) so the caller can
        drop the whole line, and a `nested` flag tries to skip one level
        of nested brackets.

        NOTE(review): once `char` switches to the end symbol, the branch
        that sets `nested = True` appears unreachable — nested [[ ]]
        detection may never trigger; verify against real input.
        """
        i = 0
        start = -1
        end = -1
        char = simbleStart
        flag = False
        nested = False
        startFind = False
        if line.startswith("[[File:") or line.startswith("[[Image:") or line.startswith("[[Category:"):
            return -2, -2
        while(i + 1 < len(line) and flag == False):
            if(line[i] == char and line[i + 1] == char):
                if char == simbleStart:
                    if startFind == False:
                        start = i
                        char = simbleEnd
                        startFind = True
                    else:
                        nested = True
                else:
                    if char == simbleEnd and startFind == True and nested == False:
                        end = i + 2
                        flag = True
                    else:
                        if char == simbleEnd and nested == True:
                            nested = False
            i += 1
        return start, end
    # input: substring, string between [[ ]] with brackets included
    def twoWordInSquareBrackets(self, line):
        """Index of the '|' separator inside a [[target|label]] link,
        or -1 when the link has no separator."""
        for c in line:
            if c == '|':
                return line.index(c)
        return -1
    # input: substring, string between [[ ]] with brackets included
    def wordToReplace(self, line):
        """Visible text of a [[...]] link: the label after '|' when
        present, otherwise the whole target (brackets stripped)."""
        index = self.twoWordInSquareBrackets(line)
        if index == -1:
            return line[2 : -2]
        else:
            return line[index + 1 : -2]
    def findTripleApostrophe(self, line):
        """Find the first '''...''' span; returns (start, end) with end
        exclusive, or (-1, -1) when absent."""
        i = 0
        start = -1
        end = -1
        char = "'"
        flag = False
        flagStart = False
        while(i + 2 < len(line) and flag == False):
            if(line[i] == char and line[i + 1] == char and line[i + 2] == char):
                if flagStart == False:
                    start = i
                    flagStart = True
                else:
                    end = i + 3
                    flag = True
            i += 1
        return start, end
    def getWordTripleApostrophe(self, line):
        """Strip the three-character delimiters from a '''...''' span."""
        return line[3 : -3]
    def getWordDoubleSign(self, line):
        """Strip the two-character delimiters from a doubled-symbol span."""
        return line[2 : -2]
    def page_cleaner(self, text):
        """Clean a whole page: drop table rows, unwrap or remove every
        markup span line by line, then remove multi-line {{ }} templates.
        Returns the cleaned text.
        """
        buff = io.StringIO(text)
        data = ""
        for line in buff:
            # Drop wiki table rows ("|" or " |" prefixed).
            # NOTE(review): a line that is exactly " " (space, no newline)
            # would make line[1] raise IndexError — assumes every line ends
            # with '\n'; confirm inputs.
            if line[0] == '|' or (line[0] == ' ' and line[1] == '|'):
                line = ""
            start, end = self.findSquareBrackets(line, '[', ']')
            startTA, endTA = self.findTripleApostrophe(line)
            startDA, endDA = self.findDoubleSign(line, "'", "'")
            startEq, endEq = self.findDoubleSign(line, '=', '=')
            startBr, endBr = self.findDoubleSign(line, '{', '}')
            flag = False
            # Keep rewriting the line while any markup span remains; each
            # finder is re-run after its replacement below.
            while((start != -1 and end != -1) or (startTA != -1 and endTA != -1) \
            or (startDA != -1 and endDA != -1) or (startEq != -1 and endEq != -1) or (startBr != -1 and endBr != -1)):
                start, end = self.findSquareBrackets(line, '[', ']')
                if start == -2 and end == -2:
                    # File/Image/Category line: discard it entirely.
                    line = line.replace(line, "")
                    start, end = self.findSquareBrackets(line, '[', ']')
                elif start != -1 and end != -1:
                    word = line[start : end]
                    replace = self.wordToReplace(line[start : end])
                    if replace == "":
                        flag = True
                    line = line.replace(word, replace)
                    start, end = self.findSquareBrackets(line, '[', ']')
                startTA, endTA = self.findTripleApostrophe(line)
                if startTA != -1 and endTA != -1:
                    # print(line[startTA : endTA])
                    # print(self.getWordTripleApostrophe(line[startTA : endTA]))
                    line = line.replace(line[startTA : endTA], self.getWordTripleApostrophe(line[startTA : endTA]))
                    startTA, endTA = self.findTripleApostrophe(line)
                # print("LINE " + line)
                startDA, endDA = self.findDoubleSign(line, "'", "'")
                if startDA != -1 and endDA != -1:
                    line = line.replace(line[startDA : endDA], self.getWordDoubleSign(line[startDA : endDA]))
                    # print(self.getWordDoubleSign(line[startDA : endDA]))
                    startDA, endDA = self.findDoubleSign(line, "'", "'")
                startEq, endEq = self.findDoubleSign(line, '=', '=')
                if startEq != -1 and endEq != -1:
                    line = line.replace(line[startEq : endEq], self.getWordDoubleSign(line[startEq : endEq]))
                    startEq, endEq = self.findDoubleSign(line, '=', '=')
                startBr, endBr = self.findDoubleSign(line, '{', '}')
                if startBr != -1 and endBr != -1:
                    # print(line[startBr : endBr])
                    line = line.replace(line[startBr : endBr], "")
                    startBr, endBr = self.findDoubleSign(line, '{', '}')
            if(line == '\n'): # removed "and flag == True" so that empty lines are dropped
                flag = False
            elif line != '\n' and line != "*\n" and line != "* \n":
                data += line
        data = data.replace("()", "")
        data = data.replace("( )", "")
        data = data.replace("===", "")
        # Remove {{ }} templates that span multiple lines.
        startBr, endBr = self.findDoubleSign(data, '{', '}')
        while(startBr != -1 and endBr != -1):
            # print(data[startBr : endBr])
            data = data.replace(data[startBr : endBr], "")
            startBr, endBr = self.findDoubleSign(data, '{', '}')
        return data
|
"""
Name: delete_files_from_folder.py
Purpose: Delete all the files from the directory and sub-directories
Usage: python delete_files_from_folder.py <source>
Author: Rohan Nagalkar
Created: 23/06/2016
Version: 0.1 Rohan Nagalkar Created.
"""
import os
import sys
def usage():
    """
    Print how to invoke this script.
    :return: True
    """
    # Parenthesized single-argument print works identically on Python 2
    # and 3; the original bare `print` statement was Python-2-only.
    print("""
    Usage : python delete_files_from_folder.py <path/to/directory>
    """)
    return True


def main():
    """
    Delete every file under the directory given as sys.argv[1]
    (sub-directories are walked recursively; directories are kept).
    :return: True on success else False
    """
    if len(sys.argv) != 2:
        usage()
        return False
    directory = sys.argv[1]
    if not os.path.exists(directory):
        print("The following path does not exist.")
        return False
    for root_dir, subfolders, files in os.walk(directory):
        if len(files) > 0:
            for dir_file in files:
                target = os.path.join(root_dir, dir_file)
                print("Deleting : %s" % target)
                os.remove(target)
        else:
            print("%s is empty." % root_dir)
    return True


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from datetime import datetime, date, timedelta
from copy import deepcopy
from django.http import HttpResponse
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from django.shortcuts import render, get_list_or_404,\
get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.core.paginator import Paginator, EmptyPage, InvalidPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.forms.models import inlineformset_factory, formset_factory, modelformset_factory
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required, login_required
from .models import Payment
from .forms import PaymentForm, PaymentShowForm, DateForm
from apps.mo.models import MO, MaxFlatPrice
from apps.user.models import CustomUser
from apps.core.views import to_xls
from apps.build.models import Contract
@login_required
def add_payment(request, contract=None):
    """Create a new Payment; pre-select *contract* when its pk is given."""
    template = 'payment_creation.html'
    context = {'title': _(u'Добавление платежа')}
    prefix = 'pay'
    user_mo = request.user.customuser.mo
    if request.method == "POST":
        form = PaymentForm(request.POST, request.FILES, prefix=prefix, initial={'user_mo': user_mo})
        if form.is_valid():
            form.save()
            return redirect('payments')
    else:
        initial = {'user_mo': user_mo}
        if contract:
            # Pre-fill the contract field from the URL parameter.
            initial['contract'] = Contract.objects.get(pk=contract)
        form = PaymentForm(prefix=prefix, initial=initial)
    context.update({'form': form, 'prefix': prefix})
    return render_to_response(template, context, context_instance=RequestContext(request))
def recount_accounting(mo, user=None, context=None, accounting=None, update=None, dates=None, payment_dates=None):
    '''
    Recalculate spending/saving accounting either for a single MO
    (when *mo* is an MO instance) or for an iterable of MOs (else branch).
    Mutates *context* in place and returns a list of accounting dicts
    (empty for the single-MO "update" path).

    dates - {'date__range': (timestamp, timestamp)}, from which get mo agreements
    payment_dates - {'date__range': (timestamp, timestamp)}, from which get mo payments by agreements
    user - request.user, if we don`t have from_dt
    update - True if we need to update mo accounting(each 00:00 by cron)
    '''
    objects = []
    accounting = accounting if accounting else {}
    context = context if context else {}
    if isinstance(mo, MO):
        agreements = mo.departamentagreement_set.all()
        if not update:
            objects = Payment.objects.filter(subvention__in=[dep.subvention for dep in agreements])
        kw = {}
        if dates:
            kw = dates
        # Default period: from the user's accounting date to Jan 1 of
        # the following year.
        if not dates and user and hasattr(user, 'customuser'):
            from_dt = user.customuser.get_user_date()
            to_dt = datetime(from_dt.year + 1, 1, 1)
            kw = {'date__range': (from_dt, to_dt)}
        agreements = agreements.filter(**kw)
        if not update:
            if not payment_dates:
                objects = objects.filter(**kw)
            else:
                objects = objects.filter(**payment_dates)
        if agreements:
            amount = sum([float(dep.subvention.amount) for dep in agreements if dep.subvention.amount])
            home_amount = 0.0
            adm_amount = 0.0
            # Accumulate orphan-home subsidies and administrative
            # coefficients from both regional and federal budgets.
            for dep in agreements:
                if dep.subvention.reg_budget:
                    if dep.subvention.reg_budget.sub_orph_home:
                        home_amount += dep.subvention.reg_budget.sub_orph_home
                    if dep.subvention.reg_budget.adm_coef:
                        adm_amount += dep.subvention.reg_budget.adm_coef
                if dep.subvention.fed_budget:
                    if dep.subvention.fed_budget.sub_orph_home:
                        home_amount += dep.subvention.fed_budget.sub_orph_home
                    if dep.subvention.fed_budget.adm_coef:
                        adm_amount += dep.subvention.fed_budget.adm_coef
            # payment_state 1/2 and payment_budget_state 2/1 split spending
            # into regular/administrative and regional/federal buckets.
            reg_spent = sum([float(payment.amount) for payment in objects.filter(payment_state=1, payment_budget_state=2) if payment.amount])
            fed_spent = sum([float(payment.amount) for payment in objects.filter(payment_state=1, payment_budget_state=1) if payment.amount])
            reg_adm_spent = sum([float(payment.amount) for payment in objects.filter(payment_state=2, payment_budget_state=2) if payment.amount])
            fed_adm_spent = sum([float(payment.amount) for payment in objects.filter(payment_state=2, payment_budget_state=1) if payment.amount])
            adm_spent = reg_adm_spent + fed_adm_spent
            spent = reg_spent + fed_spent + adm_spent
            percent = round((float(spent/amount) * 100), 3) if amount else 0
            object_kwargs = {'start_year__lt': user.customuser.get_user_date(),
                             'finish_year__gt': user.customuser.get_user_date()}
            # NOTE(review): from_dt is only bound in the "no dates" branch
            # above; calling this with *dates* for a single MO raises
            # NameError here — confirm callers always omit dates here.
            max_flat_price = MaxFlatPrice.objects.get(year=from_dt.year)
            # NOTE(review): this query is built twice (see a few lines
            # below); the first result is never used.
            query = mo.contract_set.filter(**object_kwargs).values("flats_amount", "summa",
                                                                   "summ_without_mo_money", "summ_mo_money")
            # Number of dwellings (flats) under concluded contracts
            contracts_flats_amount = 0
            contracts_summ = 0
            query = mo.contract_set.filter(**object_kwargs).values("flats_amount", "summa",
                                                                   "summ_without_mo_money", "summ_mo_money")
            for contract in query:
                if contract["flats_amount"]:
                    contracts_flats_amount += contract["flats_amount"]
                # TOTAL sum over concluded contracts
                if contract["summa"]:
                    contracts_summ += contract["summa"]
            contracts_economy = contracts_flats_amount * max_flat_price.max_price - contracts_summ
            contracts_economy = contracts_economy if contracts_economy > 0 else 0
            # economy = sum([float(auction.start_price) for auction in mo.auction_set.all() if auction.start_price]) - spent
            economy = contracts_economy
            accounting.update({'mo': mo, 'spent': spent, 'saved': amount - spent, 'percent': percent,
                               'sub_amount': amount, 'economy': economy, 'home_amount': home_amount,
                               'adm_amount': adm_amount, 'adm_spent': adm_spent,
                               'reg_spent': reg_spent, 'fed_spent': fed_spent})
        context.update({'accounting': accounting})
        if update and accounting:
            mo.update(**accounting)
    else:
        # Iterable-of-MOs branch: build one accounting dict per MO.
        all_payments = []
        c_kwargs = deepcopy(payment_dates)
        for one_mo in mo:
            accounting = {}
            agreements = one_mo.departamentagreement_set.filter(**dates)
            contracts = one_mo.contract_set.filter(**c_kwargs)
            if agreements:
                amount = sum([float(dep.subvention.amount) for dep in agreements if dep.subvention.amount])
                accounting.update({'sub_amount': amount})
                if contracts:
                    spent = sum([float(contract.summa) for contract in contracts if contract.summa])
                    percent = round(((float(spent)/amount) * 100), 3) if spent and amount else 0
                    economy = sum([float(auction.start_price) for auction in one_mo.auction_set.filter(**payment_dates)
                                   if auction.start_price]) - spent
                    # NOTE(review): this mutates the caller's payment_dates
                    # dict (contract__in accumulates across MOs) — the
                    # deepcopy above only protects c_kwargs.
                    payment_dates.update({'contract__in': [contract.id for contract in contracts if contract]})
                    payments = Payment.objects.filter(**payment_dates)
                    payment = sum([float(payment.amount) for payment in payments])
                    all_payments = all_payments + list(payments)
                    accounting.update({'payment': payment, 'spent': spent, 'saved': amount - spent,
                                       'percent': percent, 'economy': economy})
            objects.append({'mo': one_mo, 'accounting': accounting})
        context.update({'accountings': objects, 'payment_list': all_payments, 'show_accounting_payments': True})
    return objects
@login_required
def get_payments(request, mo=None, all=False, xls=False):
    """List payments, scoped by the caller:

    - all=True: every payment, restricted to the user's accounting year
      when a custom user date is available;
    - otherwise: payments of the user's MO (or the MO given by pk), with
      accounting totals computed by recount_accounting();
    - xls=True: export the selected payments to XLS instead of HTML.
    """
    context = {'title': _(u'Платежи')}
    template = 'payments.html'
    prefix = 'acc_date'
    objects = []
    if Payment.objects.all().exists():
        if all:
            context = {'title': _(u'Все платежи')}
            if hasattr(request.user, 'customuser'):
                from_dt = request.user.customuser.get_user_date()
                if from_dt:
                    # Accounting year: [user date, Jan 1 of the next year).
                    to_dt = datetime(from_dt.year + 1, 1, 1)
                    objects = Payment.objects.filter(date__range=(from_dt, to_dt))
                else:
                    objects = Payment.objects.all()
            else:
                objects = Payment.objects.all()
        elif hasattr(request.user, 'customuser') or mo:
            # Prefer the user's own MO; fall back to the pk from the URL.
            mo = request.user.customuser.mo if request.user.customuser.mo else MO.objects.get(pk=mo)
            context = {'title': _(u'Платежи %s') % mo.name}
            objects = recount_accounting(mo, user=request.user, context=context)
    if xls:
        return to_xls(request, objects={PaymentForm: objects}, fk_forms = False)
    form = DateForm(prefix=prefix)
    context.update({'date_form': form})
    # Paginate 50 per page; fall back gracefully on bad page numbers.
    page = request.GET.get('page', '1')
    paginator = Paginator(objects, 50)
    try:
        objects_list = paginator.page(page)
    except PageNotAnInteger:
        objects_list = paginator.page(1)
    except EmptyPage:
        objects_list = paginator.page(paginator.num_pages)
    context.update({'payment_list': objects_list})
    return render(request, template, context, context_instance=RequestContext(request))
@login_required
def get_accounting(request, select=None):
    """Show aggregated accounting for all MOs over a period chosen either
    by the *select* shortcut (1=last day, 2=last month, 3=current year,
    4=previous year) or by an explicitly posted DateForm range, falling
    back to the user's accounting year.
    """
    template = 'payments.html'
    context = {'title': _(u'Платежи')}
    prefix = 'acc_date'
    mos = MO.objects.all()
    kwargs = {}
    agr_kwargs = {}
    form = DateForm(prefix=prefix)
    context.update({'date_form': form})
    from_dt = request.user.customuser.get_user_date() if hasattr(request.user, 'customuser') else None
    if select and int(select) in [1, 2, 3, 4]:
        state = int(select)
        if state == 4:
            # Previous calendar year.
            dt = datetime(datetime.now().year, 12, 31)
            dt = dt.replace(year=dt.year-1)
            prev = dt.replace(year=dt.year-1)
        elif state == 3:
            # Current calendar year.
            dt = datetime(datetime.now().year, 12, 31)
            prev = dt.replace(year=dt.year-1)
        elif state == 2:
            # Last month. For January roll back to December of the
            # PREVIOUS year; the original kept the same year, producing
            # an inverted (empty) date range.
            dt = datetime(datetime.now().year, datetime.now().month, 28)
            if dt.month > 1:
                prev = dt.replace(month=dt.month-1)
            else:
                prev = dt.replace(year=dt.year-1, month=12)
        elif state == 1:
            # Last day. timedelta handles month/year boundaries; the
            # original dt.replace(day=dt.day-1) raised ValueError on the
            # first day of a month.
            dt = datetime.now()
            prev = dt - timedelta(days=1)
        kwargs.update({'date__range': (prev, dt)})
        agr_kwargs.update({'date__range': (datetime(dt.year, 1, 1), datetime(dt.year, 12, 31))})
    elif not select and request.method == 'POST' and 'date_select' in request.POST:
        form = DateForm(request.POST, prefix=prefix)
        if form.is_valid():
            dt, prev = form.cleaned_data.get('dt'), form.cleaned_data.get('prev')
            kwargs.update({'date__range': (prev, dt)})
            agr_kwargs.update({'date__range': (prev, dt)})
        else:
            context.update({'date_form': form})
    elif not select and from_dt:
        to_dt = datetime(from_dt.year, 12, 31)
        kwargs.update({'date__range': (from_dt, to_dt)})
        agr_kwargs.update({'date__range': (from_dt, to_dt)})
    recount_accounting(mos, context=context, dates=agr_kwargs, payment_dates=kwargs)
    context.update({'hide_paginator': True})
    return render(request, template, context, context_instance=RequestContext(request))
@login_required
def get_payment(request, pk, extra=None):
    """Read-only detail view of a single Payment."""
    payment = Payment.objects.get(pk=pk)
    show_form = PaymentShowForm(instance=payment)
    context = {'title': _(u'Платежи'), 'object': payment, 'form': show_form}
    return render(request, 'payment.html', context, context_instance=RequestContext(request))
@login_required
def update_payment(request, pk, extra=None):
    """Edit an existing Payment; redirect to the list after a valid save.

    The original duplicated the context.update()/render() calls across
    three branches; they are folded into one exit path here.
    """
    context = {'title': _(u'Параметры платежа')}
    payment = Payment.objects.get(pk=pk)
    prefix = 'pay'
    if request.method == "POST":
        form = PaymentForm(request.POST, request.FILES, instance=payment, prefix=prefix)
        if form.is_valid():
            form.save()
            return redirect('payments')
    else:
        form = PaymentForm(instance=payment, prefix=prefix)
    # Reached with a bound-but-invalid form (POST) or a fresh form (GET).
    context.update({'object': payment, 'form': form, 'prefix': prefix})
    return render(request, 'payment_updating.html', context, context_instance=RequestContext(request))
@login_required
def pre_delete_payment(request, pk):
    """Render the delete-confirmation page for a Payment."""
    payment = Payment.objects.get(pk=pk)
    context = {'title': _(u'Удаление платежа'), 'object': payment}
    return render_to_response("payment_deleting.html", context, context_instance=RequestContext(request))
@login_required
def delete_payment(request, pk):
    """Delete a Payment on confirmation; bounce back to the list on
    cancel; otherwise re-show the confirmation page with an error."""
    context = {'title': _(u'Удаление платежа')}
    payment = Payment.objects.get(pk=pk)
    if payment and 'delete' in request.POST:
        payment.delete()
        return redirect('payments')
    if 'cancel' in request.POST:
        return redirect('payments')
    context.update({'error': _(u'Возникла ошибка при удалении платежа!')})
    return render_to_response("payment_deleting.html", context, context_instance=RequestContext(request))
|
def solve():
    """Read n, then n integers from stdin; print and return the maximum of
    value * (1-based rank) over the values sorted in descending order.

    Wrapping the original module-level statements in a function with a
    __main__ guard keeps script behavior identical while making the
    module importable (and testable) without consuming stdin.
    """
    n = int(input())
    m = [int(input()) for _ in range(n)]
    m.sort(reverse=True)
    best = max(value * rank for rank, value in enumerate(m, start=1))
    print(best)
    return best


if __name__ == '__main__':
    solve()
|
from Database import enc_table
# p = number.getPrime(512)
# q = number.getPrime(512)
# p = 59
# q = 53
# e = 65537 # usually a large prime number, or calculated using gcd(e,phi(pq))=1
# n=p*q
def retrieve_from_db():
    """
    Retrieves the public key and the prime numbers needed for the encryption
    :return: a tuple, that consists in the two primes,the product and the public key
    """
    key_doc = enc_table.find_one({'_id': 1})
    prime_p = key_doc["p"]
    prime_q = key_doc["q"]
    public_e = key_doc["e"]
    return prime_p, prime_q, prime_p * prime_q, public_e


# Module-level RSA key material shared by the helpers below.
p, q, n, e = retrieve_from_db()
def calculate_phi():
    """Euler's totient of n = p*q for the module-level primes p and q."""
    totient = (p - 1) * (q - 1)
    return totient
def generate_private_key():
    """
    Generates the private key, based on the public key
    :return: The private key
    """
    # d = e^-1 mod phi(n); three-argument pow computes the modular inverse.
    totient = calculate_phi()
    return pow(e, -1, totient)
def crypt(plaintext):
    """
    Encrypt *plaintext* (bytes) with the module-level public key (e, n),
    one character at a time: c = b**e mod n.
    :param plaintext: The bytes to encrypt
    :return: The encrypted list (one integer per input character)
    """
    # iso-8859-1 maps every byte 0-255 to the code point of the same value,
    # so decode/encode round-trips arbitrary binary data.
    plaintext = plaintext.decode("iso-8859-1")
    ch_list = [ord(b) for b in plaintext]
    # pow(ch, e, n) uses modular exponentiation throughout; the original
    # (ch ** e) % n materialized an astronomically large intermediate for
    # realistic public exponents before reducing.
    enc_list = [pow(ch, e, n) for ch in ch_list]
    return enc_list
def decrypt(cripttext_list):  # using TCR (faster than normal decryption)
    """
    The decryption function that uses the Chinese Remainder Theorem to decrypt the list, and then transforms the list
    back into the decrypted string
    :param cripttext_list: A list that contains encrypted elements
    :return: The decrypted string, encoded in bytes
    """
    d = generate_private_key()

    def TCR(a, n):
        # Split the exponentiation over the prime factors p and q, then
        # recombine the residues with Garner's formula.
        m1 = p
        m2 = q
        n1 = n % (m1 - 1)
        n2 = n % (m2 - 1)
        m1_mod_inverse = pow(m1, -1, m2)
        # Three-argument pow keeps intermediates bounded by the modulus;
        # the original computed (a % m) ** n1 in full before reducing.
        x1 = pow(a % m1, n1, m1)
        x2 = pow(a % m2, n2, m2)
        return x1 + m1 * (((x2 - x1) * m1_mod_inverse) % m2)

    txt_list = [chr(TCR(cripttext_list[i], d)) for i in range(len(cripttext_list))]
    plaintext = ""
    for ch in txt_list:
        plaintext = plaintext + ch
    return plaintext.encode("iso-8859-1")
# def decrypt(cripttext):
# """
# An alternative decryption function
# """
# return (cripttext ** generate_private_key()) % n
|
import sqlite3 as sq
class PhoneBook:
    """Tiny sqlite3-backed phone book (name, address, unique mobile)."""

    def __init__(self, db_path='phonebook.sqlite3'):
        """Open (or create) the database and ensure the PHONEBOOK table exists.

        :param db_path: sqlite database file; defaults to the original
            hard-coded 'phonebook.sqlite3' so existing callers are unaffected.
        """
        try:
            # The connection is now per-instance; the original opened it as a
            # class attribute, creating the database file at import time and
            # sharing one connection across all instances.
            self.con = sq.connect(db_path)
            self.cur = self.con.cursor()
            self.cur.execute(
                'Create table if not exists PHONEBOOK(name text,address text,mobile text unique) ')
            print(' Table created / already exists ')
        except Exception as e:
            print(f'Please fix this {e}')

    def addData(self, name, address, mobile):
        """Insert one contact; *mobile* must be unique."""
        try:
            self.cur.execute(
                'insert into PHONEBOOK values(?,?,?)', (name, address, mobile))
            # BUG FIX: without commit() the insert lived only in the open
            # transaction and was lost when the connection closed.
            self.con.commit()
            print('successfully added data ')
        except Exception as e:
            print(f'Please fix this {e}')

    def getData(self, mobile):
        """Return the (name, address) row for *mobile*, or None if absent."""
        try:
            self.cur.execute(
                ' select name,address from PHONEBOOK where mobile=(?)', (mobile,))
            self.data = self.cur.fetchone()
            print(self.data)
            print(' retrieved some value ')
            return self.data
        except Exception as e:
            print(f'Please fix this {e}')
# if __name__ == '__main__':
# phoneDiary = PhoneBook()
# phoneDiary.addData('rahul', 'kasipur', '8210638822')
# print(phoneDiary.getData('8210638822'))
|
__author__ = 'Noblesse Oblige'
from tqdm import tqdm
import numpy as np
import nltk
from nltk.corpus import cmudict
from nltk.corpus import opinion_lexicon
from nltk.corpus import wordnet as wn
import string
import os
from nltk.parse import stanford
from nltk.util import ngrams
from nltk.util import skipgrams
from nltk.sentiment.util import *
from nltk.sentiment.sentiment_analyzer import *
from FakeNews.fnc_1.scorer import FNCException, LABELS
from FakeNews.utils.score import *
from sklearn.feature_extraction.text import *
from sklearn.externals import joblib
from sklearn.feature_selection import chi2,SelectKBest
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import StratifiedKFold,StratifiedShuffleSplit
from sklearn.ensemble import GradientBoostingClassifier
import csv
def report_score(actual,predicted):
    """Print the confusion matrix and the FNC score relative to the best
    achievable score, then return that relative score (percentage)."""
    raw_score, confusion = score_submission(actual, predicted)
    best_score, _ = score_submission(actual, actual)
    print_confusion_matrix(confusion)
    relative = raw_score*100/best_score
    print("Score: " +str(raw_score) + " out of " + str(best_score) + "\t("+str(relative) + "%)")
    return relative
def load_data(filename):
    """Read *filename* as a CSV with a header row and return its rows as
    a list of dicts; raises FNCException when the file is missing."""
    rows = None
    try:
        with open(filename, encoding='utf-8') as fh:
            rows = list(csv.DictReader(fh))
        # Defensive check kept from the original (list() never yields None,
        # so in practice only the FileNotFoundError path raises).
        if rows is None:
            raise FNCException('ERROR: No data found in: {}'.format(filename))
    except FileNotFoundError:
        raise FNCException("ERROR: Could not find file: {}".format(filename))
    return rows
def CollectData(text):
    """Load the '<text>_stances.csv' / '<text>_bodies.csv' pair, attach
    each stance row's article body, and return (stances, label_indices)."""
    stances = load_data("fnc_1/" + text + "_stances.csv")
    body = load_data("fnc_1/" + text + "_bodies.csv")
    body_by_id = {int(article['Body ID']): article['articleBody'] for article in body}
    labels = []
    # merged so as to be able to do stratified folding
    for row in stances:
        row['articleBody'] = body_by_id[int(row['Body ID'])]
        labels.append(LABELS.index(row["Stance"]))
    return stances, labels
##Collection Class
class Collect_features:
def __init__(self,text):
self.bod_text=[]
self.head_text=[]
for sent in text:
self.bod_text.append(sent["articleBody"].lower())
self.head_text.append(sent["Headline"].lower())
def FeatureList(self):
a=[]
for line in range(len(self.bod_text)):
for i, (headline, body) in tqdm(enumerate(zip(self.head_text, self.bod_text))):
b=[]
bod=self.bod_text[line]
head=self.head_text[line]
b.append(sum([1 if (token in bod and token not in ENGLISH_STOP_WORDS) else 0 for token in self.word_token(head)]))
b.append(sum([1 if token in bod else 0 for token in self.word_token(head)]))
b.append(sum([ 1 if " ".join(gram) in bod else 0 for gram in self.Word_k_skip_n_gram(0,2,head)]))
b.append(sum([ 1 if " ".join(gram) in bod else 0 for gram in self.Word_k_skip_n_gram(0,3,head)]))
b.append(sum([ 1 if " ".join(gram) in bod else 0 for gram in self.Word_k_skip_n_gram(0,4,head)]))
b.append(sum([ 1 if " ".join(gram) in bod else 0 for gram in self.Word_k_skip_n_gram(0,5,head)]))
c2=self.Word_k_skip_n_gram(1,2,bod)
c3=self.Word_k_skip_n_gram(1,3,bod)
c4=self.Word_k_skip_n_gram(1,4,bod)
c5=self.Word_k_skip_n_gram(1,5,bod)
b.append(sum([ 1 if gram in c2 else 0 for gram in self.Word_k_skip_n_gram(1,2,head)]))
b.append(sum([ 1 if gram in c3 else 0 for gram in self.Word_k_skip_n_gram(1,3,head)]))
b.append(sum([ 1 if gram in c4 else 0 for gram in self.Word_k_skip_n_gram(1,4,head)]))
b.append(sum([ 1 if gram in c5 else 0 for gram in self.Word_k_skip_n_gram(1,5,head)]))
b.append(self.ratio_Overlap_Words(head,bod))
b.extend([self.Sentiment(head),self.Sentiment(bod)])
a.append(b)
return a
def word_token(self,line):
return nltk.word_tokenize(line.lower())
_wnl = nltk.WordNetLemmatizer()
def normalize_word(self,w,pos):
pos=self.pos_to_wn(pos)
return self._wnl.lemmatize(w,pos).lower()
def token_lemmas(self,s):
po=nltk.pos_tag(nltk.word_tokenize(s.lower())) #part_Of_Speach tagger
s=[self.word_token(t) for t in nltk.sent_tokenize(s.lower())] #tokenise by sentece then word
p=nltk.pos_tag_sents(s)
return [self.normalize_word(t,pos) for t,pos in po]
def pos_to_wn(self,tag):#curtasy of bogs answer to https://stackoverflow.com/questions/25534214/nltk-wordnet-lemmatizer-shouldnt-it-lemmatize-all-inflections-of-a-word
if tag in ['JJ', 'JJR', 'JJS']:
return wn.ADJ
#elif tag in ['NN', 'NNS', 'NNP', 'NNPS']:
# return wn.NOUN
elif tag in ['RB', 'RBR', 'RBS']:
return wn.ADV
elif tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:
return wn.VERB
return wn.NOUN
def Word_k_skip_n_gram(self,k,n,words):
words=self.word_token(words)
if k==0:
ret=list(ngrams(words,n))
else:
ret=list(skipgrams(words,n,k))
return ret
def ratio_Overlap_Words(self,head,body):
#A normalised calculation of the number of words shared between the headline and body
no_int=len(set(self.token_lemmas(head)).intersection(set(self.token_lemmas(body))))
no_un=len(set(self.token_lemmas(head)).union(set(self.token_lemmas(body))))
return no_int/no_un
def Sentiment(self, s, plot=False):
    """Average lexicon-based sentiment per sentence of `s`.

    Basic sentiment analyser courtesy of
    nltk.sentiment.util.demo_liu_hu_lexicon, modified to count negations
    and to return a score instead of plotting. Returns 0.0 for empty input
    (the original raised ZeroDivisionError).
    """
    # Hoist the opinion lexicons into sets once per call: the original
    # tested membership against opinion_lexicon.positive()/negative()
    # for every word, an O(lexicon) scan each time.
    positive_words = set(opinion_lexicon.positive())
    negative_words = set(opinion_lexicon.negative())
    y = []
    sentences = [self.word_token(t) for t in nltk.sent_tokenize(s.lower())]
    if not sentences:
        return 0.0
    for sent in sentences:
        neg = 0
        for word in sent:
            if NEGATION_RE.search(word):
                neg += 1
            if word in positive_words:
                y.append(1)   # positive
            elif word in negative_words:
                y.append(-1)  # negative
            else:
                y.append(0)   # neutral
        # An odd number of negations flips the sentence's polarity once.
        if neg % 2 != 0:
            y.append(-1)
    return sum(y) / len(sentences)
def tdif(self, doc_train, doc_test):
    """Build TF and TF-IDF feature matrices for the train/test documents.

    Incorporated because the 3rd-place FNC-1 leaderboard entry weighted
    this feature family heavily. Returns (TF_train, TF_test,
    TFIDF_train, TFIDF_test).
    """
    # BUG FIX: the original wrapped all of this in
    # `for i, (headline, body) in enumerate(zip(doc_train, doc_test))`
    # without ever using the loop variables, re-fitting identical
    # vectorizers once per document pair — the loop has been removed.
    # TF
    tDoc = TfidfVectorizer(max_df=0.5, decode_error='ignore',
                           ngram_range=(1, 5), stop_words='english',
                           use_idf=False)
    D_train = tDoc.fit_transform(doc_train)
    D_test = tDoc.transform(doc_test)
    Dfeature_name = tDoc.get_feature_names()
    # BUG FIX: SelectKBest requires an integer k; len(...) * 0.1 is a float.
    ch_best_doc = SelectKBest(chi2, k=int(len(Dfeature_name) * 0.1))
    # NOTE(review): fit_transform is called without target labels here,
    # but chi2 scoring needs y — confirm against the intended usage.
    D_train = ch_best_doc.fit_transform(D_train)
    D_test = ch_best_doc.transform(D_test)
    Dfeature_name = [Dfeature_name[i]
                     for i in ch_best_doc.get_support(indices=True)]
    # TF-IDF
    tiDoc = TfidfVectorizer(max_df=0.5, decode_error='ignore',
                            ngram_range=(1, 5), stop_words='english',
                            use_idf=True, norm='l2')
    D_itrain = tiDoc.fit_transform(doc_train)
    D_itest = tiDoc.transform(doc_test)
    Difeature_name = tiDoc.get_feature_names()
    print(Difeature_name)
    return D_train, D_test, D_itrain, D_itest
def cosSim(self, X, Y):
    """Pairwise cosine similarity between the rows of X and the rows of Y."""
    similarity = cosine_similarity(X, Y)
    return similarity
def Train_Test(F_train, F_test, name):
    """Build (and cache to features/*.npy) the feature matrices for a split.

    Returns (X_train, X_test) loaded from the on-disk cache named by `name`.
    """
    feat_train = Collect_features(F_train)
    feat_test = Collect_features(F_test)
    # Collect the hand-crafted features.
    X_train = feat_train.FeatureList()
    X_test = feat_test.FeatureList()
    print("1")
    train_cache = "features/Train." + name + ".npy"
    test_cache = "features/Test." + name + ".npy"
    # BUG FIX: the original condition was
    # `not isfile(train) and isfile(test)`, so features were only rebuilt
    # when the train cache was missing AND the test cache was present.
    # Rebuild whenever either cache file is missing.
    if not (os.path.isfile(train_cache) and os.path.isfile(test_cache)):
        # TF and TF-IDF of the bodies and of the headlines.
        BTrain, BTest, BiTrain, BiTest = feat_train.tdif(
            feat_train.bod_text, feat_test.bod_text)
        HTrain, HTest, HiTrain, HiTest = feat_train.tdif(
            feat_train.head_text, feat_test.head_text)
        print("2")  # Cosine similarity of the TF-IDF of head and body.
        iTest = feat_train.cosSim(HiTest, BiTest)
        iTrain = feat_train.cosSim(HiTrain, BiTrain)
        print("3")  # Collect the various metrics together.
        # BUG FIX: `np._c` does not exist (AttributeError at runtime);
        # the column-stacking helper is np.c_.
        X_train = np.c_[X_train, HTrain, BTrain, iTrain]
        X_test = np.c_[X_test, HTest, BTest, iTest]
        np.save(train_cache, X_train)
        np.save(test_cache, X_test)
    return np.load(train_cache), np.load(test_cache)
if __name__ == "__main__":
    # ---- Collect the training data ----
    d_data, d_target = CollectData("train")
    d_data = np.array(d_data)
    d_target = np.array(d_target)
    print("A")
    # ---- Collect the competition (final test) data ----
    c_data, c_target = CollectData("competition_test")
    c_data = np.array(c_data)
    c_target = np.array(c_target)
    X_data, X_competition = Train_Test(d_data, c_data, "COMPETITION")
    best_score = 0
    best_fold = None
    try:
        # Reuse a previously trained model if one exists on disk.
        best_fold = joblib.load('trainedML.pkl')
    except FileNotFoundError:
        ss = StratifiedShuffleSplit(n_splits=3, test_size=0.2,
                                    train_size=0.8, random_state=1148925)
        dev = 0
        for train, test in ss.split(d_data, d_target):
            hand, hold = d_data[train], d_data[test]
            hand_stances, hold_stances = d_target[train], d_target[test]
            # Collect the development-set features.
            dev += 1
            # BUG FIX: "DEVELOPMENT_" + dev raised TypeError (str + int).
            X_hand, X_holdout = Train_Test(hand, hold, "DEVELOPMENT_" + str(dev))
            sub_score = 0
            fold = 0
            kf = StratifiedKFold(n_splits=10)
            for train_index, test_index in kf.split(hand, hand_stances):
                F_train, F_test = hand[train_index], hand[test_index]
                y_train, y_test = hand_stances[train_index], hand_stances[test_index]
                # Collect the features for this fold.
                fold += 1
                # BUG FIX: "FOLD_" + fold raised TypeError (str + int).
                X_train, X_test = Train_Test(F_train, F_test, "FOLD_" + str(fold))
                clf = GradientBoostingClassifier(n_estimators=200,
                                                 random_state=None, verbose=True)
                clf.fit(X_train, y_train)
                # Score this fold against the perfect submission.
                predicted = [LABELS[int(a)] for a in clf.predict(X_test)]
                actual = [LABELS[int(a)] for a in y_test]
                fold_score, _ = score_submission(actual, predicted)
                max_fold_score, _ = score_submission(actual, actual)
                score = fold_score / max_fold_score
                # BUG FIX: the original printed str(test) (an index array)
                # rather than the fold number.
                print("Score for fold " + str(fold) + " was - " + str(score))
                sub_score += score
            # Cross-validation analysis: persist the best classifier so far.
            if best_score < sub_score / 10:
                best_score = sub_score / 10
                joblib.dump(clf, 'trainedML.pkl')
                best_fold = clf
                print("CHANGED")
            # Run on the dev set.
            predicted = [LABELS[int(a)] for a in clf.predict(X_holdout)]
            actual = [LABELS[int(a)] for a in hold_stances]
            print("Scores on the dev set")
            report_score(actual, predicted)
            print("")
            print("")
    # Run the best stored model on the competition dataset.
    best_fold = joblib.load('trainedML.pkl')
    predicted = [LABELS[int(a)] for a in best_fold.predict(X_competition)]
    actual = [LABELS[int(a)] for a in c_target]
    print("Scores on the test set")
    report_score(actual, predicted)
# -*- coding: utf-8 -*-
# Gender is stored in the Sex column as 0 = male (男), 1 = female (女);
# this script writes the human-readable value into the gender column.
import pymysql
from bs4 import BeautifulSoup

db = pymysql.connect("172.16.155.12", "root", "myzszx002", "zszx2017", charset="utf8")
cursor = db.cursor()
sql = "select id,Sex from expert2017_copy"
try:
    cursor.execute(sql)
    results = cursor.fetchall()
    # Parameterized statement: avoids the quoting/injection hazards of the
    # original %-interpolated SQL strings and is prepared only once.
    update_sql = "update expert2017_copy set gender = %s where id = %s"
    for row in results:
        gender = '男' if row[1] == 0 else '女'
        try:
            cursor.execute(update_sql, (gender, row[0]))
            db.commit()
        except pymysql.MySQLError as exc:
            # Narrowed from a bare except: report and roll back this row only.
            print("error", exc)
            db.rollback()
except pymysql.MySQLError as exc:
    print("error", exc)
    db.rollback()
finally:
    # Always release the cursor and the connection, even on failure.
    cursor.close()
    db.close()
|
from model.common_dao import sql_execute, _insert_item, _get_item, _update_item_using_id, _delete_item_using_id, _delete_item_using_condition
class ScenarioGroupDao:
    """Data-access helpers for the SCENARIO_GROUP table."""

    # BUG FIX: these methods take no `self` but lacked @staticmethod, so
    # calling them on an instance raised TypeError. Class-level calls
    # (ScenarioGroupDao.create_scenario_group(...)) are unchanged.
    @staticmethod
    def create_scenario_group(scenario_group_name):
        """Insert a new scenario group and return its generated id."""
        condition = {'name': scenario_group_name}
        scenario_group_id = _insert_item('SCENARIO_GROUP', condition)
        return scenario_group_id

    @staticmethod
    def get_scenario_group_list():
        """Return every scenario group row."""
        condition = {}
        return _get_item('SCENARIO_GROUP', condition)

    @staticmethod
    def delete_scenario_group(scenario_group_id):
        """Delete the scenario group with the given primary key."""
        _delete_item_using_id('SCENARIO_GROUP', scenario_group_id)
class ScenarioDao:
    """Data-access helpers for the SCENARIO / SCENARIO_QUERY /
    SCENARIO_RESPONSE tables."""

    # BUG FIX: these methods take no `self` but lacked @staticmethod, so
    # calling them on an instance raised TypeError. Class-level calls
    # are unchanged.
    @staticmethod
    def create_scenario(scenario_group_id):
        """Insert a new scenario in the given group and return its id."""
        condition = {'scenario_group_id': scenario_group_id}
        scenario_id = _insert_item('SCENARIO', condition)
        return scenario_id

    @staticmethod
    def get_scenario_list(scenario_group_id):
        """Return every scenario belonging to the given group."""
        condition = {'scenario_group_id': scenario_group_id}
        return _get_item('SCENARIO', condition)

    @staticmethod
    def get_scenario_query(scenario_id):
        """Return the query rows attached to one scenario."""
        condition = {'scenario_id': scenario_id}
        return _get_item('SCENARIO_QUERY', condition)

    @staticmethod
    def get_scenario_response(scenario_id):
        """Return the response rows attached to one scenario."""
        condition = {'scenario_id': scenario_id}
        return _get_item('SCENARIO_RESPONSE', condition)

    @staticmethod
    def get_entire_scenario_query_response():
        """Return (all query rows, all response rows) across every scenario."""
        condition = {}
        return _get_item('SCENARIO_QUERY', condition), _get_item('SCENARIO_RESPONSE', condition)

    @staticmethod
    def update_scenario(id, scenario_query=None, scenario_response=None):
        """Replace the queries and responses attached to scenario `id`.

        BUG FIX: the mutable default arguments ([]) are replaced with the
        None sentinel so the defaults are not shared between calls.
        """
        scenario_query = [] if scenario_query is None else scenario_query
        scenario_response = [] if scenario_response is None else scenario_response
        condition = {'scenario_id': id}
        # Delete-then-reinsert semantics: old rows are fully replaced.
        _delete_item_using_condition('SCENARIO_QUERY', condition)
        _delete_item_using_condition('SCENARIO_RESPONSE', condition)
        for query in scenario_query:
            query_condition = {'scenario_id': id, 'text': query}
            _insert_item('SCENARIO_QUERY', query_condition)
        for response in scenario_response:
            response_condition = {'scenario_id': id, 'text': response}
            _insert_item('SCENARIO_RESPONSE', response_condition)

    @staticmethod
    def delete_scenario(scenario_id):
        """Delete one scenario row by its primary key."""
        condition = {'id': scenario_id}
        _delete_item_using_condition('SCENARIO', condition)
from socket import *
import time
import sys
import helper
import threading
import random
#python3 sender.py 127.0.0.1 6060 test.txt 100 50 0.6 0.1 5
#header src port num(2byte) dest port num(2byte) seq num(4 byte) ack num(4byte) 1bit syn
#1 bit ack 1 bit fin
#s is the socket for sending and receiving
# s is the single UDP socket used for both sending and receiving.
s = socket(AF_INET, SOCK_DGRAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# Receiver address, drop probability and RNG seed from the command line.
port = int(sys.argv[2])
ip = sys.argv[1]
pdrop = float(sys.argv[7])  # probability an outgoing data segment is "dropped"
seed = int(sys.argv[8])
random.seed(seed)
# The event log (summarized at the end of the transfer).
log = helper.log()
# Condition variable guarding all access to the shared window state.
t_lock = threading.Condition()
# Global sliding window; real mws/timeout are installed in send_data().
window = helper.window(0, 0, 1, 1)
# Transfer statistics reported by log.finish() at the end.
seg_count = 0      # data segments sent
seg_drop = 0       # segments dropped by the simulated loss module
re_trans = 0       # retransmissions performed
dup_ack_count = 0  # duplicate ACKs received
def drop():
    """Simulate packet loss: return True (and count it) with probability pdrop."""
    global seg_drop
    if random.random() > pdrop:
        return False
    seg_drop += 1
    return True
def hand_shake(max_window):
    """Perform the SYN / SYN-ACK / ACK opening handshake.

    Exits the process if the receiver does not reply with a SYN.
    """
    syn_header = helper.Header(0, 0, 1, 0, 0, int(max_window))
    print("start handshake")
    s.sendto(syn_header.bits(), (ip, int(port)))
    log.add_log("snd", "S", 0, 0, 0)
    try:
        reply, address = s.recvfrom(1024)
        rev = helper.bits_to_header(reply)
        log.add_log("rcv", "SA", rev.seq_num, 0, rev.ack_num)
        # The reply must carry the SYN flag; anything else aborts.
        if(rev.syn != 1):
            print("handshake fail")
            sys.exit()
        # Acknowledge the SYN-ACK to complete the handshake.
        ack_header = helper.Header(rev.ack_num, rev.seq_num + 1, 0, 1, 0, max_window)
        s.sendto(ack_header.bits(), (ip, port))
        log.add_log("snd", "A", ack_header.seq_num, 0, ack_header.ack_num)
    except:
        # NOTE(review): this bare except also catches the SystemExit raised
        # above — consider narrowing to OSError.
        print("can not establish the connection")
        sys.exit()
#take the data mws mss and perform the send
# Take the data, mws and mss, and perform the send.
def send_data(data, mws, mss, timeout):
    """Send `data` via receiver/sender/timeout daemon threads; block until done.

    Returns the final (seq_num, ack_num) pair for the closing FIN exchange.
    """
    finish = []  # shared completion flag: send_handler appends to signal "done"
    global window
    window = helper.window(mws, timeout, 1, 1)
    # Thread that consumes ACKs from the peer.
    recv_thread = threading.Thread(name="RecvHandler", target=recv_handler, args=[finish])
    recv_thread.daemon = True
    recv_thread.start()
    # Thread that pushes new and retransmitted segments.
    send_thread = threading.Thread(name="SendHandler", target=send_handler, args=[data, mss, finish])
    send_thread.daemon = True
    send_thread.start()
    # Thread that moves timed-out segments onto the retransmit queue.
    time_out_thread = threading.Thread(name="TimeOutHandler", target=time_out_handler, args=[timeout])
    time_out_thread.daemon = True
    time_out_thread.start()
    # Poll until the send handler reports completion.
    while len(finish) == 0:
        time.sleep(0.1)
    return (window.seq_num, window.ack_num)
def time_out_handler(timeout):
    """Background loop: when the oldest un-ACKed packet has been outstanding
    longer than `timeout` milliseconds, move it to the retransmit queue."""
    while True:
        with t_lock:
            if(len(window.current_window) > 0):
                check_pkt = window.current_window[0]
                # NOTE(review): time.process_time() measures CPU time, not
                # wall-clock time, so timeouts stall while the process is
                # blocked; time.monotonic() is likely intended. time_sent is
                # also stamped with process_time() in send_handler, so any
                # change must be made consistently in both places.
                if (time.process_time() - check_pkt.time_sent)*1000 >= timeout:
                    packet = window.current_window.pop(0)
                    window.space_left += packet.size
                    window.retransmit.append(packet)
            # BUG FIX: the original wrote `t_lock.notify` without the call
            # parentheses, which evaluated the bound method and discarded it
            # — waiting threads were never actually notified.
            t_lock.notify()
def recv_handler(finish):
    """ACK-consuming loop: slide the window forward on new ACKs and queue a
    fast retransmit after three duplicate ACKs for the same sequence number."""
    global dup_ack_count
    global window
    last_ack = 1
    dup_count = 0
    while len(finish) == 0:
        reply, address = s.recvfrom(1024)
        header = helper.bits_to_header(reply)
        # FIN segments are handled by close_connection, not here.
        if header.fin != 1:
            with t_lock:
                log.add_log("rcv", "A", header.seq_num, 0, header.ack_num)
                if header.ack_num == last_ack:
                    # Duplicate ACK: count it toward the fast-retransmit
                    # threshold and toward the final statistics.
                    dup_count += 1
                    dup_ack_count += 1
                    if dup_count == 3 and len(window.current_window) > 0:
                        # Three duplicate ACKs: the first packet in the
                        # window is assumed lost — requeue it.
                        packet = window.current_window[0]
                        if packet.header.seq_num == header.ack_num:
                            window.current_window.pop(0)
                            window.space_left += packet.size
                            window.retransmit.append(packet)
                            dup_count = 0
                else:
                    # New cumulative ACK: drop everything it covers.
                    window.rec_ack(header.ack_num)
                    last_ack = header.ack_num
                t_lock.notify()
def send_handler(data, mss, finish):
    """Sending loop: drain the retransmit queue first, then send new
    mss-sized segments while window space remains; signal completion by
    appending to `finish` once all data is sent and acknowledged."""
    base = 0  # offset of the next unsent byte in `data`
    current_packet_size = mss
    global window
    global t_lock
    global seg_count
    global re_trans
    while len(finish) == 0:
        with t_lock:
            if len(window.retransmit) != 0:
                # Retransmissions take priority over new data.
                to_retransmit = window.retransmit.pop(0)
                if window.space_left >= to_retransmit.size:
                    if not drop():
                        s.sendto(to_retransmit.to_bits(), (ip, port))
                        log.add_log("snd", "D", to_retransmit.header.seq_num, to_retransmit.size, to_retransmit.header.ack_num)
                    else:
                        # Simulated loss: log the drop, still treat as sent.
                        log.add_log("drop", "D", to_retransmit.header.seq_num, to_retransmit.size, to_retransmit.header.ack_num)
                    to_retransmit.time_sent = time.process_time()
                    re_trans += 1
                    # Re-insert at the front: it is the oldest outstanding packet.
                    window.current_window.insert(0, to_retransmit)
                    window.space_left -= to_retransmit.size
                else:
                    # No room yet: put it back and wait for ACKs.
                    window.retransmit.insert(0, to_retransmit)
            else:
                if base < (len(data) - 1):
                    # Size of the next segment: full mss or the final remainder.
                    if (base + mss) < len(data):
                        this_size = mss
                    else:
                        this_size = len(data) - base
                    if window.space_left >= this_size:
                        header = helper.Header(window.seq_num, window.ack_num, 0, 0, 0, 0)
                        pac = helper.packet(header, this_size, data[base:base+this_size], time.process_time())
                        if not drop():
                            s.sendto(pac.to_bits(), (ip, port))
                            log.add_log("snd", "D", header.seq_num, pac.size, header.ack_num)
                        else:
                            log.add_log("drop", "D", header.seq_num, pac.size, header.ack_num)
                        window.add_packet(pac)
                        seg_count += 1
                        base += this_size
                        window.seq_num += this_size
            # All data sent and every segment acknowledged: signal completion.
            if base >= len(data) - 1 and len(window.current_window) == 0 and len(window.retransmit) == 0:
                finish.append(1)
            t_lock.notify()
def close_connection(fin_seq):
    """Tear down the connection: send FIN, await FIN-ACK, send the final ACK."""
    print(f"this is fin_seq{fin_seq}")
    # Send FIN carrying the final sequence/ack numbers from the transfer.
    header = helper.Header(fin_seq[0], fin_seq[1], 0, 0, 1, 0)
    s.sendto(header.bits(), (ip, port))
    log.add_log("snd", "F", header.seq_num, 0, header.ack_num)
    # Wait for the receiver's FIN-ACK.
    reply, address = s.recvfrom(1024)
    rcv_header = helper.bits_to_header(reply)
    log.add_log("rcv", "FA", rcv_header.seq_num, 0, rcv_header.ack_num)
    print(f"the f is {rcv_header.fin} the a is {rcv_header.ack} seq is {rcv_header.seq_num} ack is {rcv_header.ack_num}")
    # Acknowledge the FIN-ACK, completing the teardown.
    final_header = helper.Header(rcv_header.ack_num, rcv_header.seq_num + 1, 0, 1, 0, 0)
    s.sendto(final_header.bits(), (ip, port))
    log.add_log("snd", "A", final_header.seq_num, 0, final_header.ack_num)
def main():
    """Parse CLI arguments, perform the handshake, send the file, close."""
    file_to_send = sys.argv[3]
    mws = int(sys.argv[4])
    mss = int(sys.argv[5])
    timeout = sys.argv[6]
    hand_shake(mws)
    print("handshake done")
    # Open the file and read it entirely into memory for sending.
    try:
        # Context manager guarantees the descriptor is closed even on error
        # (the original leaked it if read() raised).
        with open(file_to_send, "r") as file_descriptor:
            buffer = file_descriptor.read()
        size = len(buffer)
    except OSError:
        # Narrowed from a bare except so programming errors are not hidden.
        sys.exit("can not read file " + file_to_send)
    fin_seq = send_data(buffer, int(mws), int(mss), float(timeout))
    close_connection(fin_seq)
    log.finish(size, seg_count, seg_drop, re_trans, dup_ack_count)
    print("all finish")


if __name__=="__main__":
    main()
|
'''
Task: You are given the year, and you have to write a function to check if the year is leap or not.
--------------------------------------------------------
Sample Input: 1990
Sample Output: False
'''
def is_leap(year):
    """Return True if `year` is a Gregorian leap year, else False."""
    # Divisible by 400 → leap; otherwise divisible by 4 but not by 100.
    return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)
# Script entry: read a year from stdin and print the leap-year verdict.
year = int(input())
print(is_leap(year))
from typing import Optional, List, Union
from dataclasses import dataclass
from enum import Enum
from elftools.construct.lib import Container
from elftools.common.utils import struct_parse
class SymbolType(Enum):
    # ELF st_info type values (the subset this tool emits).
    FUNCTION = 'STT_FUNC'
    VARIABLE = 'STT_OBJECT'
    NOTYPE = 'STT_NOTYPE'
class VisibilityType(Enum):
    # ELF st_other visibility values (only default visibility is used).
    DEFAULT = 'STV_DEFAULT'
class BindType(Enum):
    # ELF st_info binding values.
    LOCAL = 'STB_LOCAL'
    GLOBAL = 'STB_GLOBAL'
@dataclass
class Symbol:
    """In-memory representation of one ELF symbol-table entry."""
    name: Optional[bytes] = None          # symbol name as stored in the strtab
    type_: Optional[SymbolType] = None    # st_info type
    value: Optional[int] = None           # st_value (virtual address)
    section_idx: Optional[int] = None     # st_shndx; None = derive from value
    size: int = 0                         # st_size
    visibility: Union[VisibilityType, int] = VisibilityType.DEFAULT
    bind: Union[BindType, int] = BindType.GLOBAL

    def serialize(self, helper, strtab_list, initial_size):
        """Build the raw Elf_Sym bytes for this symbol.

        `strtab_list` is the ordered list of names in the string table and
        `initial_size` the byte offset where those names begin.
        """
        def enum_union_val(var: Union[Enum, int]):
            # Unwrap Enum members; plain ints pass through untouched.
            if isinstance(var, Enum):
                return var.value
            return var
        c = Container()
        c['st_name'] = self.calculate_strtab_idx(strtab_list, initial_size) if self.name else 0
        c['st_value'] = self.value
        c['st_size'] = self.size
        c['st_info'] = Container()
        c['st_info']['bind'] = enum_union_val(self.bind)
        c['st_info']['type'] = enum_union_val(self.type_)
        c['st_other'] = Container()
        c['st_other']['visibility'] = enum_union_val(self.visibility)
        # BUG FIX: the original used `if not self.section_idx`, which treated
        # a legitimate explicit section index of 0 (SHN_UNDEF) as "unset" and
        # recomputed it from the address; compare against None instead.
        if self.section_idx is not None:
            c['st_shndx'] = self.section_idx
        else:
            c['st_shndx'] = helper.va_to_section_idx(self.value)
        return helper.file.structs.Elf_Sym.build(c)

    def calculate_strtab_idx(self, strings: List[str], initial_size: int):
        """Byte offset of self.name inside the serialized string table.

        Each preceding string contributes len(s) bytes plus one NUL terminator.
        """
        idx = strings.index(self.name)
        return sum(map(len, strings[:idx])) + idx + initial_size
|
class Solution:
    def alphabetBoardPath(self, target):
        """Return the move string ("U/D/L/R" plus "!" per letter) that
        spells `target` on the 5-wide alphabet board starting from 'a'.

        Moves are emitted in L, U, D, R order so that 'z' (the lone cell
        on row 5) is always entered and left safely.

        Time Complexity: O(N)
        Space Complexity: O(N)
        """
        # Board position of each letter: 5 columns, row-major.
        pos = {ch: divmod(idx, 5) for idx, ch in enumerate("abcdefghijklmnopqrstuvwxyz")}
        cur_r, cur_c = 0, 0
        out = []
        for ch in target:
            r, c = pos[ch]
            out.append("L" * max(0, cur_c - c))
            out.append("U" * max(0, cur_r - r))
            out.append("D" * max(0, r - cur_r))
            out.append("R" * max(0, c - cur_c))
            out.append("!")
            cur_r, cur_c = r, c
        return "".join(out)
def stringToString(input):
    """Decode a JSON string literal (e.g. '"abc"') into a Python str."""
    import json
    decoded = json.loads(input)
    return decoded
def main():
    """Read targets from stdin (UTF-8, one per line) and print the board path."""
    import sys
    import io

    def readlines():
        # Decode stdin as UTF-8 and yield each line without its newline.
        for line in io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8"):
            yield line.strip("\n")

    lines = readlines()
    # Processes only the first line, then stops (break on success or EOF).
    while True:
        try:
            line = next(lines)
            target = line
            ret = Solution().alphabetBoardPath(target)
            out = ret
            print(out)
            break
        except StopIteration:
            break


if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.